xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] FPU LWP 8/8: LWP: Add LWP support for SVM guests
@ 2011-05-03 20:18 Wei Huang
  0 siblings, 0 replies; only message in thread
From: Wei Huang @ 2011-05-03 20:18 UTC (permalink / raw)
  To: 'xen-devel@lists.xensource.com', keir, Jan Beulich

[-- Attachment #1: Type: text/plain, Size: 334 bytes --]

LWP: Add LWP support for SVM guests

This patch enables SVM to handle LWP-related MSRs and CPUID. It 
intercepts guest reads/writes to LWP_CFG. It also saves/restores LWP_CFG 
when guests touch this MSR. The LWP_CBADDR MSR is not intercepted 
because this MSR is handled by xsave/xrstor.

Signed-off-by: Wei Huang <wei.huang2@amd.com>



[-- Attachment #2: lwp8.txt --]
[-- Type: text/plain, Size: 6095 bytes --]

# HG changeset patch
# User Wei Huang <wei.huang2@amd.com>
# Date 1304449528 18000
# Node ID 8b4b5cad0089f9c215e84dbcf0145ea533d3ee40
# Parent  dbd907bf172c399fc432b973a6668f1ba8545292
LWP: Add LWP support for SVM guests

This patch enables SVM to handle LWP-related MSRs and CPUID. It intercepts guest reads/writes to LWP_CFG. It also saves/restores LWP_CFG when guests touch this MSR. The LWP_CBADDR MSR is not intercepted because this MSR is handled by xsave/xrstor.

Signed-off-by: Wei Huang <wei.huang2@amd.com>

diff -r dbd907bf172c -r 8b4b5cad0089 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Tue May 03 14:01:30 2011 -0500
+++ b/xen/arch/x86/hvm/svm/svm.c	Tue May 03 14:05:28 2011 -0500
@@ -58,7 +58,8 @@
 #include <asm/hvm/trace.h>
 #include <asm/hap.h>
 #include <asm/apic.h>
-#include <asm/debugger.h>       
+#include <asm/debugger.h>
+#include <asm/xstate.h>
 
 u32 svm_feature_flags;
 
@@ -695,6 +696,44 @@
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
+static inline void svm_lwp_save(struct vcpu *v)
+{
+    /* Don't mess up with other guests. Disable LWP for next VCPU. */
+    if ( v->arch.hvm_svm.guest_lwp_cfg )
+        wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
+}
+
+static inline void svm_lwp_load(struct vcpu *v)
+{
+    /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */
+   if ( v->arch.hvm_svm.guest_lwp_cfg ) 
+       wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+}
+
+/* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
+static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
+{
+    unsigned int eax, ebx, ecx, edx;
+    uint32_t msr_low;
+    
+    if ( cpu_has_lwp )
+    {
+        hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+        msr_low = (uint32_t)msr_content;
+        
+        /* generate #GP if guest tries to turn on unsupported features. */
+        if ( msr_low & ~edx)
+            return -1;
+        
+        wrmsrl(MSR_AMD64_LWP_CFG, msr_content);
+        /* CPU might automatically correct reserved bits. So read it back. */
+        rdmsrl(MSR_AMD64_LWP_CFG, msr_content);
+        v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+    }
+
+    return 0;
+}
+
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
@@ -703,6 +742,7 @@
 
     svm_save_dr(v);
     vpmu_save(v);
+    svm_lwp_save(v);
 
     svm_sync_vmcb(v);
     svm_vmload(per_cpu(root_vmcb, cpu));
@@ -746,6 +786,7 @@
     svm_vmload(vmcb);
     vmcb->cleanbits.bytes = 0;
     vpmu_load(v);
+    svm_lwp_load(v);
 
     if ( cpu_has_rdtscp )
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
@@ -1120,6 +1161,24 @@
         if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
             __clear_bit(X86_FEATURE_APIC & 31, edx);
         break;
+    case 0x8000001c: 
+    {
+        /* LWP capability CPUID */
+        uint64_t lwp_cfg = v->arch.hvm_svm.guest_lwp_cfg;
+
+        if ( cpu_has_lwp )
+        {
+            if ( !(v->arch.xcr0 & XSTATE_LWP) )
+           {
+                *eax = 0x0;
+                break;
+            }
+
+            /* turn on available bit and other features specified in lwp_cfg */
+            *eax = (*edx & lwp_cfg) | 0x00000001;
+        }
+        break;
+    }
     default:
         break;
     }
@@ -1227,6 +1286,10 @@
         *msr_content = vmcb_get_lastinttoip(vmcb);
         break;
 
+    case MSR_AMD64_LWP_CFG:
+        *msr_content = v->arch.hvm_svm.guest_lwp_cfg;
+        break;
+
     case MSR_K7_PERFCTR0:
     case MSR_K7_PERFCTR1:
     case MSR_K7_PERFCTR2:
@@ -1323,6 +1386,11 @@
 
     case MSR_IA32_LASTINTTOIP:
         vmcb_set_lastinttoip(vmcb, msr_content);
+        break;
+
+    case MSR_AMD64_LWP_CFG:
+        if ( svm_update_lwp_cfg(v, msr_content) < 0 )
+            goto gpf;
         break;
 
     case MSR_K7_PERFCTR0:
diff -r dbd907bf172c -r 8b4b5cad0089 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c	Tue May 03 14:01:30 2011 -0500
+++ b/xen/arch/x86/hvm/svm/vmcb.c	Tue May 03 14:05:28 2011 -0500
@@ -120,6 +120,11 @@
     svm_disable_intercept_for_msr(v, MSR_LSTAR);
     svm_disable_intercept_for_msr(v, MSR_STAR);
     svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+
+    /* LWP_CBADDR MSR is saved and restored by FPU code. So SVM doesn't need to
+     * intercept it. */
+    if ( cpu_has_lwp )
+        svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
 
     vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
     vmcb->_iopm_base_pa  = (u64)virt_to_maddr(hvm_io_bitmap);
diff -r dbd907bf172c -r 8b4b5cad0089 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h	Tue May 03 14:01:30 2011 -0500
+++ b/xen/include/asm-x86/cpufeature.h	Tue May 03 14:05:28 2011 -0500
@@ -208,6 +208,8 @@
 
 #define cpu_has_xsave           boot_cpu_has(X86_FEATURE_XSAVE)
 
+#define cpu_has_lwp             boot_cpu_has(X86_FEATURE_LWP)
+
 #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 
 #define cpu_has_rdtscp          boot_cpu_has(X86_FEATURE_RDTSCP)
diff -r dbd907bf172c -r 8b4b5cad0089 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h	Tue May 03 14:01:30 2011 -0500
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Tue May 03 14:05:28 2011 -0500
@@ -512,6 +512,9 @@
     uint64_t guest_sysenter_cs;
     uint64_t guest_sysenter_esp;
     uint64_t guest_sysenter_eip;
+    
+    /* AMD lightweight profiling MSR */
+    uint64_t guest_lwp_cfg;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
diff -r dbd907bf172c -r 8b4b5cad0089 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h	Tue May 03 14:01:30 2011 -0500
+++ b/xen/include/asm-x86/msr-index.h	Tue May 03 14:05:28 2011 -0500
@@ -253,6 +253,10 @@
 #define MSR_AMD_PATCHLEVEL		0x0000008b
 #define MSR_AMD_PATCHLOADER		0xc0010020
 
+/* AMD Lightweight Profiling MSRs */
+#define MSR_AMD64_LWP_CFG		0xc0000105
+#define MSR_AMD64_LWP_CBADDR		0xc0000106
+
 /* AMD OS Visible Workaround MSRs */
 #define MSR_AMD_OSVW_ID_LENGTH          0xc0010140
 #define MSR_AMD_OSVW_STATUS             0xc0010141

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2011-05-03 20:18 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-05-03 20:18 [PATCH] FPU LWP 8/8: LWP: Add LWP support for SVM guests Wei Huang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).