* [PATCH][RFC] FPU LWP 4/5: enable LWP for SVM guest
From: Wei Huang <wei.huang2@amd.com>
Date: 2011-04-14 20:39 UTC
To: xen-devel@lists.xensource.com
This patch enables LWP support for SVM guests. It saves and restores the
guest's LWP_CFG MSR on each VCPU context switch. It also handles the LWP
CPUID leaf (0x8000001c) and the LWP MSRs.
Signed-off-by: Wei Huang <wei.huang2@amd.com>
# HG changeset patch
# User Wei Huang <wei.huang2@amd.com>
# Date 1302812238 18000
# Branch lwp3
# Node ID ee8656e10937121d2780862351a245ed874273fa
# Parent bccbc5ecf62e49482c06149bb94dfc3dee8882f1
SVM: enable LWP for SVM guest
This patch enables LWP support for SVM guests. It saves and restores the guest's LWP_CFG MSR on each VCPU context switch. It also handles the LWP CPUID leaf (0x8000001c) and the LWP MSRs.
Signed-off-by: Wei Huang <wei.huang2@amd.com>
diff -r bccbc5ecf62e -r ee8656e10937 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Apr 14 15:17:18 2011 -0500
@@ -680,6 +680,26 @@
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
+static void svm_lwp_save(struct vcpu *v)
+{
+ if ( cpu_has_lwp )
+ {
+ rdmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+ wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
+ /* Disable LWP for next VCPU */
+ wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0);
+ }
+}
+
+static void svm_lwp_load(struct vcpu *v)
+{
+ if ( cpu_has_lwp )
+ {
+ /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor */
+ wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+ }
+}
+
static void svm_ctxt_switch_from(struct vcpu *v)
{
int cpu = smp_processor_id();
@@ -688,6 +708,7 @@
svm_save_dr(v);
vpmu_save(v);
+ svm_lwp_save(v);
svm_sync_vmcb(v);
svm_vmload(per_cpu(root_vmcb, cpu));
@@ -731,6 +752,7 @@
svm_vmload(vmcb);
vmcb->cleanbits.bytes = 0;
vpmu_load(v);
+ svm_lwp_load(v);
if ( cpu_has_rdtscp )
wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
@@ -1100,6 +1122,24 @@
if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
__clear_bit(X86_FEATURE_APIC & 31, edx);
break;
+ case 0x8000001c:
+ {
+ /* LWP capability CPUID */
+ uint64_t lwp_cfg = v->arch.hvm_svm.guest_lwp_cfg;
+
+ if ( cpu_has_lwp )
+ {
+ if ( !(v->arch.xcr0 & XSTATE_LWP) )
+ {
+ *eax = 0x0;
+ break;
+ }
+
+ /* turn on avail bit and other features specified in lwp_cfg */
+ *eax = (*edx & lwp_cfg) | 0x00000001;
+ }
+ break;
+ }
default:
break;
}
@@ -1189,6 +1229,10 @@
case MSR_IA32_LASTINTTOIP:
*msr_content = vmcb_get_lastinttoip(vmcb);
+ break;
+
+ case MSR_AMD64_LWP_CFG:
+ *msr_content = v->arch.hvm_svm.guest_lwp_cfg;
break;
case MSR_K7_PERFCTR0:
@@ -1287,6 +1331,24 @@
case MSR_IA32_LASTINTTOIP:
vmcb_set_lastinttoip(vmcb, msr_content);
+ break;
+
+ case MSR_AMD64_LWP_CFG:
+ if ( cpu_has_lwp )
+ {
+ unsigned int eax, ebx, ecx, edx;
+ uint32_t msr_low;
+
+ hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+ msr_low = (uint32_t)msr_content;
+
+ /* Generate #GP if the guest tries to turn on unsupported features. */
+ if ( msr_low & ~edx )
+ goto gpf;
+
+ wrmsrl(msr, msr_content);
+ v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+ }
break;
case MSR_K7_PERFCTR0:
diff -r bccbc5ecf62e -r ee8656e10937 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/arch/x86/hvm/svm/vmcb.c Thu Apr 14 15:17:18 2011 -0500
@@ -120,6 +120,8 @@
svm_disable_intercept_for_msr(v, MSR_LSTAR);
svm_disable_intercept_for_msr(v, MSR_STAR);
svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+ if ( cpu_has_lwp )
+ svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
vmcb->_iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap);
diff -r bccbc5ecf62e -r ee8656e10937 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/include/asm-x86/cpufeature.h Thu Apr 14 15:17:18 2011 -0500
@@ -208,6 +208,8 @@
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_lwp boot_cpu_has(X86_FEATURE_LWP)
+
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_rdtscp boot_cpu_has(X86_FEATURE_RDTSCP)
diff -r bccbc5ecf62e -r ee8656e10937 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Thu Apr 14 15:17:18 2011 -0500
@@ -507,6 +507,9 @@
uint64_t guest_sysenter_cs;
uint64_t guest_sysenter_esp;
uint64_t guest_sysenter_eip;
+
+ /* AMD lightweight profiling MSR */
+ uint64_t guest_lwp_cfg;
};
struct vmcb_struct *alloc_vmcb(void);
diff -r bccbc5ecf62e -r ee8656e10937 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/include/asm-x86/msr-index.h Thu Apr 14 15:17:18 2011 -0500
@@ -253,6 +253,10 @@
#define MSR_AMD_PATCHLEVEL 0x0000008b
#define MSR_AMD_PATCHLOADER 0xc0010020
+/* AMD Lightweight Profiling MSRs */
+#define MSR_AMD64_LWP_CFG 0xc0000105
+#define MSR_AMD64_LWP_CBADDR 0xc0000106
+
/* AMD OS Visible Workaround MSRs */
#define MSR_AMD_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD_OSVW_STATUS 0xc0010141
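For reference, here is a hedged guest-side sketch of the interface this
patch exposes. It is illustrative only and not part of the patch; cpuid()
and wrmsrl() stand in for whatever primitives the guest kernel provides,
and the flow follows the patch's own checks (CPUID 0x8000001c EAX bit 0
signals availability, EDX carries the supported-feature mask, and setting
LWP_CFG bits outside that mask raises #GP):

    /* Guest-side sketch (illustrative): probe the LWP CPUID leaf that
     * the patch handles, then enable the advertised features via the
     * intercepted LWP_CFG MSR. Assumes XSTATE_LWP has already been set
     * in XCR0, since the CPUID handler above reports EAX = 0 otherwise. */
    uint32_t eax, ebx, ecx, edx;
    cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
    if ( eax & 1 )                       /* LWP available bit */
        wrmsrl(MSR_AMD64_LWP_CFG, edx);  /* enable all supported features;
                                            any bit outside EDX would #GP */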
* Re: [PATCH][RFC] FPU LWP 4/5: enable LWP for SVM guest
From: Jan Beulich
Date: 2011-04-15 9:17 UTC
To: Wei Huang; Cc: xen-devel@lists.xensource.com
>>> On 14.04.11 at 22:39, Wei Huang <wei.huang2@amd.com> wrote:
>+static void svm_lwp_save(struct vcpu *v)
>+{
>+ if ( cpu_has_lwp )
>+ {
>+ rdmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
>+ wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
>+ /* Disable LWP for next VCPU */
>+ wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0);
>+ }
>+}
>+
>+static void svm_lwp_load(struct vcpu *v)
>+{
>+ if ( cpu_has_lwp )
>+ {
>+ /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor */
>+ wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
>+ }
>+}
I think I had pointed this out before - to avoid these MSR accesses
for guests not using LWP, you should track whether the vCPU has
LWP support enabled (e.g. non-zero written into LWP_CFG) and
key off of that instead of cpu_has_lwp.
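[A minimal sketch of that suggestion, illustrative only and reusing the
field names from the patch above; it assumes the WRMSR intercept keeps
guest_lwp_cfg in sync with what the guest wrote, as in the patch:

    /* Key off the guest's saved LWP_CFG value instead of cpu_has_lwp,
     * so vCPUs that never enable LWP skip the MSR accesses entirely. */
    static void svm_lwp_save(struct vcpu *v)
    {
        if ( v->arch.hvm_svm.guest_lwp_cfg )
        {
            rdmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
            wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
            /* Disable LWP for next VCPU */
            wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0);
        }
    }

    static void svm_lwp_load(struct vcpu *v)
    {
        /* Only LWP_CFG is reloaded. LWP_CBADDR is reloaded via xrstor. */
        if ( v->arch.hvm_svm.guest_lwp_cfg )
            wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
    }
]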
Jan