* [PATCH v7] KVM: VMX: Enable XSAVE/XRSTOR for guest
From: Sheng Yang @ 2010-05-31 11:54 UTC
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Dexuan Cui, Sheng Yang
From: Dexuan Cui <dexuan.cui@intel.com>
This patch enables the guest to use the XSAVE/XRSTOR instructions.
We assume that host_xcr0 has all the feature bits set that the host OS supports.
We load xcr0 the same way we handle the fpu: as late as we can.
Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
Changes from v6:
Make kvm_set_xcr() generic.
arch/x86/include/asm/kvm_host.h | 2 +
arch/x86/include/asm/vmx.h | 1 +
arch/x86/kvm/kvm_cache_regs.h | 6 ++
arch/x86/kvm/vmx.c | 13 ++++
arch/x86/kvm/x86.c | 126 ++++++++++++++++++++++++++++++++++++--
include/linux/kvm_host.h | 2 +-
6 files changed, 142 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d08bb4a..271487b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -302,6 +302,7 @@ struct kvm_vcpu_arch {
} update_pte;
struct fpu guest_fpu;
+ u64 xcr0;
gva_t mmio_fault_cr2;
struct kvm_pio_request pio;
@@ -605,6 +606,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
+void kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9e6779f..346ea66 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -266,6 +266,7 @@ enum vmcs_field {
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_WBINVD 54
+#define EXIT_REASON_XSETBV 55
/*
* Interruption-information format
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index d2a98f8..6491ac8 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -71,4 +71,10 @@ static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
return kvm_read_cr4_bits(vcpu, ~0UL);
}
+static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
+{
+ return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
+ | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+}
+
#endif
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 99ae513..8649627 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -36,6 +36,8 @@
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
+#include <asm/i387.h>
+#include <asm/xcr.h>
#include "trace.h"
@@ -3354,6 +3356,16 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_xsetbv(struct kvm_vcpu *vcpu)
+{
+ u64 new_bv = kvm_read_edx_eax(vcpu);
+ u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+
+ kvm_set_xcr(vcpu, index, new_bv);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
@@ -3632,6 +3644,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
+ [EXIT_REASON_XSETBV] = handle_xsetbv,
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
[EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7be1d36..0fcd8de 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -64,6 +64,7 @@
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
+ | X86_CR4_OSXSAVE \
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -149,6 +150,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+u64 __read_mostly host_xcr0;
+
+static inline u32 bit(int bitno)
+{
+ return 1 << (bitno & 31);
+}
+
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
@@ -473,6 +481,58 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
+int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+{
+ u64 xcr0;
+
+ /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
+ if (index != XCR_XFEATURE_ENABLED_MASK)
+ return 1;
+ xcr0 = xcr;
+ if (kvm_x86_ops->get_cpl(vcpu) != 0)
+ return 1;
+ if (!(xcr0 & XSTATE_FP))
+ return 1;
+ if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
+ return 1;
+ if (xcr0 & ~host_xcr0)
+ return 1;
+ vcpu->arch.xcr0 = xcr0;
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+ return 0;
+}
+
+void kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+{
+ if (__kvm_set_xcr(vcpu, index, xcr))
+ kvm_inject_gp(vcpu, 0);
+}
+EXPORT_SYMBOL_GPL(kvm_set_xcr);
+
+static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+}
+
+static void update_cpuid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ if (!best)
+ return;
+
+ /* Update OSXSAVE bit */
+ if (cpu_has_xsave && best->function == 0x1) {
+ best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
+ best->ecx |= bit(X86_FEATURE_OSXSAVE);
+ }
+}
+
int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
@@ -481,6 +541,9 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (cr4 & CR4_RESERVED_BITS)
return 1;
+ if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+ return 1;
+
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
@@ -497,6 +560,9 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if ((cr4 ^ old_cr4) & pdptr_bits)
kvm_mmu_reset_context(vcpu);
+ if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
+ update_cpuid(vcpu);
+
return 0;
}
@@ -665,11 +731,6 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
-static inline u32 bit(int bitno)
-{
- return 1 << (bitno & 31);
-}
-
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -1813,6 +1874,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
r = 0;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
+ update_cpuid(vcpu);
out_free:
vfree(cpuid_entries);
@@ -1836,6 +1898,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
+ update_cpuid(vcpu);
return 0;
out:
@@ -1916,7 +1979,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
0 /* Reserved, DCA */ | F(XMM4_1) |
F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
- 0 /* Reserved, XSAVE, OSXSAVE */;
+ 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */;
/* cpuid 0x80000001.ecx */
const u32 kvm_supported_word6_x86_features =
F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
@@ -1931,7 +1994,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
switch (function) {
case 0:
- entry->eax = min(entry->eax, (u32)0xb);
+ entry->eax = min(entry->eax, (u32)0xd);
break;
case 1:
entry->edx &= kvm_supported_word0_x86_features;
@@ -1989,6 +2052,20 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
+ case 0xd: {
+ int i;
+
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ for (i = 1; *nent < maxnent; ++i) {
+ if (entry[i - 1].eax == 0 && i != 2)
+ break;
+ do_cpuid_1_ent(&entry[i], function, i);
+ entry[i].flags |=
+ KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ ++*nent;
+ }
+ break;
+ }
case KVM_CPUID_SIGNATURE: {
char signature[12] = "KVMKVMKVM\0\0";
u32 *sigptr = (u32 *)signature;
@@ -4124,6 +4201,9 @@ int kvm_arch_init(void *opaque)
perf_register_guest_info_callbacks(&kvm_guest_cbs);
+ if (cpu_has_xsave)
+ host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+
return 0;
out:
@@ -4522,6 +4602,24 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
}
}
+static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+ !vcpu->guest_xcr0_loaded) {
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+ vcpu->guest_xcr0_loaded = 1;
+ }
+}
+
+static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+ vcpu->guest_xcr0_loaded) {
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+ vcpu->guest_xcr0_loaded = 0;
+ }
+}
+
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
@@ -4567,6 +4665,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
+ kvm_load_guest_xcr0(vcpu);
atomic_set(&vcpu->guest_mode, 1);
smp_wmb();
@@ -5118,6 +5217,11 @@ void fx_init(struct kvm_vcpu *vcpu)
fpu_alloc(&vcpu->arch.guest_fpu);
fpu_finit(&vcpu->arch.guest_fpu);
+ /*
+ * Ensure guest xcr0 is valid for loading
+ */
+ vcpu->arch.xcr0 = XSTATE_FP;
+
vcpu->arch.cr0 |= X86_CR0_ET;
}
EXPORT_SYMBOL_GPL(fx_init);
@@ -5132,6 +5236,12 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
if (vcpu->guest_fpu_loaded)
return;
+ /*
+ * Restore all possible states in the guest,
+ * and assume host would use all available bits.
+ * Guest xcr0 would be loaded later.
+ */
+ kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
unlazy_fpu(current);
fpu_restore_checking(&vcpu->arch.guest_fpu);
@@ -5140,6 +5250,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
+ kvm_put_guest_xcr0(vcpu);
+
if (!vcpu->guest_fpu_loaded)
return;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4e8fdbf..3784d58 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -88,7 +88,7 @@ struct kvm_vcpu {
int srcu_idx;
int fpu_active;
- int guest_fpu_loaded;
+ int guest_fpu_loaded, guest_xcr0_loaded;
wait_queue_head_t wq;
int sigset_active;
sigset_t sigset;
--
1.7.0.1
* Re: [PATCH v7] KVM: VMX: Enable XSAVE/XRSTOR for guest
From: Avi Kivity @ 2010-06-01 8:34 UTC
To: Sheng Yang; +Cc: Marcelo Tosatti, kvm, Dexuan Cui
On 05/31/2010 02:54 PM, Sheng Yang wrote:
> From: Dexuan Cui <dexuan.cui@intel.com>
>
> This patch enables the guest to use the XSAVE/XRSTOR instructions.
>
> We assume that host_xcr0 has all the feature bits set that the host OS supports.
>
> We load xcr0 the same way we handle the fpu: as late as we can.
>
>
Reviewed-by: Avi Kivity <avi@redhat.com>
--
error compiling committee.c: too many arguments to function
* Re: [PATCH v7] KVM: VMX: Enable XSAVE/XRSTOR for guest
From: Marcelo Tosatti @ 2010-06-01 17:12 UTC
To: Sheng Yang; +Cc: Avi Kivity, kvm, Dexuan Cui
On Mon, May 31, 2010 at 07:54:11PM +0800, Sheng Yang wrote:
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 99ae513..8649627 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -36,6 +36,8 @@
> #include <asm/vmx.h>
> #include <asm/virtext.h>
> #include <asm/mce.h>
> +#include <asm/i387.h>
> +#include <asm/xcr.h>
>
> #include "trace.h"
>
> @@ -3354,6 +3356,16 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu)
> return 1;
> }
>
> +static int handle_xsetbv(struct kvm_vcpu *vcpu)
> +{
> + u64 new_bv = kvm_read_edx_eax(vcpu);
> + u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
> +
> + kvm_set_xcr(vcpu, index, new_bv);
> + skip_emulated_instruction(vcpu);
Should only skip_emulated_instruction if no exception was injected.
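A minimal sketch of that fix (assuming kvm_set_xcr() is changed to return
the __kvm_set_xcr() result instead of void):

static int handle_xsetbv(struct kvm_vcpu *vcpu)
{
	u64 new_bv = kvm_read_edx_eax(vcpu);
	u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);

	/* Advance RIP only on success; on failure kvm_set_xcr()
	 * has already injected #GP, and the faulting instruction
	 * must not be skipped. */
	if (kvm_set_xcr(vcpu, index, new_bv) == 0)
		skip_emulated_instruction(vcpu);
	return 1;
}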
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 7be1d36..0fcd8de 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> +int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
> +{
> + u64 xcr0;
> +
> + /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
> + if (index != XCR_XFEATURE_ENABLED_MASK)
> + return 1;
> + xcr0 = xcr;
> + if (kvm_x86_ops->get_cpl(vcpu) != 0)
> + return 1;
> + if (!(xcr0 & XSTATE_FP))
> + return 1;
> + if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
> + return 1;
> + if (xcr0 & ~host_xcr0)
> + return 1;
> + vcpu->arch.xcr0 = xcr0;
> + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
Won't this happen on guest entry, since vcpu->guest_xcr0_loaded == 0?
> @@ -4522,6 +4602,24 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
> }
> }
>
> +static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
> +{
> + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
> + !vcpu->guest_xcr0_loaded) {
> + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
Only necessary if the guest's and host's XCR0 differ?
> + vcpu->guest_xcr0_loaded = 1;
> + }
> +}
> +
> +static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
> +{
> + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
> + vcpu->guest_xcr0_loaded) {
> + xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
> + vcpu->guest_xcr0_loaded = 0;
> + }
> +}
What if you load guest's XCR0, then guest clears CR4.OSXSAVE? (restore
of host_xcr0 should be conditional on guest_xcr0_loaded only).
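Taking this together with the earlier point about differing values, a sketch
of the corrected pair (the xcr0 != host_xcr0 comparison is an assumed
optimization, not something the posted patch does):

static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
	    !vcpu->guest_xcr0_loaded) {
		/* Skip the xsetbv when guest and host values agree. */
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	/* Keyed on guest_xcr0_loaded alone: the guest may have
	 * cleared CR4.OSXSAVE after its xcr0 was loaded. */
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}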
> +
> static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> {
> int r;
> @@ -4567,6 +4665,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> kvm_x86_ops->prepare_guest_switch(vcpu);
> if (vcpu->fpu_active)
> kvm_load_guest_fpu(vcpu);
> + kvm_load_guest_xcr0(vcpu);
>
> atomic_set(&vcpu->guest_mode, 1);
> smp_wmb();
> @@ -5118,6 +5217,11 @@ void fx_init(struct kvm_vcpu *vcpu)
> fpu_alloc(&vcpu->arch.guest_fpu);
> fpu_finit(&vcpu->arch.guest_fpu);
>
> + /*
> + * Ensure guest xcr0 is valid for loading
> + */
> + vcpu->arch.xcr0 = XSTATE_FP;
> +
> vcpu->arch.cr0 |= X86_CR0_ET;
> }
> EXPORT_SYMBOL_GPL(fx_init);
> @@ -5132,6 +5236,12 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
> if (vcpu->guest_fpu_loaded)
> return;
>
> + /*
> + * Restore all possible states in the guest,
> + * and assume host would use all available bits.
> + * Guest xcr0 would be loaded later.
> + */
> + kvm_put_guest_xcr0(vcpu);
> vcpu->guest_fpu_loaded = 1;
> unlazy_fpu(current);
> fpu_restore_checking(&vcpu->arch.guest_fpu);
> @@ -5140,6 +5250,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
>
> void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
> {
> + kvm_put_guest_xcr0(vcpu);
> +
Should be in kvm_arch_vcpu_put?
> if (!vcpu->guest_fpu_loaded)
> return;
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 4e8fdbf..3784d58 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -88,7 +88,7 @@ struct kvm_vcpu {
> int srcu_idx;
>
> int fpu_active;
> - int guest_fpu_loaded;
> + int guest_fpu_loaded, guest_xcr0_loaded;
> wait_queue_head_t wq;
> int sigset_active;
> sigset_t sigset;
> --
> 1.7.0.1
* Re: [PATCH v7] KVM: VMX: Enable XSAVE/XRSTOR for guest
From: Avi Kivity @ 2010-06-01 18:18 UTC
To: Marcelo Tosatti; +Cc: Sheng Yang, kvm, Dexuan Cui
On 06/01/2010 08:12 PM, Marcelo Tosatti wrote:
> On Mon, May 31, 2010 at 07:54:11PM +0800, Sheng Yang wrote:
>
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 99ae513..8649627 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -36,6 +36,8 @@
>> #include <asm/vmx.h>
>> #include <asm/virtext.h>
>> #include <asm/mce.h>
>> +#include <asm/i387.h>
>> +#include <asm/xcr.h>
>>
>> #include "trace.h"
>>
>> @@ -3354,6 +3356,16 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu)
>> return 1;
>> }
>>
>> +static int handle_xsetbv(struct kvm_vcpu *vcpu)
>> +{
>> + u64 new_bv = kvm_read_edx_eax(vcpu);
>> + u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
>> +
>> + kvm_set_xcr(vcpu, index, new_bv);
>> + skip_emulated_instruction(vcpu);
>>
> Should only skip_emulated_instruction if no exception was injected.
>
Good catch. So, unit testing failure cases _is_ important.
>
>> +int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
>> +{
>> + u64 xcr0;
>> +
>> + /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
>> + if (index != XCR_XFEATURE_ENABLED_MASK)
>> + return 1;
>> + xcr0 = xcr;
>> + if (kvm_x86_ops->get_cpl(vcpu) != 0)
>> + return 1;
>> + if (!(xcr0 & XSTATE_FP))
>> + return 1;
>> + if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
>> + return 1;
>> + if (xcr0 & ~host_xcr0)
>> + return 1;
>> + vcpu->arch.xcr0 = xcr0;
>> + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
>>
> Won't this happen on guest entry, since vcpu->guest_xcr0_loaded == 0?
>
In fact we don't know what vcpu->guest_xcr0_loaded is. Best to unload
it explicitly (and drop the xsetbv() call).
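Concretely, something like this (a sketch of the suggestion; the validity
checks are unchanged from the posted patch):

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (kvm_x86_ops->get_cpl(vcpu) != 0)
		return 1;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;
	if (xcr0 & ~host_xcr0)
		return 1;
	/* Unload the current guest xcr0 instead of writing the new
	 * value to hardware here; vcpu_enter_guest() will load the
	 * updated vcpu->arch.xcr0 on the next entry. */
	kvm_put_guest_xcr0(vcpu);
	vcpu->arch.xcr0 = xcr0;
	return 0;
}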
>> + vcpu->guest_xcr0_loaded = 1;
>> + }
>> +}
>> +
>> +static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
>> +{
>> + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
>> + vcpu->guest_xcr0_loaded) {
>> + xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
>> + vcpu->guest_xcr0_loaded = 0;
>> + }
>> +}
>>
> What if you load guest's XCR0, then guest clears CR4.OSXSAVE? (restore
> of host_xcr0 should be conditional on guest_xcr0_loaded only).
>
Good catch again.
>> @@ -5140,6 +5250,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
>>
>> void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
>> {
>> + kvm_put_guest_xcr0(vcpu);
>> +
>>
> Should be in kvm_arch_vcpu_put?
>
That's called unconditionally from kvm_arch_vcpu_put(), so equivalent.
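For reference, the caller in question looks roughly like this in x86.c of
this period, so the xcr0 unload runs on every vcpu_put either way:

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	/* Called unconditionally, and kvm_put_guest_xcr0() sits at
	 * the top of kvm_put_guest_fpu(), before its early return
	 * on !guest_fpu_loaded - hence the equivalence. */
	kvm_put_guest_fpu(vcpu);
}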
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.