* [PATCH v3] KVM: x86: XSAVE/XRSTOR live migration support
@ 2010-06-11 4:36 Sheng Yang
2010-06-13 8:26 ` Avi Kivity
0 siblings, 1 reply; 6+ messages in thread
From: Sheng Yang @ 2010-06-11 4:36 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang
This patch enables save/restore of xsave state.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
Documentation/kvm/api.txt | 74 +++++++++++++++++++++
arch/x86/include/asm/kvm.h | 22 ++++++
arch/x86/include/asm/xsave.h | 7 ++-
arch/x86/kvm/x86.c | 145 ++++++++++++++++++++++++++++++++++++++++++
include/linux/kvm.h | 12 ++++
5 files changed, 258 insertions(+), 2 deletions(-)
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index 159b4ef..ffba03f 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -922,6 +922,80 @@ Define which vcpu is the Bootstrap Processor (BSP). Values are the same
as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
is vcpu 0.
+4.41 KVM_GET_XSAVE
+
+Capability: KVM_CAP_XSAVE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xsave (out)
+Returns: 0 on success, -1 on error
+
+struct kvm_xsave {
+ __u32 region[1024];
+};
+
+This ioctl would copy current vcpu's xsave struct to the userspace.
+
+4.42 KVM_SET_XSAVE
+
+Capability: KVM_CAP_XSAVE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xsave (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_xsave {
+ __u32 region[1024];
+};
+
+This ioctl would copy userspace's xsave struct to the kernel.
+
+4.43 KVM_GET_XCRS
+
+Capability: KVM_CAP_XCRS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xcrs (out)
+Returns: 0 on success, -1 on error
+
+struct kvm_xcr {
+ __u32 xcr;
+ __u32 reserved;
+ __u64 value;
+};
+
+struct kvm_xcrs {
+ __u32 nr_xcrs;
+ __u32 flags;
+ struct kvm_xcr xcrs[KVM_MAX_XCRS];
+ __u64 padding[16];
+};
+
+This ioctl would copy current vcpu's xcrs to the userspace.
+
+4.44 KVM_SET_XCRS
+
+Capability: KVM_CAP_XCRS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xcrs (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_xcr {
+ __u32 xcr;
+ __u32 reserved;
+ __u64 value;
+};
+
+struct kvm_xcrs {
+ __u32 nr_xcrs;
+ __u32 flags;
+ struct kvm_xcr xcrs[KVM_MAX_XCRS];
+ __u64 padding[16];
+};
+
+This ioctl would set vcpu's xcr to the value userspace specified.
+
5. The kvm_run structure
Application code obtains a pointer to the kvm_run structure by
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index ff90055..4d8dcbd 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -22,6 +22,8 @@
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
+#define __KVM_HAVE_XSAVE
+#define __KVM_HAVE_XCRS
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -299,4 +301,24 @@ struct kvm_debugregs {
__u64 reserved[9];
};
+/* for KVM_CAP_XSAVE */
+struct kvm_xsave {
+ __u32 region[1024];
+};
+
+#define KVM_MAX_XCRS 16
+
+struct kvm_xcr {
+ __u32 xcr;
+ __u32 reserved;
+ __u64 value;
+};
+
+struct kvm_xcrs {
+ __u32 nr_xcrs;
+ __u32 flags;
+ struct kvm_xcr xcrs[KVM_MAX_XCRS];
+ __u64 padding[16];
+};
+
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 29ee4e4..32c3666 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -13,8 +13,11 @@
#define FXSAVE_SIZE 512
-#define XSTATE_YMM_SIZE 256
-#define XSTATE_YMM_OFFSET (512 + 64)
+#define XSAVE_HDR_SIZE 64
+#define XSAVE_HDR_OFFSET FXSAVE_SIZE
+
+#define XSAVE_YMM_SIZE 256
+#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
/*
* These are the features that the OS can handle currently.
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7a4073b..353a445 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1698,6 +1698,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
+ case KVM_CAP_XSAVE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -1721,6 +1722,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
+ case KVM_CAP_XCRS:
+ r = cpu_has_xsave;
+ break;
default:
r = 0;
break;
@@ -2373,6 +2377,83 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return 0;
}
+static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave)
+{
+ if (cpu_has_xsave)
+ memcpy(guest_xsave->region,
+ &vcpu->arch.guest_fpu.state->xsave,
+ sizeof(struct xsave_struct));
+ else {
+ memcpy(guest_xsave->region,
+ &vcpu->arch.guest_fpu.state->fxsave,
+ sizeof(struct i387_fxsave_struct));
+ *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
+ XSTATE_FPSSE;
+ }
+}
+
+static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave)
+{
+ u64 xstate_bv =
+ *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+ int size;
+
+ if (cpu_has_xsave) {
+ if (xstate_bv & XSTATE_YMM)
+ size = XSAVE_YMM_OFFSET + XSAVE_YMM_SIZE;
+ else
+ size = XSAVE_HDR_OFFSET + XSAVE_HDR_SIZE;
+ memcpy(&vcpu->arch.guest_fpu.state->xsave,
+ guest_xsave->region, size);
+ } else {
+ if (xstate_bv & ~XSTATE_FPSSE)
+ return -EINVAL;
+ size = sizeof(struct i387_fxsave_struct);
+ memcpy(&vcpu->arch.guest_fpu.state->fxsave,
+ guest_xsave->region, size);
+ }
+ return 0;
+}
+
+static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
+ struct kvm_xcrs *guest_xcrs)
+{
+ if (!cpu_has_xsave) {
+ guest_xcrs->nr_xcrs = 0;
+ return;
+ }
+
+ guest_xcrs->nr_xcrs = 1;
+ guest_xcrs->flags = 0;
+ guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
+ guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
+}
+
+static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
+ struct kvm_xcrs *guest_xcrs)
+{
+ int i, r = 0;
+
+ if (!cpu_has_xsave)
+ return -EINVAL;
+
+ if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS)
+ return -EFAULT;
+
+ for (i = 0; i < guest_xcrs->nr_xcrs; i++)
+ /* Only support XCR0 currently */
+ if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
+ r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
+ guest_xcrs->xcrs[0].value);
+ break;
+ }
+ if (r)
+ r = -EFAULT;
+ return r;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2574,6 +2655,70 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
break;
}
+ case KVM_GET_XSAVE: {
+ struct kvm_xsave *xsave;
+
+ xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!xsave)
+ break;
+
+ kvm_vcpu_ioctl_x86_get_xsave(vcpu, xsave);
+
+ r = -EFAULT;
+ if (copy_to_user(argp, xsave, sizeof(struct kvm_xsave)))
+ break;
+ r = 0;
+ break;
+ }
+ case KVM_SET_XSAVE: {
+ struct kvm_xsave *xsave;
+
+ xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!xsave)
+ break;
+
+ r = -EFAULT;
+ if (copy_from_user(xsave, argp, sizeof(struct kvm_xsave)))
+ break;
+
+ r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, xsave);
+ break;
+ }
+ case KVM_GET_XCRS: {
+ struct kvm_xcrs *xcrs;
+
+ xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!xcrs)
+ break;
+
+ kvm_vcpu_ioctl_x86_get_xcrs(vcpu, xcrs);
+
+ r = -EFAULT;
+ if (copy_to_user(argp, xcrs,
+ sizeof(struct kvm_xcrs)))
+ break;
+ r = 0;
+ break;
+ }
+ case KVM_SET_XCRS: {
+ struct kvm_xcrs *xcrs;
+
+ xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!xcrs)
+ break;
+
+ r = -EFAULT;
+ if (copy_from_user(xcrs, argp,
+ sizeof(struct kvm_xcrs)))
+ break;
+
+ r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, xcrs);
+ break;
+ }
default:
r = -EINVAL;
}
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 23ea022..6fd40f5 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -524,6 +524,12 @@ struct kvm_enable_cap {
#define KVM_CAP_PPC_OSI 52
#define KVM_CAP_PPC_UNSET_IRQ 53
#define KVM_CAP_ENABLE_CAP 54
+#ifdef __KVM_HAVE_XSAVE
+#define KVM_CAP_XSAVE 55
+#endif
+#ifdef __KVM_HAVE_XCRS
+#define KVM_CAP_XCRS 56
+#endif
#ifdef KVM_CAP_IRQ_ROUTING
@@ -714,6 +720,12 @@ struct kvm_clock_data {
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
+/* Available with KVM_CAP_XSAVE */
+#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave)
+#define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave)
+/* Available with KVM_CAP_XCRS */
+#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs)
+#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
--
1.7.0.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* Re: [PATCH v3] KVM: x86: XSAVE/XRSTOR live migration support
2010-06-11 4:36 [PATCH v3] KVM: x86: XSAVE/XRSTOR live migration support Sheng Yang
@ 2010-06-13 8:26 ` Avi Kivity
2010-06-13 9:10 ` Sheng Yang
0 siblings, 1 reply; 6+ messages in thread
From: Avi Kivity @ 2010-06-13 8:26 UTC (permalink / raw)
To: Sheng Yang; +Cc: Marcelo Tosatti, kvm
On 06/11/2010 07:36 AM, Sheng Yang wrote:
> This patch enable save/restore of xsave state.
>
> +static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
> + struct kvm_xsave *guest_xsave)
> +{
> + u64 xstate_bv =
> + *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
> + int size;
> +
> + if (cpu_has_xsave) {
> + if (xstate_bv & XSTATE_YMM)
> + size = XSAVE_YMM_OFFSET + XSAVE_YMM_SIZE;
> + else
> + size = XSAVE_HDR_OFFSET + XSAVE_HDR_SIZE;
> + memcpy(&vcpu->arch.guest_fpu.state->xsave,
> + guest_xsave->region, size);
>
This allows userspace to overflow host memory by specifying XSTATE_YMM
on a host that doesn't support it.
Better to just use the host's size of the structure.
> + } else {
> + if (xstate_bv & ~XSTATE_FPSSE)
> + return -EINVAL;
> + size = sizeof(struct i387_fxsave_struct);
> + memcpy(&vcpu->arch.guest_fpu.state->fxsave,
> + guest_xsave->region, size);
> + }
> + return 0;
> +}
> +
>
> +
> +static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
> + struct kvm_xcrs *guest_xcrs)
> +{
> + int i, r = 0;
> +
> + if (!cpu_has_xsave)
> + return -EINVAL;
>
Too strict?
> +
> + if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS)
> + return -EFAULT;
>
EFAULT is for faults during access to userspace. EINVAL or E2BIG.
Need to ensure flags is 0 for forward compatibility.
> +
> + for (i = 0; i < guest_xcrs->nr_xcrs; i++)
> + /* Only support XCR0 currently */
> + if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
> + r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
> + guest_xcrs->xcrs[0].value);
> + break;
> + }
> + if (r)
> + r = -EFAULT;
>
EINVAL
> + return r;
> +}
> +
>
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 6+ messages in thread* Re: [PATCH v3] KVM: x86: XSAVE/XRSTOR live migration support
2010-06-13 8:26 ` Avi Kivity
@ 2010-06-13 9:10 ` Sheng Yang
2010-06-13 9:13 ` Avi Kivity
0 siblings, 1 reply; 6+ messages in thread
From: Sheng Yang @ 2010-06-13 9:10 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
On Sunday 13 June 2010 16:26:18 Avi Kivity wrote:
> On 06/11/2010 07:36 AM, Sheng Yang wrote:
> > This patch enable save/restore of xsave state.
> >
> > +static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
> > + struct kvm_xsave *guest_xsave)
> > +{
> > + u64 xstate_bv =
> > + *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
> > + int size;
> > +
> > + if (cpu_has_xsave) {
> > + if (xstate_bv & XSTATE_YMM)
> > + size = XSAVE_YMM_OFFSET + XSAVE_YMM_SIZE;
> > + else
> > + size = XSAVE_HDR_OFFSET + XSAVE_HDR_SIZE;
> > + memcpy(&vcpu->arch.guest_fpu.state->xsave,
> > + guest_xsave->region, size);
>
> This allows userspace to overflow host memory by specifying XSTATE_YMM
> on a host that doesn't support it.
>
> Better to just use the host's size of the structure.
Yes, should good enough.
>
> > + } else {
> > + if (xstate_bv & ~XSTATE_FPSSE)
> > + return -EINVAL;
> > + size = sizeof(struct i387_fxsave_struct);
> > + memcpy(&vcpu->arch.guest_fpu.state->fxsave,
> > + guest_xsave->region, size);
> > + }
> > + return 0;
> > +}
> > +
> >
> > +
> > +static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
> > + struct kvm_xcrs *guest_xcrs)
> > +{
> > + int i, r = 0;
> > +
> > + if (!cpu_has_xsave)
> > + return -EINVAL;
>
> Too strict?
For no cpu_has_xsave, the KVM_CAP_XCRS would return 0, so this ioctl shouldn't be
called.
>
> > +
> > + if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS)
> > + return -EFAULT;
>
> EFAULT is for faults during access to userspace. EINVAL or E2BIG.
>
> Need to ensure flags is 0 for forward compatibility.
OK.
>
> > +
> > + for (i = 0; i < guest_xcrs->nr_xcrs; i++)
> > + /* Only support XCR0 currently */
> > + if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
> > + r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
> > + guest_xcrs->xcrs[0].value);
> > + break;
> > + }
> > + if (r)
> > + r = -EFAULT;
>
> EINVAL
OK
>
> > + return r;
> > +}
> > +
--
regards
Yang, Sheng
^ permalink raw reply [flat|nested] 6+ messages in thread* Re: [PATCH v3] KVM: x86: XSAVE/XRSTOR live migration support
2010-06-13 9:10 ` Sheng Yang
@ 2010-06-13 9:13 ` Avi Kivity
2010-06-13 9:29 ` [PATCH v4] " Sheng Yang
0 siblings, 1 reply; 6+ messages in thread
From: Avi Kivity @ 2010-06-13 9:13 UTC (permalink / raw)
To: Sheng Yang; +Cc: Marcelo Tosatti, kvm
On 06/13/2010 12:10 PM, Sheng Yang wrote:
>
>>> +
>>> +static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
>>> + struct kvm_xcrs *guest_xcrs)
>>> +{
>>> + int i, r = 0;
>>> +
>>> + if (!cpu_has_xsave)
>>> + return -EINVAL;
>>>
>> Too strict?
>>
> For no cpu_has_xsave, the KVM_CAP_XCRS would return 0, so this ioctl shouldn't be
> called.
>
Right.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 6+ messages in thread* [PATCH v4] KVM: x86: XSAVE/XRSTOR live migration support
2010-06-13 9:13 ` Avi Kivity
@ 2010-06-13 9:29 ` Sheng Yang
2010-06-14 20:33 ` Marcelo Tosatti
0 siblings, 1 reply; 6+ messages in thread
From: Sheng Yang @ 2010-06-13 9:29 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Sheng Yang
This patch enables save/restore of xsave state.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
Documentation/kvm/api.txt | 74 ++++++++++++++++++++++
arch/x86/include/asm/kvm.h | 22 +++++++
arch/x86/include/asm/xsave.h | 7 ++-
arch/x86/kvm/x86.c | 139 ++++++++++++++++++++++++++++++++++++++++++
include/linux/kvm.h | 12 ++++
5 files changed, 252 insertions(+), 2 deletions(-)
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index 159b4ef..ffba03f 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -922,6 +922,80 @@ Define which vcpu is the Bootstrap Processor (BSP). Values are the same
as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
is vcpu 0.
+4.41 KVM_GET_XSAVE
+
+Capability: KVM_CAP_XSAVE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xsave (out)
+Returns: 0 on success, -1 on error
+
+struct kvm_xsave {
+ __u32 region[1024];
+};
+
+This ioctl would copy current vcpu's xsave struct to the userspace.
+
+4.42 KVM_SET_XSAVE
+
+Capability: KVM_CAP_XSAVE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xsave (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_xsave {
+ __u32 region[1024];
+};
+
+This ioctl would copy userspace's xsave struct to the kernel.
+
+4.43 KVM_GET_XCRS
+
+Capability: KVM_CAP_XCRS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xcrs (out)
+Returns: 0 on success, -1 on error
+
+struct kvm_xcr {
+ __u32 xcr;
+ __u32 reserved;
+ __u64 value;
+};
+
+struct kvm_xcrs {
+ __u32 nr_xcrs;
+ __u32 flags;
+ struct kvm_xcr xcrs[KVM_MAX_XCRS];
+ __u64 padding[16];
+};
+
+This ioctl would copy current vcpu's xcrs to the userspace.
+
+4.44 KVM_SET_XCRS
+
+Capability: KVM_CAP_XCRS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xcrs (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_xcr {
+ __u32 xcr;
+ __u32 reserved;
+ __u64 value;
+};
+
+struct kvm_xcrs {
+ __u32 nr_xcrs;
+ __u32 flags;
+ struct kvm_xcr xcrs[KVM_MAX_XCRS];
+ __u64 padding[16];
+};
+
+This ioctl would set vcpu's xcr to the value userspace specified.
+
5. The kvm_run structure
Application code obtains a pointer to the kvm_run structure by
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index ff90055..4d8dcbd 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -22,6 +22,8 @@
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
+#define __KVM_HAVE_XSAVE
+#define __KVM_HAVE_XCRS
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -299,4 +301,24 @@ struct kvm_debugregs {
__u64 reserved[9];
};
+/* for KVM_CAP_XSAVE */
+struct kvm_xsave {
+ __u32 region[1024];
+};
+
+#define KVM_MAX_XCRS 16
+
+struct kvm_xcr {
+ __u32 xcr;
+ __u32 reserved;
+ __u64 value;
+};
+
+struct kvm_xcrs {
+ __u32 nr_xcrs;
+ __u32 flags;
+ struct kvm_xcr xcrs[KVM_MAX_XCRS];
+ __u64 padding[16];
+};
+
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 29ee4e4..32c3666 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -13,8 +13,11 @@
#define FXSAVE_SIZE 512
-#define XSTATE_YMM_SIZE 256
-#define XSTATE_YMM_OFFSET (512 + 64)
+#define XSAVE_HDR_SIZE 64
+#define XSAVE_HDR_OFFSET FXSAVE_SIZE
+
+#define XSAVE_YMM_SIZE 256
+#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
/*
* These are the features that the OS can handle currently.
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7a4073b..682b5f2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1698,6 +1698,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
+ case KVM_CAP_XSAVE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -1721,6 +1722,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
+ case KVM_CAP_XCRS:
+ r = cpu_has_xsave;
+ break;
default:
r = 0;
break;
@@ -2373,6 +2377,77 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return 0;
}
+static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave)
+{
+ if (cpu_has_xsave)
+ memcpy(guest_xsave->region,
+ &vcpu->arch.guest_fpu.state->xsave,
+ sizeof(struct xsave_struct));
+ else {
+ memcpy(guest_xsave->region,
+ &vcpu->arch.guest_fpu.state->fxsave,
+ sizeof(struct i387_fxsave_struct));
+ *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
+ XSTATE_FPSSE;
+ }
+}
+
+static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave)
+{
+ u64 xstate_bv =
+ *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+
+ if (cpu_has_xsave)
+ memcpy(&vcpu->arch.guest_fpu.state->xsave,
+ guest_xsave->region, sizeof(struct xsave_struct));
+ else {
+ if (xstate_bv & ~XSTATE_FPSSE)
+ return -EINVAL;
+ memcpy(&vcpu->arch.guest_fpu.state->fxsave,
+ guest_xsave->region, sizeof(struct i387_fxsave_struct));
+ }
+ return 0;
+}
+
+static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
+ struct kvm_xcrs *guest_xcrs)
+{
+ if (!cpu_has_xsave) {
+ guest_xcrs->nr_xcrs = 0;
+ return;
+ }
+
+ guest_xcrs->nr_xcrs = 1;
+ guest_xcrs->flags = 0;
+ guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
+ guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
+}
+
+static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
+ struct kvm_xcrs *guest_xcrs)
+{
+ int i, r = 0;
+
+ if (!cpu_has_xsave)
+ return -EINVAL;
+
+ if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
+ return -EINVAL;
+
+ for (i = 0; i < guest_xcrs->nr_xcrs; i++)
+ /* Only support XCR0 currently */
+ if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
+ r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
+ guest_xcrs->xcrs[0].value);
+ break;
+ }
+ if (r)
+ r = -EINVAL;
+ return r;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2574,6 +2649,70 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
break;
}
+ case KVM_GET_XSAVE: {
+ struct kvm_xsave *xsave;
+
+ xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!xsave)
+ break;
+
+ kvm_vcpu_ioctl_x86_get_xsave(vcpu, xsave);
+
+ r = -EFAULT;
+ if (copy_to_user(argp, xsave, sizeof(struct kvm_xsave)))
+ break;
+ r = 0;
+ break;
+ }
+ case KVM_SET_XSAVE: {
+ struct kvm_xsave *xsave;
+
+ xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!xsave)
+ break;
+
+ r = -EFAULT;
+ if (copy_from_user(xsave, argp, sizeof(struct kvm_xsave)))
+ break;
+
+ r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, xsave);
+ break;
+ }
+ case KVM_GET_XCRS: {
+ struct kvm_xcrs *xcrs;
+
+ xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!xcrs)
+ break;
+
+ kvm_vcpu_ioctl_x86_get_xcrs(vcpu, xcrs);
+
+ r = -EFAULT;
+ if (copy_to_user(argp, xcrs,
+ sizeof(struct kvm_xcrs)))
+ break;
+ r = 0;
+ break;
+ }
+ case KVM_SET_XCRS: {
+ struct kvm_xcrs *xcrs;
+
+ xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!xcrs)
+ break;
+
+ r = -EFAULT;
+ if (copy_from_user(xcrs, argp,
+ sizeof(struct kvm_xcrs)))
+ break;
+
+ r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, xcrs);
+ break;
+ }
default:
r = -EINVAL;
}
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 23ea022..6fd40f5 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -524,6 +524,12 @@ struct kvm_enable_cap {
#define KVM_CAP_PPC_OSI 52
#define KVM_CAP_PPC_UNSET_IRQ 53
#define KVM_CAP_ENABLE_CAP 54
+#ifdef __KVM_HAVE_XSAVE
+#define KVM_CAP_XSAVE 55
+#endif
+#ifdef __KVM_HAVE_XCRS
+#define KVM_CAP_XCRS 56
+#endif
#ifdef KVM_CAP_IRQ_ROUTING
@@ -714,6 +720,12 @@ struct kvm_clock_data {
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
+/* Available with KVM_CAP_XSAVE */
+#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave)
+#define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave)
+/* Available with KVM_CAP_XCRS */
+#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs)
+#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
--
1.7.0.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
end of thread, other threads:[~2010-06-14 20:45 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-06-11 4:36 [PATCH v3] KVM: x86: XSAVE/XRSTOR live migration support Sheng Yang
2010-06-13 8:26 ` Avi Kivity
2010-06-13 9:10 ` Sheng Yang
2010-06-13 9:13 ` Avi Kivity
2010-06-13 9:29 ` [PATCH v4] " Sheng Yang
2010-06-14 20:33 ` Marcelo Tosatti
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox