kvm.vger.kernel.org archive mirror
* [PATCH] KVM: x86: XSAVE/XRSTOR live migration support
@ 2010-05-27  9:48 Sheng Yang
  2010-05-27 10:02 ` Avi Kivity
  0 siblings, 1 reply; 6+ messages in thread
From: Sheng Yang @ 2010-05-27  9:48 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang

This patch enables save/restore of xsave state.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/include/asm/kvm.h |   29 ++++++++++++++++
 arch/x86/kvm/x86.c         |   79 ++++++++++++++++++++++++++++++++++++++++++++
 include/linux/kvm.h        |    6 +++
 3 files changed, 114 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index ff90055..d3f4d9f 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -22,6 +22,7 @@
 #define __KVM_HAVE_XEN_HVM
 #define __KVM_HAVE_VCPU_EVENTS
 #define __KVM_HAVE_DEBUGREGS
+#define __KVM_HAVE_XSAVE
 
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
@@ -299,4 +300,32 @@ struct kvm_debugregs {
 	__u64 reserved[9];
 };
 
+/* for KVM_CAP_XSAVE */
+struct kvm_xsave {
+	struct {
+		__u16 cwd;
+		__u16 swd;
+		__u16 twd;
+		__u16 fop;
+		__u64 rip;
+		__u64 rdp;
+		__u32 mxcsr;
+		__u32 mxcsr_mask;
+		__u32 st_space[32];
+		__u32 xmm_space[64];
+		__u32 padding[12];
+		__u32 sw_reserved[12];
+	} i387;
+	struct {
+		__u64 xstate_bv;
+		__u64 reserved1[2];
+		__u64 reserved2[5];
+	} xsave_hdr;
+	struct {
+		__u32 ymmh_space[64];
+	} ymmh;
+	__u64 xcr0;
+	__u32 padding[256];
+};
+
 #endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e7acc9d..5badba2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1711,6 +1711,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_MCE:
 		r = KVM_MAX_MCE_BANKS;
 		break;
+	case KVM_CAP_XSAVE:
+		r = cpu_has_xsave;
+		break;
 	default:
 		r = 0;
 		break;
@@ -2363,6 +2366,59 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+					struct kvm_xsave *guest_xsave)
+{
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+
+	if (!cpu_has_xsave)
+		return;
+
+	guest_xsave->i387.cwd = xsave->i387.cwd;
+	guest_xsave->i387.swd = xsave->i387.swd;
+	guest_xsave->i387.twd = xsave->i387.twd;
+	guest_xsave->i387.fop = xsave->i387.fop;
+	guest_xsave->i387.rip = xsave->i387.rip;
+	guest_xsave->i387.rdp = xsave->i387.rdp;
+	memcpy(guest_xsave->i387.st_space, xsave->i387.st_space, 128);
+	memcpy(guest_xsave->i387.xmm_space, xsave->i387.xmm_space,
+			sizeof guest_xsave->i387.xmm_space);
+
+	guest_xsave->xsave_hdr.xstate_bv = xsave->xsave_hdr.xstate_bv;
+	memcpy(guest_xsave->ymmh.ymmh_space, xsave->ymmh.ymmh_space,
+			sizeof xsave->ymmh.ymmh_space);
+
+	guest_xsave->xcr0 = vcpu->arch.xcr0;
+}
+
+static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+					struct kvm_xsave *guest_xsave)
+{
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+
+	if (!cpu_has_xsave)
+		return -EINVAL;
+
+	xsave->i387.cwd = guest_xsave->i387.cwd;
+	xsave->i387.swd = guest_xsave->i387.swd;
+	xsave->i387.twd = guest_xsave->i387.twd;
+	xsave->i387.fop = guest_xsave->i387.fop;
+	xsave->i387.rip = guest_xsave->i387.rip;
+	xsave->i387.rdp = guest_xsave->i387.rdp;
+	memcpy(xsave->i387.st_space, guest_xsave->i387.st_space, 128);
+	memcpy(xsave->i387.xmm_space, guest_xsave->i387.xmm_space,
+			sizeof guest_xsave->i387.xmm_space);
+
+	xsave->xsave_hdr.xstate_bv = guest_xsave->xsave_hdr.xstate_bv;
+	memcpy(xsave->ymmh.ymmh_space, guest_xsave->ymmh.ymmh_space,
+		sizeof guest_xsave->ymmh.ymmh_space);
+
+	/* set_xsave may override the initial value of xcr0... */
+	if (guest_xsave->xcr0 != 0)
+		kvm_set_xcr0(vcpu, guest_xsave->xcr0);
+	return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
@@ -2564,6 +2620,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
 		break;
 	}
+	case KVM_GET_XSAVE: {
+		struct kvm_xsave xsave;
+
+		kvm_vcpu_ioctl_x86_get_xsave(vcpu, &xsave);
+
+		r = -EFAULT;
+		if (copy_to_user(argp, &xsave,
+				 sizeof(struct kvm_xsave)))
+			break;
+		r = 0;
+		break;
+	}
+	case KVM_SET_XSAVE: {
+		struct kvm_xsave xsave;
+
+		r = -EFAULT;
+		if (copy_from_user(&xsave, argp,
+				   sizeof(struct kvm_xsave)))
+			break;
+
+		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, &xsave);
+		break;
+	}
 	default:
 		r = -EINVAL;
 	}
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 23ea022..5006761 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -524,6 +524,9 @@ struct kvm_enable_cap {
 #define KVM_CAP_PPC_OSI 52
 #define KVM_CAP_PPC_UNSET_IRQ 53
 #define KVM_CAP_ENABLE_CAP 54
+#ifdef __KVM_HAVE_XSAVE
+#define KVM_CAP_XSAVE 55
+#endif
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -714,6 +717,9 @@ struct kvm_clock_data {
 #define KVM_GET_DEBUGREGS         _IOR(KVMIO,  0xa1, struct kvm_debugregs)
 #define KVM_SET_DEBUGREGS         _IOW(KVMIO,  0xa2, struct kvm_debugregs)
 #define KVM_ENABLE_CAP            _IOW(KVMIO,  0xa3, struct kvm_enable_cap)
+/* Available with KVM_CAP_XSAVE */
+#define KVM_GET_XSAVE		  _IOR(KVMIO,  0xa4, struct kvm_xsave)
+#define KVM_SET_XSAVE		  _IOW(KVMIO,  0xa5, struct kvm_xsave)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 
-- 
1.7.0.1



* Re: [PATCH] KVM: x86: XSAVE/XRSTOR live migration support
  2010-05-27  9:48 [PATCH] KVM: x86: XSAVE/XRSTOR live migration support Sheng Yang
@ 2010-05-27 10:02 ` Avi Kivity
  2010-05-27 10:33   ` Sheng Yang
  2010-05-31 11:21   ` Sheng Yang
  0 siblings, 2 replies; 6+ messages in thread
From: Avi Kivity @ 2010-05-27 10:02 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Marcelo Tosatti, kvm

On 05/27/2010 12:48 PM, Sheng Yang wrote:
> This patch enable save/restore of xsave state.
>
> Signed-off-by: Sheng Yang<sheng@linux.intel.com>
> ---
>   arch/x86/include/asm/kvm.h |   29 ++++++++++++++++
>   arch/x86/kvm/x86.c         |   79 ++++++++++++++++++++++++++++++++++++++++++++
>   include/linux/kvm.h        |    6 +++
>    

Documentation/kvm/api.txt +++++++++++++

>
> +/* for KVM_CAP_XSAVE */
> +struct kvm_xsave {
> +	struct {
> +		__u16 cwd;
> +		__u16 swd;
> +		__u16 twd;
> +		__u16 fop;
> +		__u64 rip;
> +		__u64 rdp;
> +		__u32 mxcsr;
> +		__u32 mxcsr_mask;
> +		__u32 st_space[32];
> +		__u32 xmm_space[64];
> +		__u32 padding[12];
> +		__u32 sw_reserved[12];
> +	} i387;
> +	struct {
> +		__u64 xstate_bv;
> +		__u64 reserved1[2];
> +		__u64 reserved2[5];
> +	} xsave_hdr;
> +	struct {
> +		__u32 ymmh_space[64];
> +	} ymmh;
> +	__u64 xcr0;
> +	__u32 padding[256];
> +};
>    

Need to reserve way more space here for future xsave growth.  I think at 
least 4K.  LRB was 32x512bit = 1K (though it probably isn't a candidate 
for vmx).  Would be good to get an opinion from your processor architects.

I don't think we need to detail the contents of the structures since 
they're described by the SDM; so we can have just a large array that is 
1:1 with the xsave as saved by the fpu.
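
Something like the following flat layout would do, purely as an illustration
(the 4K figure is just the minimum suggested above, not a final number):

struct kvm_xsave {
	__u32 region[1024];	/* 4096 bytes, 1:1 with the hardware xsave image */
};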

If we do that then xcr0 needs to be in a separate structure, say 
kvm_xcr, with a flags field and reserved space of its own for future xcr 
growth.

> @@ -2363,6 +2366,59 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
>   	return 0;
>   }
>
> +static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
> +					struct kvm_xsave *guest_xsave)
> +{
> +	struct xsave_struct *xsave =&vcpu->arch.guest_fpu.state->xsave;
> +
> +	if (!cpu_has_xsave)
> +		return;
>    

Hm, it would be nice to make it backward compatible and return the 
legacy fpu instead.  I think the layouts are compatible?
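
A rough sketch of that fallback, assuming the fxsave image can simply be
copied (the legacy area is the first 512 bytes of the xsave image):

	if (!cpu_has_xsave) {
		struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

		/* the legacy fxsave layout matches kvm_xsave.i387 byte for byte */
		memcpy(&guest_xsave->i387, fxsave, sizeof(guest_xsave->i387));
		return;
	}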

> +
> +	guest_xsave->i387.cwd = xsave->i387.cwd;
> +	guest_xsave->i387.swd = xsave->i387.swd;
> +	guest_xsave->i387.twd = xsave->i387.twd;
> +	guest_xsave->i387.fop = xsave->i387.fop;
> +	guest_xsave->i387.rip = xsave->i387.rip;
> +	guest_xsave->i387.rdp = xsave->i387.rdp;
> +	memcpy(guest_xsave->i387.st_space, xsave->i387.st_space, 128);
> +	memcpy(guest_xsave->i387.xmm_space, xsave->i387.xmm_space,
> +			sizeof guest_xsave->i387.xmm_space);
> +
> +	guest_xsave->xsave_hdr.xstate_bv = xsave->xsave_hdr.xstate_bv;
> +	memcpy(guest_xsave->ymmh.ymmh_space, xsave->ymmh.ymmh_space,
> +			sizeof xsave->ymmh.ymmh_space);
>    

And we can do a big memcpy here.  But we need to limit it to what the 
host actually allocated.
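
Roughly (assuming the flat region[] layout above; xstate_size here stands for
whatever size the host actually allocated for the xsave area):

	memcpy(guest_xsave->region, xsave, xstate_size);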

> +
> +	guest_xsave->xcr0 = vcpu->arch.xcr0;
> +}
> +
>
>   long kvm_arch_vcpu_ioctl(struct file *filp,
>   			 unsigned int ioctl, unsigned long arg)
>   {
> @@ -2564,6 +2620,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
>   		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu,&dbgregs);
>   		break;
>   	}
> +	case KVM_GET_XSAVE: {
> +		struct kvm_xsave xsave;
>    

Too big for stack (especially if we reserve room for growth).

> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 23ea022..5006761 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -524,6 +524,9 @@ struct kvm_enable_cap {
>   #define KVM_CAP_PPC_OSI 52
>   #define KVM_CAP_PPC_UNSET_IRQ 53
>   #define KVM_CAP_ENABLE_CAP 54
> +#ifdef __KVM_HAVE_XSAVE
> +#define KVM_CAP_XSAVE 55
> +#endif
>    

Might make sense to have a separate KVM_CAP_XCR, just for consistency.


-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH] KVM: x86: XSAVE/XRSTOR live migration support
  2010-05-27 10:02 ` Avi Kivity
@ 2010-05-27 10:33   ` Sheng Yang
  2010-05-27 11:34     ` Avi Kivity
  2010-05-31 11:21   ` Sheng Yang
  1 sibling, 1 reply; 6+ messages in thread
From: Sheng Yang @ 2010-05-27 10:33 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm

On Thursday 27 May 2010 18:02:31 Avi Kivity wrote:
> On 05/27/2010 12:48 PM, Sheng Yang wrote:
> > This patch enable save/restore of xsave state.
> > 
> > Signed-off-by: Sheng Yang<sheng@linux.intel.com>
> > ---
> > 
> >   arch/x86/include/asm/kvm.h |   29 ++++++++++++++++
> >   arch/x86/kvm/x86.c         |   79 ++++++++++++++++++++++++++++++++++++++++++++
> >   include/linux/kvm.h        |    6 +++
> 
> Documentation/kvm/api.txt +++++++++++++

Yes...
> 
> > +/* for KVM_CAP_XSAVE */
> > +struct kvm_xsave {
> > +	struct {
> > +		__u16 cwd;
> > +		__u16 swd;
> > +		__u16 twd;
> > +		__u16 fop;
> > +		__u64 rip;
> > +		__u64 rdp;
> > +		__u32 mxcsr;
> > +		__u32 mxcsr_mask;
> > +		__u32 st_space[32];
> > +		__u32 xmm_space[64];
> > +		__u32 padding[12];
> > +		__u32 sw_reserved[12];
> > +	} i387;
> > +	struct {
> > +		__u64 xstate_bv;
> > +		__u64 reserved1[2];
> > +		__u64 reserved2[5];
> > +	} xsave_hdr;
> > +	struct {
> > +		__u32 ymmh_space[64];
> > +	} ymmh;
> > +	__u64 xcr0;
> > +	__u32 padding[256];
> > +};
> 
> Need to reserve way more space here for future xsave growth.  I think at
> least 4K.  LRB wa 32x512bit = 1K (though it probably isn't a candidate
> for vmx).  Would be good to get an opinion from your processor architects.

Would check it.
> 
> I don't think we need to detail the contents of the structures since
> they're described by the SDM; so we can have just a large array that is
> 1:1 with the xsave as saved by the fpu.

Um, I've tried that, but failed mysteriously... Would check what's wrong.
> 
> If we do that then xcr0 needs to be in a separate structure, say
> kvm_xcr, with a flags field and reserved space of its own for future xcr
> growth.

I meant to put it into sregs, but found it's already full... How about "extended 
sregs"?
> 
> > @@ -2363,6 +2366,59 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct
> > kvm_vcpu *vcpu,
> > 
> >   	return 0;
> >   
> >   }
> > 
> > +static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
> > +					struct kvm_xsave *guest_xsave)
> > +{
> > +	struct xsave_struct *xsave =&vcpu->arch.guest_fpu.state->xsave;
> > +
> > +	if (!cpu_has_xsave)
> > +		return;
> 
> Hm, it would be nice to make it backward compatible and return the
> legacy fpu instead.  I think the layouts are compatible?

Sounds good.  But it seems we still need KVM_CAP_XSAVE to use this interface, and 
other processors would still go through the FPU interface. So it doesn't seem to improve much?
> 
> > +
> > +	guest_xsave->i387.cwd = xsave->i387.cwd;
> > +	guest_xsave->i387.swd = xsave->i387.swd;
> > +	guest_xsave->i387.twd = xsave->i387.twd;
> > +	guest_xsave->i387.fop = xsave->i387.fop;
> > +	guest_xsave->i387.rip = xsave->i387.rip;
> > +	guest_xsave->i387.rdp = xsave->i387.rdp;
> > +	memcpy(guest_xsave->i387.st_space, xsave->i387.st_space, 128);
> > +	memcpy(guest_xsave->i387.xmm_space, xsave->i387.xmm_space,
> > +			sizeof guest_xsave->i387.xmm_space);
> > +
> > +	guest_xsave->xsave_hdr.xstate_bv = xsave->xsave_hdr.xstate_bv;
> > +	memcpy(guest_xsave->ymmh.ymmh_space, xsave->ymmh.ymmh_space,
> > +			sizeof xsave->ymmh.ymmh_space);
> 
> And we can do a big memcpy here.  But we need to limit it to what the
> host actually allocated.

Would try.
> 
> > +
> > +	guest_xsave->xcr0 = vcpu->arch.xcr0;
> > +}
> > +
> > 
> >   long kvm_arch_vcpu_ioctl(struct file *filp,
> >   
> >   			 unsigned int ioctl, unsigned long arg)
> >   
> >   {
> > 
> > @@ -2564,6 +2620,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
> > 
> >   		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu,&dbgregs);
> >   		break;
> >   	
> >   	}
> > 
> > +	case KVM_GET_XSAVE: {
> > +		struct kvm_xsave xsave;
> 
> Too big for stack (especially if we reserve room for growth).

Oops...
> 
> > diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> > index 23ea022..5006761 100644
> > --- a/include/linux/kvm.h
> > +++ b/include/linux/kvm.h
> > @@ -524,6 +524,9 @@ struct kvm_enable_cap {
> > 
> >   #define KVM_CAP_PPC_OSI 52
> >   #define KVM_CAP_PPC_UNSET_IRQ 53
> >   #define KVM_CAP_ENABLE_CAP 54
> > 
> > +#ifdef __KVM_HAVE_XSAVE
> > +#define KVM_CAP_XSAVE 55
> > +#endif
> 
> Might make sense to have a separate KVM_CAP_XCR, just for consistency.

Maybe EXTENDED_SREGS? But still every future field in the struct needs a CAP...

--
regards
Yang, Sheng


* Re: [PATCH] KVM: x86: XSAVE/XRSTOR live migration support
  2010-05-27 10:33   ` Sheng Yang
@ 2010-05-27 11:34     ` Avi Kivity
  0 siblings, 0 replies; 6+ messages in thread
From: Avi Kivity @ 2010-05-27 11:34 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Marcelo Tosatti, kvm

On 05/27/2010 01:33 PM, Sheng Yang wrote:
>
>> If we do that then xcr0 needs to be in a separate structure, say
>> kvm_xcr, with a flags field and reserved space of its own for future xcr
>> growth.
>>      
> I meant to put it into sregs, but found it's already full... How about "extended
> sregs"?
>    

Isn't this what xcr means?  xtended control register?

>>> +static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
>>> +					struct kvm_xsave *guest_xsave)
>>> +{
>>> +	struct xsave_struct *xsave =&vcpu->arch.guest_fpu.state->xsave;
>>> +
>>> +	if (!cpu_has_xsave)
>>> +		return;
>>>        
>> Hm, it would be nice to make it backward compatible and return the
>> legacy fpu instead.  I think the layouts are compatible?
>>      
> Sound good.  But seems we still need KVM_CAP_XSAVE to use this interface, and
> other processors would still go FPU interface. Seems didn't improve much?
>    

I would like the new interface to be used in all cases, this way we can 
deprecate the old one in a few years.

>>> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
>>> index 23ea022..5006761 100644
>>> --- a/include/linux/kvm.h
>>> +++ b/include/linux/kvm.h
>>> @@ -524,6 +524,9 @@ struct kvm_enable_cap {
>>>
>>>    #define KVM_CAP_PPC_OSI 52
>>>    #define KVM_CAP_PPC_UNSET_IRQ 53
>>>    #define KVM_CAP_ENABLE_CAP 54
>>>
>>> +#ifdef __KVM_HAVE_XSAVE
>>> +#define KVM_CAP_XSAVE 55
>>> +#endif
>>>        
>> Might make sense to have a separate KVM_CAP_XCR, just for consistency.
>>      
> Maybe EXTENDED_SREGS? But still every future field in the struct need a CAP...
>    

Might do

struct kvm_xcr {
     __u32 xcr;
     __u32 reserved;
     __u64 value;
};

struct kvm_xcrs {
     __u32 nr_xcrs;
     __u32 flags;
     struct kvm_xcr xcrs[KVM_MAX_XCRS];
     ... reserved;
};

which would allow new xcrs to be added easily.

You'll need to change kvm_set_xcr0() to kvm_set_xcr() for this to work 
though.
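
The matching ioctls would then be something like (the ioctl numbers below are
placeholders, not assigned):

#define KVM_GET_XCRS		  _IOR(KVMIO,  0xa6, struct kvm_xcrs)
#define KVM_SET_XCRS		  _IOW(KVMIO,  0xa7, struct kvm_xcrs)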

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH] KVM: x86: XSAVE/XRSTOR live migration support
  2010-05-27 10:02 ` Avi Kivity
  2010-05-27 10:33   ` Sheng Yang
@ 2010-05-31 11:21   ` Sheng Yang
  2010-05-31 11:26     ` Avi Kivity
  1 sibling, 1 reply; 6+ messages in thread
From: Sheng Yang @ 2010-05-31 11:21 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm

On Thursday 27 May 2010 18:02:31 Avi Kivity wrote:
> On 05/27/2010 12:48 PM, Sheng Yang wrote:
> > This patch enable save/restore of xsave state.
> > 
> > Signed-off-by: Sheng Yang<sheng@linux.intel.com>
> > ---
> > 
> >   arch/x86/include/asm/kvm.h |   29 ++++++++++++++++
> >   arch/x86/kvm/x86.c         |   79 ++++++++++++++++++++++++++++++++++++++++++++
> >   include/linux/kvm.h        |    6 +++
> 
> Documentation/kvm/api.txt +++++++++++++
> 
> > +/* for KVM_CAP_XSAVE */
> > +struct kvm_xsave {
> > +	struct {
> > +		__u16 cwd;
> > +		__u16 swd;
> > +		__u16 twd;
> > +		__u16 fop;
> > +		__u64 rip;
> > +		__u64 rdp;
> > +		__u32 mxcsr;
> > +		__u32 mxcsr_mask;
> > +		__u32 st_space[32];
> > +		__u32 xmm_space[64];
> > +		__u32 padding[12];
> > +		__u32 sw_reserved[12];
> > +	} i387;
> > +	struct {
> > +		__u64 xstate_bv;
> > +		__u64 reserved1[2];
> > +		__u64 reserved2[5];
> > +	} xsave_hdr;
> > +	struct {
> > +		__u32 ymmh_space[64];
> > +	} ymmh;
> > +	__u64 xcr0;
> > +	__u32 padding[256];
> > +};
> 
> Need to reserve way more space here for future xsave growth.  I think at
> least 4K.  LRB wa 32x512bit = 1K (though it probably isn't a candidate
> for vmx).  Would be good to get an opinion from your processor architects.
> 
> I don't think we need to detail the contents of the structures since
> they're described by the SDM; so we can have just a large array that is
> 1:1 with the xsave as saved by the fpu.

I think we can reserve one page here. But one big array makes it harder to work 
with QEmu CPUState. Do we need lots of macros in QEmu to parse the array? Also 
it's easier to convert the get/set_fpu interface to get/set_xsave using the current 
structure, I think.
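
For example, parsing the flat image would mean defining offsets like these in
QEmu (values are the SDM's standard-format offsets, shown only as an
illustration):

#define XSAVE_HDR_OFFSET	512	/* header follows the 512-byte legacy area */
#define XSAVE_YMMH_OFFSET	576	/* ymm high halves in the standard format */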

--
regards
Yang, Sheng

> 
> If we do that then xcr0 needs to be in a separate structure, say
> kvm_xcr, with a flags field and reserved space of its own for future xcr
> growth.
> 
> > @@ -2363,6 +2366,59 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct
> > kvm_vcpu *vcpu,
> > 
> >   	return 0;
> >   
> >   }
> > 
> > +static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
> > +					struct kvm_xsave *guest_xsave)
> > +{
> > +	struct xsave_struct *xsave =&vcpu->arch.guest_fpu.state->xsave;
> > +
> > +	if (!cpu_has_xsave)
> > +		return;
> 
> Hm, it would be nice to make it backward compatible and return the
> legacy fpu instead.  I think the layouts are compatible?
> 
> > +
> > +	guest_xsave->i387.cwd = xsave->i387.cwd;
> > +	guest_xsave->i387.swd = xsave->i387.swd;
> > +	guest_xsave->i387.twd = xsave->i387.twd;
> > +	guest_xsave->i387.fop = xsave->i387.fop;
> > +	guest_xsave->i387.rip = xsave->i387.rip;
> > +	guest_xsave->i387.rdp = xsave->i387.rdp;
> > +	memcpy(guest_xsave->i387.st_space, xsave->i387.st_space, 128);
> > +	memcpy(guest_xsave->i387.xmm_space, xsave->i387.xmm_space,
> > +			sizeof guest_xsave->i387.xmm_space);
> > +
> > +	guest_xsave->xsave_hdr.xstate_bv = xsave->xsave_hdr.xstate_bv;
> > +	memcpy(guest_xsave->ymmh.ymmh_space, xsave->ymmh.ymmh_space,
> > +			sizeof xsave->ymmh.ymmh_space);
> 
> And we can do a big memcpy here.  But we need to limit it to what the
> host actually allocated.
> 
> > +
> > +	guest_xsave->xcr0 = vcpu->arch.xcr0;
> > +}
> > +
> > 
> >   long kvm_arch_vcpu_ioctl(struct file *filp,
> >   
> >   			 unsigned int ioctl, unsigned long arg)
> >   
> >   {
> > 
> > @@ -2564,6 +2620,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
> > 
> >   		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu,&dbgregs);
> >   		break;
> >   	
> >   	}
> > 
> > +	case KVM_GET_XSAVE: {
> > +		struct kvm_xsave xsave;
> 
> Too big for stack (especially if we reserve room for growth).
> 
> > diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> > index 23ea022..5006761 100644
> > --- a/include/linux/kvm.h
> > +++ b/include/linux/kvm.h
> > @@ -524,6 +524,9 @@ struct kvm_enable_cap {
> > 
> >   #define KVM_CAP_PPC_OSI 52
> >   #define KVM_CAP_PPC_UNSET_IRQ 53
> >   #define KVM_CAP_ENABLE_CAP 54
> > 
> > +#ifdef __KVM_HAVE_XSAVE
> > +#define KVM_CAP_XSAVE 55
> > +#endif
> 
> Might make sense to have a separate KVM_CAP_XCR, just for consistency.


* Re: [PATCH] KVM: x86: XSAVE/XRSTOR live migration support
  2010-05-31 11:21   ` Sheng Yang
@ 2010-05-31 11:26     ` Avi Kivity
  0 siblings, 0 replies; 6+ messages in thread
From: Avi Kivity @ 2010-05-31 11:26 UTC (permalink / raw)
  To: Sheng Yang; +Cc: Marcelo Tosatti, kvm

On 05/31/2010 02:21 PM, Sheng Yang wrote:
>
>> Need to reserve way more space here for future xsave growth.  I think at
>> least 4K.  LRB wa 32x512bit = 1K (though it probably isn't a candidate
>> for vmx).  Would be good to get an opinion from your processor architects.
>>
>> I don't think we need to detail the contents of the structures since
>> they're described by the SDM; so we can have just a large array that is
>> 1:1 with the xsave as saved by the fpu.
>>      
> I think we can reserve one page here. But one big array make it harder to work
> with QEmu CPUState. Do we need lots of marcos in QEmu to parse the array? Also
> it's easier to transfer get/set_fpu to get/set_xsave interface using current
> structure I think.
>    

We'll need that code somewhere, so we aren't losing anything by putting 
it in userspace (in fact, qemu already has to have most of this code 
since it supports fxsave/fxrstor emulation).

What we gain is that if we make a bug, it is easier to fix it in 
userspace than in the kernel.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.



end of thread

Thread overview: 6+ messages
2010-05-27  9:48 [PATCH] KVM: x86: XSAVE/XRSTOR live migration support Sheng Yang
2010-05-27 10:02 ` Avi Kivity
2010-05-27 10:33   ` Sheng Yang
2010-05-27 11:34     ` Avi Kivity
2010-05-31 11:21   ` Sheng Yang
2010-05-31 11:26     ` Avi Kivity
