From mboxrd@z Thu Jan 1 00:00:00 1970
From: Andrew Cooper
Subject: Re: [PATCH v2] x86: MSR_IA32_BNDCFGS save/restore
Date: Fri, 13 Dec 2013 17:57:36 +0000
Message-ID: <52AB4A90.9070902@citrix.com>
References: <529F47D2020000780010A0E4@nat28.tlf.novell.com> <52AB21A2020000780010D06C@nat28.tlf.novell.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
In-Reply-To: <52AB21A2020000780010D06C@nat28.tlf.novell.com>
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: Jan Beulich, Jinsong Liu
Cc: "xen-devel@lists.xen.org", "keir@xen.org", "Ian.Campbell@citrix.com", "haoxudong.hao@gmail.com"
List-Id: xen-devel@lists.xenproject.org

On 13/12/2013 14:02, Jan Beulich wrote:
> Signed-off-by: Jan Beulich
>
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -580,6 +580,55 @@ static int vmx_load_vmcs_ctxt(struct vcp
>      return 0;
>  }
>  
> +static unsigned int __init vmx_init_msr(void)
> +{
> +    return !!cpu_has_mpx;
> +}
> +
> +static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
> +{
> +    vmx_vmcs_enter(v);
> +
> +    if ( cpu_has_mpx )
> +    {
> +        __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count].val);
> +        if ( ctxt->msr[ctxt->count].val )
> +            ctxt->msr[ctxt->count++].index = MSR_IA32_BNDCFGS;
> +    }
> +
> +    vmx_vmcs_exit(v);
> +}
> +
> +static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
> +{
> +    unsigned int i;
> +    int err = 0;
> +
> +    vmx_vmcs_enter(v);
> +
> +    for ( i = 0; i < ctxt->count; ++i )
> +    {
> +        switch ( ctxt->msr[i].index )
> +        {
> +        case MSR_IA32_BNDCFGS:
> +            if ( cpu_has_mpx )
> +                __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
> +            else
> +                err = -ENXIO;
> +            break;
> +        default:
> +            continue;

This will skip setting _rsvd for an MSR we don't recognise.  Doesn't
this interfere with the error checking in the caller?

~Andrew

> +        }
> +        if ( err )
> +            break;
> +        ctxt->msr[i]._rsvd = 1;
> +    }
> +
> +    vmx_vmcs_exit(v);
> +
> +    return err;
> +}
> +
>  static void vmx_fpu_enter(struct vcpu *v)
>  {
>      vcpu_restore_fpu_lazy(v);
> @@ -1602,6 +1651,9 @@ static struct hvm_function_table __initd
>      .vcpu_destroy         = vmx_vcpu_destroy,
>      .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
>      .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
> +    .init_msr             = vmx_init_msr,
> +    .save_msr             = vmx_save_msr,
> +    .load_msr             = vmx_load_msr,
>      .get_interrupt_shadow = vmx_get_interrupt_shadow,
>      .set_interrupt_shadow = vmx_set_interrupt_shadow,
>      .guest_x86_mode       = vmx_guest_x86_mode,
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -367,6 +367,8 @@ enum vmcs_field {
>      GUEST_PDPTR2_HIGH               = 0x0000280f,
>      GUEST_PDPTR3                    = 0x00002810,
>      GUEST_PDPTR3_HIGH               = 0x00002811,
> +    GUEST_BNDCFGS                   = 0x00002812,
> +    GUEST_BNDCFGS_HIGH              = 0x00002813,
>      HOST_PAT                        = 0x00002c00,
>      HOST_PAT_HIGH                   = 0x00002c01,
>      HOST_EFER                       = 0x00002c02,
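
For context, the interaction I have in mind is something like the sketch
below.  This is only my assumption of how the generic caller consumes
_rsvd; the function name and exact error value are mine, not taken from
this patch or its companion common code.

    /*
     * Hypothetical caller-side check (not the actual hvm.c code):
     * each vendor load_msr() hook is expected to set _rsvd on every
     * entry it consumed, so an entry still left with _rsvd clear is
     * an MSR nobody recognised and the restore should fail rather
     * than be silently dropped.
     */
    static int check_unhandled_msrs(const struct hvm_msr *ctxt, int err)
    {
        unsigned int i;

        if ( err )
            return err;

        for ( i = 0; i < ctxt->count; ++i )
            if ( !ctxt->msr[i]._rsvd )   /* never marked as handled */
                return -EOPNOTSUPP;      /* unrecognised MSR in the stream */

        return 0;
    }

With vmx_load_msr() hitting "continue" in the default case, a check like
that would reject the stream, which may well be the intention, but it is
not obvious from this patch alone whether that is deliberate or an
oversight.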