From: Andrew Cooper
Subject: Re: [PATCH] nested VMX: don't ignore mapping errors
Date: Mon, 11 Nov 2013 10:37:28 +0000
Message-ID: <5280B368.6030404@citrix.com>
In-Reply-To: <5280A3080200007800101A18@nat28.tlf.novell.com>
To: Jan Beulich
Cc: xen-devel <xen-devel@lists.xenproject.org>, Eddie Dong, Jun Nakajima
List-Id: xen-devel@lists.xenproject.org

On 11/11/13 08:27, Jan Beulich wrote:
Rather than ignoring failures to map the virtual VMCS as well as MSR or
I/O port bitmaps, convert those into failures of the respective
instructions (avoiding NULL pointer dereferences). Ultimately such
failures should be handled transparently (by using transient mappings
when they actually need to be accessed, just like nested SVM does).

While at it, also eliminate a pointless (and incomplete) result check:
Non-global (i.e. non-permanent) map operations can't fail.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
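
As an aside, and purely for illustration rather than as a review request: my
reading of the "transient mappings" approach mentioned above is roughly the
sketch below.  The helper name and the treat-map-failure-as-intercept policy
are made up; only hvm_map_guest_frame_ro()/hvm_unmap_guest_frame(),
__get_vvmcs() and the MSR_BITMAP field come from the existing code, and the
real read/write range layout of the bitmap is ignored here.

    /* Hypothetical: test one bit of the virtual MSR bitmap via a transient
     * mapping, instead of keeping nvmx->msrbitmap mapped permanently. */
    static bool_t nvmx_vmsr_bitmap_bit(struct vcpu *v, unsigned int bit)
    {
        unsigned long gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
        uint8_t *bitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 0);
        bool_t rc = 1; /* assume "intercept" if the map fails - a guess */

        if ( bitmap )
        {
            rc = !!(bitmap[bit / 8] & (1u << (bit % 8)));
            hvm_unmap_guest_frame(bitmap, 0);
        }

        return rc;
    }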

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
-static void __map_msr_bitmap(struct vcpu *v)
+static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
     nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->msrbitmap != NULL;
 }
 
-static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
         hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->iobitmap[index] != NULL;
 }
 
-static inline void map_io_bitmap_all(struct vcpu *v)
+static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 {
-   __map_io_bitmap (v, IO_BITMAP_A);
-   __map_io_bitmap (v, IO_BITMAP_B);
+   return _map_io_bitmap(v, IO_BITMAP_A) &&
+          _map_io_bitmap(v, IO_BITMAP_B);
 }
 
 static void nvmx_purge_vvmcs(struct vcpu *v)
@@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        nvcpu->nv_vvmcxaddr = gpa;
-        map_io_bitmap_all (v);
-        __map_msr_bitmap(v);
+        if ( nvcpu->nv_vvmcx )
+            nvcpu->nv_vvmcxaddr = gpa;
+        if ( !nvcpu->nv_vvmcx ||
+             !map_io_bitmap_all(v) ||
+             !_map_msr_bitmap(v) )
+        {
+            vmreturn(regs, VMFAIL_VALID);
+            goto out;
+        }
     }
 
     if ( cpu_has_vmx_vmcs_shadowing )
@@ -1678,9 +1688,8 @@ int nvmx_handle_vmclear(struct cpu_user_
     {
         /* Even if this VMCS isn't the current one, we must clear it. */
         vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
-        if ( vvmcs ) 
-            clear_vvmcs_launched(&nvmx->launched_list,
-                domain_page_map_to_mfn(vvmcs));
+        clear_vvmcs_launched(&nvmx->launched_list,
+                             domain_page_map_to_mfn(vvmcs));

hvm_map_guest_frame_rw() can return NULL in quite a few cases.

Are you sure it is safe to remove the NULL check?  domain_page_map_to_mfn()
doesn't look able to cope with a NULL pointer.
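
Purely to illustrate what I mean (a sketch based on the pre-patch code, with
the unmap also pulled under the guard), keeping the check would look roughly
like:

    vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
    if ( vvmcs )
    {
        /* Only clear the launch state if the map actually succeeded. */
        clear_vvmcs_launched(&nvmx->launched_list,
                             domain_page_map_to_mfn(vvmcs));
        hvm_unmap_guest_frame(vvmcs, 0);
    }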

~Andrew

         hvm_unmap_guest_frame(vvmcs, 0);
     }
 
@@ -1724,6 +1733,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long operand; 
     u64 vmcs_encoding;
+    bool_t okay = 1;
 
     if ( decode_vmx_inst(regs, &decode, &operand, 0)
              != X86EMUL_OKAY )
@@ -1732,16 +1742,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
     vmcs_encoding = reg_read(regs, decode.reg2);
     __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
 
-    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_A);
-    else if ( vmcs_encoding == IO_BITMAP_B || 
-              vmcs_encoding == IO_BITMAP_B_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_B);
+    switch ( vmcs_encoding )
+    {
+    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        break;
+    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        break;
+    case MSR_BITMAP: case MSR_BITMAP_HIGH:
+        okay = _map_msr_bitmap(v);
+        break;
+    }
 
-    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
-        __map_msr_bitmap(v);
+    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
 
-    vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }
 



