xen-devel.lists.xenproject.org archive mirror
* [PATCH] nested VMX: don't ignore mapping errors
@ 2013-11-11  8:27 Jan Beulich
  2013-11-11 10:37 ` Andrew Cooper
From: Jan Beulich @ 2013-11-11  8:27 UTC
  To: xen-devel; +Cc: Eddie Dong, Jun Nakajima


Rather than ignoring failures to map the virtual VMCS as well as the MSR or
I/O port bitmaps, convert those into failures of the respective
instructions (thus avoiding dereferences of NULL pointers). Ultimately such
failures should be handled transparently (by using transient mappings
when they actually need to be accessed, just like nested SVM does).

While at it, also eliminate a pointless (and incomplete) result check:
non-global (i.e. non-permanent) map operations can't fail.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
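
A purely illustrative sketch (not part of this patch) of the transient-mapping
approach mentioned above: the guest frame gets mapped around the one access
that needs it and unmapped again right away, so a mapping failure is dealt
with at the point of use instead of being latched at VMPTRLD time. The
function name and the pre-computed bit position are hypothetical; only the
map/unmap helpers and the vVMCS accessor are taken from the existing code.

static bool_t nvmx_msr_bitmap_bit(struct vcpu *v, unsigned int bitpos)
{
    unsigned long gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
    uint8_t *bitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 0);
    /* Fail safe: treat an unmappable bitmap as "intercept the access". */
    bool_t set = 1;

    if ( bitmap )
    {
        set = !!(bitmap[bitpos / 8] & (1U << (bitpos % 8)));
        hvm_unmap_guest_frame(bitmap, 0);
    }

    return set;
}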

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
-static void __map_msr_bitmap(struct vcpu *v)
+static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
     nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->msrbitmap != NULL;
 }
 
-static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
         hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->iobitmap[index] != NULL;
 }
 
-static inline void map_io_bitmap_all(struct vcpu *v)
+static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 {
-   __map_io_bitmap (v, IO_BITMAP_A);
-   __map_io_bitmap (v, IO_BITMAP_B);
+   return _map_io_bitmap(v, IO_BITMAP_A) &&
+          _map_io_bitmap(v, IO_BITMAP_B);
 }
 
 static void nvmx_purge_vvmcs(struct vcpu *v)
@@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        nvcpu->nv_vvmcxaddr = gpa;
-        map_io_bitmap_all (v);
-        __map_msr_bitmap(v);
+        if ( nvcpu->nv_vvmcx )
+            nvcpu->nv_vvmcxaddr = gpa;
+        if ( !nvcpu->nv_vvmcx ||
+             !map_io_bitmap_all(v) ||
+             !_map_msr_bitmap(v) )
+        {
+            vmreturn(regs, VMFAIL_VALID);
+            goto out;
+        }
     }
 
     if ( cpu_has_vmx_vmcs_shadowing )
@@ -1678,9 +1688,8 @@ int nvmx_handle_vmclear(struct cpu_user_
     {
         /* Even if this VMCS isn't the current one, we must clear it. */
         vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
-        if ( vvmcs ) 
-            clear_vvmcs_launched(&nvmx->launched_list,
-                domain_page_map_to_mfn(vvmcs));
+        clear_vvmcs_launched(&nvmx->launched_list,
+                             domain_page_map_to_mfn(vvmcs));
         hvm_unmap_guest_frame(vvmcs, 0);
     }
 
@@ -1724,6 +1733,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long operand; 
     u64 vmcs_encoding;
+    bool_t okay = 1;
 
     if ( decode_vmx_inst(regs, &decode, &operand, 0)
              != X86EMUL_OKAY )
@@ -1732,16 +1742,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
     vmcs_encoding = reg_read(regs, decode.reg2);
     __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
 
-    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_A);
-    else if ( vmcs_encoding == IO_BITMAP_B || 
-              vmcs_encoding == IO_BITMAP_B_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_B);
+    switch ( vmcs_encoding )
+    {
+    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        break;
+    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        break;
+    case MSR_BITMAP: case MSR_BITMAP_HIGH:
+        okay = _map_msr_bitmap(v);
+        break;
+    }
 
-    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
-        __map_msr_bitmap(v);
+    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
 
-    vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }
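
For completeness, the L1 guest's view of the change (again purely
illustrative, the wrapper below is hypothetical): an unmappable virtual VMCS,
I/O bitmap, or MSR bitmap now surfaces as VMfailValid on the respective
instruction instead of apparent success. Per the usual VMX conventions,
VMfailInvalid sets CF, VMfailValid sets ZF, and VMsucceed clears both.

static inline int l1_vmptrld(uint64_t vmcs_pa)
{
    unsigned char cf, zf;

    asm volatile ( "vmptrld %[addr]\n\t"
                   "setc %[cf]\n\t"
                   "setz %[zf]"
                   : [cf] "=q" (cf), [zf] "=q" (zf)
                   : [addr] "m" (vmcs_pa)
                   : "cc", "memory" );

    if ( cf )
        return -1;  /* VMfailInvalid */
    if ( zf )
        return -2;  /* VMfailValid, e.g. the frame could not be mapped */

    return 0;       /* VMsucceed */
}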
 




Thread overview: 7+ messages
2013-11-11  8:27 [PATCH] nested VMX: don't ignore mapping errors Jan Beulich
2013-11-11 10:37 ` Andrew Cooper
2013-11-11 10:45   ` Jan Beulich
2013-11-11 11:06   ` [PATCH v2] " Jan Beulich
2013-11-11 11:13     ` Andrew Cooper
2013-11-15 10:57     ` Ping: " Jan Beulich
2013-11-18  1:57     ` Dong, Eddie
