xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] nested VMX: don't ignore mapping errors
@ 2013-11-11  8:27 Jan Beulich
  2013-11-11 10:37 ` Andrew Cooper
  0 siblings, 1 reply; 7+ messages in thread
From: Jan Beulich @ 2013-11-11  8:27 UTC (permalink / raw)
  To: xen-devel; +Cc: Eddie Dong, Jun Nakajima

[-- Attachment #1: Type: text/plain, Size: 4572 bytes --]

Rather than ignoring failures to map the virtual VMCS as well as MSR or
I/O port bitmaps, convert those into failures of the respective
instructions (avoiding to dereference NULL pointers). Ultimately such
failures should be handled transparently (by using transient mappings
when they actually need to be accessed, just like nested SVM does).

Once at it, also eliminate a pointless (and incomplete) result check:
Non-global (i.e. non-permanent) map operations can't fail.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
-static void __map_msr_bitmap(struct vcpu *v)
+static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
     nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->msrbitmap != NULL;
 }
 
-static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
         hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->iobitmap[index] != NULL;
 }
 
-static inline void map_io_bitmap_all(struct vcpu *v)
+static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 {
-   __map_io_bitmap (v, IO_BITMAP_A);
-   __map_io_bitmap (v, IO_BITMAP_B);
+   return _map_io_bitmap(v, IO_BITMAP_A) &&
+          _map_io_bitmap(v, IO_BITMAP_B);
 }
 
 static void nvmx_purge_vvmcs(struct vcpu *v)
@@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        nvcpu->nv_vvmcxaddr = gpa;
-        map_io_bitmap_all (v);
-        __map_msr_bitmap(v);
+        if ( nvcpu->nv_vvmcx )
+            nvcpu->nv_vvmcxaddr = gpa;
+        if ( !nvcpu->nv_vvmcx ||
+             !map_io_bitmap_all(v) ||
+             !_map_msr_bitmap(v) )
+        {
+            vmreturn(regs, VMFAIL_VALID);
+            goto out;
+        }
     }
 
     if ( cpu_has_vmx_vmcs_shadowing )
@@ -1678,9 +1688,8 @@ int nvmx_handle_vmclear(struct cpu_user_
     {
         /* Even if this VMCS isn't the current one, we must clear it. */
         vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
-        if ( vvmcs ) 
-            clear_vvmcs_launched(&nvmx->launched_list,
-                domain_page_map_to_mfn(vvmcs));
+        clear_vvmcs_launched(&nvmx->launched_list,
+                             domain_page_map_to_mfn(vvmcs));
         hvm_unmap_guest_frame(vvmcs, 0);
     }
 
@@ -1724,6 +1733,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long operand; 
     u64 vmcs_encoding;
+    bool_t okay = 1;
 
     if ( decode_vmx_inst(regs, &decode, &operand, 0)
              != X86EMUL_OKAY )
@@ -1732,16 +1742,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
     vmcs_encoding = reg_read(regs, decode.reg2);
     __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
 
-    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_A);
-    else if ( vmcs_encoding == IO_BITMAP_B || 
-              vmcs_encoding == IO_BITMAP_B_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_B);
+    switch ( vmcs_encoding )
+    {
+    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        break;
+    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        break;
+    case MSR_BITMAP: case MSR_BITMAP_HIGH:
+        okay = _map_msr_bitmap(v);
+        break;
+    }
 
-    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
-        __map_msr_bitmap(v);
+    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
 
-    vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }
 



[-- Attachment #2: nVMX-map-errors.patch --]
[-- Type: text/plain, Size: 4611 bytes --]

nested VMX: don't ignore mapping errors

Rather than ignoring failures to map the virtual VMCS as well as MSR or
I/O port bitmaps, convert those into failures of the respective
instructions (avoiding to dereference NULL pointers). Ultimately such
failures should be handled transparently (by using transient mappings
when they actually need to be accessed, just like nested SVM does).

Once at it, also eliminate a pointless (and incomplete) result check:
Non-global (i.e. non-permanent) map operations can't fail.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
-static void __map_msr_bitmap(struct vcpu *v)
+static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
     nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->msrbitmap != NULL;
 }
 
-static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
         hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->iobitmap[index] != NULL;
 }
 
-static inline void map_io_bitmap_all(struct vcpu *v)
+static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 {
-   __map_io_bitmap (v, IO_BITMAP_A);
-   __map_io_bitmap (v, IO_BITMAP_B);
+   return _map_io_bitmap(v, IO_BITMAP_A) &&
+          _map_io_bitmap(v, IO_BITMAP_B);
 }
 
 static void nvmx_purge_vvmcs(struct vcpu *v)
@@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        nvcpu->nv_vvmcxaddr = gpa;
-        map_io_bitmap_all (v);
-        __map_msr_bitmap(v);
+        if ( nvcpu->nv_vvmcx )
+            nvcpu->nv_vvmcxaddr = gpa;
+        if ( !nvcpu->nv_vvmcx ||
+             !map_io_bitmap_all(v) ||
+             !_map_msr_bitmap(v) )
+        {
+            vmreturn(regs, VMFAIL_VALID);
+            goto out;
+        }
     }
 
     if ( cpu_has_vmx_vmcs_shadowing )
@@ -1678,9 +1688,8 @@ int nvmx_handle_vmclear(struct cpu_user_
     {
         /* Even if this VMCS isn't the current one, we must clear it. */
         vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
-        if ( vvmcs ) 
-            clear_vvmcs_launched(&nvmx->launched_list,
-                domain_page_map_to_mfn(vvmcs));
+        clear_vvmcs_launched(&nvmx->launched_list,
+                             domain_page_map_to_mfn(vvmcs));
         hvm_unmap_guest_frame(vvmcs, 0);
     }
 
@@ -1724,6 +1733,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long operand; 
     u64 vmcs_encoding;
+    bool_t okay = 1;
 
     if ( decode_vmx_inst(regs, &decode, &operand, 0)
              != X86EMUL_OKAY )
@@ -1732,16 +1742,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
     vmcs_encoding = reg_read(regs, decode.reg2);
     __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
 
-    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_A);
-    else if ( vmcs_encoding == IO_BITMAP_B || 
-              vmcs_encoding == IO_BITMAP_B_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_B);
+    switch ( vmcs_encoding )
+    {
+    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        break;
+    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        break;
+    case MSR_BITMAP: case MSR_BITMAP_HIGH:
+        okay = _map_msr_bitmap(v);
+        break;
+    }
 
-    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
-        __map_msr_bitmap(v);
+    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
 
-    vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }
 

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] nested VMX: don't ignore mapping errors
  2013-11-11  8:27 [PATCH] nested VMX: don't ignore mapping errors Jan Beulich
@ 2013-11-11 10:37 ` Andrew Cooper
  2013-11-11 10:45   ` Jan Beulich
  2013-11-11 11:06   ` [PATCH v2] " Jan Beulich
  0 siblings, 2 replies; 7+ messages in thread
From: Andrew Cooper @ 2013-11-11 10:37 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Eddie Dong, Jun Nakajima


[-- Attachment #1.1: Type: text/plain, Size: 5052 bytes --]

On 11/11/13 08:27, Jan Beulich wrote:
> Rather than ignoring failures to map the virtual VMCS as well as MSR or
> I/O port bitmaps, convert those into failures of the respective
> instructions (avoiding to dereference NULL pointers). Ultimately such
> failures should be handled transparently (by using transient mappings
> when they actually need to be accessed, just like nested SVM does).
>
> Once at it, also eliminate a pointless (and incomplete) result check:
> Non-global (i.e. non-permanent) map operations can't fail.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
>          __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
>  }
>  
> -static void __map_msr_bitmap(struct vcpu *v)
> +static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
>  {
>      struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
>      unsigned long gpa;
> @@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
>          hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
>      gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
>      nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
> +
> +    return nvmx->msrbitmap != NULL;
>  }
>  
> -static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
> +static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
>  {
>      struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
>      unsigned long gpa;
> @@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
>          hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
>      gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
>      nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
> +
> +    return nvmx->iobitmap[index] != NULL;
>  }
>  
> -static inline void map_io_bitmap_all(struct vcpu *v)
> +static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
>  {
> -   __map_io_bitmap (v, IO_BITMAP_A);
> -   __map_io_bitmap (v, IO_BITMAP_B);
> +   return _map_io_bitmap(v, IO_BITMAP_A) &&
> +          _map_io_bitmap(v, IO_BITMAP_B);
>  }
>  
>  static void nvmx_purge_vvmcs(struct vcpu *v)
> @@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
>      if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
>      {
>          nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
> -        nvcpu->nv_vvmcxaddr = gpa;
> -        map_io_bitmap_all (v);
> -        __map_msr_bitmap(v);
> +        if ( nvcpu->nv_vvmcx )
> +            nvcpu->nv_vvmcxaddr = gpa;
> +        if ( !nvcpu->nv_vvmcx ||
> +             !map_io_bitmap_all(v) ||
> +             !_map_msr_bitmap(v) )
> +        {
> +            vmreturn(regs, VMFAIL_VALID);
> +            goto out;
> +        }
>      }
>  
>      if ( cpu_has_vmx_vmcs_shadowing )
> @@ -1678,9 +1688,8 @@ int nvmx_handle_vmclear(struct cpu_user_
>      {
>          /* Even if this VMCS isn't the current one, we must clear it. */
>          vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
> -        if ( vvmcs ) 
> -            clear_vvmcs_launched(&nvmx->launched_list,
> -                domain_page_map_to_mfn(vvmcs));
> +        clear_vvmcs_launched(&nvmx->launched_list,
> +                             domain_page_map_to_mfn(vvmcs));

hvm_map_guest_frame_rw() is able to return NULL in quite a few cases.

Are you sure it is safe to remove the NULL check?
domain_map_page_to_mfn() doesn't look able to cope.

~Andrew

>          hvm_unmap_guest_frame(vvmcs, 0);
>      }
>  
> @@ -1724,6 +1733,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
>      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
>      unsigned long operand; 
>      u64 vmcs_encoding;
> +    bool_t okay = 1;
>  
>      if ( decode_vmx_inst(regs, &decode, &operand, 0)
>               != X86EMUL_OKAY )
> @@ -1732,16 +1742,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
>      vmcs_encoding = reg_read(regs, decode.reg2);
>      __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
>  
> -    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
> -        __map_io_bitmap (v, IO_BITMAP_A);
> -    else if ( vmcs_encoding == IO_BITMAP_B || 
> -              vmcs_encoding == IO_BITMAP_B_HIGH )
> -        __map_io_bitmap (v, IO_BITMAP_B);
> +    switch ( vmcs_encoding )
> +    {
> +    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
> +        okay = _map_io_bitmap(v, IO_BITMAP_A);
> +        break;
> +    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
> +        okay = _map_io_bitmap(v, IO_BITMAP_B);
> +        break;
> +    case MSR_BITMAP: case MSR_BITMAP_HIGH:
> +        okay = _map_msr_bitmap(v);
> +        break;
> +    }
>  
> -    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
> -        __map_msr_bitmap(v);
> +    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
>  
> -    vmreturn(regs, VMSUCCEED);
>      return X86EMUL_OKAY;
>  }
>  
>
>
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel


[-- Attachment #1.2: Type: text/html, Size: 5799 bytes --]

[-- Attachment #2: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] nested VMX: don't ignore mapping errors
  2013-11-11 10:37 ` Andrew Cooper
@ 2013-11-11 10:45   ` Jan Beulich
  2013-11-11 11:06   ` [PATCH v2] " Jan Beulich
  1 sibling, 0 replies; 7+ messages in thread
From: Jan Beulich @ 2013-11-11 10:45 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Eddie Dong, Jun Nakajima

>>> On 11.11.13 at 11:37, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> On 11/11/13 08:27, Jan Beulich wrote:
>> @@ -1678,9 +1688,8 @@ int nvmx_handle_vmclear(struct cpu_user_
>>      {
>>          /* Even if this VMCS isn't the current one, we must clear it. */
>>          vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
>> -        if ( vvmcs ) 
>> -            clear_vvmcs_launched(&nvmx->launched_list,
>> -                domain_page_map_to_mfn(vvmcs));
>> +        clear_vvmcs_launched(&nvmx->launched_list,
>> +                             domain_page_map_to_mfn(vvmcs));
> 
> hvm_map_guest_frame_rw() is able to return NULL in quite a few cases.
> 
> Are you sure it is safe to remove the NULL check?

Indeed, now that I look at it - I recalled (and didn't further
check) that it would return NULL only as the result of
__map_domain_page_global() doing so (and
__map_domain_page() can't). I'll drop the change above
(and the similar case in the corresponding nested SVM
patch), but still that won't yield correct code...

Jan

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v2] nested VMX: don't ignore mapping errors
  2013-11-11 10:37 ` Andrew Cooper
  2013-11-11 10:45   ` Jan Beulich
@ 2013-11-11 11:06   ` Jan Beulich
  2013-11-11 11:13     ` Andrew Cooper
                       ` (2 more replies)
  1 sibling, 3 replies; 7+ messages in thread
From: Jan Beulich @ 2013-11-11 11:06 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Eddie Dong, Jun Nakajima

[-- Attachment #1: Type: text/plain, Size: 4374 bytes --]

Rather than ignoring failures to map the virtual VMCS as well as MSR or
I/O port bitmaps, convert those into failures of the respective
instructions (avoiding to dereference NULL pointers). Ultimately such
failures should be handled transparently (by using transient mappings
when they actually need to be accessed, just like nested SVM does).

Once at it, also eliminate a pointless (and incomplete) result check:
Non-global (i.e. non-permanent) map operations can't fail.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Remove the change to nvmx_handle_vmclear() -
    hvm_map_guest_frame_*() can return NULL for reasons other than
    map_domain_page_global() doing so (pointed out by Andrew Cooper).
    That leaves the function broken though (it ought to handle the
    shared/paged out cases in an appropriate way).

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
-static void __map_msr_bitmap(struct vcpu *v)
+static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
     nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->msrbitmap != NULL;
 }
 
-static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
         hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->iobitmap[index] != NULL;
 }
 
-static inline void map_io_bitmap_all(struct vcpu *v)
+static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 {
-   __map_io_bitmap (v, IO_BITMAP_A);
-   __map_io_bitmap (v, IO_BITMAP_B);
+   return _map_io_bitmap(v, IO_BITMAP_A) &&
+          _map_io_bitmap(v, IO_BITMAP_B);
 }
 
 static void nvmx_purge_vvmcs(struct vcpu *v)
@@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        nvcpu->nv_vvmcxaddr = gpa;
-        map_io_bitmap_all (v);
-        __map_msr_bitmap(v);
+        if ( nvcpu->nv_vvmcx )
+            nvcpu->nv_vvmcxaddr = gpa;
+        if ( !nvcpu->nv_vvmcx ||
+             !map_io_bitmap_all(v) ||
+             !_map_msr_bitmap(v) )
+        {
+            vmreturn(regs, VMFAIL_VALID);
+            goto out;
+        }
     }
 
     if ( cpu_has_vmx_vmcs_shadowing )
@@ -1724,6 +1734,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long operand; 
     u64 vmcs_encoding;
+    bool_t okay = 1;
 
     if ( decode_vmx_inst(regs, &decode, &operand, 0)
              != X86EMUL_OKAY )
@@ -1732,16 +1743,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
     vmcs_encoding = reg_read(regs, decode.reg2);
     __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
 
-    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_A);
-    else if ( vmcs_encoding == IO_BITMAP_B || 
-              vmcs_encoding == IO_BITMAP_B_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_B);
+    switch ( vmcs_encoding )
+    {
+    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        break;
+    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        break;
+    case MSR_BITMAP: case MSR_BITMAP_HIGH:
+        okay = _map_msr_bitmap(v);
+        break;
+    }
 
-    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
-        __map_msr_bitmap(v);
+    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
 
-    vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }
 



[-- Attachment #2: nVMX-map-errors.patch --]
[-- Type: text/plain, Size: 4413 bytes --]

nested VMX: don't ignore mapping errors

Rather than ignoring failures to map the virtual VMCS as well as MSR or
I/O port bitmaps, convert those into failures of the respective
instructions (avoiding to dereference NULL pointers). Ultimately such
failures should be handled transparently (by using transient mappings
when they actually need to be accessed, just like nested SVM does).

Once at it, also eliminate a pointless (and incomplete) result check:
Non-global (i.e. non-permanent) map operations can't fail.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Remove the change to nvmx_handle_vmclear() -
    hvm_map_guest_frame_*() can return NULL for reasons other than
    map_domain_page_global() doing so (pointed out by Andrew Cooper).
    That leaves the function broken though (it ought to handle the
    shared/paged out cases in an appropriate way).

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
-static void __map_msr_bitmap(struct vcpu *v)
+static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
     nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->msrbitmap != NULL;
 }
 
-static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
         hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->iobitmap[index] != NULL;
 }
 
-static inline void map_io_bitmap_all(struct vcpu *v)
+static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 {
-   __map_io_bitmap (v, IO_BITMAP_A);
-   __map_io_bitmap (v, IO_BITMAP_B);
+   return _map_io_bitmap(v, IO_BITMAP_A) &&
+          _map_io_bitmap(v, IO_BITMAP_B);
 }
 
 static void nvmx_purge_vvmcs(struct vcpu *v)
@@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        nvcpu->nv_vvmcxaddr = gpa;
-        map_io_bitmap_all (v);
-        __map_msr_bitmap(v);
+        if ( nvcpu->nv_vvmcx )
+            nvcpu->nv_vvmcxaddr = gpa;
+        if ( !nvcpu->nv_vvmcx ||
+             !map_io_bitmap_all(v) ||
+             !_map_msr_bitmap(v) )
+        {
+            vmreturn(regs, VMFAIL_VALID);
+            goto out;
+        }
     }
 
     if ( cpu_has_vmx_vmcs_shadowing )
@@ -1724,6 +1734,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long operand; 
     u64 vmcs_encoding;
+    bool_t okay = 1;
 
     if ( decode_vmx_inst(regs, &decode, &operand, 0)
              != X86EMUL_OKAY )
@@ -1732,16 +1743,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
     vmcs_encoding = reg_read(regs, decode.reg2);
     __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
 
-    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_A);
-    else if ( vmcs_encoding == IO_BITMAP_B || 
-              vmcs_encoding == IO_BITMAP_B_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_B);
+    switch ( vmcs_encoding )
+    {
+    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        break;
+    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        break;
+    case MSR_BITMAP: case MSR_BITMAP_HIGH:
+        okay = _map_msr_bitmap(v);
+        break;
+    }
 
-    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
-        __map_msr_bitmap(v);
+    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
 
-    vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }
 

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] nested VMX: don't ignore mapping errors
  2013-11-11 11:06   ` [PATCH v2] " Jan Beulich
@ 2013-11-11 11:13     ` Andrew Cooper
  2013-11-15 10:57     ` Ping: " Jan Beulich
  2013-11-18  1:57     ` Dong, Eddie
  2 siblings, 0 replies; 7+ messages in thread
From: Andrew Cooper @ 2013-11-11 11:13 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Eddie Dong, Jun Nakajima

On 11/11/13 11:06, Jan Beulich wrote:
> Rather than ignoring failures to map the virtual VMCS as well as MSR or
> I/O port bitmaps, convert those into failures of the respective
> instructions (avoiding to dereference NULL pointers). Ultimately such
> failures should be handled transparently (by using transient mappings
> when they actually need to be accessed, just like nested SVM does).
>
> Once at it, also eliminate a pointless (and incomplete) result check:
> Non-global (i.e. non-permanent) map operations can't fail.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Looks better.  It might be worth leaving a comment around
_map_{io,msr}_bitmap() indicating that support for shared/paged pages is
currently TODO.

I also think the commit message should indicate that shared/paged pages
won't work.  This fundamentally means that nested virt is currently
mutually exclusive with sharing/paging.

Either way, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

> ---
> v2: Remove the change to nvmx_handle_vmclear() -
>     hvm_map_guest_frame_*() can return NULL for reasons other than
>     map_domain_page_global() doing so (pointed out by Andrew Cooper).
>     That leaves the function broken though (it ought to handle the
>     shared/paged out cases in an appropriate way).
>
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
>          __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
>  }
>  
> -static void __map_msr_bitmap(struct vcpu *v)
> +static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
>  {
>      struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
>      unsigned long gpa;
> @@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
>          hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
>      gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
>      nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
> +
> +    return nvmx->msrbitmap != NULL;
>  }
>  
> -static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
> +static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
>  {
>      struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
>      unsigned long gpa;
> @@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
>          hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
>      gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
>      nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
> +
> +    return nvmx->iobitmap[index] != NULL;
>  }
>  
> -static inline void map_io_bitmap_all(struct vcpu *v)
> +static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
>  {
> -   __map_io_bitmap (v, IO_BITMAP_A);
> -   __map_io_bitmap (v, IO_BITMAP_B);
> +   return _map_io_bitmap(v, IO_BITMAP_A) &&
> +          _map_io_bitmap(v, IO_BITMAP_B);
>  }
>  
>  static void nvmx_purge_vvmcs(struct vcpu *v)
> @@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
>      if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
>      {
>          nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
> -        nvcpu->nv_vvmcxaddr = gpa;
> -        map_io_bitmap_all (v);
> -        __map_msr_bitmap(v);
> +        if ( nvcpu->nv_vvmcx )
> +            nvcpu->nv_vvmcxaddr = gpa;
> +        if ( !nvcpu->nv_vvmcx ||
> +             !map_io_bitmap_all(v) ||
> +             !_map_msr_bitmap(v) )
> +        {
> +            vmreturn(regs, VMFAIL_VALID);
> +            goto out;
> +        }
>      }
>  
>      if ( cpu_has_vmx_vmcs_shadowing )
> @@ -1724,6 +1734,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
>      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
>      unsigned long operand; 
>      u64 vmcs_encoding;
> +    bool_t okay = 1;
>  
>      if ( decode_vmx_inst(regs, &decode, &operand, 0)
>               != X86EMUL_OKAY )
> @@ -1732,16 +1743,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
>      vmcs_encoding = reg_read(regs, decode.reg2);
>      __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
>  
> -    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
> -        __map_io_bitmap (v, IO_BITMAP_A);
> -    else if ( vmcs_encoding == IO_BITMAP_B || 
> -              vmcs_encoding == IO_BITMAP_B_HIGH )
> -        __map_io_bitmap (v, IO_BITMAP_B);
> +    switch ( vmcs_encoding )
> +    {
> +    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
> +        okay = _map_io_bitmap(v, IO_BITMAP_A);
> +        break;
> +    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
> +        okay = _map_io_bitmap(v, IO_BITMAP_B);
> +        break;
> +    case MSR_BITMAP: case MSR_BITMAP_HIGH:
> +        okay = _map_msr_bitmap(v);
> +        break;
> +    }
>  
> -    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
> -        __map_msr_bitmap(v);
> +    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
>  
> -    vmreturn(regs, VMSUCCEED);
>      return X86EMUL_OKAY;
>  }
>  
>
>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Ping: [PATCH v2] nested VMX: don't ignore mapping errors
  2013-11-11 11:06   ` [PATCH v2] " Jan Beulich
  2013-11-11 11:13     ` Andrew Cooper
@ 2013-11-15 10:57     ` Jan Beulich
  2013-11-18  1:57     ` Dong, Eddie
  2 siblings, 0 replies; 7+ messages in thread
From: Jan Beulich @ 2013-11-15 10:57 UTC (permalink / raw)
  To: Donald D Dugger, Eddie Dong, Jun Nakajima; +Cc: Andrew Cooper, xen-devel

Still waiting for a response from the VMX maintainers...

Jan

>>> On 11.11.13 at 12:06, "Jan Beulich" <JBeulich@suse.com> wrote:
> Rather than ignoring failures to map the virtual VMCS as well as MSR or
> I/O port bitmaps, convert those into failures of the respective
> instructions (avoiding to dereference NULL pointers). Ultimately such
> failures should be handled transparently (by using transient mappings
> when they actually need to be accessed, just like nested SVM does).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Remove the change to nvmx_handle_vmclear() -
>     hvm_map_guest_frame_*() can return NULL for reasons other than
>     map_domain_page_global() doing so (pointed out by Andrew Cooper).
>     That leaves the function broken though (it ought to handle the
>     shared/paged out cases in an appropriate way).
> 
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
>          __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
>  }
>  
> -static void __map_msr_bitmap(struct vcpu *v)
> +static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
>  {
>      struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
>      unsigned long gpa;
> @@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
>          hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
>      gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
>      nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
> +
> +    return nvmx->msrbitmap != NULL;
>  }
>  
> -static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
> +static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
>  {
>      struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
>      unsigned long gpa;
> @@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
>          hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
>      gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
>      nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
> +
> +    return nvmx->iobitmap[index] != NULL;
>  }
>  
> -static inline void map_io_bitmap_all(struct vcpu *v)
> +static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
>  {
> -   __map_io_bitmap (v, IO_BITMAP_A);
> -   __map_io_bitmap (v, IO_BITMAP_B);
> +   return _map_io_bitmap(v, IO_BITMAP_A) &&
> +          _map_io_bitmap(v, IO_BITMAP_B);
>  }
>  
>  static void nvmx_purge_vvmcs(struct vcpu *v)
> @@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
>      if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
>      {
>          nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
> -        nvcpu->nv_vvmcxaddr = gpa;
> -        map_io_bitmap_all (v);
> -        __map_msr_bitmap(v);
> +        if ( nvcpu->nv_vvmcx )
> +            nvcpu->nv_vvmcxaddr = gpa;
> +        if ( !nvcpu->nv_vvmcx ||
> +             !map_io_bitmap_all(v) ||
> +             !_map_msr_bitmap(v) )
> +        {
> +            vmreturn(regs, VMFAIL_VALID);
> +            goto out;
> +        }
>      }
>  
>      if ( cpu_has_vmx_vmcs_shadowing )
> @@ -1724,6 +1734,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
>      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
>      unsigned long operand; 
>      u64 vmcs_encoding;
> +    bool_t okay = 1;
>  
>      if ( decode_vmx_inst(regs, &decode, &operand, 0)
>               != X86EMUL_OKAY )
> @@ -1732,16 +1743,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
>      vmcs_encoding = reg_read(regs, decode.reg2);
>      __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
>  
> -    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
> -        __map_io_bitmap (v, IO_BITMAP_A);
> -    else if ( vmcs_encoding == IO_BITMAP_B || 
> -              vmcs_encoding == IO_BITMAP_B_HIGH )
> -        __map_io_bitmap (v, IO_BITMAP_B);
> +    switch ( vmcs_encoding )
> +    {
> +    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
> +        okay = _map_io_bitmap(v, IO_BITMAP_A);
> +        break;
> +    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
> +        okay = _map_io_bitmap(v, IO_BITMAP_B);
> +        break;
> +    case MSR_BITMAP: case MSR_BITMAP_HIGH:
> +        okay = _map_msr_bitmap(v);
> +        break;
> +    }
>  
> -    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
> -        __map_msr_bitmap(v);
> +    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
>  
> -    vmreturn(regs, VMSUCCEED);
>      return X86EMUL_OKAY;
>  }
>  

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] nested VMX: don't ignore mapping errors
  2013-11-11 11:06   ` [PATCH v2] " Jan Beulich
  2013-11-11 11:13     ` Andrew Cooper
  2013-11-15 10:57     ` Ping: " Jan Beulich
@ 2013-11-18  1:57     ` Dong, Eddie
  2 siblings, 0 replies; 7+ messages in thread
From: Dong, Eddie @ 2013-11-18  1:57 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Andrew Cooper, Dong, Eddie, Nakajima, Jun

Acked-by: Eddie Dong <eddie.dong@intel.com>

-----Original Message-----
From: Jan Beulich [mailto:JBeulich@suse.com] 
Sent: Monday, November 11, 2013 7:07 PM
To: xen-devel
Cc: Andrew Cooper; Dong, Eddie; Nakajima, Jun
Subject: [PATCH v2] nested VMX: don't ignore mapping errors

Rather than ignoring failures to map the virtual VMCS as well as MSR or I/O port bitmaps, convert those into failures of the respective instructions (avoiding to dereference NULL pointers). Ultimately such failures should be handled transparently (by using transient mappings when they actually need to be accessed, just like nested SVM does).

Once at it, also eliminate a pointless (and incomplete) result check:
Non-global (i.e. non-permanent) map operations can't fail.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Remove the change to nvmx_handle_vmclear() -
    hvm_map_guest_frame_*() can return NULL for reasons other than
    map_domain_page_global() doing so (pointed out by Andrew Cooper).
    That leaves the function broken though (it ought to handle the
    shared/paged out cases in an appropriate way).

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -747,7 +747,7 @@ static void __clear_current_vvmcs(struct
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
-static void __map_msr_bitmap(struct vcpu *v)
+static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -756,9 +756,11 @@ static void __map_msr_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
     nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->msrbitmap != NULL;
 }
 
-static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -769,12 +771,14 @@ static void __map_io_bitmap(struct vcpu 
         hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->iobitmap[index] != NULL;
 }
 
-static inline void map_io_bitmap_all(struct vcpu *v)
+static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 {
-   __map_io_bitmap (v, IO_BITMAP_A);
-   __map_io_bitmap (v, IO_BITMAP_B);
+   return _map_io_bitmap(v, IO_BITMAP_A) &&
+          _map_io_bitmap(v, IO_BITMAP_B);
 }
 
 static void nvmx_purge_vvmcs(struct vcpu *v) @@ -1610,9 +1614,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        nvcpu->nv_vvmcxaddr = gpa;
-        map_io_bitmap_all (v);
-        __map_msr_bitmap(v);
+        if ( nvcpu->nv_vvmcx )
+            nvcpu->nv_vvmcxaddr = gpa;
+        if ( !nvcpu->nv_vvmcx ||
+             !map_io_bitmap_all(v) ||
+             !_map_msr_bitmap(v) )
+        {
+            vmreturn(regs, VMFAIL_VALID);
+            goto out;
+        }
     }
 
     if ( cpu_has_vmx_vmcs_shadowing )
@@ -1724,6 +1734,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long operand; 
     u64 vmcs_encoding;
+    bool_t okay = 1;
 
     if ( decode_vmx_inst(regs, &decode, &operand, 0)
              != X86EMUL_OKAY )
@@ -1732,16 +1743,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
     vmcs_encoding = reg_read(regs, decode.reg2);
     __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
 
-    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_A);
-    else if ( vmcs_encoding == IO_BITMAP_B || 
-              vmcs_encoding == IO_BITMAP_B_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_B);
+    switch ( vmcs_encoding )
+    {
+    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        break;
+    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        break;
+    case MSR_BITMAP: case MSR_BITMAP_HIGH:
+        okay = _map_msr_bitmap(v);
+        break;
+    }
 
-    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
-        __map_msr_bitmap(v);
+    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
 
-    vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2013-11-18  1:57 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-11-11  8:27 [PATCH] nested VMX: don't ignore mapping errors Jan Beulich
2013-11-11 10:37 ` Andrew Cooper
2013-11-11 10:45   ` Jan Beulich
2013-11-11 11:06   ` [PATCH v2] " Jan Beulich
2013-11-11 11:13     ` Andrew Cooper
2013-11-15 10:57     ` Ping: " Jan Beulich
2013-11-18  1:57     ` Dong, Eddie

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).