From: Dongxiao Xu <dongxiao.xu@intel.com>
To: xen-devel@lists.xensource.com
Subject: [PATCH 3/3] nested vmx: fix CR0/CR4 emulation
Date: Thu, 27 Dec 2012 14:16:13 +0800
Message-ID: <1356588973-13106-4-git-send-email-dongxiao.xu@intel.com>
In-Reply-To: <1356588973-13106-1-git-send-email-dongxiao.xu@intel.com>

When emulating CR0 and CR4 for nested virtualization, set the CR0/CR4
guest/host masks to 0xffffffff in the shadow VMCS, then calculate the
corresponding read shadows for CR0 and CR4 separately. On a VM exit for
a CR0/CR4 access, check whether the L1 VMM owns the affected bits: if
so, inject the VM exit into the L1 VMM; otherwise L0 handles the access
itself and syncs the resulting value into the L1 virtual VMCS.
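
For reference, a minimal standalone sketch of the two computations described
above (hypothetical helper names; the real patch below uses the Xen
__get_vvmcs()/__vmread()/__vmwrite() accessors):

    #include <stdint.h>
    #include <stdbool.h>

    /*
     * Read shadow seen by L2: bits owned by L1 (set in the guest/host mask)
     * come from L1's read shadow; all other bits come from the guest CR
     * value in the virtual VMCS.
     */
    static uint64_t combine_read_shadow(uint64_t guest_cr, uint64_t l1_shadow,
                                        uint64_t gh_mask)
    {
        return (guest_cr & ~gh_mask) | (l1_shadow & gh_mask);
    }

    /*
     * Ownership test on a CR0/CR4 write exit: if any bit that changes
     * relative to the current read shadow is owned by L1, the exit is
     * reflected to L1; otherwise L0 completes the write itself.
     */
    static bool l1_owns_cr_write(uint64_t old_shadow, uint64_t new_val,
                                 uint64_t gh_mask)
    {
        return ((old_shadow ^ new_val) & gh_mask) != 0;
    }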

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
 xen/arch/x86/hvm/vmx/vvmx.c |  121 ++++++++++++++++++++++++++++++++++---------
 1 files changed, 96 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 0bcea19..40edaf1 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -833,6 +833,7 @@ static void load_shadow_guest_state(struct vcpu *v)
     void *vvmcs = nvcpu->nv_vvmcx;
     int i;
     u32 control;
+    u64 cr_gh_mask, cr_read_shadow;
 
     /* vvmcs.gstate to shadow vmcs.gstate */
     for ( i = 0; i < ARRAY_SIZE(vmcs_gstate_field); i++ )
@@ -854,10 +855,20 @@ static void load_shadow_guest_state(struct vcpu *v)
     vvmcs_to_shadow(vvmcs, VM_ENTRY_EXCEPTION_ERROR_CODE);
     vvmcs_to_shadow(vvmcs, VM_ENTRY_INSTRUCTION_LEN);
 
-    vvmcs_to_shadow(vvmcs, CR0_READ_SHADOW);
-    vvmcs_to_shadow(vvmcs, CR4_READ_SHADOW);
-    vvmcs_to_shadow(vvmcs, CR0_GUEST_HOST_MASK);
-    vvmcs_to_shadow(vvmcs, CR4_GUEST_HOST_MASK);
+    /*
+     * When emulating CR0 and CR4 for nested virtualization, set the CR0/CR4
+     * guest/host masks to 0xffffffff in the shadow VMCS (following the host
+     * L1 VMCS), then calculate the corresponding read shadow for CR0 and CR4.
+     */
+    cr_gh_mask = __get_vvmcs(vvmcs, CR0_GUEST_HOST_MASK);
+    cr_read_shadow = (__get_vvmcs(vvmcs, GUEST_CR0) & ~cr_gh_mask) |
+                          (__get_vvmcs(vvmcs, CR0_READ_SHADOW) & cr_gh_mask);
+    __vmwrite(CR0_READ_SHADOW, cr_read_shadow);
+
+    cr_gh_mask = __get_vvmcs(vvmcs, CR4_GUEST_HOST_MASK);
+    cr_read_shadow = (__get_vvmcs(vvmcs, GUEST_CR4) & ~cr_gh_mask) |
+                          (__get_vvmcs(vvmcs, CR4_READ_SHADOW) & cr_gh_mask);
+    __vmwrite(CR4_READ_SHADOW, cr_read_shadow);
 
     /* TODO: PDPTRs for nested ept */
     /* TODO: CR3 target control */
@@ -913,8 +924,6 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
 static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
 {
     int i;
-    unsigned long mask;
-    unsigned long cr;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     void *vvmcs = nvcpu->nv_vvmcx;
 
@@ -925,23 +934,6 @@ static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
     __set_vvmcs(vvmcs, GUEST_RIP, regs->eip);
     __set_vvmcs(vvmcs, GUEST_RSP, regs->esp);
 
-    /* SDM 20.6.6: L2 guest execution may change GUEST CR0/CR4 */
-    mask = __get_vvmcs(vvmcs, CR0_GUEST_HOST_MASK);
-    if ( ~mask )
-    {
-        cr = __get_vvmcs(vvmcs, GUEST_CR0);
-        cr = (cr & mask) | (__vmread(GUEST_CR0) & ~mask);
-        __set_vvmcs(vvmcs, GUEST_CR0, cr);
-    }
-
-    mask = __get_vvmcs(vvmcs, CR4_GUEST_HOST_MASK);
-    if ( ~mask )
-    {
-        cr = __get_vvmcs(vvmcs, GUEST_CR4);
-        cr = (cr & mask) | (__vmread(GUEST_CR4) & ~mask);
-        __set_vvmcs(vvmcs, GUEST_CR4, cr);
-    }
-
     /* CR3 sync if exec doesn't want cr3 load exiting: i.e. nested EPT */
     if ( !(__n2_exec_control(v) & CPU_BASED_CR3_LOAD_EXITING) )
         shadow_to_vvmcs(vvmcs, GUEST_CR3);
@@ -1745,8 +1737,87 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
                 nvcpu->nv_vmexit_pending = 1;
         }
         else  /* CR0, CR4, CLTS, LMSW */
-            nvcpu->nv_vmexit_pending = 1;
-
+        {
+            /*
+             * On a VM exit for a CR0/CR4 access, check whether the L1 VMM
+             * owns the affected bits.
+             * If so, inject the VM exit into the L1 VMM.
+             * Otherwise, L0 handles it and syncs the value to the L1 virtual VMCS.
+             */
+            unsigned long old_val, val, changed_bits;
+            switch ( VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification) )
+            {
+            case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
+            {
+                unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+                unsigned long *reg;
+                if ( (reg = decode_register(gp, guest_cpu_user_regs(), 0)) == NULL )
+                {
+                    gdprintk(XENLOG_ERR, "invalid gpr: %lx\n", gp);
+                    break;
+                }
+                val = *reg;
+                if ( cr == 0 )
+                {
+                    u64 cr0_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR0_GUEST_HOST_MASK);
+                    old_val = __vmread(CR0_READ_SHADOW);
+                    changed_bits = old_val ^ val;
+                    if ( changed_bits & cr0_gh_mask )
+                        nvcpu->nv_vmexit_pending = 1;
+                    else
+                    {
+                        u64 guest_cr0 = __get_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0);
+                        __set_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0, (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask));
+                    }
+                }
+                else if ( cr == 4 )
+                {
+                    u64 cr4_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR4_GUEST_HOST_MASK);
+                    old_val = __vmread(CR4_READ_SHADOW);
+                    changed_bits = old_val ^ val;
+                    if ( changed_bits & cr4_gh_mask )
+                        nvcpu->nv_vmexit_pending = 1;
+                    else
+                    {
+                        u64 guest_cr4 = __get_vvmcs(nvcpu->nv_vvmcx, GUEST_CR4);
+                        __set_vvmcs(nvcpu->nv_vvmcx, GUEST_CR4, (guest_cr4 & cr4_gh_mask) | (val & ~cr4_gh_mask));
+                    }
+                }
+                else
+                    nvcpu->nv_vmexit_pending = 1;
+                break;
+            }
+            case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+            {
+                u64 cr0_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR0_GUEST_HOST_MASK);
+                if ( cr0_gh_mask & X86_CR0_TS )
+                    nvcpu->nv_vmexit_pending = 1;
+                else
+                {
+                    u64 guest_cr0 = __get_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0);
+                    __set_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0, (guest_cr0 & ~X86_CR0_TS));
+                }
+                break;
+            }
+            case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
+            {
+                u64 cr0_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR0_GUEST_HOST_MASK);
+                old_val = __vmread(CR0_READ_SHADOW) & 0xf;
+                val = (exit_qualification >> 16) & 0xf;
+                changed_bits = old_val ^ val;
+                if ( changed_bits & cr0_gh_mask )
+                    nvcpu->nv_vmexit_pending = 1;
+                else
+                {
+                    u64 guest_cr0 = __get_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0);
+                    __set_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0, (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask));
+                }
+                break;
+            }
+            default:
+                break;
+            }
+        }
         break;
     }
     case EXIT_REASON_APIC_ACCESS:
-- 
1.7.1
