xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Xiantao Zhang <xiantao.zhang@intel.com>
To: xen-devel@lists.xen.org
Cc: keir@xen.org, jun.nakajima@intel.com, tim@xen.org,
	eddie.dong@intel.com, JBeulich@suse.com,
	Zhang Xiantao <xiantao.zhang@intel.com>
Subject: [PATCH v5 09/10] nVMX: virtualize VPID capability to nested VMM.
Date: Wed,  9 Jan 2013 12:16:23 +0800	[thread overview]
Message-ID: <1357704984-11614-10-git-send-email-xiantao.zhang@intel.com> (raw)
In-Reply-To: <1357704984-11614-1-git-send-email-xiantao.zhang@intel.com>

From: Zhang Xiantao <xiantao.zhang@intel.com>

Virtualize VPID for the nested VMM: use the host's VPID
to emulate the guest's VPID. On each virtual vmentry, if
the guest's VPID has changed, allocate a new host VPID for
the L2 guest.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/x86/hvm/vmx/vmx.c         |   11 ++++++-
 xen/arch/x86/hvm/vmx/vvmx.c        |   51 +++++++++++++++++++++++++++++++++++-
 xen/include/asm-x86/hvm/vmx/vvmx.h |    2 +
 3 files changed, 61 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 60d7955..9e13ec6 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2584,10 +2584,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
             update_guest_eip();
         break;
 
+    case EXIT_REASON_INVVPID:
+        if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
     case EXIT_REASON_GETSEC:
-    case EXIT_REASON_INVVPID:
         /*
          * We should never exit on GETSEC because CR4.SMXE is always 0 when
          * running in guest context, and the CPU checks that before getting
@@ -2705,8 +2709,11 @@ void vmx_vmenter_helper(void)
 
     if ( !cpu_has_vmx_vpid )
         goto out;
+    if ( nestedhvm_vcpu_in_guestmode(curr) )
+        p_asid = &vcpu_nestedhvm(curr).nv_n2asid;
+    else
+        p_asid = &curr->arch.hvm_vcpu.n1asid;
 
-    p_asid = &curr->arch.hvm_vcpu.n1asid;
     old_asid = p_asid->asid;
     need_flush = hvm_asid_handle_vmenter(p_asid);
     new_asid = p_asid->asid;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 04d54b9..2427fd4 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -42,6 +42,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
 	goto out;
     }
     nvmx->ept.enabled = 0;
+    nvmx->guest_vpid = 0;
     nvmx->vmxon_region_pa = 0;
     nvcpu->nv_vvmcx = NULL;
     nvcpu->nv_vvmcxaddr = VMCX_EADDR;
@@ -882,6 +883,16 @@ uint64_t get_shadow_eptp(struct vcpu *v)
     return ept_get_eptp(ept);
 }
 
+static bool_t nvmx_vpid_enabled(struct nestedvcpu *nvcpu)
+{
+    uint32_t second_cntl;
+
+    second_cntl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL);
+    if ( second_cntl & SECONDARY_EXEC_ENABLE_VPID )
+        return 1;
+    return 0;
+}
+
 static void virtual_vmentry(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -930,6 +941,18 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
     if ( nestedhvm_paging_mode_hap(v) )
         __vmwrite(EPT_POINTER, get_shadow_eptp(v));
 
+    /* nested VPID support! */
+    if ( cpu_has_vmx_vpid && nvmx_vpid_enabled(nvcpu) )
+    {
+        struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+        uint32_t new_vpid =  __get_vvmcs(vvmcs, VIRTUAL_PROCESSOR_ID);
+        if ( nvmx->guest_vpid != new_vpid )
+        {
+            hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+            nvmx->guest_vpid = new_vpid;
+        }
+    }
+
 }
 
 static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
@@ -1221,7 +1244,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
     {
         vmreturn (regs, VMFAIL_INVALID);
-        return X86EMUL_OKAY;        
+        return X86EMUL_OKAY;
     }
 
     launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
@@ -1423,6 +1446,32 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
+int nvmx_handle_invvpid(struct cpu_user_regs *regs)
+{
+    struct vmx_inst_decoded decode;
+    unsigned long vpid;
+    int ret;
+
+    if ( (ret = decode_vmx_inst(regs, &decode, &vpid, 0)) != X86EMUL_OKAY )
+        return ret;
+
+
+    switch ( reg_read(regs, decode.reg2) )
+    {
+    /* Just invalidate all tlb entries for all types! */
+    case INVVPID_INDIVIDUAL_ADDR:
+    case INVVPID_SINGLE_CONTEXT:
+    case INVVPID_ALL_CONTEXT:
+        hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(current).nv_n2asid);
+        break;
+    default:
+        vmreturn(regs, VMFAIL_INVALID);
+        return X86EMUL_OKAY;
+    }
+
+    vmreturn(regs, VMSUCCEED);
+    return X86EMUL_OKAY;
+}
 
 #define __emul_value(enable1, default1) \
     ((enable1 | default1) << 32 | (default1))
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index aeac01a..f5fcad0 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -37,6 +37,7 @@ struct nestedvmx {
         uint32_t exit_reason;
         uint32_t exit_qual;
     } ept;
+    uint32_t guest_vpid;
 };
 
 #define vcpu_2_nvmx(v)	(vcpu_nestedhvm(v).u.nvmx)
@@ -192,6 +193,7 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
 int nvmx_handle_vmresume(struct cpu_user_regs *regs);
 int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
 int nvmx_handle_invept(struct cpu_user_regs *regs);
+int nvmx_handle_invvpid(struct cpu_user_regs *regs);
 int nvmx_msr_read_intercept(unsigned int msr,
                                 u64 *msr_content);
 int nvmx_msr_write_intercept(unsigned int msr,
-- 
1.7.1

  parent reply	other threads:[~2013-01-09  4:16 UTC|newest]

Thread overview: 15+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-01-09  4:16 [PATCH v5 00/10] Nested VMX: Add virtual EPT & VPID support to L1 VMM Xiantao Zhang
2013-01-09  4:16 ` [PATCH v5 01/10] nestedhap: Change hostcr3 and p2m->cr3 to meaningful words Xiantao Zhang
2013-01-09  4:16 ` [PATCH v5 02/10] nestedhap: Change nested p2m's walker to vendor-specific Xiantao Zhang
2013-01-09  4:16 ` [PATCH v5 03/10] nested_ept: Implement guest ept's walker Xiantao Zhang
2013-01-09  4:16 ` [PATCH v5 04/10] EPT: Make ept data structure or operations neutral Xiantao Zhang
2013-01-09  4:16 ` [PATCH v5 05/10] nEPT: Try to enable EPT paging for L2 guest Xiantao Zhang
2013-01-09  4:16 ` [PATCH v5 06/10] nEPT: Sync PDPTR fields if L2 guest in PAE paging mode Xiantao Zhang
2013-01-09  4:16 ` [PATCH v5 07/10] nEPT: Use minimal permission for nested p2m Xiantao Zhang
2013-01-09  4:16 ` [PATCH v5 08/10] nEPT: handle invept instruction from L1 VMM Xiantao Zhang
2013-01-09  4:16 ` Xiantao Zhang [this message]
2013-01-09  4:16 ` [PATCH v5 10/10] nEPT: Expose EPT & VPID capablities to " Xiantao Zhang
2013-01-09 11:38 ` [PATCH v5 00/10] Nested VMX: Add virtual EPT & VPID support " Jan Beulich
2013-01-10 12:46   ` Tim Deegan
2013-01-10 13:55     ` Nakajima, Jun
2013-01-11  0:55       ` Dong, Eddie

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1357704984-11614-10-git-send-email-xiantao.zhang@intel.com \
    --to=xiantao.zhang@intel.com \
    --cc=JBeulich@suse.com \
    --cc=eddie.dong@intel.com \
    --cc=jun.nakajima@intel.com \
    --cc=keir@xen.org \
    --cc=tim@xen.org \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).