From: Xiantao Zhang <xiantao.zhang@intel.com>
To: xen-devel@lists.xen.org
Cc: keir@xen.org, jun.nakajima@intel.com, tim@xen.org,
eddie.dong@intel.com, JBeulich@suse.com,
Zhang Xiantao <xiantao.zhang@intel.com>
Subject: [PATCH v3 09/10] nVMX: virtualize VPID capability to nested VMM.
Date: Thu, 20 Dec 2012 23:43:50 +0800 [thread overview]
Message-ID: <1356018231-26440-10-git-send-email-xiantao.zhang@intel.com> (raw)
In-Reply-To: <1356018231-26440-1-git-send-email-xiantao.zhang@intel.com>
From: Zhang Xiantao <xiantao.zhang@intel.com>
Virtualize VPID for the nested VMM, using the host's VPID
to emulate the guest's VPID. For each virtual vmentry, if
the guest's VPID has changed, allocate a new host VPID for
the L2 guest.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Tim Deegan <tim@xen.org>
---
xen/arch/x86/hvm/vmx/vmx.c | 11 ++++++-
xen/arch/x86/hvm/vmx/vvmx.c | 56 ++++++++++++++++++++++++++++++++++-
xen/include/asm-x86/hvm/vmx/vvmx.h | 2 +
3 files changed, 65 insertions(+), 4 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 94cac17..0e479f8 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2578,10 +2578,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
update_guest_eip();
break;
+ case EXIT_REASON_INVVPID:
+ if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY )
+ update_guest_eip();
+ break;
+
case EXIT_REASON_MWAIT_INSTRUCTION:
case EXIT_REASON_MONITOR_INSTRUCTION:
case EXIT_REASON_GETSEC:
- case EXIT_REASON_INVVPID:
/*
* We should never exit on GETSEC because CR4.SMXE is always 0 when
* running in guest context, and the CPU checks that before getting
@@ -2699,8 +2703,11 @@ void vmx_vmenter_helper(void)
if ( !cpu_has_vmx_vpid )
goto out;
+ if ( nestedhvm_vcpu_in_guestmode(curr) )
+ p_asid = &vcpu_nestedhvm(curr).nv_n2asid;
+ else
+ p_asid = &curr->arch.hvm_vcpu.n1asid;
- p_asid = &curr->arch.hvm_vcpu.n1asid;
old_asid = p_asid->asid;
need_flush = hvm_asid_handle_vmenter(p_asid);
new_asid = p_asid->asid;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 8346387..0e1a5ee 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -42,6 +42,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
goto out;
}
nvmx->ept.enabled = 0;
+ nvmx->guest_vpid = 0;
nvmx->vmxon_region_pa = 0;
nvcpu->nv_vvmcx = NULL;
nvcpu->nv_vvmcxaddr = VMCX_EADDR;
@@ -848,6 +849,16 @@ static uint64_t get_shadow_eptp(struct vcpu *v)
return ept_get_eptp(ept);
}
+static bool_t nvmx_vpid_enabled(struct nestedvcpu *nvcpu)
+{
+ uint32_t second_cntl;
+
+ second_cntl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL);
+ if ( second_cntl & SECONDARY_EXEC_ENABLE_VPID )
+ return 1;
+ return 0;
+}
+
static void virtual_vmentry(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
@@ -896,6 +907,18 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
if ( nestedhvm_paging_mode_hap(v) )
__vmwrite(EPT_POINTER, get_shadow_eptp(v));
+ /* nested VPID support! */
+ if ( cpu_has_vmx_vpid && nvmx_vpid_enabled(nvcpu) )
+ {
+ struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+ uint32_t new_vpid = __get_vvmcs(vvmcs, VIRTUAL_PROCESSOR_ID);
+ if ( nvmx->guest_vpid != new_vpid )
+ {
+ hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+ nvmx->guest_vpid = new_vpid;
+ }
+ }
+
}
static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
@@ -1187,7 +1210,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
{
vmreturn (regs, VMFAIL_INVALID);
- return X86EMUL_OKAY;
+ return X86EMUL_OKAY;
}
launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
@@ -1370,7 +1393,6 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
return X86EMUL_EXCEPTION;
inv_type = reg_read(regs, decode.reg2);
- gdprintk(XENLOG_DEBUG,"inv_type:%ld, eptp:%lx\n", inv_type, eptp);
switch ( inv_type ) {
case INVEPT_SINGLE_CONTEXT:
@@ -1402,6 +1424,36 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
(((__emul_value(enable1, default1) & host_value) & (~0ul << 32)) | \
((uint32_t)(__emul_value(enable1, default1) | host_value)))
+int nvmx_handle_invvpid(struct cpu_user_regs *regs)
+{
+ struct vmx_inst_decoded decode;
+ unsigned long vpid;
+ u64 inv_type;
+
+ if ( !cpu_has_vmx_vpid )
+ return X86EMUL_EXCEPTION;
+
+ if ( decode_vmx_inst(regs, &decode, &vpid, 0) != X86EMUL_OKAY )
+ return X86EMUL_EXCEPTION;
+
+ inv_type = reg_read(regs, decode.reg2);
+ gdprintk(XENLOG_DEBUG,"inv_type:%ld, vpid:%lx\n", inv_type, vpid);
+
+ switch ( inv_type ) {
+ /* Just invalidate all tlb entries for all types! */
+ case INVVPID_INDIVIDUAL_ADDR:
+ case INVVPID_SINGLE_CONTEXT:
+ case INVVPID_ALL_CONTEXT:
+ hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(current).nv_n2asid);
+ break;
+ default:
+ return X86EMUL_EXCEPTION;
+ }
+ vmreturn(regs, VMSUCCEED);
+
+ return X86EMUL_OKAY;
+}
+
/*
* Capability reporting
*/
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 03ab987..af702c4 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -37,6 +37,7 @@ struct nestedvmx {
uint32_t exit_reason;
uint32_t exit_qual;
} ept;
+ uint32_t guest_vpid;
};
#define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
@@ -192,6 +193,7 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
int nvmx_handle_vmresume(struct cpu_user_regs *regs);
int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
int nvmx_handle_invept(struct cpu_user_regs *regs);
+int nvmx_handle_invvpid(struct cpu_user_regs *regs);
int nvmx_msr_read_intercept(unsigned int msr,
u64 *msr_content);
int nvmx_msr_write_intercept(unsigned int msr,
--
1.7.1
next prev parent reply other threads:[~2012-12-20 15:43 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-12-20 15:43 [PATCH v3 00/10] Nested VMX: Add virtual EPT & VPID support to L1 VMM Xiantao Zhang
2012-12-20 13:55 ` Tim Deegan
2012-12-21 1:27 ` Zhang, Xiantao
2012-12-20 15:43 ` [PATCH v3 01/10] nestedhap: Change hostcr3 and p2m->cr3 to meaningful words Xiantao Zhang
2012-12-20 12:11 ` Tim Deegan
2012-12-20 15:43 ` [PATCH v3 02/10] nestedhap: Change nested p2m's walker to vendor-specific Xiantao Zhang
2012-12-20 15:43 ` [PATCH v3 03/10] nested_ept: Implement guest ept's walker Xiantao Zhang
2012-12-20 12:51 ` Tim Deegan
2012-12-24 9:01 ` Zhang, Xiantao
2013-01-10 11:19 ` Tim Deegan
2012-12-20 15:43 ` [PATCH v3 04/10] EPT: Make ept data structure or operations neutral Xiantao Zhang
2012-12-20 13:01 ` Tim Deegan
2012-12-20 15:43 ` [PATCH v3 05/10] nEPT: Try to enable EPT paging for L2 guest Xiantao Zhang
2012-12-20 15:43 ` [PATCH v3 06/10] nEPT: Sync PDPTR fields if L2 guest in PAE paging mode Xiantao Zhang
2012-12-20 9:39 ` Jan Beulich
2012-12-20 12:18 ` Tim Deegan
2012-12-20 15:43 ` [PATCH v3 07/10] nEPT: Use minimal permission for nested p2m Xiantao Zhang
2012-12-20 13:10 ` Tim Deegan
2012-12-20 15:43 ` [PATCH v3 08/10] nEPT: handle invept instruction from L1 VMM Xiantao Zhang
2012-12-20 9:54 ` Jan Beulich
2012-12-21 1:14 ` Zhang, Xiantao
2012-12-20 15:43 ` Xiantao Zhang [this message]
2012-12-20  9:56       ` [PATCH v3 09/10] nVMX: virtualize VPID capability to nested VMM Jan Beulich
2012-12-20 15:43 ` [PATCH v3 10/10] nEPT: expose EPT & VPID capabilities to L1 VMM Xiantao Zhang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1356018231-26440-10-git-send-email-xiantao.zhang@intel.com \
--to=xiantao.zhang@intel.com \
--cc=JBeulich@suse.com \
--cc=eddie.dong@intel.com \
--cc=jun.nakajima@intel.com \
--cc=keir@xen.org \
--cc=tim@xen.org \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).