From: Christoph Egger <Christoph.Egger@amd.com>
To: "xen-devel@lists.xensource.com" <xen-devel@lists.xensource.com>
Subject: [PATCH] nestedhvm: ASID emulation
Date: Wed, 13 Apr 2011 10:57:55 +0200
Message-ID: <4DA56593.1060300@amd.com>
Implement ASID emulation.
This allows the l1 guest to run the l2 guest with a hardware ASID of its own, so the vcpu's ASID no longer needs to be flushed on every nested VMRUN.
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
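
For reviewers, below is a condensed, self-contained model of the allocation
scheme the patch introduces. It compiles standalone; only the field names
(generation, next_asid, max_asid, n1asid, n2asid, new_vasid) follow the
patch, while the asid_core/asid_vcpu structs, asid_vmenter() and main()
are illustrative scaffolding, not the Xen code itself:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Per-physical-core allocator state (models struct hvm_asid_data). */
    struct asid_core {
        uint64_t generation;   /* bumped whenever the allocator wraps */
        uint32_t next_asid;    /* next free ASID; 1-based, 0 means "none" */
        uint32_t max_asid;     /* highest ASID the hardware supports */
    };

    /* Per-vcpu state (models the new struct nestedvcpu fields). */
    struct asid_vcpu {
        uint64_t generation;   /* generation the vcpu's ASIDs belong to */
        uint32_t n1asid;       /* hw ASID used to run the l1 guest */
        uint32_t n2asid;       /* hw ASID used to run the l2 guest */
        bool new_vasid;        /* recomputed by the nested vmentry path */
    };

    /* Returns true when the caller must flush the TLB before entering
     * the guest; *asid_out receives the ASID to run with. */
    static bool asid_vmenter(struct asid_core *c, struct asid_vcpu *v,
                             bool run_n2guest, uint32_t *asid_out)
    {
        bool need_flush = false;

        if ( v->generation == c->generation )
        {
            /* An ASID handed out in this generation satisfies
             * asid < next_asid; anything else is stale. */
            if ( run_n2guest )
            {
                if ( !v->new_vasid && v->n2asid != 0 &&
                     c->next_asid > v->n2asid )
                {
                    *asid_out = v->n2asid;  /* reuse the l2 ASID */
                    return false;
                }
            }
            else if ( v->n1asid != 0 && c->next_asid > v->n1asid )
            {
                *asid_out = v->n1asid;      /* reuse the l1 ASID */
                return false;
            }
        }

        do {
            if ( c->next_asid > c->max_asid )
            {
                /* Out of ASIDs: start a new generation. This stands in
                 * for hvm_asid_flush_core(). */
                c->generation++;
                c->next_asid = 1;
            }
            if ( c->next_asid == 1 )
                need_flush = true;  /* new generation: old entries stale */

            if ( run_n2guest )
                v->n2asid = c->next_asid++;
            else
                v->n1asid = c->next_asid++;
            /* Never let l1 and l2 share a hardware ASID. */
        } while ( v->n1asid == v->n2asid );

        v->generation = c->generation;
        *asid_out = run_n2guest ? v->n2asid : v->n1asid;
        return need_flush;
    }

    int main(void)
    {
        struct asid_core core = { .generation = 1, .next_asid = 1,
                                  .max_asid = 7 };
        struct asid_vcpu vcpu = { 0 };
        uint32_t asid;
        bool flush;

        flush = asid_vmenter(&core, &vcpu, false, &asid); /* enter l1 */
        printf("l1: asid=%u flush=%d\n", asid, flush);
        flush = asid_vmenter(&core, &vcpu, true, &asid);  /* enter l2 */
        printf("l2: asid=%u flush=%d\n", asid, flush);
        flush = asid_vmenter(&core, &vcpu, false, &asid); /* back to l1 */
        printf("l1: asid=%u flush=%d\n", asid, flush);
        return 0;
    }

As in the patch, a set new_vasid makes the l2 fast path fall through to a
fresh allocation, which is how an ASID change by the l1 hypervisor
invalidates the previous hardware mapping.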
--
---to satisfy European Law for business letters:
Advanced Micro Devices GmbH
Einsteinring 24, 85689 Dornach b. Muenchen
Geschaeftsfuehrer: Alberto Bozzo, Andrew Bowd
Sitz: Dornach, Gemeinde Aschheim, Landkreis Muenchen
Registergericht Muenchen, HRB Nr. 43632
diff -r 40c33d1a9c21 -r a93873d200a6 xen/arch/x86/hvm/asid.c
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -48,9 +48,9 @@
/* Per-CPU ASID management. */
struct hvm_asid_data {
- u64 core_asid_generation;
- u32 next_asid;
- u32 max_asid;
+ uint64_t core_asid_generation;
+ uint32_t next_asid;
+ uint32_t max_asid;
bool_t disabled;
};
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct hvm_asid_da
void hvm_asid_init(int nasids)
{
- static s8 g_disabled = -1;
+ static int8_t g_disabled = -1;
struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
data->max_asid = nasids - 1;
@@ -102,10 +102,14 @@ void hvm_asid_flush_core(void)
data->disabled = 1;
}
-bool_t hvm_asid_handle_vmenter(void)
+bool_t hvm_asid_handle_vmenter(bool_t run_n2guest)
{
- struct vcpu *curr = current;
+ int need_flush = 0;
+ struct vcpu *v = current;
struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
+ struct nestedvcpu *nv;
+
+ nv = &vcpu_nestedhvm(v);
/* On erratum #170 systems we must flush the TLB.
* Generation overruns are taken here, too. */
@@ -113,30 +117,72 @@ bool_t hvm_asid_handle_vmenter(void)
goto disabled;
/* Test if VCPU has valid ASID. */
- if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
- return 0;
-
- /* If there are no free ASIDs, need to go to a new generation */
- if ( unlikely(data->next_asid > data->max_asid) )
- {
- hvm_asid_flush_core();
- data->next_asid = 1;
- if ( data->disabled )
- goto disabled;
+ if ( v->arch.hvm_vcpu.asid_generation == data->core_asid_generation ) {
+ if ( run_n2guest ) {
+ if ( !nv->nv_new_vasid && data->next_asid > nv->nv_n2asid ) {
+ /* The l1 guest did not request a new asid. */
+ /* If the asid generation changed the last time we were
+ * going to run the l1 guest, then
+ * next_asid <= nv->nv_n2asid.
+ */
+ ASSERT(nv->nv_n2asid != 0);
+ ASSERT(nv->nv_n1asid != nv->nv_n2asid);
+ v->arch.hvm_vcpu.asid = nv->nv_n2asid;
+ return 0;
+ }
+ } else if ( data->next_asid > nv->nv_n1asid ) {
+ /* If the asid generation changed the last time we were going
+ * to run the l2 guest, then next_asid <= nv->nv_n1asid.
+ */
+ ASSERT(nv->nv_n1asid != 0);
+ ASSERT(nv->nv_n1asid != nv->nv_n2asid);
+ v->arch.hvm_vcpu.asid = nv->nv_n1asid;
+ return 0;
+ }
}
- /* Now guaranteed to be a free ASID. */
- curr->arch.hvm_vcpu.asid = data->next_asid++;
- curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
+ do {
+ /* If there are no free ASIDs, need to go to a new generation */
+ if ( unlikely(data->next_asid > data->max_asid) )
+ {
+ hvm_asid_flush_core();
+ data->next_asid = 1;
+ if ( data->disabled )
+ goto disabled;
+ }
- /*
- * When we assign ASID 1, flush all TLB entries as we are starting a new
- * generation, and all old ASID allocations are now stale.
- */
- return (curr->arch.hvm_vcpu.asid == 1);
+ if ( data->next_asid == 1 ) {
+ /* We start a new generation, so all old ASID allocations are
+ * now stale. Keep the flush pending even if the loop runs
+ * another iteration.
+ */
+ need_flush = 1;
+ }
+
+ /* Now guaranteed to be a free ASID. */
+ if ( run_n2guest )
+ /* nv_n1asid might still hold an asid from an old generation.
+ * That is handled on the next vmenter.
+ */
+ nv->nv_n2asid = data->next_asid++;
+ else
+ /* nv_n2asid might still hold an asid from an old generation.
+ * That is handled on the next vmenter.
+ */
+ nv->nv_n1asid = data->next_asid++;
+
+ /* Make sure l1 and l2 never share the same hw asid */
+ } while (nv->nv_n2asid == nv->nv_n1asid);
+
+ ASSERT(nv->nv_n1asid != 0);
+
+ v->arch.hvm_vcpu.asid = (run_n2guest) ? nv->nv_n2asid : nv->nv_n1asid;
+ v->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
+
+ return need_flush;
disabled:
- curr->arch.hvm_vcpu.asid = 0;
+ v->arch.hvm_vcpu.asid = 0;
return 0;
}
diff -r 40c33d1a9c21 -r a93873d200a6 xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -61,6 +61,9 @@ nestedhvm_vcpu_reset(struct vcpu *v)
nv->nv_vvmcxaddr = VMCX_EADDR;
nv->nv_flushp2m = 0;
nv->nv_p2m = NULL;
+ nv->nv_new_vasid = 0;
+ nv->nv_n1asid = 0;
+ nv->nv_n2asid = 0;
if ( hvm_funcs.nhvm_vcpu_reset )
hvm_funcs.nhvm_vcpu_reset(v);
diff -r 40c33d1a9c21 -r a93873d200a6 xen/arch/x86/hvm/svm/asid.c
--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ -22,6 +22,7 @@
#include <xen/perfc.h>
#include <asm/hvm/svm/asid.h>
#include <asm/amd.h>
+#include <asm/hvm/nestedhvm.h>
void svm_asid_init(struct cpuinfo_x86 *c)
{
@@ -42,7 +43,13 @@ asmlinkage void svm_asid_handle_vmrun(vo
{
struct vcpu *curr = current;
struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
- bool_t need_flush = hvm_asid_handle_vmenter();
+ bool_t need_flush;
+ bool_t vcpu_guestmode = 0;
+
+ if ( nestedhvm_enabled(curr->domain) && nestedhvm_vcpu_in_guestmode(curr) )
+ vcpu_guestmode = 1;
+
+ need_flush = hvm_asid_handle_vmenter(vcpu_guestmode);
/* ASID 0 indicates that ASIDs are disabled. */
if ( curr->arch.hvm_vcpu.asid == 0 )
diff -r 40c33d1a9c21 -r a93873d200a6 xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -261,8 +261,6 @@ int nsvm_vcpu_hostrestore(struct vcpu *v
/* Cleanbits */
n1vmcb->cleanbits.bytes = 0;
- hvm_asid_flush_vcpu(v);
-
return 0;
}
@@ -408,9 +406,7 @@ static int nsvm_vmcb_prepare4vmrun(struc
if (rc)
return rc;
- /* ASID */
- hvm_asid_flush_vcpu(v);
- /* n2vmcb->_guest_asid = ns_vmcb->_guest_asid; */
+ /* ASID - Emulation handled in hvm_asid_handle_vmenter() */
/* TLB control */
n2vmcb->tlb_control = n1vmcb->tlb_control | ns_vmcb->tlb_control;
@@ -605,8 +601,8 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct
svm->ns_vmcb_guestcr3 = ns_vmcb->_cr3;
svm->ns_vmcb_hostcr3 = ns_vmcb->_h_cr3;
- nv->nv_flushp2m = (ns_vmcb->tlb_control
- || (svm->ns_guest_asid != ns_vmcb->_guest_asid));
+ nv->nv_new_vasid = (svm->ns_guest_asid != ns_vmcb->_guest_asid);
+ nv->nv_flushp2m = (ns_vmcb->tlb_control || nv->nv_new_vasid);
svm->ns_guest_asid = ns_vmcb->_guest_asid;
/* nested paging for the guest */
diff -r 40c33d1a9c21 -r a93873d200a6 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1580,6 +1580,17 @@ static void svm_vmexit_do_invalidate_cac
__update_guest_eip(regs, inst_len);
}
+static void svm_invlpga_intercept(struct vcpu *v,
+ unsigned long vaddr, uint32_t asid)
+{
+ struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+ if (asid == 0)
+ asid = nv->nv_n1asid; /* remap to l1 guest asid */
+ else
+ asid = nv->nv_n2asid; /* remap to l2 guest asid */
+ svm_invlpga(vaddr, asid);
+}
+
static void svm_invlpg_intercept(unsigned long vaddr)
{
struct vcpu *curr = current;
@@ -1892,10 +1903,12 @@ asmlinkage void svm_vmexit_handler(struc
case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
case VMEXIT_INVLPG:
- case VMEXIT_INVLPGA:
if ( !handle_mmio() )
hvm_inject_exception(TRAP_gp_fault, 0, 0);
break;
+ case VMEXIT_INVLPGA:
+ svm_invlpga_intercept(v, regs->rax, regs->ecx);
+ break;
case VMEXIT_VMMCALL:
if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
diff -r 40c33d1a9c21 -r a93873d200a6 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2673,7 +2673,7 @@ asmlinkage void vmx_vmenter_helper(void)
goto out;
old_asid = curr->arch.hvm_vcpu.asid;
- need_flush = hvm_asid_handle_vmenter();
+ need_flush = hvm_asid_handle_vmenter(0 /* false */);
new_asid = curr->arch.hvm_vcpu.asid;
if ( unlikely(new_asid != old_asid) )
diff -r 40c33d1a9c21 -r a93873d200a6 xen/include/asm-x86/hvm/asid.h
--- a/xen/include/asm-x86/hvm/asid.h
+++ b/xen/include/asm-x86/hvm/asid.h
@@ -35,7 +35,7 @@ void hvm_asid_flush_core(void);
/* Called before entry to guest context. Checks ASID allocation, returns a
* boolean indicating whether all ASIDs must be flushed. */
-bool_t hvm_asid_handle_vmenter(void);
+bool_t hvm_asid_handle_vmenter(bool_t run_n2guest);
#endif /* __ASM_X86_HVM_ASID_H__ */
diff -r 40c33d1a9c21 -r a93873d200a6 xen/include/asm-x86/hvm/svm/asid.h
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -34,10 +34,7 @@ static inline void svm_asid_g_invlpg(str
{
#if 0
/* Optimization? */
- asm volatile (".byte 0x0F,0x01,0xDF \n"
- : /* output */
- : /* input */
- "a" (g_vaddr), "c"(v->arch.hvm_svm.vmcb->guest_asid) );
+ svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
#endif
/* Safe fallback. Take a new ASID. */
diff -r 40c33d1a9c21 -r a93873d200a6 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -60,6 +60,15 @@ static inline void svm_vmsave(void *vmcb
: : "a" (__pa(vmcb)) : "memory" );
}
+static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
+{
+ asm volatile (
+ ".byte 0x0f,0x01,0xdf"
+ : /* output */
+ : /* input */
+ "a" (vaddr), "c" (asid));
+}
+
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
diff -r 40c33d1a9c21 -r a93873d200a6 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -49,6 +49,11 @@ struct nestedvcpu {
uint64_t nv_n1vmcx_pa; /* host physical address of nv_n1vmcx */
uint64_t nv_n2vmcx_pa; /* host physical address of nv_n2vmcx */
+ /* ASID emulation */
+ bool_t nv_new_vasid; /* true when l1 guest requests new virtual asid */
+ uint32_t nv_n1asid; /* hw ASID number used to run l1 guest */
+ uint32_t nv_n2asid; /* hw ASID number used to run l2 guest */
+
/* SVM/VMX arch specific */
union {
struct nestedsvm nsvm;
@@ -100,8 +105,8 @@ struct hvm_vcpu {
bool_t hcall_preempted;
bool_t hcall_64bit;
- u64 asid_generation;
- u32 asid;
+ uint64_t asid_generation;
+ uint32_t asid;
u32 msr_tsc_aux;