From: xiantao.zhang@intel.com
To: xen-devel@lists.xensource.com
Cc: eddie.dong@intel.com, Zhang Xiantao <xiantao.zhang@intel.com>,
keir@xen.org, jun.nakajima@intel.com, JBeulich@suse.com
Subject: [PATCH 01/11] nestedhap: Change hostcr3 and p2m->cr3 to meaningful words
Date: Tue, 11 Dec 2012 01:57:13 +0800 [thread overview]
Message-ID: <1355162243-11857-2-git-send-email-xiantao.zhang@intel.com> (raw)
In-Reply-To: <1355162243-11857-1-git-send-email-xiantao.zhang@intel.com>
From: Zhang Xiantao <xiantao.zhang@intel.com>
VMX doesn't have the concept of a host cr3 for the nested p2m —
only SVM does — so change it to neutral wording.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
xen/arch/x86/hvm/hvm.c | 6 +++---
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
xen/arch/x86/hvm/vmx/vvmx.c | 2 +-
xen/arch/x86/mm/hap/nested_hap.c | 15 ++++++++-------
xen/arch/x86/mm/mm-locks.h | 2 +-
xen/arch/x86/mm/p2m.c | 26 +++++++++++++-------------
xen/include/asm-x86/hvm/hvm.h | 4 ++--
xen/include/asm-x86/hvm/vmx/vvmx.h | 2 +-
xen/include/asm-x86/p2m.h | 16 ++++++++--------
10 files changed, 39 insertions(+), 38 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b6026d7..85bc9be 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4536,10 +4536,10 @@ uint64_t nhvm_vcpu_guestcr3(struct vcpu *v)
return -EOPNOTSUPP;
}
-uint64_t nhvm_vcpu_hostcr3(struct vcpu *v)
+uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
{
- if (hvm_funcs.nhvm_vcpu_hostcr3)
- return hvm_funcs.nhvm_vcpu_hostcr3(v);
+ if (hvm_funcs.nhvm_vcpu_p2m_base)
+ return hvm_funcs.nhvm_vcpu_p2m_base(v);
return -EOPNOTSUPP;
}
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 4c4abfc..6c469ec 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2003,7 +2003,7 @@ static struct hvm_function_table __read_mostly svm_function_table = {
.nhvm_vcpu_vmexit = nsvm_vcpu_vmexit_inject,
.nhvm_vcpu_vmexit_trap = nsvm_vcpu_vmexit_trap,
.nhvm_vcpu_guestcr3 = nsvm_vcpu_guestcr3,
- .nhvm_vcpu_hostcr3 = nsvm_vcpu_hostcr3,
+ .nhvm_vcpu_p2m_base = nsvm_vcpu_hostcr3,
.nhvm_vcpu_asid = nsvm_vcpu_asid,
.nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
.nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 9fb9562..47d8ca6 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1504,7 +1504,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
.nhvm_vcpu_destroy = nvmx_vcpu_destroy,
.nhvm_vcpu_reset = nvmx_vcpu_reset,
.nhvm_vcpu_guestcr3 = nvmx_vcpu_guestcr3,
- .nhvm_vcpu_hostcr3 = nvmx_vcpu_hostcr3,
+ .nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base,
.nhvm_vcpu_asid = nvmx_vcpu_asid,
.nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
.nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index b005816..6d1a736 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -94,7 +94,7 @@ uint64_t nvmx_vcpu_guestcr3(struct vcpu *v)
return 0;
}
-uint64_t nvmx_vcpu_hostcr3(struct vcpu *v)
+uint64_t nvmx_vcpu_eptp_base(struct vcpu *v)
{
/* TODO */
ASSERT(0);
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 317875d..f9a5edc 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -48,9 +48,10 @@
* 1. If #NPF is from L1 guest, then we crash the guest VM (same as old
* code)
* 2. If #NPF is from L2 guest, then we continue from (3)
- * 3. Get h_cr3 from L1 guest. Map h_cr3 into L0 hypervisor address space.
- * 4. Walk the h_cr3 page table
- * 5. - if not present, then we inject #NPF back to L1 guest and
+ * 3. Get np2m base from L1 guest. Map np2m base into L0 hypervisor address space.
+ * 4. Walk the np2m's page table
+ * 5. - if not present or permission check failure, then we inject #NPF back to
+ * L1 guest and
* re-launch L1 guest (L1 guest will either treat this #NPF as MMIO,
* or fix its p2m table for L2 guest)
* 6. - if present, then we will get the a new translated value L1-GPA
@@ -89,7 +90,7 @@ nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
if (old_flags & _PAGE_PRESENT)
flush_tlb_mask(p2m->dirty_cpumask);
-
+
paging_unlock(d);
}
@@ -110,7 +111,7 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
/* If this p2m table has been flushed or recycled under our feet,
* leave it alone. We'll pick up the right one as we try to
* vmenter the guest. */
- if ( p2m->cr3 == nhvm_vcpu_hostcr3(v) )
+ if ( p2m->np2m_base == nhvm_vcpu_p2m_base(v) )
{
unsigned long gfn, mask;
mfn_t mfn;
@@ -186,7 +187,7 @@ nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
uint32_t pfec;
unsigned long nested_cr3, gfn;
- nested_cr3 = nhvm_vcpu_hostcr3(v);
+ nested_cr3 = nhvm_vcpu_p2m_base(v);
pfec = PFEC_user_mode | PFEC_page_present;
if (access_w)
@@ -221,7 +222,7 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
p2m_type_t p2mt_10;
p2m = p2m_get_hostp2m(d); /* L0 p2m */
- nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
+ nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
/* walk the L1 P2M table */
rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21,
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 3700e32..1817f81 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -249,7 +249,7 @@ declare_mm_order_constraint(per_page_sharing)
* A per-domain lock that protects the mapping from nested-CR3 to
* nested-p2m. In particular it covers:
* - the array of nested-p2m tables, and all LRU activity therein; and
- * - setting the "cr3" field of any p2m table to a non-CR3_EADDR value.
+ * - setting the "cr3" field of any p2m table to a non-P2M_BASE_EADDR value.
* (i.e. assigning a p2m table to be the shadow of that cr3 */
/* PoD lock (per-p2m-table)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e351942..62c2d78 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -81,7 +81,7 @@ static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
p2m->domain = d;
p2m->default_access = p2m_access_rwx;
- p2m->cr3 = CR3_EADDR;
+ p2m->np2m_base = P2M_BASE_EADDR;
if ( hap_enabled(d) && cpu_has_vmx )
ept_p2m_init(p2m);
@@ -1445,7 +1445,7 @@ p2m_flush_table(struct p2m_domain *p2m)
ASSERT(page_list_empty(&p2m->pod.single));
/* This is no longer a valid nested p2m for any address space */
- p2m->cr3 = CR3_EADDR;
+ p2m->np2m_base = P2M_BASE_EADDR;
/* Zap the top level of the trie */
top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
@@ -1483,7 +1483,7 @@ p2m_flush_nestedp2m(struct domain *d)
}
struct p2m_domain *
-p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
+p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base)
{
/* Use volatile to prevent gcc to cache nv->nv_p2m in a cpu register as
* this may change within the loop by an other (v)cpu.
@@ -1492,8 +1492,8 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
struct domain *d;
struct p2m_domain *p2m;
- /* Mask out low bits; this avoids collisions with CR3_EADDR */
- cr3 &= ~(0xfffull);
+ /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */
+ np2m_base &= ~(0xfffull);
if (nv->nv_flushp2m && nv->nv_p2m) {
nv->nv_p2m = NULL;
@@ -1505,14 +1505,14 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
if ( p2m )
{
p2m_lock(p2m);
- if ( p2m->cr3 == cr3 || p2m->cr3 == CR3_EADDR )
+ if ( p2m->np2m_base == np2m_base || p2m->np2m_base == P2M_BASE_EADDR )
{
nv->nv_flushp2m = 0;
p2m_getlru_nestedp2m(d, p2m);
nv->nv_p2m = p2m;
- if (p2m->cr3 == CR3_EADDR)
+ if (p2m->np2m_base == P2M_BASE_EADDR)
hvm_asid_flush_vcpu(v);
- p2m->cr3 = cr3;
+ p2m->np2m_base = np2m_base;
cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
p2m_unlock(p2m);
nestedp2m_unlock(d);
@@ -1527,7 +1527,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
p2m_flush_table(p2m);
p2m_lock(p2m);
nv->nv_p2m = p2m;
- p2m->cr3 = cr3;
+ p2m->np2m_base = np2m_base;
nv->nv_flushp2m = 0;
hvm_asid_flush_vcpu(v);
cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
@@ -1543,7 +1543,7 @@ p2m_get_p2m(struct vcpu *v)
if (!nestedhvm_is_n2(v))
return p2m_get_hostp2m(v->domain);
- return p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
+ return p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
}
unsigned long paging_gva_to_gfn(struct vcpu *v,
@@ -1561,15 +1561,15 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
struct p2m_domain *p2m;
const struct paging_mode *mode;
uint32_t pfec_21 = *pfec;
- uint64_t ncr3 = nhvm_vcpu_hostcr3(v);
+ uint64_t np2m_base = nhvm_vcpu_p2m_base(v);
/* translate l2 guest va into l2 guest gfn */
- p2m = p2m_get_nestedp2m(v, ncr3);
+ p2m = p2m_get_nestedp2m(v, np2m_base);
mode = paging_get_nestedmode(v);
gfn = mode->gva_to_gfn(v, p2m, va, pfec);
/* translate l2 guest gfn into l1 guest gfn */
- return hostmode->p2m_ga_to_gfn(v, hostp2m, ncr3,
+ return hostmode->p2m_ga_to_gfn(v, hostp2m, np2m_base,
gfn << PAGE_SHIFT, &pfec_21, NULL);
}
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index fdb0f58..d3535b6 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -170,7 +170,7 @@ struct hvm_function_table {
uint64_t exitcode);
int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, struct hvm_trap *trap);
uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v);
- uint64_t (*nhvm_vcpu_hostcr3)(struct vcpu *v);
+ uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
uint32_t (*nhvm_vcpu_asid)(struct vcpu *v);
int (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v,
unsigned int trapnr, int errcode);
@@ -475,7 +475,7 @@ uint64_t nhvm_vcpu_guestcr3(struct vcpu *v);
/* returns l1 guest's cr3 that points to the page table used to
* translate l2 guest physical address to l1 guest physical address.
*/
-uint64_t nhvm_vcpu_hostcr3(struct vcpu *v);
+uint64_t nhvm_vcpu_p2m_base(struct vcpu *v);
/* returns the asid number l1 guest wants to use to run the l2 guest */
uint32_t nhvm_vcpu_asid(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index dce2cd8..d97011d 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -99,7 +99,7 @@ int nvmx_vcpu_initialise(struct vcpu *v);
void nvmx_vcpu_destroy(struct vcpu *v);
int nvmx_vcpu_reset(struct vcpu *v);
uint64_t nvmx_vcpu_guestcr3(struct vcpu *v);
-uint64_t nvmx_vcpu_hostcr3(struct vcpu *v);
+uint64_t nvmx_vcpu_eptp_base(struct vcpu *v);
uint32_t nvmx_vcpu_asid(struct vcpu *v);
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
int nvmx_intercepts_exception(struct vcpu *v,
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 907a817..1807ad6 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -197,17 +197,17 @@ struct p2m_domain {
struct domain *domain; /* back pointer to domain */
- /* Nested p2ms only: nested-CR3 value that this p2m shadows.
- * This can be cleared to CR3_EADDR under the per-p2m lock but
+ /* Nested p2ms only: nested p2m base value that this p2m shadows.
+ * This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
* needs both the per-p2m lock and the per-domain nestedp2m lock
* to set it to any other value. */
-#define CR3_EADDR (~0ULL)
- uint64_t cr3;
+#define P2M_BASE_EADDR (~0ULL)
+ uint64_t np2m_base;
/* Nested p2ms: linked list of n2pms allocated to this domain.
* The host p2m holds the head of the list and the np2ms are
* threaded on in LRU order. */
- struct list_head np2m_list;
+ struct list_head np2m_list;
/* Host p2m: when this flag is set, don't flush all the nested-p2m
@@ -282,11 +282,11 @@ struct p2m_domain {
/* get host p2m table */
#define p2m_get_hostp2m(d) ((d)->arch.p2m)
-/* Get p2m table (re)usable for specified cr3.
+/* Get p2m table (re)usable for specified np2m base.
* Automatically destroys and re-initializes a p2m if none found.
- * If cr3 == 0 then v->arch.hvm_vcpu.guest_cr[3] is used.
+ * If np2m_base == 0 then v->arch.hvm_vcpu.guest_cr[3] is used.
*/
-struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3);
+struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base);
/* If vcpu is in host mode then behaviour matches p2m_get_hostp2m().
* If vcpu is in guest mode then behaviour matches p2m_get_nestedp2m().
--
1.7.1
next prev parent reply other threads:[~2012-12-10 17:57 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-12-10 17:57 [PATCH 00/11] Add virtual EPT support Xen xiantao.zhang
2012-12-10 17:57 ` xiantao.zhang [this message]
2012-12-13 14:52 ` [PATCH 01/11] nestedhap: Change hostcr3 and p2m->cr3 to meaningful words Tim Deegan
2012-12-10 17:57 ` [PATCH 02/11] nestedhap: Change nested p2m's walker to vendor-specific xiantao.zhang
2012-12-13 14:52 ` Tim Deegan
2012-12-10 17:57 ` [PATCH 03/11] nEPT: Implement guest ept's walker xiantao.zhang
2012-12-13 15:41 ` Tim Deegan
2012-12-10 17:57 ` [PATCH 04/11] nEPT: Do further permission check for sucessful translation xiantao.zhang
2012-12-13 15:47 ` Tim Deegan
2012-12-10 17:57 ` [PATCH 05/11] EPT: Make ept data structure or operations neutral xiantao.zhang
2012-12-13 16:04 ` Tim Deegan
2012-12-17 8:57 ` Zhang, Xiantao
2012-12-17 9:56 ` Jan Beulich
2012-12-10 17:57 ` [PATCH 06/11] nEPT: Try to enable EPT paging for L2 guest xiantao.zhang
2012-12-13 16:16 ` Tim Deegan
2012-12-10 17:57 ` [PATCH 07/11] nEPT: Sync PDPTR fields if L2 guest in PAE paging mode xiantao.zhang
2012-12-13 16:17 ` Tim Deegan
2012-12-10 17:57 ` [PATCH 08/11] nEPT: Use minimal permission for nested p2m xiantao.zhang
2012-12-13 16:43 ` Tim Deegan
2012-12-10 17:57 ` [PATCH 09/11] nEPT: handle invept instruction from L1 VMM xiantao.zhang
2012-12-13 16:56 ` Tim Deegan
2012-12-10 17:57 ` [PATCH 10/11] nEPT: expost EPT capablity to " xiantao.zhang
2012-12-13 17:03 ` Tim Deegan
2012-12-10 17:57 ` [PATCH 11/11] nVMX: Expose VPID capability to nested VMM xiantao.zhang
2012-12-13 17:15 ` Tim Deegan
2012-12-13 0:31 ` [PATCH 00/11] Add virtual EPT support Xen Zhang, Xiantao
2012-12-13 10:25 ` Jan Beulich
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1355162243-11857-2-git-send-email-xiantao.zhang@intel.com \
--to=xiantao.zhang@intel.com \
--cc=JBeulich@suse.com \
--cc=eddie.dong@intel.com \
--cc=jun.nakajima@intel.com \
--cc=keir@xen.org \
--cc=xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).