* [PATCH 03/04] p2m: determine the page size in use
From: Christoph Egger @ 2011-08-26 14:45 UTC
To: xen-devel@lists.xensource.com, Tim Deegan
p2m: determine the page size in use

Record the order of the mapping found at each step of a guest
pagetable walk and each p2m lookup, and report it to callers through
the page_order output parameter.  Walks start out assuming
PAGE_ORDER_4K and switch to PAGE_ORDER_2M or PAGE_ORDER_1G when a
superpage entry is encountered.
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
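
For reviewers: Xen expresses mapping sizes as page orders, i.e. the
log2 of the number of 4k pages covered.  The sketch below is
illustration only -- it restates the x86 page-order constants this
patch relies on (numeric values as in the Xen headers) and shows how
an order maps back to a size in bytes:

    /* Illustration only: restates the x86 page-order constants. */
    #define PAGE_SHIFT     12
    #define PAGE_ORDER_4K   0   /* 1UL << (12 + 0)  = 4 KiB */
    #define PAGE_ORDER_2M   9   /* 1UL << (12 + 9)  = 2 MiB */
    #define PAGE_ORDER_1G  18   /* 1UL << (12 + 18) = 1 GiB */

    /* Size in bytes of a mapping of the given order. */
    static unsigned long order_to_bytes(unsigned int order)
    {
        return 1UL << (PAGE_SHIFT + order);
    }
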
diff -r 61ab53a4af83 -r c0ab99142868 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -140,6 +140,7 @@ guest_walk_tables(struct vcpu *v, struct
perfc_incr(guest_walk);
memset(gw, 0, sizeof(*gw));
gw->va = va;
+ gw->page_order = PAGE_ORDER_4K;
/* Mandatory bits that must be set in every entry. We invert NX and
* the invalid bits, to calculate as if there were an "X" bit that
@@ -187,12 +188,16 @@ guest_walk_tables(struct vcpu *v, struct
if ( pse1G )
{
+ gfn_t start;
+ int flags;
+
/* Generate a fake l1 table entry so callers don't all
* have to understand superpages. */
- gfn_t start = guest_l3e_get_gfn(gw->l3e);
+ start = guest_l3e_get_gfn(gw->l3e);
/* Grant full access in the l1e, since all the guest entry's
* access controls are enforced in the l3e. */
- int flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
+ gw->page_order = PAGE_ORDER_1G;
+ flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
_PAGE_ACCESSED|_PAGE_DIRTY);
/* Import cache-control bits. Note that _PAGE_PAT is actually
* _PAGE_PSE, and it is always set. We will clear it in case
@@ -255,14 +260,18 @@ guest_walk_tables(struct vcpu *v, struct
if ( pse2M )
{
+ gfn_t start;
+ int flags;
+
/* Special case: this guest VA is in a PSE superpage, so there's
* no guest l1e. We make one up so that the propagation code
* can generate a shadow l1 table. Start with the gfn of the
* first 4k-page of the superpage. */
- gfn_t start = guest_l2e_get_gfn(gw->l2e);
+ start = guest_l2e_get_gfn(gw->l2e);
/* Grant full access in the l1e, since all the guest entry's
* access controls are enforced in the shadow l2e. */
- int flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
+ gw->page_order = PAGE_ORDER_2M;
+ flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
_PAGE_ACCESSED|_PAGE_DIRTY);
/* Import cache-control bits. Note that _PAGE_PAT is actually
* _PAGE_PSE, and it is always set. We will clear it in case
@@ -306,6 +315,7 @@ guest_walk_tables(struct vcpu *v, struct
&rc);
if(l1p == NULL)
goto out;
+ gw->page_order = PAGE_ORDER_4K;
gw->l1e = l1p[guest_l1_table_offset(va)];
gflags = guest_l1e_get_flags(gw->l1e) ^ iflags;
rc |= ((gflags & mflags) ^ mflags);
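
With gw->page_order initialised to PAGE_ORDER_4K and raised at the 1G
and 2M exits above, a caller holding a completed walk can find the
superpage frame a VA belongs to without re-deriving the paging level.
A minimal sketch, assuming only the field added above plus the
existing guest_l1e_get_gfn()/gfn_x() accessors (the fake l1e holds
the gfn of the exact 4k page, so masking off the low order bits
yields the frame base):

    /* Sketch: first gfn of the (super)page frame containing gw.va,
     * given a completed walk_t gw. */
    unsigned long gfn  = gfn_x(guest_l1e_get_gfn(gw.l1e));
    unsigned long base = gfn & ~((1UL << gw.page_order) - 1);
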
diff -r 61ab53a4af83 -r c0ab99142868 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -59,6 +59,9 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
p2m_access_t p2ma;
walk_t gw;
+ if ( page_order )
+ *page_order = PAGE_ORDER_4K;
+
/* Get the top-level table's MFN */
top_mfn = gfn_to_mfn_type_p2m(p2m, cr3 >> PAGE_SHIFT,
&p2mt, &p2ma, p2m_unshare, NULL);
@@ -95,6 +98,10 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
{
gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare, NULL);
+
+ if ( page_order )
+ *page_order = gw.page_order;
+
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
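
The out-parameter contract here: page_order is only written when the
pointer is non-NULL, defaults to PAGE_ORDER_4K, and describes whatever
gfn is returned.  A hypothetical caller sketch (in the tree this
function is reached through the paging mode's p2m_ga_to_gfn hook;
variable names below are invented for illustration):

    /* Hypothetical caller: translate 'ga' and derive the first gfn
     * of the frame backing it. */
    unsigned int order;
    uint32_t pfec = PFEC_page_present;
    unsigned long gfn = hap_p2m_ga_to_gfn(v, p2m, cr3, ga, &pfec, &order);
    if ( gfn != INVALID_GFN )
    {
        unsigned long base = gfn & ~((1UL << order) - 1);
        /* ... operate on the whole 2^order-page frame at 'base' ... */
    }
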
diff -r 61ab53a4af83 -r c0ab99142868 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -893,6 +893,8 @@ static unsigned long hap_gva_to_gfn_real
struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec,
unsigned int *page_order)
{
+ if ( page_order )
+ *page_order = PAGE_ORDER_4K;
return ((paddr_t)gva >> PAGE_SHIFT);
}
@@ -900,6 +902,8 @@ static unsigned long hap_p2m_ga_to_gfn_r
struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
paddr_t ga, uint32_t *pfec, unsigned int *page_order)
{
+ if ( page_order )
+ *page_order = PAGE_ORDER_4K;
return (ga >> PAGE_SHIFT);
}
diff -r 61ab53a4af83 -r c0ab99142868 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -523,6 +523,9 @@ static mfn_t ept_get_entry(struct p2m_do
*t = p2m_mmio_dm;
*a = p2m_access_n;
+ if ( page_order )
+ *page_order = PAGE_ORDER_4K;
+
/* This pfn is higher than the highest the p2m map currently holds */
if ( gfn > p2m->max_mapped_pfn )
goto out;
diff -r 61ab53a4af83 -r c0ab99142868 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -568,6 +568,8 @@ pod_retry_l3:
else
p2mt = p2m_mmio_dm;
+ if ( page_order )
+ *page_order = PAGE_ORDER_1G;
goto out;
}
#endif
@@ -621,6 +623,8 @@ pod_retry_l2:
else
p2mt = p2m_mmio_dm;
+ if ( page_order )
+ *page_order = PAGE_ORDER_2M;
goto out;
}
@@ -670,6 +674,8 @@ pod_retry_l1:
p2mt = p2m_mmio_dm;
}
+ if ( page_order )
+ *page_order = PAGE_ORDER_4K;
out:
*t = p2mt;
return mfn;
@@ -695,6 +701,9 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
/* Not implemented except with EPT */
*a = p2m_access_rwx;
+ if ( page_order )
+ *page_order = PAGE_ORDER_4K;
+
if ( gfn > p2m->max_mapped_pfn )
/* This pfn is higher than the highest the p2m map currently holds */
return _mfn(INVALID_MFN);
@@ -755,6 +764,8 @@ pod_retry_l3:
unmap_domain_page(l3e);
ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
+ if ( page_order )
+ *page_order = PAGE_ORDER_1G;
return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
}
@@ -789,6 +800,8 @@ pod_retry_l2:
unmap_domain_page(l2e);
ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
+ if ( page_order )
+ *page_order = PAGE_ORDER_2M;
return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
}
@@ -819,6 +832,8 @@ pod_retry_l1:
unmap_domain_page(l1e);
ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
+ if ( page_order )
+ *page_order = PAGE_ORDER_4K;
return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
}
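
The p2m lookup side mirrors the walker: each exit path now reports the
order of the level it resolved at.  Since the lookups above add the
in-frame offset to the pfn they read from the L3/L2 entry, the returned
mfn corresponds to the requested gfn exactly; a caller can therefore
recover the frame base and index neighbouring 4k pages without
repeating the lookup.  A minimal sketch under that assumption, using
gfn/mfn/order as returned by one of the lookups above:

    /* Sketch: recover the superpage frame base from one lookup.
     * Pages within the frame are mfn-contiguous by construction. */
    unsigned long mask     = (1UL << order) - 1;
    unsigned long base_mfn = mfn_x(mfn) - (gfn & mask);
    unsigned long base_gfn = gfn & ~mask;
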
diff -r 61ab53a4af83 -r c0ab99142868 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3756,10 +3756,16 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m
walk_t gw;
gfn_t gfn;
uint32_t missing;
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+ unsigned long vtlb_gfn;
+#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
+
+ if ( page_order )
+ *page_order = PAGE_ORDER_4K;
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/* Check the vTLB cache first */
- unsigned long vtlb_gfn = vtlb_lookup(v, va, pfec[0]);
+ vtlb_gfn = vtlb_lookup(v, va, pfec[0]);
if ( VALID_GFN(vtlb_gfn) )
return vtlb_gfn;
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
diff -r 61ab53a4af83 -r c0ab99142868 xen/include/asm-x86/guest_pt.h
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -236,6 +236,7 @@ guest_supports_nx(struct vcpu *v)
typedef struct guest_pagetable_walk walk_t;
struct guest_pagetable_walk
{
+ unsigned int page_order; /* Size of mapping found (log2 of 4k pages) */
unsigned long va; /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4