* [PATCH v3 1/4] xen/domain_page: Convert map_domain_page_global() to using mfn_t
@ 2015-07-09 14:54 Ben Catterall
2015-07-09 14:54 ` [PATCH v3 2/4] xen/domain_page: Convert copy/clear_domain_page() " Ben Catterall
` (3 more replies)
0 siblings, 4 replies; 6+ messages in thread
From: Ben Catterall @ 2015-07-09 14:54 UTC (permalink / raw)
To: xen-devel
Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini,
Jan Beulich
From: Andrew Cooper <andrew.cooper3@citrix.com>
The sh_map/unmap wrappers can be dropped, and the opportunity is taken to turn
some #define's into static inlines, for added type safety.
As part of adding the type safety, GCC highlights a problematic include cycle:
arm/mm.h includes domain_page.h, which includes xen/mm.h and falls over
__page_to_mfn being used before being declared. Simply dropping the inclusion
of domain_page.h fixes the compilation issue.
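For reference, the typesafe pattern relied on here looks roughly like the
following standalone sketch (simplified; the real mfn_t comes from the
TYPE_SAFE() machinery in xen/include/xen/mm.h and collapses to a plain
integer in non-debug builds):
/* Sketch only: wrapping the frame number in a struct means a raw
 * unsigned long can no longer be passed where an mfn_t is expected. */
typedef struct { unsigned long mfn; } mfn_t;
static inline mfn_t _mfn(unsigned long m)      /* box a raw MFN  */
{
    mfn_t t = { m };
    return t;
}
static inline unsigned long mfn_x(mfn_t m)     /* unbox an mfn_t */
{
    return m.mfn;
}
/*
 * With "static inline void *map_domain_page_global(mfn_t mfn)" instead of
 * the old "#define map_domain_page_global(mfn)":
 *
 *     map_domain_page_global(_mfn(raw));   // compiles
 *     map_domain_page_global(raw);         // compile error, caught early
 */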
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: Ian Campbell <ian.campbell@citrix.com>
CC: Stefano Stabellini <stefano.stabellini@citrix.com>
---
Changed since v2
* (un)map_domain_page_global() now take ptr-to-const
Reviewed-by: Jan Beulich <jbeulich@suse.com>
For ARM: Acked-by: Ian Campbell <ian.campbell@citrix.com>
---
xen/arch/arm/mm.c | 6 ++----
xen/arch/x86/domain_page.c | 9 ++++-----
xen/arch/x86/mm/shadow/multi.c | 10 +++++-----
xen/arch/x86/mm/shadow/private.h | 12 ------------
xen/include/asm-arm/mm.h | 1 -
xen/include/xen/domain_page.h | 22 +++++++++++++++++-----
6 files changed, 28 insertions(+), 32 deletions(-)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index ff1b330..d479048 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -271,11 +271,9 @@ void clear_fixmap(unsigned map)
}
#ifdef CONFIG_DOMAIN_PAGE
-void *map_domain_page_global(unsigned long mfn)
+void *map_domain_page_global(mfn_t mfn)
{
- mfn_t m = _mfn(mfn);
-
- return vmap(&m, 1);
+ return vmap(&mfn, 1);
}
void unmap_domain_page_global(const void *va)
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index d684b2f..0f7548b 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -302,17 +302,16 @@ int mapcache_vcpu_init(struct vcpu *v)
return 0;
}
-void *map_domain_page_global(unsigned long mfn)
+void *map_domain_page_global(mfn_t mfn)
{
- mfn_t m = _mfn(mfn);
ASSERT(!in_irq() && local_irq_is_enabled());
#ifdef NDEBUG
- if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
- return mfn_to_virt(mfn);
+ if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+ return mfn_to_virt(mfn_x(mfn));
#endif
- return vmap(&m, 1);
+ return vmap(&mfn, 1);
}
void unmap_domain_page_global(const void *ptr)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 4058a9c..19644d2 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3806,7 +3806,7 @@ sh_detach_old_tables(struct vcpu *v)
if ( v->arch.paging.shadow.guest_vtable )
{
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
- sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+ unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
v->arch.paging.shadow.guest_vtable = NULL;
}
#endif // !NDEBUG
@@ -3977,8 +3977,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
{
if ( v->arch.paging.shadow.guest_vtable )
- sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
- v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+ unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+ v->arch.paging.shadow.guest_vtable = map_domain_page_global(gmfn);
/* PAGING_LEVELS==4 implies 64-bit, which means that
* map_domain_page_global can't fail */
BUG_ON(v->arch.paging.shadow.guest_vtable == NULL);
@@ -4010,8 +4010,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
{
if ( v->arch.paging.shadow.guest_vtable )
- sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
- v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+ unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+ v->arch.paging.shadow.guest_vtable = map_domain_page_global(gmfn);
/* Does this really need map_domain_page_global? Handle the
* error properly if so. */
BUG_ON(v->arch.paging.shadow.guest_vtable == NULL); /* XXX */
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index f72ea9f..eff39dc 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -517,18 +517,6 @@ sh_unmap_domain_page(void *p)
unmap_domain_page(p);
}
-static inline void *
-sh_map_domain_page_global(mfn_t mfn)
-{
- return map_domain_page_global(mfn_x(mfn));
-}
-
-static inline void
-sh_unmap_domain_page_global(void *p)
-{
- unmap_domain_page_global(p);
-}
-
/**************************************************************************/
/* Shadow-page refcounting. */
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 3601140..2e1f21a 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -5,7 +5,6 @@
#include <xen/kernel.h>
#include <asm/page.h>
#include <public/xen.h>
-#include <xen/domain_page.h>
#include <xen/pdx.h>
/* Align Xen to a 2 MiB boundary. */
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index b7a710b..af07235 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -41,11 +41,15 @@ unsigned long domain_page_map_to_mfn(const void *va);
* address spaces (not just within the VCPU that created the mapping). Global
* mappings can also be unmapped from any context.
*/
-void *map_domain_page_global(unsigned long mfn);
+void *map_domain_page_global(mfn_t mfn);
void unmap_domain_page_global(const void *va);
#define __map_domain_page(pg) map_domain_page(__page_to_mfn(pg))
-#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
+
+static inline void *__map_domain_page_global(const struct page_info *pg)
+{
+ return map_domain_page_global(_mfn(__page_to_mfn(pg)));
+}
#define DMCACHE_ENTRY_VALID 1U
#define DMCACHE_ENTRY_HELD 2U
@@ -117,9 +121,17 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
mfn_to_virt(smfn))
#define domain_page_map_to_mfn(va) virt_to_mfn((unsigned long)(va))
-#define map_domain_page_global(mfn) mfn_to_virt(mfn)
-#define __map_domain_page_global(pg) page_to_virt(pg)
-#define unmap_domain_page_global(va) ((void)(va))
+static inline void *map_domain_page_global(mfn_t mfn)
+{
+ return mfn_to_virt(mfn_x(mfn));
+}
+
+static inline void *__map_domain_page_global(const struct page_info *pg)
+{
+ return page_to_virt(pg);
+}
+
+static inline void unmap_domain_page_global(const void *va) {};
struct domain_mmap_cache {
};
--
2.1.4
* [PATCH v3 2/4] xen/domain_page: Convert copy/clear_domain_page() to using mfn_t
2015-07-09 14:54 [PATCH v3 1/4] xen/domain_page: Convert map_domain_page_global() to using mfn_t Ben Catterall
@ 2015-07-09 14:54 ` Ben Catterall
2015-07-09 14:54 ` [PATCH v3 3/4] Convert map_domain_page() to use the new mfn_t type Ben Catterall
` (2 subsequent siblings)
3 siblings, 0 replies; 6+ messages in thread
From: Ben Catterall @ 2015-07-09 14:54 UTC (permalink / raw)
To: xen-devel
Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini,
jbeulich, Ben Catterall
From: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
[Convert grant_table.c to pass mfn_t types and fix ARM compilation]
Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
xen/arch/x86/mm.c | 7 ++++---
xen/common/grant_table.c | 2 +-
xen/common/kimage.c | 12 ++++++------
xen/common/memory.c | 12 +++++-------
xen/include/xen/domain_page.h | 15 ++++++---------
5 files changed, 22 insertions(+), 26 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index b011c95..df9c190 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3293,7 +3293,7 @@ long do_mmuext_op(
/* A page is dirtied when it's being cleared. */
paging_mark_dirty(pg_owner, page_to_mfn(page));
- clear_domain_page(page_to_mfn(page));
+ clear_domain_page(_mfn(page_to_mfn(page)));
put_page_and_type(page);
break;
@@ -3327,7 +3327,8 @@ long do_mmuext_op(
/* A page is dirtied when it's being copied to. */
paging_mark_dirty(pg_owner, page_to_mfn(dst_page));
- copy_domain_page(page_to_mfn(dst_page), page_to_mfn(src_page));
+ copy_domain_page(_mfn(page_to_mfn(dst_page)),
+ _mfn(page_to_mfn(src_page)));
put_page_and_type(dst_page);
put_page(src_page);
@@ -6003,7 +6004,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
pg = alloc_domheap_page(d, MEMF_no_owner);
if ( pg )
{
- clear_domain_page(page_to_mfn(pg));
+ clear_domain_page(_mfn(page_to_mfn(pg)));
if ( !IS_NIL(ppg) )
*ppg++ = pg;
l1tab[l1_table_offset(va)] =
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index dc3487d..681a553 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1848,7 +1848,7 @@ gnttab_transfer(
goto unlock_and_copyback;
}
- copy_domain_page(page_to_mfn(new_page), mfn);
+ copy_domain_page(_mfn(page_to_mfn(new_page)), _mfn(mfn));
page->count_info &= ~(PGC_count_mask|PGC_allocated);
free_domheap_page(page);
diff --git a/xen/common/kimage.c b/xen/common/kimage.c
index 8c4854d..742e4e8 100644
--- a/xen/common/kimage.c
+++ b/xen/common/kimage.c
@@ -77,7 +77,7 @@ static struct page_info *kimage_alloc_zeroed_page(unsigned memflags)
if ( !page )
return NULL;
- clear_domain_page(page_to_mfn(page));
+ clear_domain_page(_mfn(page_to_mfn(page)));
return page;
}
@@ -409,7 +409,7 @@ static struct page_info *kimage_alloc_crash_control_page(struct kexec_image *ima
if ( page )
{
image->next_crash_page = hole_end;
- clear_domain_page(page_to_mfn(page));
+ clear_domain_page(_mfn(page_to_mfn(page)));
}
return page;
@@ -637,15 +637,15 @@ static struct page_info *kimage_alloc_page(struct kexec_image *image,
if ( old )
{
/* If so move it. */
- unsigned long old_mfn = *old >> PAGE_SHIFT;
- unsigned long mfn = addr >> PAGE_SHIFT;
+ mfn_t old_mfn = _mfn(*old >> PAGE_SHIFT);
+ mfn_t mfn = _mfn(addr >> PAGE_SHIFT);
copy_domain_page(mfn, old_mfn);
clear_domain_page(old_mfn);
*old = (addr & ~PAGE_MASK) | IND_SOURCE;
unmap_domain_page(old);
- page = mfn_to_page(old_mfn);
+ page = mfn_to_page(mfn_x(old_mfn));
break;
}
else
@@ -917,7 +917,7 @@ int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
goto done;
}
- copy_domain_page(page_to_mfn(xen_page), mfn);
+ copy_domain_page(_mfn(page_to_mfn(xen_page)), _mfn(mfn));
put_page(guest_page);
ret = kimage_add_page(image, page_to_maddr(xen_page));
diff --git a/xen/common/memory.c b/xen/common/memory.c
index c84fcdd..ae4c32e 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1170,25 +1170,23 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
return rc;
}
-#ifdef CONFIG_DOMAIN_PAGE
-void clear_domain_page(unsigned long mfn)
+void clear_domain_page(mfn_t mfn)
{
- void *ptr = map_domain_page(mfn);
+ void *ptr = map_domain_page(mfn_x(mfn));
clear_page(ptr);
unmap_domain_page(ptr);
}
-void copy_domain_page(unsigned long dmfn, unsigned long smfn)
+void copy_domain_page(mfn_t dest, mfn_t source)
{
- const void *src = map_domain_page(smfn);
- void *dst = map_domain_page(dmfn);
+ const void *src = map_domain_page(mfn_x(source));
+ void *dst = map_domain_page(mfn_x(dest));
copy_page(dst, src);
unmap_domain_page(dst);
unmap_domain_page(src);
}
-#endif
void destroy_ring_for_helper(
void **_va, struct page_info *page)
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index af07235..9bfeef0 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -11,6 +11,12 @@
#include <xen/mm.h>
+/*
+ * Clear a given page frame, or copy between two of them.
+ */
+void clear_domain_page(mfn_t mfn);
+void copy_domain_page(mfn_t dst, const mfn_t src);
+
#ifdef CONFIG_DOMAIN_PAGE
/*
@@ -25,12 +31,6 @@ void *map_domain_page(unsigned long mfn);
*/
void unmap_domain_page(const void *va);
-/*
- * Clear a given page frame, or copy between two of them.
- */
-void clear_domain_page(unsigned long mfn);
-void copy_domain_page(unsigned long dmfn, unsigned long smfn);
-
/*
* Given a VA from map_domain_page(), return its underlying MFN.
*/
@@ -116,9 +116,6 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
#define map_domain_page(mfn) mfn_to_virt(mfn)
#define __map_domain_page(pg) page_to_virt(pg)
#define unmap_domain_page(va) ((void)(va))
-#define clear_domain_page(mfn) clear_page(mfn_to_virt(mfn))
-#define copy_domain_page(dmfn, smfn) copy_page(mfn_to_virt(dmfn), \
- mfn_to_virt(smfn))
#define domain_page_map_to_mfn(va) virt_to_mfn((unsigned long)(va))
static inline void *map_domain_page_global(mfn_t mfn)
--
2.1.4
* [PATCH v3 3/4] Convert map_domain_page() to use the new mfn_t type
2015-07-09 14:54 [PATCH v3 1/4] xen/domain_page: Convert map_domain_page_global() to using mfn_t Ben Catterall
2015-07-09 14:54 ` [PATCH v3 2/4] xen/domain_page: Convert copy/clear_domain_page() " Ben Catterall
@ 2015-07-09 14:54 ` Ben Catterall
2015-07-10 16:14 ` Ian Campbell
2015-07-09 14:54 ` [PATCH v3 4/4] Remove sh_{un}map_domain_page() and hap_{un}map_domain_page() Ben Catterall
2015-07-10 14:44 ` [PATCH v3 1/4] xen/domain_page: Convert map_domain_page_global() to using mfn_t Tim Deegan
3 siblings, 1 reply; 6+ messages in thread
From: Ben Catterall @ 2015-07-09 14:54 UTC (permalink / raw)
To: xen-devel
Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini,
jbeulich, Ben Catterall
Reworked the internals and declaration of map_domain_page(), applying
(un)boxing where needed, and converted its callers to pass mfn_t,
boxing at the call sites where necessary.
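As an illustration of the call-site pattern (a minimal standalone sketch,
not code from this series; the paddr_t typedef and PAGE_SHIFT value below
are stand-ins for the real Xen definitions):
typedef unsigned long long paddr_t;       /* stand-in for Xen's paddr_t */
typedef struct { unsigned long mfn; } mfn_t;
#define PAGE_SHIFT 12                     /* assumed value, sketch only */
#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
static inline mfn_t _mfn(unsigned long m) { mfn_t t = { m }; return t; }
void *map_domain_page(mfn_t mfn);         /* new typesafe declaration */
static void *map_by_maddr(paddr_t ma)
{
    /* Before: map_domain_page(ma >> PAGE_SHIFT);
     * After:  derive the frame number, then box it at the call site. */
    return map_domain_page(_mfn(paddr_to_pfn(ma)));
}
Callers that already hold an mfn_t (e.g. from pagetable_get_mfn()) now pass
it through unmodified, and mfn_x() is applied only where a raw frame number
is still genuinely needed (hashing, comparisons, mfn_to_virt()).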
Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changed since v1:
* Created paddr_to_mfn() and mfn_to_paddr() for both x86 and ARM
* Converted code to use the new paddr_to_mfn() rather than e.g.
paddr>>PAGE_SHIFT
Changed since v2:
* Switch to using paddr_to_pfn() and pfn_to_paddr().
* Removed paddr_to_mfn() and mfn_to_paddr()
* Added missing blank line
---
xen/arch/arm/domain_build.c | 2 +-
xen/arch/arm/kernel.c | 2 +-
xen/arch/arm/mm.c | 12 +++++-----
xen/arch/arm/p2m.c | 4 ++--
xen/arch/arm/traps.c | 4 ++--
xen/arch/x86/debug.c | 10 ++++----
xen/arch/x86/domain.c | 4 ++--
xen/arch/x86/domain_build.c | 10 ++++----
xen/arch/x86/domain_page.c | 22 ++++++++---------
xen/arch/x86/domctl.c | 2 +-
xen/arch/x86/mm.c | 40 +++++++++++++++----------------
xen/arch/x86/mm/guest_walk.c | 2 +-
xen/arch/x86/mm/hap/guest_walk.c | 2 +-
xen/arch/x86/mm/mem_sharing.c | 4 ++--
xen/arch/x86/mm/p2m-ept.c | 22 ++++++++---------
xen/arch/x86/mm/p2m-pod.c | 8 +++----
xen/arch/x86/mm/p2m-pt.c | 28 +++++++++++-----------
xen/arch/x86/mm/p2m.c | 2 +-
xen/arch/x86/mm/paging.c | 32 ++++++++++++-------------
xen/arch/x86/mm/shadow/common.c | 2 +-
xen/arch/x86/mm/shadow/multi.c | 4 ++--
xen/arch/x86/mm/shadow/private.h | 2 +-
xen/arch/x86/smpboot.c | 2 +-
xen/arch/x86/tboot.c | 5 ++--
xen/arch/x86/traps.c | 12 +++++-----
xen/arch/x86/x86_64/mm.c | 14 +++++------
xen/arch/x86/x86_64/traps.c | 10 ++++----
xen/arch/x86/x86_emulate.c | 10 ++++----
xen/common/grant_table.c | 4 ++--
xen/common/kexec.c | 4 ++--
xen/common/kimage.c | 10 ++++----
xen/common/memory.c | 6 ++---
xen/common/tmem_xen.c | 6 ++---
xen/drivers/passthrough/amd/iommu_guest.c | 10 ++++----
xen/drivers/passthrough/amd/iommu_map.c | 14 +++++------
xen/drivers/passthrough/vtd/x86/vtd.c | 2 +-
xen/include/asm-x86/hap.h | 2 +-
xen/include/asm-x86/page.h | 7 +++---
xen/include/asm-x86/paging.h | 2 +-
xen/include/xen/domain_page.h | 8 +++----
40 files changed, 175 insertions(+), 173 deletions(-)
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 8556afd..a059de6 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -1408,7 +1408,7 @@ static void initrd_load(struct kernel_info *kinfo)
return;
}
- dst = map_domain_page(ma>>PAGE_SHIFT);
+ dst = map_domain_page(_mfn(paddr_to_pfn(ma)));
copy_from_paddr(dst + s, paddr + offs, l);
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 209c3dd..f641b12 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -182,7 +182,7 @@ static void kernel_zimage_load(struct kernel_info *info)
return;
}
- dst = map_domain_page(ma>>PAGE_SHIFT);
+ dst = map_domain_page(_mfn(paddr_to_pfn(ma)));
copy_from_paddr(dst + s, paddr + offs, l);
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d479048..ae0f34c 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -213,7 +213,7 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr,
else
root_table = 0;
- mapping = map_domain_page(root_pfn + root_table);
+ mapping = map_domain_page(_mfn(root_pfn + root_table));
for ( level = root_level; ; level++ )
{
@@ -230,7 +230,7 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr,
/* For next iteration */
unmap_domain_page(mapping);
- mapping = map_domain_page(pte.walk.base);
+ mapping = map_domain_page(_mfn(pte.walk.base));
}
unmap_domain_page(mapping);
@@ -282,11 +282,11 @@ void unmap_domain_page_global(const void *va)
}
/* Map a page of domheap memory */
-void *map_domain_page(unsigned long mfn)
+void *map_domain_page(mfn_t mfn)
{
unsigned long flags;
lpae_t *map = this_cpu(xen_dommap);
- unsigned long slot_mfn = mfn & ~LPAE_ENTRY_MASK;
+ unsigned long slot_mfn = mfn_x(mfn) & ~LPAE_ENTRY_MASK;
vaddr_t va;
lpae_t pte;
int i, slot;
@@ -339,7 +339,7 @@ void *map_domain_page(unsigned long mfn)
va = (DOMHEAP_VIRT_START
+ (slot << SECOND_SHIFT)
- + ((mfn & LPAE_ENTRY_MASK) << THIRD_SHIFT));
+ + ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT));
/*
* We may not have flushed this specific subpage at map time,
@@ -386,7 +386,7 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
void flush_page_to_ram(unsigned long mfn)
{
- void *v = map_domain_page(mfn);
+ void *v = map_domain_page(_mfn(mfn));
clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
unmap_domain_page(v);
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 903fa3f..18fe91f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -206,7 +206,7 @@ static paddr_t __p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
/* Map for next level */
unmap_domain_page(map);
- map = map_domain_page(pte.p2m.base);
+ map = map_domain_page(_mfn(pte.p2m.base));
}
unmap_domain_page(map);
@@ -1078,7 +1078,7 @@ static int apply_p2m_changes(struct domain *d,
int i;
if ( mappings[level+1] )
unmap_domain_page(mappings[level+1]);
- mappings[level+1] = map_domain_page(entry->p2m.base);
+ mappings[level+1] = map_domain_page(_mfn(entry->p2m.base));
cur_offset[level] = offset;
/* Any mapping further down is now invalid */
for ( i = level+1; i < 4; i++ )
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 06fb40f..9d2bd6a 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2293,7 +2293,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
printk("Failed TTBR0 maddr lookup\n");
goto done;
}
- first = map_domain_page(paddr>>PAGE_SHIFT);
+ first = map_domain_page(_mfn(paddr_to_pfn(paddr)));
offset = addr >> (12+10);
printk("1ST[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
@@ -2309,7 +2309,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
printk("Failed L1 entry maddr lookup\n");
goto done;
}
- second = map_domain_page(paddr>>PAGE_SHIFT);
+ second = map_domain_page(_mfn(paddr_to_pfn(paddr)));
offset = (addr >> 12) & 0x3FF;
printk("2ND[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
offset, paddr, second[offset]);
diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index 801dcf2..ee41463 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -108,7 +108,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
if ( pgd3val == 0 )
{
- l4t = map_domain_page(mfn);
+ l4t = map_domain_page(_mfn(mfn));
l4e = l4t[l4_table_offset(vaddr)];
unmap_domain_page(l4t);
mfn = l4e_get_pfn(l4e);
@@ -120,7 +120,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
return INVALID_MFN;
}
- l3t = map_domain_page(mfn);
+ l3t = map_domain_page(_mfn(mfn));
l3e = l3t[l3_table_offset(vaddr)];
unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
@@ -134,7 +134,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
}
}
- l2t = map_domain_page(mfn);
+ l2t = map_domain_page(_mfn(mfn));
l2e = l2t[l2_table_offset(vaddr)];
unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
@@ -146,7 +146,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
return INVALID_MFN;
}
- l1t = map_domain_page(mfn);
+ l1t = map_domain_page(_mfn(mfn));
l1e = l1t[l1_table_offset(vaddr)];
unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
@@ -175,7 +175,7 @@ unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
if ( mfn == INVALID_MFN )
break;
- va = map_domain_page(mfn);
+ va = map_domain_page(_mfn(mfn));
va = va + (addr & (PAGE_SIZE-1));
if ( toaddr )
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 956ac70..34ecd7c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -914,7 +914,7 @@ int arch_set_info_guest(
fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1];
}
} else {
- l4_pgentry_t *l4tab = map_domain_page(pfn);
+ l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn));
pfn = l4e_get_pfn(*l4tab);
unmap_domain_page(l4tab);
@@ -1074,7 +1074,7 @@ int arch_set_info_guest(
{
l4_pgentry_t *l4tab;
- l4tab = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ l4tab = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
*l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
unmap_domain_page(l4tab);
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index a06379c..18cf6aa 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -630,7 +630,7 @@ static __init void pvh_fixup_page_tables_for_hap(struct vcpu *v,
ASSERT(paging_mode_enabled(v->domain));
- l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ l4start = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
/* Clear entries prior to guest L4 start */
pl4e = l4start + l4_table_offset(v_start);
@@ -746,7 +746,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
unsigned long nr_pages)
{
struct page_info *page = NULL;
- l4_pgentry_t *pl4e, *l4start = map_domain_page(pgtbl_pfn);
+ l4_pgentry_t *pl4e, *l4start = map_domain_page(_mfn(pgtbl_pfn));
l3_pgentry_t *pl3e = NULL;
l2_pgentry_t *pl2e = NULL;
l1_pgentry_t *pl1e = NULL;
@@ -789,7 +789,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
clear_page(pl3e);
*pl4e = l4e_from_page(page, L4_PROT);
} else
- pl3e = map_domain_page(l4e_get_pfn(*pl4e));
+ pl3e = map_domain_page(_mfn(l4e_get_pfn(*pl4e)));
pl3e += l3_table_offset(vphysmap_start);
if ( !l3e_get_intpte(*pl3e) )
@@ -816,7 +816,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
*pl3e = l3e_from_page(page, L3_PROT);
}
else
- pl2e = map_domain_page(l3e_get_pfn(*pl3e));
+ pl2e = map_domain_page(_mfn(l3e_get_pfn(*pl3e)));
pl2e += l2_table_offset(vphysmap_start);
if ( !l2e_get_intpte(*pl2e) )
@@ -844,7 +844,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
*pl2e = l2e_from_page(page, L2_PROT);
}
else
- pl1e = map_domain_page(l2e_get_pfn(*pl2e));
+ pl1e = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
pl1e += l1_table_offset(vphysmap_start);
BUG_ON(l1e_get_intpte(*pl1e));
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 0f7548b..d86f8fe 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -66,7 +66,7 @@ void __init mapcache_override_current(struct vcpu *v)
#define MAPCACHE_L1ENT(idx) \
__linear_l1_table[l1_linear_offset(MAPCACHE_VIRT_START + pfn_to_paddr(idx))]
-void *map_domain_page(unsigned long mfn)
+void *map_domain_page(mfn_t mfn)
{
unsigned long flags;
unsigned int idx, i;
@@ -76,31 +76,31 @@ void *map_domain_page(unsigned long mfn)
struct vcpu_maphash_entry *hashent;
#ifdef NDEBUG
- if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
- return mfn_to_virt(mfn);
+ if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+ return mfn_to_virt(mfn_x(mfn));
#endif
v = mapcache_current_vcpu();
if ( !v || !is_pv_vcpu(v) )
- return mfn_to_virt(mfn);
+ return mfn_to_virt(mfn_x(mfn));
dcache = &v->domain->arch.pv_domain.mapcache;
vcache = &v->arch.pv_vcpu.mapcache;
if ( !dcache->inuse )
- return mfn_to_virt(mfn);
+ return mfn_to_virt(mfn_x(mfn));
perfc_incr(map_domain_page_count);
local_irq_save(flags);
- hashent = &vcache->hash[MAPHASH_HASHFN(mfn)];
- if ( hashent->mfn == mfn )
+ hashent = &vcache->hash[MAPHASH_HASHFN(mfn_x(mfn))];
+ if ( hashent->mfn == mfn_x(mfn) )
{
idx = hashent->idx;
ASSERT(idx < dcache->entries);
hashent->refcnt++;
ASSERT(hashent->refcnt);
- ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn);
+ ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn_x(mfn));
goto out;
}
@@ -135,7 +135,7 @@ void *map_domain_page(unsigned long mfn)
else
{
/* Replace a hash entry instead. */
- i = MAPHASH_HASHFN(mfn);
+ i = MAPHASH_HASHFN(mfn_x(mfn));
do {
hashent = &vcache->hash[i];
if ( hashent->idx != MAPHASHENT_NOTINUSE && !hashent->refcnt )
@@ -149,7 +149,7 @@ void *map_domain_page(unsigned long mfn)
}
if ( ++i == MAPHASH_ENTRIES )
i = 0;
- } while ( i != MAPHASH_HASHFN(mfn) );
+ } while ( i != MAPHASH_HASHFN(mfn_x(mfn)) );
}
BUG_ON(idx >= dcache->entries);
@@ -165,7 +165,7 @@ void *map_domain_page(unsigned long mfn)
spin_unlock(&dcache->lock);
- l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR_RW));
+ l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR_RW));
out:
local_irq_restore(flags);
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index b5047db..bf62a88 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1316,7 +1316,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
else
{
const l4_pgentry_t *l4e =
- map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
unmap_domain_page(l4e);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index df9c190..342414f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1174,7 +1174,7 @@ static int alloc_l1_table(struct page_info *page)
unsigned int i;
int ret = 0;
- pl1e = map_domain_page(pfn);
+ pl1e = map_domain_page(_mfn(pfn));
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
@@ -1255,7 +1255,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type,
unsigned int i;
int rc = 0;
- pl2e = map_domain_page(pfn);
+ pl2e = map_domain_page(_mfn(pfn));
for ( i = page->nr_validated_ptes; i < L2_PAGETABLE_ENTRIES; i++ )
{
@@ -1304,7 +1304,7 @@ static int alloc_l3_table(struct page_info *page)
unsigned int i;
int rc = 0, partial = page->partial_pte;
- pl3e = map_domain_page(pfn);
+ pl3e = map_domain_page(_mfn(pfn));
/*
* PAE guests allocate full pages, but aren't required to initialize
@@ -1396,7 +1396,7 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
void fill_ro_mpt(unsigned long mfn)
{
- l4_pgentry_t *l4tab = map_domain_page(mfn);
+ l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
@@ -1405,7 +1405,7 @@ void fill_ro_mpt(unsigned long mfn)
void zap_ro_mpt(unsigned long mfn)
{
- l4_pgentry_t *l4tab = map_domain_page(mfn);
+ l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
unmap_domain_page(l4tab);
@@ -1415,7 +1415,7 @@ static int alloc_l4_table(struct page_info *page)
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_mfn(page);
- l4_pgentry_t *pl4e = map_domain_page(pfn);
+ l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
unsigned int i;
int rc = 0, partial = page->partial_pte;
@@ -1471,7 +1471,7 @@ static void free_l1_table(struct page_info *page)
l1_pgentry_t *pl1e;
unsigned int i;
- pl1e = map_domain_page(pfn);
+ pl1e = map_domain_page(_mfn(pfn));
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
if ( is_guest_l1_slot(i) )
@@ -1489,7 +1489,7 @@ static int free_l2_table(struct page_info *page, int preemptible)
unsigned int i = page->nr_validated_ptes - 1;
int err = 0;
- pl2e = map_domain_page(pfn);
+ pl2e = map_domain_page(_mfn(pfn));
ASSERT(page->nr_validated_ptes);
do {
@@ -1518,7 +1518,7 @@ static int free_l3_table(struct page_info *page)
int rc = 0, partial = page->partial_pte;
unsigned int i = page->nr_validated_ptes - !partial;
- pl3e = map_domain_page(pfn);
+ pl3e = map_domain_page(_mfn(pfn));
do {
if ( is_guest_l3_slot(i) )
@@ -1553,7 +1553,7 @@ static int free_l4_table(struct page_info *page)
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_mfn(page);
- l4_pgentry_t *pl4e = map_domain_page(pfn);
+ l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
int rc = 0, partial = page->partial_pte;
unsigned int i = page->nr_validated_ptes - !partial;
@@ -2653,7 +2653,7 @@ int vcpu_destroy_pagetables(struct vcpu *v)
if ( is_pv_32bit_vcpu(v) )
{
- l4tab = map_domain_page(mfn);
+ l4tab = map_domain_page(_mfn(mfn));
mfn = l4e_get_pfn(*l4tab);
}
@@ -2709,7 +2709,7 @@ int new_guest_cr3(unsigned long mfn)
if ( is_pv_32bit_domain(d) )
{
unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
- l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
+ l4_pgentry_t *pl4e = map_domain_page(_mfn(gt_mfn));
rc = paging_mode_refcounts(d)
? -EINVAL /* Old code was broken, but what should it be? */
@@ -3768,7 +3768,7 @@ static int create_grant_pte_mapping(
}
mfn = page_to_mfn(page);
- va = map_domain_page(mfn);
+ va = map_domain_page(_mfn(mfn));
va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
if ( !page_lock(page) )
@@ -3823,7 +3823,7 @@ static int destroy_grant_pte_mapping(
}
mfn = page_to_mfn(page);
- va = map_domain_page(mfn);
+ va = map_domain_page(_mfn(mfn));
va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
if ( !page_lock(page) )
@@ -4501,7 +4501,7 @@ long do_update_descriptor(u64 pa, u64 desc)
paging_mark_dirty(dom, mfn);
/* All is good so make the update. */
- gdt_pent = map_domain_page(mfn);
+ gdt_pent = map_domain_page(_mfn(mfn));
write_atomic((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
unmap_domain_page(gdt_pent);
@@ -5039,7 +5039,7 @@ static int ptwr_emulated_update(
adjust_guest_l1e(nl1e, d);
/* Checked successfully: do the update (write or cmpxchg). */
- pl1e = map_domain_page(mfn);
+ pl1e = map_domain_page(_mfn(mfn));
pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
if ( do_cmpxchg )
{
@@ -5954,7 +5954,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
}
else
- l2tab = map_domain_page(l3e_get_pfn(l3tab[l3_table_offset(va)]));
+ l2tab = map_domain_page(_mfn(l3e_get_pfn(l3tab[l3_table_offset(va)])));
unmap_domain_page(l3tab);
@@ -5996,7 +5996,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
*pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR);
}
else if ( !l1tab )
- l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+ l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
if ( ppg &&
!(l1e_get_flags(l1tab[l1_table_offset(va)]) & _PAGE_PRESENT) )
@@ -6047,7 +6047,7 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
{
- const l2_pgentry_t *l2tab = map_domain_page(l3e_get_pfn(*pl3e));
+ const l2_pgentry_t *l2tab = map_domain_page(_mfn(l3e_get_pfn(*pl3e)));
const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
unsigned int i = l1_table_offset(va);
@@ -6055,7 +6055,7 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
{
if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
{
- l1_pgentry_t *l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+ l1_pgentry_t *l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
{
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 9c6c74f..30a653d 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -121,7 +121,7 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
*mfn = _mfn(page_to_mfn(page));
ASSERT(mfn_valid(mfn_x(*mfn)));
- map = map_domain_page(mfn_x(*mfn));
+ map = map_domain_page(*mfn);
return map;
}
diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
index 381a196..62ab454 100644
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -87,7 +87,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
/* Map the top-level table and call the tree-walker */
ASSERT(mfn_valid(mfn_x(top_mfn)));
- top_map = map_domain_page(mfn_x(top_mfn));
+ top_map = map_domain_page(top_mfn);
#if GUEST_PAGING_LEVELS == 3
top_map += (cr3 & ~(PAGE_MASK | 31));
#endif
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 16e329e..1a01e45 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1210,8 +1210,8 @@ int __mem_sharing_unshare_page(struct domain *d,
return -ENOMEM;
}
- s = map_domain_page(__page_to_mfn(old_page));
- t = map_domain_page(__page_to_mfn(page));
+ s = map_domain_page(_mfn(__page_to_mfn(old_page)));
+ t = map_domain_page(_mfn(__page_to_mfn(page)));
memcpy(t, s, PAGE_SIZE);
unmap_domain_page(s);
unmap_domain_page(t);
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index a8737be..e7ff739 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -246,7 +246,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l
if ( level > 1 )
{
- ept_entry_t *epte = map_domain_page(ept_entry->mfn);
+ ept_entry_t *epte = map_domain_page(_mfn(ept_entry->mfn));
for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
ept_free_entry(p2m, epte + i, level - 1);
unmap_domain_page(epte);
@@ -271,7 +271,7 @@ static int ept_split_super_page(struct p2m_domain *p2m, ept_entry_t *ept_entry,
if ( !ept_set_middle_entry(p2m, &new_ept) )
return 0;
- table = map_domain_page(new_ept.mfn);
+ table = map_domain_page(_mfn(new_ept.mfn));
trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
@@ -359,7 +359,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
mfn = e.mfn;
unmap_domain_page(*table);
- *table = map_domain_page(mfn);
+ *table = map_domain_page(_mfn(mfn));
*gfn_remainder &= (1UL << shift) - 1;
return GUEST_TABLE_NORMAL_PAGE;
}
@@ -372,7 +372,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
static bool_t ept_invalidate_emt(mfn_t mfn, bool_t recalc, int level)
{
int rc;
- ept_entry_t *epte = map_domain_page(mfn_x(mfn));
+ ept_entry_t *epte = map_domain_page(mfn);
unsigned int i;
bool_t changed = 0;
@@ -414,7 +414,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
unsigned int i, index;
int wrc, rc = 0, ret = GUEST_TABLE_MAP_FAILED;
- table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
for ( i = ept_get_wl(&p2m->ept); i > target; --i )
{
ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i);
@@ -498,7 +498,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
ept_entry_t e;
unsigned int i;
- epte = map_domain_page(mfn);
+ epte = map_domain_page(_mfn(mfn));
i = (gfn >> (level * EPT_TABLE_ORDER)) & (EPT_PAGETABLE_ENTRIES - 1);
e = atomic_read_ept_entry(&epte[i]);
@@ -689,7 +689,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
(target == 0));
ASSERT(!p2m_is_foreign(p2mt) || target == 0);
- table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
ret = GUEST_TABLE_MAP_FAILED;
for ( i = ept_get_wl(ept); i > target; i-- )
@@ -840,7 +840,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
p2m_query_t q, unsigned int *page_order)
{
- ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ ept_entry_t *table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
unsigned long gfn_remainder = gfn;
ept_entry_t *ept_entry;
u32 index;
@@ -944,7 +944,7 @@ void ept_walk_table(struct domain *d, unsigned long gfn)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
struct ept_data *ept = &p2m->ept;
- ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ ept_entry_t *table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
unsigned long gfn_remainder = gfn;
int i;
@@ -977,7 +977,7 @@ void ept_walk_table(struct domain *d, unsigned long gfn)
{
gfn_remainder &= (1UL << (i*EPT_TABLE_ORDER)) - 1;
- next = map_domain_page(ept_entry->mfn);
+ next = map_domain_page(_mfn(ept_entry->mfn));
unmap_domain_page(table);
@@ -1188,7 +1188,7 @@ static void ept_dump_p2m_table(unsigned char key)
char c = 0;
gfn_remainder = gfn;
- table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
for ( i = ept_get_wl(ept); i > 0; i-- )
{
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 0679f00..6e27bcd 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -109,7 +109,7 @@ p2m_pod_cache_add(struct p2m_domain *p2m,
*/
for ( i = 0; i < (1 << order); i++ )
{
- char *b = map_domain_page(mfn_x(page_to_mfn(page)) + i);
+ char *b = map_domain_page(_mfn(mfn_x(page_to_mfn(page)) + i));
clear_page(b);
unmap_domain_page(b);
}
@@ -710,7 +710,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
for ( i=0; i<SUPERPAGE_PAGES; i++ )
{
/* Quick zero-check */
- map = map_domain_page(mfn_x(mfn0) + i);
+ map = map_domain_page(_mfn(mfn_x(mfn0) + i));
for ( j=0; j<16; j++ )
if( *(map+j) != 0 )
@@ -743,7 +743,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
/* Finally, do a full zero-check */
for ( i=0; i < SUPERPAGE_PAGES; i++ )
{
- map = map_domain_page(mfn_x(mfn0) + i);
+ map = map_domain_page(_mfn(mfn_x(mfn0) + i));
for ( j=0; j<PAGE_SIZE/sizeof(*map); j++ )
if( *(map+j) != 0 )
@@ -815,7 +815,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
&& ( (mfn_to_page(mfns[i])->count_info & PGC_allocated) != 0 )
&& ( (mfn_to_page(mfns[i])->count_info & (PGC_page_table|PGC_xen_heap)) == 0 )
&& ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) <= max_ref ) )
- map[i] = map_domain_page(mfn_x(mfns[i]));
+ map[i] = map_domain_page(mfns[i]);
else
map[i] = NULL;
}
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index e50b6fa..a6dd464 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -146,7 +146,7 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
if ( page_order > PAGE_ORDER_2M )
{
- l1_pgentry_t *l3_table = map_domain_page(l1e_get_pfn(*p2m_entry));
+ l1_pgentry_t *l3_table = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
p2m_free_entry(p2m, l3_table + i, page_order - 9);
unmap_domain_page(l3_table);
@@ -280,7 +280,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
}
- next = map_domain_page(l1e_get_pfn(*p2m_entry));
+ next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
if ( unmap )
unmap_domain_page(*table);
*table = next;
@@ -304,7 +304,7 @@ static int p2m_pt_set_recalc_range(struct p2m_domain *p2m,
l1_pgentry_t *pent, *plast;
int err = 0;
- table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
for ( i = 4; i-- > level; )
{
remainder = gfn_remainder;
@@ -366,7 +366,7 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
l1_pgentry_t *pent;
int err = 0;
- table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
while ( --level )
{
unsigned long remainder = gfn_remainder;
@@ -524,7 +524,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
if ( rc < 0 )
return rc;
- table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
L4_PAGETABLE_SHIFT - PAGE_SHIFT,
L4_PAGETABLE_ENTRIES, PGT_l3_page_table, 1);
@@ -716,7 +716,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
{
- l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
+ l4_pgentry_t *l4e = map_domain_page(mfn);
l4e += l4_table_offset(addr);
if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
{
@@ -728,7 +728,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
unmap_domain_page(l4e);
}
{
- l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
+ l3_pgentry_t *l3e = map_domain_page(mfn);
l3e += l3_table_offset(addr);
pod_retry_l3:
flags = l3e_get_flags(*l3e);
@@ -769,7 +769,7 @@ pod_retry_l3:
unmap_domain_page(l3e);
}
- l2e = map_domain_page(mfn_x(mfn));
+ l2e = map_domain_page(mfn);
l2e += l2_table_offset(addr);
pod_retry_l2:
@@ -807,7 +807,7 @@ pod_retry_l2:
recalc = 1;
unmap_domain_page(l2e);
- l1e = map_domain_page(mfn_x(mfn));
+ l1e = map_domain_page(mfn);
l1e += l1_table_offset(addr);
pod_retry_l1:
flags = l1e_get_flags(*l1e);
@@ -849,7 +849,7 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
ASSERT(hap_enabled(p2m->domain));
- tab = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ tab = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
for ( changed = i = 0; i < (1 << PAGETABLE_ORDER); ++i )
{
l1_pgentry_t e = tab[i];
@@ -929,7 +929,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
l4_pgentry_t *l4e;
l3_pgentry_t *l3e;
int i4, i3;
- l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ l4e = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
gfn = 0;
for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
@@ -939,7 +939,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
continue;
}
- l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
+ l3e = map_domain_page(_mfn(l4e_get_pfn(l4e[i4])));
for ( i3 = 0;
i3 < L3_PAGETABLE_ENTRIES;
i3++ )
@@ -974,7 +974,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
}
}
- l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
+ l2e = map_domain_page(_mfn(l3e_get_pfn(l3e[i3])));
for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
{
if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
@@ -1010,7 +1010,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
continue;
}
- l1e = map_domain_page(l2e_get_pfn(l2e[i2]));
+ l1e = map_domain_page(_mfn(l2e_get_pfn(l2e[i2])));
for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
{
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 64ffeeb..4fa3cd8 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1285,7 +1285,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
int rc;
ASSERT( mfn_valid(mfn) );
- guest_map = map_domain_page(mfn_x(mfn));
+ guest_map = map_domain_page(mfn);
rc = copy_from_user(guest_map, user_ptr, PAGE_SIZE);
unmap_domain_page(guest_map);
if ( rc )
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 59d4720..7089155 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -81,7 +81,7 @@ static mfn_t paging_new_log_dirty_leaf(struct domain *d)
mfn_t mfn = paging_new_log_dirty_page(d);
if ( mfn_valid(mfn) )
{
- void *leaf = map_domain_page(mfn_x(mfn));
+ void *leaf = map_domain_page(mfn);
clear_page(leaf);
unmap_domain_page(leaf);
}
@@ -95,7 +95,7 @@ static mfn_t paging_new_log_dirty_node(struct domain *d)
if ( mfn_valid(mfn) )
{
int i;
- mfn_t *node = map_domain_page(mfn_x(mfn));
+ mfn_t *node = map_domain_page(mfn);
for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
node[i] = _mfn(INVALID_MFN);
unmap_domain_page(node);
@@ -107,7 +107,7 @@ static mfn_t paging_new_log_dirty_node(struct domain *d)
static mfn_t *paging_map_log_dirty_bitmap(struct domain *d)
{
if ( likely(mfn_valid(d->arch.paging.log_dirty.top)) )
- return map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+ return map_domain_page(d->arch.paging.log_dirty.top);
return NULL;
}
@@ -144,7 +144,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
return -EBUSY;
}
- l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+ l4 = map_domain_page(d->arch.paging.log_dirty.top);
i4 = d->arch.paging.preempt.log_dirty.i4;
i3 = d->arch.paging.preempt.log_dirty.i3;
rc = 0;
@@ -154,14 +154,14 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
if ( !mfn_valid(l4[i4]) )
continue;
- l3 = map_domain_page(mfn_x(l4[i4]));
+ l3 = map_domain_page(l4[i4]);
for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
{
if ( !mfn_valid(l3[i3]) )
continue;
- l2 = map_domain_page(mfn_x(l3[i3]));
+ l2 = map_domain_page(l3[i3]);
for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
if ( mfn_valid(l2[i2]) )
@@ -311,7 +311,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
if ( !mfn_valid(mfn) )
goto out;
- l3 = map_domain_page(mfn_x(mfn));
+ l3 = map_domain_page(mfn);
mfn = l3[i3];
if ( !mfn_valid(mfn) )
l3[i3] = mfn = paging_new_log_dirty_node(d);
@@ -319,7 +319,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
if ( !mfn_valid(mfn) )
goto out;
- l2 = map_domain_page(mfn_x(mfn));
+ l2 = map_domain_page(mfn);
mfn = l2[i2];
if ( !mfn_valid(mfn) )
l2[i2] = mfn = paging_new_log_dirty_leaf(d);
@@ -327,7 +327,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
if ( !mfn_valid(mfn) )
goto out;
- l1 = map_domain_page(mfn_x(mfn));
+ l1 = map_domain_page(mfn);
changed = !__test_and_set_bit(i1, l1);
unmap_domain_page(l1);
if ( changed )
@@ -384,25 +384,25 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
if ( !mfn_valid(mfn) )
return 0;
- l4 = map_domain_page(mfn_x(mfn));
+ l4 = map_domain_page(mfn);
mfn = l4[L4_LOGDIRTY_IDX(pfn)];
unmap_domain_page(l4);
if ( !mfn_valid(mfn) )
return 0;
- l3 = map_domain_page(mfn_x(mfn));
+ l3 = map_domain_page(mfn);
mfn = l3[L3_LOGDIRTY_IDX(pfn)];
unmap_domain_page(l3);
if ( !mfn_valid(mfn) )
return 0;
- l2 = map_domain_page(mfn_x(mfn));
+ l2 = map_domain_page(mfn);
mfn = l2[L2_LOGDIRTY_IDX(pfn)];
unmap_domain_page(l2);
if ( !mfn_valid(mfn) )
return 0;
- l1 = map_domain_page(mfn_x(mfn));
+ l1 = map_domain_page(mfn);
rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
unmap_domain_page(l1);
return rv;
@@ -476,18 +476,18 @@ static int paging_log_dirty_op(struct domain *d,
for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
{
- l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
+ l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(l4[i4]) : NULL;
for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
{
l2 = ((l3 && mfn_valid(l3[i3])) ?
- map_domain_page(mfn_x(l3[i3])) : NULL);
+ map_domain_page(l3[i3]) : NULL);
for ( i2 = 0;
(pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
i2++ )
{
unsigned int bytes = PAGE_SIZE;
l1 = ((l2 && mfn_valid(l2[i2])) ?
- map_domain_page(mfn_x(l2[i2])) : NULL);
+ map_domain_page(l2[i2]) : NULL);
if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
if ( likely(peek) )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 97de133..c36ffeb 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3393,7 +3393,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
if ( (l1e_get_flags(new) & _PAGE_PRESENT)
&& !(l1e_get_flags(new) & _PAGE_PSE)
&& mfn_valid(nmfn) )
- npte = map_domain_page(mfn_x(nmfn));
+ npte = map_domain_page(nmfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 19644d2..0a942f8 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -256,7 +256,7 @@ shadow_check_gl1e(struct vcpu *v, walk_t *gw)
return 0;
/* Can't just pull-through because mfn may have changed */
- l1p = map_domain_page(mfn_x(gw->l1mfn));
+ l1p = map_domain_page(gw->l1mfn);
nl1e.l1 = l1p[guest_l1_table_offset(gw->va)].l1;
unmap_domain_page(l1p);
@@ -384,7 +384,7 @@ sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
{
if ( gl1mfn )
*gl1mfn = mfn_x(gw.l1mfn);
- pl1e = map_domain_page(mfn_x(gw.l1mfn)) +
+ pl1e = map_domain_page(gw.l1mfn) +
(guest_l1_table_offset(addr) * sizeof(guest_l1e_t));
}
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index eff39dc..31b36ef 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -508,7 +508,7 @@ sh_mfn_is_a_page_table(mfn_t gmfn)
static inline void *
sh_map_domain_page(mfn_t mfn)
{
- return map_domain_page(mfn_x(mfn));
+ return map_domain_page(mfn);
}
static inline void
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index c73aa1b..fd61610 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -668,7 +668,7 @@ static void cpu_smpboot_free(unsigned int cpu)
if ( per_cpu(stubs.addr, cpu) )
{
unsigned long mfn = per_cpu(stubs.mfn, cpu);
- unsigned char *stub_page = map_domain_page(mfn);
+ unsigned char *stub_page = map_domain_page(_mfn(mfn));
unsigned int i;
memset(stub_page + STUB_BUF_CPU_OFFS(cpu), 0xcc, STUB_BUF_SIZE);
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index 01b9530..88142d2 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -161,7 +161,7 @@ static void update_iommu_mac(vmac_ctx_t *ctx, uint64_t pt_maddr, int level)
if ( pt_maddr == 0 )
return;
- pt_vaddr = (struct dma_pte *)map_domain_page(pt_maddr >> PAGE_SHIFT_4K);
+ pt_vaddr = (struct dma_pte *)map_domain_page(_mfn(paddr_to_pfn(pt_maddr)));
vmac_update((void *)pt_vaddr, PAGE_SIZE, ctx);
for ( i = 0; i < PTE_NUM; i++ )
@@ -194,7 +194,8 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
{
if ( page->count_info & PGC_page_table )
{
- void *pg = map_domain_page(mfn);
+ void *pg = map_domain_page(_mfn(mfn));
+
vmac_update(pg, PAGE_SIZE, ctx);
unmap_domain_page(pg);
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c07bbae..2dc0666 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1325,7 +1325,7 @@ static enum pf_type __page_fault_type(
mfn = cr3 >> PAGE_SHIFT;
- l4t = map_domain_page(mfn);
+ l4t = map_domain_page(_mfn(mfn));
l4e = l4e_read_atomic(&l4t[l4_table_offset(addr)]);
mfn = l4e_get_pfn(l4e);
unmap_domain_page(l4t);
@@ -1334,7 +1334,7 @@ static enum pf_type __page_fault_type(
return real_fault;
page_user &= l4e_get_flags(l4e);
- l3t = map_domain_page(mfn);
+ l3t = map_domain_page(_mfn(mfn));
l3e = l3e_read_atomic(&l3t[l3_table_offset(addr)]);
mfn = l3e_get_pfn(l3e);
unmap_domain_page(l3t);
@@ -1345,7 +1345,7 @@ static enum pf_type __page_fault_type(
if ( l3e_get_flags(l3e) & _PAGE_PSE )
goto leaf;
- l2t = map_domain_page(mfn);
+ l2t = map_domain_page(_mfn(mfn));
l2e = l2e_read_atomic(&l2t[l2_table_offset(addr)]);
mfn = l2e_get_pfn(l2e);
unmap_domain_page(l2t);
@@ -1356,7 +1356,7 @@ static enum pf_type __page_fault_type(
if ( l2e_get_flags(l2e) & _PAGE_PSE )
goto leaf;
- l1t = map_domain_page(mfn);
+ l1t = map_domain_page(_mfn(mfn));
l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
mfn = l1e_get_pfn(l1e);
unmap_domain_page(l1t);
@@ -2201,7 +2201,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
* context. This is needed for some systems which (ab)use IN/OUT
* to communicate with BIOS code in system-management mode.
*/
- io_emul_stub = map_domain_page(this_cpu(stubs.mfn)) +
+ io_emul_stub = map_domain_page(_mfn(this_cpu(stubs.mfn))) +
(this_cpu(stubs.addr) & ~PAGE_MASK) +
STUB_BUF_SIZE / 2;
/* movq $host_to_guest_gpr_switch,%rcx */
@@ -2397,7 +2397,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
else
{
l4_pgentry_t *pl4e =
- map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
mfn = l4e_get_pfn(*pl4e);
unmap_domain_page(pl4e);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index db5346c..98310f3 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -59,7 +59,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
return NULL;
- l4t = map_domain_page(mfn);
+ l4t = map_domain_page(_mfn(mfn));
l4e = l4t[l4_table_offset(addr)];
unmap_domain_page(l4t);
if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
@@ -77,7 +77,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
goto ret;
}
- l2t = map_domain_page(mfn);
+ l2t = map_domain_page(_mfn(mfn));
l2e = l2t[l2_table_offset(addr)];
unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
@@ -89,7 +89,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
goto ret;
}
- l1t = map_domain_page(mfn);
+ l1t = map_domain_page(_mfn(mfn));
l1e = l1t[l1_table_offset(addr)];
unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
@@ -97,7 +97,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
return NULL;
ret:
- return map_domain_page(mfn) + (addr & ~PAGE_MASK);
+ return map_domain_page(_mfn(mfn)) + (addr & ~PAGE_MASK);
}
/*
@@ -1197,7 +1197,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
mfn = (read_cr3()) >> PAGE_SHIFT;
- pl4e = map_domain_page(mfn);
+ pl4e = map_domain_page(_mfn(mfn));
l4e = pl4e[0];
@@ -1206,7 +1206,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
mfn = l4e_get_pfn(l4e);
/* We don't need get page type here since it is current CR3 */
- pl3e = map_domain_page(mfn);
+ pl3e = map_domain_page(_mfn(mfn));
l3e = pl3e[3];
@@ -1214,7 +1214,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
goto unmap;
mfn = l3e_get_pfn(l3e);
- pl2e = map_domain_page(mfn);
+ pl2e = map_domain_page(_mfn(mfn));
l2e = pl2e[l2_table_offset(addr)];
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 61bd053..0846a19 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -175,7 +175,7 @@ void show_page_walk(unsigned long addr)
if ( !is_canonical_address(addr) )
return;
- l4t = map_domain_page(mfn);
+ l4t = map_domain_page(_mfn(mfn));
l4e = l4t[l4_table_offset(addr)];
unmap_domain_page(l4t);
mfn = l4e_get_pfn(l4e);
@@ -187,7 +187,7 @@ void show_page_walk(unsigned long addr)
!mfn_valid(mfn) )
return;
- l3t = map_domain_page(mfn);
+ l3t = map_domain_page(_mfn(mfn));
l3e = l3t[l3_table_offset(addr)];
unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
@@ -201,7 +201,7 @@ void show_page_walk(unsigned long addr)
!mfn_valid(mfn) )
return;
- l2t = map_domain_page(mfn);
+ l2t = map_domain_page(_mfn(mfn));
l2e = l2t[l2_table_offset(addr)];
unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
@@ -215,7 +215,7 @@ void show_page_walk(unsigned long addr)
!mfn_valid(mfn) )
return;
- l1t = map_domain_page(mfn);
+ l1t = map_domain_page(_mfn(mfn));
l1e = l1t[l1_table_offset(addr)];
unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
@@ -381,7 +381,7 @@ void __devinit subarch_percpu_traps_init(void)
/* IST_MAX IST pages + 1 syscall page + 1 guard page + primary stack. */
BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
- stub_page = map_domain_page(this_cpu(stubs.mfn));
+ stub_page = map_domain_page(_mfn(this_cpu(stubs.mfn)));
/* Trampoline for SYSCALL entry from 64-bit mode. */
wrmsrl(MSR_LSTAR, stub_va);
diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
index 51c8e44..28132b5 100644
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -23,11 +23,11 @@
#define cpu_has_amd_erratum(nr) \
cpu_has_amd_erratum(&current_cpu_data, AMD_ERRATUM_##nr)
-#define get_stub(stb) ({ \
- BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1); \
- (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \
- ((stb).ptr = map_domain_page(this_cpu(stubs.mfn))) + \
- ((stb).addr & ~PAGE_MASK); \
+#define get_stub(stb) ({ \
+ BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1); \
+ (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \
+ ((stb).ptr = map_domain_page(_mfn(this_cpu(stubs.mfn)))) + \
+ ((stb).addr & ~PAGE_MASK); \
})
#define put_stub(stb) ({ \
if ( (stb).ptr ) \
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 681a553..92f078e 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2427,7 +2427,7 @@ static int gnttab_copy_claim_buf(const struct gnttab_copy *op,
buf->have_type = 1;
}
- buf->virt = map_domain_page(buf->frame);
+ buf->virt = map_domain_page(_mfn(buf->frame));
rc = GNTST_okay;
out:
@@ -2945,7 +2945,7 @@ static int __gnttab_cache_flush(gnttab_cache_flush_t *cflush,
}
}
- v = map_domain_page(mfn);
+ v = map_domain_page(_mfn(mfn));
v += cflush->offset;
if ( (cflush->op & GNTTAB_CACHE_INVAL) && (cflush->op & GNTTAB_CACHE_CLEAN) )
diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index 7d91547..7dd2700 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -912,7 +912,7 @@ static int kexec_segments_from_ind_page(unsigned long mfn,
kimage_entry_t *entry;
int ret = 0;
- page = map_domain_page(mfn);
+ page = map_domain_page(_mfn(mfn));
/*
* Walk the indirection page list, adding destination pages to the
@@ -934,7 +934,7 @@ static int kexec_segments_from_ind_page(unsigned long mfn,
break;
case IND_INDIRECTION:
unmap_domain_page(page);
- entry = page = map_domain_page(mfn);
+ entry = page = map_domain_page(_mfn(mfn));
continue;
case IND_DONE:
goto done;
diff --git a/xen/common/kimage.c b/xen/common/kimage.c
index 742e4e8..dcc010e 100644
--- a/xen/common/kimage.c
+++ b/xen/common/kimage.c
@@ -495,10 +495,10 @@ static void kimage_terminate(struct kexec_image *image)
* Call unmap_domain_page(ptr) after the loop exits.
*/
#define for_each_kimage_entry(image, ptr, entry) \
- for ( ptr = map_domain_page(image->head >> PAGE_SHIFT); \
+ for ( ptr = map_domain_page(_mfn(paddr_to_pfn(image->head))); \
(entry = *ptr) && !(entry & IND_DONE); \
ptr = (entry & IND_INDIRECTION) ? \
- (unmap_domain_page(ptr), map_domain_page(entry >> PAGE_SHIFT)) \
+ (unmap_domain_page(ptr), map_domain_page(_mfn(paddr_to_pfn(entry)))) \
: ptr + 1 )
static void kimage_free_entry(kimage_entry_t entry)
@@ -748,7 +748,7 @@ static int kimage_load_crash_segment(struct kexec_image *image,
dchunk = PAGE_SIZE;
schunk = min(dchunk, sbytes);
- dest_va = map_domain_page(dest_mfn);
+ dest_va = map_domain_page(_mfn(dest_mfn));
if ( !dest_va )
return -EINVAL;
@@ -866,7 +866,7 @@ int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
int ret = 0;
paddr_t dest = KIMAGE_NO_DEST;
- page = map_domain_page(ind_mfn);
+ page = map_domain_page(_mfn(ind_mfn));
if ( !page )
return -ENOMEM;
@@ -892,7 +892,7 @@ int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
break;
case IND_INDIRECTION:
unmap_domain_page(page);
- page = map_domain_page(mfn);
+ page = map_domain_page(_mfn(mfn));
entry = page;
continue;
case IND_DONE:
diff --git a/xen/common/memory.c b/xen/common/memory.c
index ae4c32e..e5d49d8 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1172,7 +1172,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
void clear_domain_page(mfn_t mfn)
{
- void *ptr = map_domain_page(mfn_x(mfn));
+ void *ptr = map_domain_page(mfn);
clear_page(ptr);
unmap_domain_page(ptr);
@@ -1180,8 +1180,8 @@ void clear_domain_page(mfn_t mfn)
void copy_domain_page(mfn_t dest, mfn_t source)
{
- const void *src = map_domain_page(mfn_x(source));
- void *dst = map_domain_page(mfn_x(dest));
+ const void *src = map_domain_page(source);
+ void *dst = map_domain_page(dest);
copy_page(dst, src);
unmap_domain_page(dst);
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 5ef131b..71cb7d5 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -77,7 +77,7 @@ static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
*pcli_mfn = page_to_mfn(page);
*pcli_pfp = page;
- return map_domain_page(*pcli_mfn);
+ return map_domain_page(_mfn(*pcli_mfn));
}
static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
@@ -104,7 +104,7 @@ int tmem_copy_from_client(struct page_info *pfp,
ASSERT(pfp != NULL);
tmem_mfn = page_to_mfn(pfp);
- tmem_va = map_domain_page(tmem_mfn);
+ tmem_va = map_domain_page(_mfn(tmem_mfn));
if ( guest_handle_is_null(clibuf) )
{
cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0);
@@ -174,7 +174,7 @@ int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
return -EFAULT;
}
tmem_mfn = page_to_mfn(pfp);
- tmem_va = map_domain_page(tmem_mfn);
+ tmem_va = map_domain_page(_mfn(tmem_mfn));
if ( cli_va )
{
memcpy(cli_va, tmem_va, PAGE_SIZE);
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 7b0c102..b513073 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -203,7 +203,7 @@ void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
sizeof(ppr_entry_t), tail);
ASSERT(mfn_valid(mfn));
- log_base = map_domain_page(mfn);
+ log_base = map_domain_page(_mfn(mfn));
log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));
/* Convert physical device id back into virtual device id */
@@ -252,7 +252,7 @@ void guest_iommu_add_event_log(struct domain *d, u32 entry[])
sizeof(event_entry_t), tail);
ASSERT(mfn_valid(mfn));
- log_base = map_domain_page(mfn);
+ log_base = map_domain_page(_mfn(mfn));
log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));
/* re-write physical device id into virtual device id */
@@ -377,7 +377,7 @@ static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
gaddr_64 = (gaddr_hi << 32) | (gaddr_lo << 3);
gfn = gaddr_64 >> PAGE_SHIFT;
- vaddr = map_domain_page(mfn_x(get_gfn(d, gfn ,&p2mt)));
+ vaddr = map_domain_page(get_gfn(d, gfn ,&p2mt));
put_gfn(d, gfn);
write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & (PAGE_SIZE-1))),
@@ -425,7 +425,7 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
ASSERT(mfn_valid(dte_mfn));
/* Read guest dte information */
- dte_base = map_domain_page(dte_mfn);
+ dte_base = map_domain_page(_mfn(dte_mfn));
gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t));
@@ -506,7 +506,7 @@ static void guest_iommu_process_command(unsigned long _d)
sizeof(cmd_entry_t), head);
ASSERT(mfn_valid(cmd_mfn));
- cmd_base = map_domain_page(cmd_mfn);
+ cmd_base = map_domain_page(_mfn(cmd_mfn));
cmd = cmd_base + head % entries_per_page;
opcode = get_field_from_reg_u32(cmd->data[1],
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 64c5225..586c441 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -42,7 +42,7 @@ void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long gfn)
{
u64 *table, *pte;
- table = map_domain_page(l1_mfn);
+ table = map_domain_page(_mfn(l1_mfn));
pte = table + pfn_to_pde_idx(gfn, IOMMU_PAGING_MODE_LEVEL_1);
*pte = 0;
unmap_domain_page(table);
@@ -115,7 +115,7 @@ static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long gfn,
u32 *pde;
bool_t need_flush = 0;
- table = map_domain_page(pt_mfn);
+ table = map_domain_page(_mfn(pt_mfn));
pde = (u32*)(table + pfn_to_pde_idx(gfn, pde_level));
@@ -349,12 +349,12 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
next_level = merge_level - 1;
/* get pde at merge level */
- table = map_domain_page(pt_mfn);
+ table = map_domain_page(_mfn(pt_mfn));
pde = table + pfn_to_pde_idx(gfn, merge_level);
/* get page table of next level */
ntable_maddr = amd_iommu_get_next_table_from_pte((u32*)pde);
- ntable = map_domain_page(ntable_maddr >> PAGE_SHIFT);
+ ntable = map_domain_page(_mfn(paddr_to_pfn(ntable_maddr)));
/* get the first mfn of next level */
first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
@@ -400,7 +400,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
- table = map_domain_page(pt_mfn);
+ table = map_domain_page(_mfn(pt_mfn));
pde = table + pfn_to_pde_idx(gfn, merge_level);
/* get first mfn */
@@ -412,7 +412,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
return 1;
}
- ntable = map_domain_page(ntable_mfn);
+ ntable = map_domain_page(_mfn(ntable_mfn));
first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
if ( first_mfn == 0 )
@@ -467,7 +467,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
unsigned int next_level = level - 1;
pt_mfn[level] = next_table_mfn;
- next_table_vaddr = map_domain_page(next_table_mfn);
+ next_table_vaddr = map_domain_page(_mfn(next_table_mfn));
pde = next_table_vaddr + pfn_to_pde_idx(pfn, level);
/* Here might be a super page frame */
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 109234e..8beec8c 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -41,7 +41,7 @@ boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
void *map_vtd_domain_page(u64 maddr)
{
- return map_domain_page(maddr >> PAGE_SHIFT_4K);
+ return map_domain_page(_mfn(paddr_to_pfn(maddr)));
}
void unmap_vtd_domain_page(void *va)
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index 7876527..ca590f3 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -37,7 +37,7 @@
static inline void *
hap_map_domain_page(mfn_t mfn)
{
- return map_domain_page(mfn_x(mfn));
+ return map_domain_page(mfn);
}
static inline void
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index e26daaf..87b3341 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -172,9 +172,9 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
-#define map_l1t_from_l2e(x) ((l1_pgentry_t *)map_domain_page(l2e_get_pfn(x)))
-#define map_l2t_from_l3e(x) ((l2_pgentry_t *)map_domain_page(l3e_get_pfn(x)))
-#define map_l3t_from_l4e(x) ((l3_pgentry_t *)map_domain_page(l4e_get_pfn(x)))
+#define map_l1t_from_l2e(x) ((l1_pgentry_t *)map_domain_page(_mfn(l2e_get_pfn(x))))
+#define map_l2t_from_l3e(x) ((l2_pgentry_t *)map_domain_page(_mfn(l3e_get_pfn(x))))
+#define map_l3t_from_l4e(x) ((l3_pgentry_t *)map_domain_page(_mfn(l4e_get_pfn(x))))
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
@@ -234,6 +234,7 @@ void copy_page_sse2(void *, const void *);
#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define __paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
+
/* Convert between machine frame numbers and spage-info structures. */
#define __mfn_to_spage(mfn) (spage_table + pfn_to_sdx(mfn))
#define __spage_to_mfn(pg) sdx_to_pfn((unsigned long)((pg) - spage_table))
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 9c32665..7a09881 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -376,7 +376,7 @@ guest_map_l1e(struct vcpu *v, unsigned long addr, unsigned long *gl1mfn)
!= _PAGE_PRESENT )
return NULL;
*gl1mfn = l2e_get_pfn(l2e);
- return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(addr);
+ return (l1_pgentry_t *)map_domain_page(_mfn(*gl1mfn)) + l1_table_offset(addr);
}
/* Pull down the mapping we got from guest_map_l1e() */
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 9bfeef0..c1d630c 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -23,7 +23,7 @@ void copy_domain_page(mfn_t dst, const mfn_t src);
* Map a given page frame, returning the mapped virtual address. The page is
* then accessible within the current VCPU until a corresponding unmap call.
*/
-void *map_domain_page(unsigned long mfn);
+void *map_domain_page(mfn_t mfn);
/*
* Pass a VA within a page previously mapped in the context of the
@@ -44,7 +44,7 @@ unsigned long domain_page_map_to_mfn(const void *va);
void *map_domain_page_global(mfn_t mfn);
void unmap_domain_page_global(const void *va);
-#define __map_domain_page(pg) map_domain_page(__page_to_mfn(pg))
+#define __map_domain_page(pg) map_domain_page(_mfn(__page_to_mfn(pg)))
static inline void *__map_domain_page_global(const struct page_info *pg)
{
@@ -84,7 +84,7 @@ map_domain_page_with_cache(unsigned long mfn, struct domain_mmap_cache *cache)
}
cache->mfn = mfn;
- cache->va = map_domain_page(mfn);
+ cache->va = map_domain_page(_mfn(mfn));
cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
done:
@@ -113,7 +113,7 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
#else /* !CONFIG_DOMAIN_PAGE */
-#define map_domain_page(mfn) mfn_to_virt(mfn)
+#define map_domain_page(mfn) mfn_to_virt(mfn_x(mfn))
#define __map_domain_page(pg) page_to_virt(pg)
#define unmap_domain_page(va) ((void)(va))
#define domain_page_map_to_mfn(va) virt_to_mfn((unsigned long)(va))
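The caller-side conversion throughout the series is mechanical: code holding a raw frame number wraps it with _mfn() at the call site, while code already holding an mfn_t passes it through unchanged. A minimal sketch of the resulting idiom (hypothetical caller, not part of the patch):

    unsigned long pfn = l2e_get_pfn(l2e);    /* raw frame number taken from a PTE */
    void *p = map_domain_page(_mfn(pfn));    /* wrap with _mfn() at the call site */
    /* ... access the mapped page ... */
    unmap_domain_page(p);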
--
2.1.4
* [PATCH v3 4/4] Remove sh_{un}map_domain_page() and hap_{un}map_domain_page()
2015-07-09 14:54 [PATCH v3 1/4] xen/domain_page: Convert map_domain_page_global() to using mfn_t Ben Catterall
2015-07-09 14:54 ` [PATCH v3 2/4] xen/domain_page: Convert copy/clear_domain_page() " Ben Catterall
2015-07-09 14:54 ` [PATCH v3 3/4] Convert map_domain_page() to use the new mfn_t type Ben Catterall
@ 2015-07-09 14:54 ` Ben Catterall
2015-07-10 14:44 ` [PATCH v3 1/4] xen/domain_page: Convert map_domain_page_global() to using mfn_t Tim Deegan
3 siblings, 0 replies; 6+ messages in thread
From: Ben Catterall @ 2015-07-09 14:54 UTC (permalink / raw)
To: xen-devel
Cc: keir, ian.campbell, andrew.cooper3, tim, stefano.stabellini,
jbeulich, Ben Catterall
These are removed as they were thin wrappers around map_domain_page(), existing only
to make it appear to take an mfn_t type; now that map_domain_page() takes an mfn_t
directly, the wrappers are redundant.
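A rough before/after sketch of the change at a shadow-code call site (the call site is illustrative, not lifted verbatim from the tree):

    /* Before: subsystem wrapper existed only to accept the mfn_t type */
    static inline void *sh_map_domain_page(mfn_t mfn)
    {
        return map_domain_page(mfn);
    }
        ptr = sh_map_domain_page(smfn);

    /* After: call the generic helper directly; unmapping is unchanged */
        ptr = map_domain_page(smfn);
        unmap_domain_page(ptr);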
Signed-off-by: Ben Catterall <Ben.Catterall@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
xen/arch/x86/mm/hap/hap.c | 4 +-
xen/arch/x86/mm/shadow/common.c | 22 +++---
xen/arch/x86/mm/shadow/multi.c | 152 +++++++++++++++++++--------------------
xen/arch/x86/mm/shadow/private.h | 13 ----
xen/include/asm-x86/hap.h | 15 ----
5 files changed, 89 insertions(+), 117 deletions(-)
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index d0d3f1e..63980af 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -395,7 +395,7 @@ static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
struct domain *d = v->domain;
l4_pgentry_t *l4e;
- l4e = hap_map_domain_page(l4mfn);
+ l4e = map_domain_page(l4mfn);
/* Copy the common Xen mappings from the idle domain */
memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
@@ -411,7 +411,7 @@ static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);
- hap_unmap_domain_page(l4e);
+ unmap_domain_page(l4e);
}
static mfn_t hap_make_monitor_table(struct vcpu *v)
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index c36ffeb..6574206 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -781,11 +781,11 @@ static void oos_hash_add(struct vcpu *v, mfn_t gmfn)
if ( swap )
SWAP(oos_snapshot[idx], oos_snapshot[oidx]);
- gptr = sh_map_domain_page(oos[oidx]);
- gsnpptr = sh_map_domain_page(oos_snapshot[oidx]);
+ gptr = map_domain_page(oos[oidx]);
+ gsnpptr = map_domain_page(oos_snapshot[oidx]);
memcpy(gsnpptr, gptr, PAGE_SIZE);
- sh_unmap_domain_page(gptr);
- sh_unmap_domain_page(gsnpptr);
+ unmap_domain_page(gptr);
+ unmap_domain_page(gsnpptr);
}
/* Remove an MFN from the list of out-of-sync guest pagetables */
@@ -1498,7 +1498,7 @@ mfn_t shadow_alloc(struct domain *d,
p = __map_domain_page(sp);
ASSERT(p != NULL);
clear_page(p);
- sh_unmap_domain_page(p);
+ unmap_domain_page(p);
INIT_PAGE_LIST_ENTRY(&sp->list);
page_list_add(sp, &tmp_list);
sp->u.sh.type = shadow_type;
@@ -2524,7 +2524,7 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn)
if (sp->up == 0) return 0;
pmfn = _mfn(sp->up >> PAGE_SHIFT);
ASSERT(mfn_valid(pmfn));
- vaddr = sh_map_domain_page(pmfn);
+ vaddr = map_domain_page(pmfn);
ASSERT(vaddr);
vaddr += sp->up & (PAGE_SIZE-1);
ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
@@ -2554,7 +2554,7 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn)
default: BUG(); /* Some wierd unknown shadow type */
}
- sh_unmap_domain_page(vaddr);
+ unmap_domain_page(vaddr);
if ( rc )
perfc_incr(shadow_up_pointer);
else
@@ -3028,7 +3028,7 @@ int shadow_enable(struct domain *d, u32 mode)
e[i] = ((0x400000U * i)
| _PAGE_PRESENT | _PAGE_RW | _PAGE_USER
| _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
- sh_unmap_domain_page(e);
+ unmap_domain_page(e);
pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
}
@@ -3631,8 +3631,8 @@ int shadow_track_dirty_vram(struct domain *d,
if ( sl1mfn != map_mfn )
{
if ( map_sl1p )
- sh_unmap_domain_page(map_sl1p);
- map_sl1p = sh_map_domain_page(_mfn(sl1mfn));
+ unmap_domain_page(map_sl1p);
+ map_sl1p = map_domain_page(_mfn(sl1mfn));
map_mfn = sl1mfn;
}
sl1e = map_sl1p + (sl1ma & ~PAGE_MASK);
@@ -3663,7 +3663,7 @@ int shadow_track_dirty_vram(struct domain *d,
}
if ( map_sl1p )
- sh_unmap_domain_page(map_sl1p);
+ unmap_domain_page(map_sl1p);
memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
memset(dirty_vram->dirty_bitmap, 0, dirty_size);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 0a942f8..00e8f1f 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -221,16 +221,16 @@ shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version)
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
l4p = (guest_l4e_t *)v->arch.paging.shadow.guest_vtable;
mismatch |= (gw->l4e.l4 != l4p[guest_l4_table_offset(va)].l4);
- l3p = sh_map_domain_page(gw->l3mfn);
+ l3p = map_domain_page(gw->l3mfn);
mismatch |= (gw->l3e.l3 != l3p[guest_l3_table_offset(va)].l3);
- sh_unmap_domain_page(l3p);
+ unmap_domain_page(l3p);
#else
mismatch |= (gw->l3e.l3 !=
v->arch.paging.shadow.gl3e[guest_l3_table_offset(va)].l3);
#endif
- l2p = sh_map_domain_page(gw->l2mfn);
+ l2p = map_domain_page(gw->l2mfn);
mismatch |= (gw->l2e.l2 != l2p[guest_l2_table_offset(va)].l2);
- sh_unmap_domain_page(l2p);
+ unmap_domain_page(l2p);
#else
l2p = (guest_l2e_t *)v->arch.paging.shadow.guest_vtable;
mismatch |= (gw->l2e.l2 != l2p[guest_l2_table_offset(va)].l2);
@@ -238,9 +238,9 @@ shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version)
if ( !(guest_supports_superpages(v) &&
(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)) )
{
- l1p = sh_map_domain_page(gw->l1mfn);
+ l1p = map_domain_page(gw->l1mfn);
mismatch |= (gw->l1e.l1 != l1p[guest_l1_table_offset(va)].l1);
- sh_unmap_domain_page(l1p);
+ unmap_domain_page(l1p);
}
return !mismatch;
@@ -802,7 +802,7 @@ shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
{
perfc_incr(shadow_linear_map_failed);
- map = sh_map_domain_page(mfn);
+ map = map_domain_page(mfn);
dst = map + ((unsigned long)dst & (PAGE_SIZE - 1));
}
@@ -810,7 +810,7 @@ shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
for ( i = 0; i < entries; i++ )
safe_write_entry(dst++, src++);
- if ( map != NULL ) sh_unmap_domain_page(map);
+ if ( map != NULL ) unmap_domain_page(map);
}
/* type is only used to distinguish grant map pages from ordinary RAM
@@ -1256,7 +1256,7 @@ static inline void increment_ptr_to_guest_entry(void *ptr)
#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \
do { \
int _i; \
- shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn)); \
+ shadow_l1e_t *_sp = map_domain_page((_sl1mfn)); \
ASSERT(mfn_to_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow \
|| mfn_to_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ ) \
@@ -1267,7 +1267,7 @@ do { \
if ( _done ) break; \
increment_ptr_to_guest_entry(_gl1p); \
} \
- sh_unmap_domain_page(_sp); \
+ unmap_domain_page(_sp); \
} while (0)
/* 32-bit l1, on PAE or 64-bit shadows: need to walk both pages of shadow */
@@ -1298,7 +1298,7 @@ do { \
ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow); \
for ( _j = 0; _j < 4 && !__done; _j++ ) \
{ \
- shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn); \
+ shadow_l2e_t *_sp = map_domain_page(_sl2mfn); \
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i += 2 ) \
{ \
(_sl2e) = _sp + _i; \
@@ -1307,7 +1307,7 @@ do { \
if ( (__done = (_done)) ) break; \
increment_ptr_to_guest_entry(_gl2p); \
} \
- sh_unmap_domain_page(_sp); \
+ unmap_domain_page(_sp); \
if ( _j < 3 ) _sl2mfn = sh_next_page(_sl2mfn); \
} \
} while (0)
@@ -1318,7 +1318,7 @@ do { \
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \
do { \
int _i; \
- shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn)); \
+ shadow_l2e_t *_sp = map_domain_page((_sl2mfn)); \
ASSERT(shadow_mode_external(_dom)); \
ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
|| mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow); \
@@ -1330,7 +1330,7 @@ do { \
if ( _done ) break; \
increment_ptr_to_guest_entry(_gl2p); \
} \
- sh_unmap_domain_page(_sp); \
+ unmap_domain_page(_sp); \
} while (0)
#else
@@ -1340,7 +1340,7 @@ do { \
do { \
int _i; \
int _xen = !shadow_mode_external(_dom); \
- shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn)); \
+ shadow_l2e_t *_sp = map_domain_page((_sl2mfn)); \
ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
@@ -1357,7 +1357,7 @@ do { \
increment_ptr_to_guest_entry(_gl2p); \
} \
} \
- sh_unmap_domain_page(_sp); \
+ unmap_domain_page(_sp); \
} while (0)
#endif /* different kinds of l2 */
@@ -1368,7 +1368,7 @@ do { \
#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code) \
do { \
int _i; \
- shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn)); \
+ shadow_l3e_t *_sp = map_domain_page((_sl3mfn)); \
ASSERT(mfn_to_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ ) \
{ \
@@ -1378,13 +1378,13 @@ do { \
if ( _done ) break; \
increment_ptr_to_guest_entry(_gl3p); \
} \
- sh_unmap_domain_page(_sp); \
+ unmap_domain_page(_sp); \
} while (0)
/* 64-bit l4: avoid Xen mappings */
#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code) \
do { \
- shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn)); \
+ shadow_l4e_t *_sp = map_domain_page((_sl4mfn)); \
int _xen = !shadow_mode_external(_dom); \
int _i; \
ASSERT(mfn_to_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
@@ -1399,7 +1399,7 @@ do { \
} \
increment_ptr_to_guest_entry(_gl4p); \
} \
- sh_unmap_domain_page(_sp); \
+ unmap_domain_page(_sp); \
} while (0)
#endif
@@ -1419,7 +1419,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
shadow_l4e_t *sl4e;
unsigned int slots;
- sl4e = sh_map_domain_page(sl4mfn);
+ sl4e = map_domain_page(sl4mfn);
BUILD_BUG_ON(sizeof (l4_pgentry_t) != sizeof (shadow_l4e_t));
/* Copy the common Xen mappings from the idle domain */
@@ -1462,7 +1462,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
}
- sh_unmap_domain_page(sl4e);
+ unmap_domain_page(sl4e);
}
#endif
@@ -1478,7 +1478,7 @@ static void sh_install_xen_entries_in_l2h(struct domain *d, mfn_t sl2hmfn)
if ( !is_pv_32bit_domain(d) )
return;
- sl2e = sh_map_domain_page(sl2hmfn);
+ sl2e = map_domain_page(sl2hmfn);
BUILD_BUG_ON(sizeof (l2_pgentry_t) != sizeof (shadow_l2e_t));
/* Copy the common Xen mappings from the idle domain */
@@ -1487,7 +1487,7 @@ static void sh_install_xen_entries_in_l2h(struct domain *d, mfn_t sl2hmfn)
&compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
- sh_unmap_domain_page(sl2e);
+ unmap_domain_page(sl2e);
}
#endif
@@ -1607,7 +1607,7 @@ sh_make_monitor_table(struct vcpu *v)
/* Install an l3 table and an l2 table that will hold the shadow
* linear map entries. This overrides the linear map entry that
* was installed by sh_install_xen_entries_in_l4. */
- l4e = sh_map_domain_page(m4mfn);
+ l4e = map_domain_page(m4mfn);
m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
@@ -1616,9 +1616,9 @@ sh_make_monitor_table(struct vcpu *v)
m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
- l3e = sh_map_domain_page(m3mfn);
+ l3e = map_domain_page(m3mfn);
l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR);
- sh_unmap_domain_page(l3e);
+ unmap_domain_page(l3e);
if ( is_pv_32bit_domain(d) )
{
@@ -1630,13 +1630,13 @@ sh_make_monitor_table(struct vcpu *v)
m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
- l3e = sh_map_domain_page(m3mfn);
+ l3e = map_domain_page(m3mfn);
l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
sh_install_xen_entries_in_l2h(d, m2mfn);
- sh_unmap_domain_page(l3e);
+ unmap_domain_page(l3e);
}
- sh_unmap_domain_page(l4e);
+ unmap_domain_page(l4e);
}
#endif /* SHADOW_PAGING_LEVELS < 4 */
return m4mfn;
@@ -2029,7 +2029,7 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
#if SHADOW_PAGING_LEVELS != 4
{
mfn_t m3mfn;
- l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
+ l4_pgentry_t *l4e = map_domain_page(mmfn);
l3_pgentry_t *l3e;
int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
@@ -2037,10 +2037,10 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
* for the linear map */
ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
m3mfn = _mfn(l4e_get_pfn(l4e[linear_slot]));
- l3e = sh_map_domain_page(m3mfn);
+ l3e = map_domain_page(m3mfn);
ASSERT(l3e_get_flags(l3e[0]) & _PAGE_PRESENT);
shadow_free(d, _mfn(l3e_get_pfn(l3e[0])));
- sh_unmap_domain_page(l3e);
+ unmap_domain_page(l3e);
shadow_free(d, m3mfn);
if ( is_pv_32bit_domain(d) )
@@ -2049,13 +2049,13 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
* Xen VAs at 3GB-4GB */
ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
m3mfn = _mfn(l4e_get_pfn(l4e[0]));
- l3e = sh_map_domain_page(m3mfn);
+ l3e = map_domain_page(m3mfn);
ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
shadow_free(d, _mfn(l3e_get_pfn(l3e[3])));
- sh_unmap_domain_page(l3e);
+ unmap_domain_page(l3e);
shadow_free(d, m3mfn);
}
- sh_unmap_domain_page(l4e);
+ unmap_domain_page(l4e);
}
#endif
@@ -2293,9 +2293,9 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
ASSERT(mfn_valid(snpmfn));
- snp = sh_map_domain_page(snpmfn);
+ snp = map_domain_page(snpmfn);
snp[guest_index(new_ge)] = new_gl1e;
- sh_unmap_domain_page(snp);
+ unmap_domain_page(snp);
}
#endif /* OOS */
@@ -2322,8 +2322,8 @@ void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
sl1mfn = get_shadow_status(d, gl1mfn, SH_type_l1_shadow);
ASSERT(mfn_valid(sl1mfn)); /* Otherwise we would not have been called */
- snp = sh_map_domain_page(snpmfn);
- gp = sh_map_domain_page(gl1mfn);
+ snp = map_domain_page(snpmfn);
+ gp = map_domain_page(gl1mfn);
gl1p = gp;
SHADOW_FOREACH_L1E(sl1mfn, sl1p, &gl1p, 0, {
@@ -2345,8 +2345,8 @@ void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
}
});
- sh_unmap_domain_page(gp);
- sh_unmap_domain_page(snp);
+ unmap_domain_page(gp);
+ unmap_domain_page(snp);
/* Setting shadow L1 entries should never need us to flush the TLB */
ASSERT(!(rc & SHADOW_SET_FLUSH));
@@ -2442,7 +2442,7 @@ sh_map_and_validate(struct vcpu *v, mfn_t gmfn,
guest_idx = guest_index(new_gp);
map_mfn = smfn;
shadow_idx = shadow_index(&map_mfn, guest_idx);
- sl1p = sh_map_domain_page(map_mfn);
+ sl1p = map_domain_page(map_mfn);
/* Validate one entry at a time */
while ( size )
@@ -2454,8 +2454,8 @@ sh_map_and_validate(struct vcpu *v, mfn_t gmfn,
{
/* We have moved to another page of the shadow */
map_mfn = smfn2;
- sh_unmap_domain_page(sl1p);
- sl1p = sh_map_domain_page(map_mfn);
+ unmap_domain_page(sl1p);
+ sl1p = map_domain_page(map_mfn);
}
result |= validate_ge(v,
new_gp,
@@ -2464,7 +2464,7 @@ sh_map_and_validate(struct vcpu *v, mfn_t gmfn,
size -= sizeof(guest_l1e_t);
new_gp += sizeof(guest_l1e_t);
}
- sh_unmap_domain_page(sl1p);
+ unmap_domain_page(sl1p);
return result;
}
@@ -2620,7 +2620,7 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
if ( mfn_valid(gw->l1mfn) )
{
/* Normal guest page; grab the next guest entry */
- gl1p = sh_map_domain_page(gw->l1mfn);
+ gl1p = map_domain_page(gw->l1mfn);
gl1p += guest_l1_table_offset(gw->va);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -2629,7 +2629,7 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
mfn_t snpmfn = oos_snapshot_lookup(d, gw->l1mfn);
ASSERT(mfn_valid(snpmfn));
- snpl1p = sh_map_domain_page(snpmfn);
+ snpl1p = map_domain_page(snpmfn);
snpl1p += guest_l1_table_offset(gw->va);
}
#endif /* OOS */
@@ -2677,10 +2677,10 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
#endif /* OOS */
}
if ( gl1p != NULL )
- sh_unmap_domain_page(gl1p);
+ unmap_domain_page(gl1p);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
if ( snpl1p != NULL )
- sh_unmap_domain_page(snpl1p);
+ unmap_domain_page(snpl1p);
#endif /* OOS */
}
@@ -3169,9 +3169,9 @@ static int sh_page_fault(struct vcpu *v,
ASSERT(mfn_valid(snpmfn));
- snp = sh_map_domain_page(snpmfn);
+ snp = map_domain_page(snpmfn);
snp[guest_l1_table_offset(va)] = gw.l1e;
- sh_unmap_domain_page(snp);
+ unmap_domain_page(snp);
}
#endif /* OOS */
@@ -3695,11 +3695,11 @@ sh_update_linear_entries(struct vcpu *v)
else
{
l4_pgentry_t *ml4e;
- ml4e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
+ ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
__PAGE_HYPERVISOR);
- sh_unmap_domain_page(ml4e);
+ unmap_domain_page(ml4e);
}
}
@@ -3733,17 +3733,17 @@ sh_update_linear_entries(struct vcpu *v)
l4_pgentry_t *ml4e;
l3_pgentry_t *ml3e;
int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
- ml4e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
+ ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
ASSERT(l4e_get_flags(ml4e[linear_slot]) & _PAGE_PRESENT);
l3mfn = _mfn(l4e_get_pfn(ml4e[linear_slot]));
- ml3e = sh_map_domain_page(l3mfn);
- sh_unmap_domain_page(ml4e);
+ ml3e = map_domain_page(l3mfn);
+ unmap_domain_page(ml4e);
ASSERT(l3e_get_flags(ml3e[0]) & _PAGE_PRESENT);
l2mfn = _mfn(l3e_get_pfn(ml3e[0]));
- ml2e = sh_map_domain_page(l2mfn);
- sh_unmap_domain_page(ml3e);
+ ml2e = map_domain_page(l2mfn);
+ unmap_domain_page(ml3e);
}
/* Shadow l3 tables are made up by sh_update_cr3 */
@@ -3759,7 +3759,7 @@ sh_update_linear_entries(struct vcpu *v)
}
if ( v != current )
- sh_unmap_domain_page(ml2e);
+ unmap_domain_page(ml2e);
}
else
domain_crash(d); /* XXX */
@@ -4002,10 +4002,10 @@ sh_update_cr3(struct vcpu *v, int do_locking)
// cache control.
guest_idx &= ~3;
- gl3e = ((guest_l3e_t *)sh_map_domain_page(gmfn)) + guest_idx;
+ gl3e = ((guest_l3e_t *)map_domain_page(gmfn)) + guest_idx;
for ( i = 0; i < 4 ; i++ )
v->arch.paging.shadow.gl3e[i] = gl3e[i];
- sh_unmap_domain_page(gl3e);
+ unmap_domain_page(gl3e);
#elif GUEST_PAGING_LEVELS == 2
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
{
@@ -4213,14 +4213,14 @@ int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn,
&& sp->u.sh.type != SH_type_fl1_shadow) )
goto fail;
- sl1p = sh_map_domain_page(smfn);
+ sl1p = map_domain_page(smfn);
sl1p += off;
sl1e = *sl1p;
if ( ((shadow_l1e_get_flags(sl1e) & (_PAGE_PRESENT|_PAGE_RW))
!= (_PAGE_PRESENT|_PAGE_RW))
|| (mfn_x(shadow_l1e_get_mfn(sl1e)) != mfn_x(gmfn)) )
{
- sh_unmap_domain_page(sl1p);
+ unmap_domain_page(sl1p);
goto fail;
}
@@ -4229,7 +4229,7 @@ int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn,
r = shadow_set_l1e(d, sl1p, sl1e, p2m_ram_rw, smfn);
ASSERT( !(r & SHADOW_SET_ERROR) );
- sh_unmap_domain_page(sl1p);
+ unmap_domain_page(sl1p);
perfc_incr(shadow_writeable_h_7);
return 1;
@@ -4486,7 +4486,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
if ( !fast_path )
{
- gl3pa = sh_map_domain_page(l3mfn);
+ gl3pa = map_domain_page(l3mfn);
gl3e = (guest_l3e_t *)(gl3pa + ((unsigned long)gpa & ~PAGE_MASK));
}
for ( i = 0; i < 4; i++ )
@@ -4666,7 +4666,7 @@ static void *emulate_map_dest(struct vcpu *v,
{
/* Whole write fits on a single page */
sh_ctxt->mfn2 = _mfn(INVALID_MFN);
- map = sh_map_domain_page(sh_ctxt->mfn1) + (vaddr & ~PAGE_MASK);
+ map = map_domain_page(sh_ctxt->mfn1) + (vaddr & ~PAGE_MASK);
}
else
{
@@ -4775,7 +4775,7 @@ static void emulate_unmap_dest(struct vcpu *v,
vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
else
- sh_unmap_domain_page(addr);
+ unmap_domain_page(addr);
atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
}
@@ -4946,7 +4946,7 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
}
#endif
- gl1e = gp = sh_map_domain_page(gl1mfn);
+ gl1e = gp = map_domain_page(gl1mfn);
SHADOW_FOREACH_L1E(sl1mfn, sl1e, &gl1e, done, {
if ( sh_l1e_is_magic(*sl1e) )
@@ -4987,7 +4987,7 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
}
}
});
- sh_unmap_domain_page(gp);
+ unmap_domain_page(gp);
return done;
}
@@ -5038,7 +5038,7 @@ int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x)
AUDIT_FAIL_MIN(2, "gmfn %lx is out of sync", mfn_x(gl2mfn));
#endif
- gl2e = gp = sh_map_domain_page(gl2mfn);
+ gl2e = gp = map_domain_page(gl2mfn);
SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, d, {
s = sh_audit_flags(v, 2, guest_l2e_get_flags(*gl2e),
@@ -5064,7 +5064,7 @@ int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x)
gfn_x(gfn), &p2mt)), mfn_x(gmfn), mfn_x(mfn));
}
});
- sh_unmap_domain_page(gp);
+ unmap_domain_page(gp);
return 0;
}
@@ -5090,7 +5090,7 @@ int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
AUDIT_FAIL_MIN(3, "gmfn %lx is out of sync", mfn_x(gl3mfn));
#endif
- gl3e = gp = sh_map_domain_page(gl3mfn);
+ gl3e = gp = map_domain_page(gl3mfn);
SHADOW_FOREACH_L3E(sl3mfn, sl3e, &gl3e, done, {
s = sh_audit_flags(v, 3, guest_l3e_get_flags(*gl3e),
@@ -5115,7 +5115,7 @@ int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
gfn_x(gfn), mfn_x(gmfn), mfn_x(mfn));
}
});
- sh_unmap_domain_page(gp);
+ unmap_domain_page(gp);
return 0;
}
@@ -5140,7 +5140,7 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
AUDIT_FAIL_MIN(4, "gmfn %lx is out of sync", mfn_x(gl4mfn));
#endif
- gl4e = gp = sh_map_domain_page(gl4mfn);
+ gl4e = gp = map_domain_page(gl4mfn);
SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, d,
{
s = sh_audit_flags(v, 4, guest_l4e_get_flags(*gl4e),
@@ -5160,7 +5160,7 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
gfn_x(gfn), mfn_x(gmfn), mfn_x(mfn));
}
});
- sh_unmap_domain_page(gp);
+ unmap_domain_page(gp);
return 0;
}
#endif /* GUEST_PAGING_LEVELS >= 4 */
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 31b36ef..48f1798 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -504,19 +504,6 @@ sh_mfn_is_a_page_table(mfn_t gmfn)
return type_info && (type_info <= PGT_l4_page_table);
}
-// Provide mfn_t-aware versions of common xen functions
-static inline void *
-sh_map_domain_page(mfn_t mfn)
-{
- return map_domain_page(mfn);
-}
-
-static inline void
-sh_unmap_domain_page(void *p)
-{
- unmap_domain_page(p);
-}
-
/**************************************************************************/
/* Shadow-page refcounting. */
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index ca590f3..c13e536 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -32,21 +32,6 @@
printk("hap error: %s(): " _f, __func__, ##_a)
/************************************************/
-/* hap domain page mapping */
-/************************************************/
-static inline void *
-hap_map_domain_page(mfn_t mfn)
-{
- return map_domain_page(mfn);
-}
-
-static inline void
-hap_unmap_domain_page(void *p)
-{
- unmap_domain_page(p);
-}
-
-/************************************************/
/* hap domain level functions */
/************************************************/
void hap_domain_init(struct domain *d);
--
2.1.4
* Re: [PATCH v3 1/4] xen/domain_page: Convert map_domain_page_global() to using mfn_t
2015-07-09 14:54 [PATCH v3 1/4] xen/domain_page: Convert map_domain_page_global() to using mfn_t Ben Catterall
` (2 preceding siblings ...)
2015-07-09 14:54 ` [PATCH v3 4/4] Remove sh_{un}map_domain_page() and hap_{un}map_domain_page() Ben Catterall
@ 2015-07-10 14:44 ` Tim Deegan
3 siblings, 0 replies; 6+ messages in thread
From: Tim Deegan @ 2015-07-10 14:44 UTC (permalink / raw)
To: Ben Catterall
Cc: keir, ian.campbell, andrew.cooper3, stefano.stabellini,
Jan Beulich, xen-devel
At 15:54 +0100 on 09 Jul (1436457245), Ben Catterall wrote:
> From: Andrew Cooper <andrew.cooper3@citrix.com>
>
> The sh_map/unmap wrappers can be dropped, and take the opportunity to turn
> some #define's into static inlines, for added type safety.
>
> As part of adding the type safety, GCC highlights a problematic include cycle
> with arm/mm.h including domain_page.h which includes xen/mm.h and falls over
> __page_to_mfn being used before being declared. Simply dropping the inclusion
> of domain_page.h fixes the compilation issue.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
(For the x86/mm/shadow parts, for this whole series).
Cheers,
Tim.
* Re: [PATCH v3 3/4] Convert map_domain_page() to use the new mfn_t type
2015-07-09 14:54 ` [PATCH v3 3/4] Convert map_domain_page() to use the new mfn_t type Ben Catterall
@ 2015-07-10 16:14 ` Ian Campbell
0 siblings, 0 replies; 6+ messages in thread
From: Ian Campbell @ 2015-07-10 16:14 UTC (permalink / raw)
To: Ben Catterall
Cc: keir, andrew.cooper3, tim, stefano.stabellini, jbeulich,
xen-devel
On Thu, 2015-07-09 at 15:54 +0100, Ben Catterall wrote:
> xen/arch/arm/domain_build.c | 2 +-
> xen/arch/arm/kernel.c | 2 +-
> xen/arch/arm/mm.c | 12 +++++-----
> xen/arch/arm/p2m.c | 4 ++--
> xen/arch/arm/traps.c | 4 ++--
Acked-by: Ian Campbell <ian.campbell@citrix.com>