* [PATCH 01 of 11] x86/mm: make p2m lock into an rwlock
From: Tim Deegan @ 2012-05-10 14:59 UTC
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID 4a99c5456e9d8aa707bbd57bb4f4af88e1d456ca
# Parent 8a86d841e6d42fbffc9e20d3028875dd4990882d
x86/mm: make p2m lock into an rwlock
Because the p2m lock was already recursive, we need to add a new
mm-lock class of recursive rwlocks.
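For illustration, the intended usage pattern is that mutators take the write
lock (possibly recursively) while pure lookups only ever take the read side.
A minimal sketch, not part of the change itself:

/* Illustrative sketch only: how the new lock class is meant to be used. */
static void example_mutate(struct p2m_domain *p2m)
{
    p2m_lock(p2m);        /* write lock; recursion only bumps recurse_count */
    p2m_lock(p2m);
    /* ... modify p2m entries ... */
    p2m_unlock(p2m);
    p2m_unlock(p2m);      /* lock level restored on the final unlock */
}

static void example_query(struct p2m_domain *p2m)
{
    p2m_read_lock(p2m);   /* shared with other readers; no mutation allowed */
    /* ... look up p2m entries ... */
    p2m_read_unlock(p2m);
}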
Signed-off-by: Tim Deegan <tim@xen.org>
diff -r 8a86d841e6d4 -r 4a99c5456e9d xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h Tue May 08 13:36:24 2012 +0200
+++ b/xen/arch/x86/mm/mm-locks.h Thu May 10 15:54:16 2012 +0100
@@ -97,13 +97,71 @@ static inline void _mm_enforce_order_loc
__set_lock_level(level);
}
+
+static inline void mm_rwlock_init(mm_rwlock_t *l)
+{
+ rwlock_init(&l->lock);
+ l->locker = -1;
+ l->locker_function = "nobody";
+ l->unlock_level = 0;
+}
+
+static inline int mm_write_locked_by_me(mm_rwlock_t *l)
+{
+ return (l->locker == get_processor_id());
+}
+
+static inline void _mm_write_lock(mm_rwlock_t *l, const char *func, int level)
+{
+ if ( !mm_write_locked_by_me(l) )
+ {
+ __check_lock_level(level);
+ write_lock(&l->lock);
+ l->locker = get_processor_id();
+ l->locker_function = func;
+ l->unlock_level = __get_lock_level();
+ __set_lock_level(level);
+ }
+ l->recurse_count++;
+}
+
+static inline void mm_write_unlock(mm_rwlock_t *l)
+{
+ if ( --(l->recurse_count) != 0 )
+ return;
+ l->locker = -1;
+ l->locker_function = "nobody";
+ __set_lock_level(l->unlock_level);
+ write_unlock(&l->lock);
+}
+
+static inline void _mm_read_lock(mm_rwlock_t *l, int level)
+{
+ __check_lock_level(level);
+ read_lock(&l->lock);
+ /* There's nowhere to store the per-CPU unlock level so we can't
+ * set the lock level. */
+}
+
+static inline void mm_read_unlock(mm_rwlock_t *l)
+{
+ read_unlock(&l->lock);
+}
+
/* This wrapper uses the line number to express the locking order below */
#define declare_mm_lock(name) \
static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
{ _mm_lock(l, func, __LINE__, rec); }
+#define declare_mm_rwlock(name) \
+ static inline void mm_write_lock_##name(mm_rwlock_t *l, const char *func) \
+ { _mm_write_lock(l, func, __LINE__); } \
+ static inline void mm_read_lock_##name(mm_rwlock_t *l) \
+ { _mm_read_lock(l, __LINE__); }
/* These capture the name of the calling function */
#define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
#define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
+#define mm_write_lock(name, l) mm_write_lock_##name(l, __func__)
+#define mm_read_lock(name, l) mm_read_lock_##name(l)
/* This wrapper is intended for "external" locks which do not use
* the mm_lock_t types. Such locks inside the mm code are also subject
@@ -152,27 +210,24 @@ declare_mm_lock(nestedp2m)
#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
/* P2M lock (per-p2m-table)
- *
- * This protects all queries and updates to the p2m table.
*
- * A note about ordering:
- * The order established here is enforced on all mutations of a p2m.
- * For lookups, the order established here is enforced only for hap
- * domains (1. shadow domains present a few nasty inversions;
- * 2. shadow domains do not support paging and sharing,
- * the main sources of dynamic p2m mutations)
- *
- * The lock is recursive as it is common for a code path to look up a gfn
- * and later mutate it.
+ * This protects all queries and updates to the p2m table.
+ * Queries may be made under the read lock but all modifications
+ * need the main (write) lock.
+ *
+ * The write lock is recursive as it is common for a code path to look
+ * up a gfn and later mutate it.
*/
-declare_mm_lock(p2m)
-#define p2m_lock(p) mm_lock_recursive(p2m, &(p)->lock)
-#define gfn_lock(p,g,o) mm_lock_recursive(p2m, &(p)->lock)
-#define p2m_unlock(p) mm_unlock(&(p)->lock)
-#define gfn_unlock(p,g,o) mm_unlock(&(p)->lock)
-#define p2m_locked_by_me(p) mm_locked_by_me(&(p)->lock)
-#define gfn_locked_by_me(p,g) mm_locked_by_me(&(p)->lock)
+declare_mm_rwlock(p2m);
+#define p2m_lock(p) mm_write_lock(p2m, &(p)->lock);
+#define p2m_unlock(p) mm_write_unlock(&(p)->lock);
+#define gfn_lock(p,g,o) p2m_lock(p)
+#define gfn_unlock(p,g,o) p2m_unlock(p)
+#define p2m_read_lock(p) mm_read_lock(p2m, &(p)->lock)
+#define p2m_read_unlock(p) mm_read_unlock(&(p)->lock)
+#define p2m_locked_by_me(p) mm_write_locked_by_me(&(p)->lock)
+#define gfn_locked_by_me(p,g) p2m_locked_by_me(p)
/* Sharing per page lock
*
diff -r 8a86d841e6d4 -r 4a99c5456e9d xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Tue May 08 13:36:24 2012 +0200
+++ b/xen/arch/x86/mm/p2m.c Thu May 10 15:54:16 2012 +0100
@@ -71,7 +71,7 @@ boolean_param("hap_2mb", opt_hap_2mb);
/* Init the datastructures for later use by the p2m code */
static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
{
- mm_lock_init(&p2m->lock);
+ mm_rwlock_init(&p2m->lock);
mm_lock_init(&p2m->pod.lock);
INIT_LIST_HEAD(&p2m->np2m_list);
INIT_PAGE_LIST_HEAD(&p2m->pages);
diff -r 8a86d841e6d4 -r 4a99c5456e9d xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Tue May 08 13:36:24 2012 +0200
+++ b/xen/include/asm-x86/mm.h Thu May 10 15:54:16 2012 +0100
@@ -649,4 +649,12 @@ typedef struct mm_lock {
const char *locker_function; /* func that took it */
} mm_lock_t;
+typedef struct mm_rwlock {
+ rwlock_t lock;
+ int unlock_level;
+ int recurse_count;
+ int locker; /* CPU that holds the write lock */
+ const char *locker_function; /* func that took it */
+} mm_rwlock_t;
+
#endif /* __ASM_X86_MM_H__ */
diff -r 8a86d841e6d4 -r 4a99c5456e9d xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Tue May 08 13:36:24 2012 +0200
+++ b/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
@@ -192,7 +192,7 @@ typedef unsigned int p2m_query_t;
/* Per-p2m-table state */
struct p2m_domain {
/* Lock that protects updates to the p2m */
- mm_lock_t lock;
+ mm_rwlock_t lock;
/* Shadow translated domain: p2m mapping */
pagetable_t phys_table;
* [PATCH 02 of 11] x86/mm: Introduce get_page_from_gfn()
From: Tim Deegan @ 2012-05-10 14:59 UTC
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID e03806b10f0026590e7775008f24d2a96051552e
# Parent 4a99c5456e9d8aa707bbd57bb4f4af88e1d456ca
x86/mm: Introduce get_page_from_gfn().
This new function does a p2m lookup under the read lock, falling back
to the write lock only if it needs to make a change. If the GFN is
backed by RAM, it takes a refcount on the underlying page.
The following patches will convert many paths that currently use
get_gfn/put_gfn to use the new interface. That will avoid serializing
p2m accesses in the common case where no updates are needed (i.e. no
page-sharing, VM paging or other p2m trickery).
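As a rough sketch of the calling convention (illustrative only; the helper
name below is made up), a converted caller looks the page up, uses it, and
drops the reference with put_page() instead of bracketing the access with
get_gfn()/put_gfn():

/* Sketch of a converted caller. */
static int example_touch_gfn(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    struct page_info *page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);

    if ( !page )
        return -EINVAL;      /* gfn is not backed by usable RAM */
    /* ... access the frame, e.g. via __map_domain_page(page) ... */
    put_page(page);          /* drop the reference; no put_gfn() needed */
    return 0;
}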
Signed-off-by: Tim Deegan <tim@xen.org>
diff -r 4a99c5456e9d -r e03806b10f00 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/mm/p2m.c Thu May 10 15:54:16 2012 +0100
@@ -207,6 +207,59 @@ void __put_gfn(struct p2m_domain *p2m, u
gfn_unlock(p2m, gfn, 0);
}
+/* Atomically look up a GFN and take a reference count on the backing page. */
+struct page_info *get_page_from_gfn_p2m(
+ struct domain *d, struct p2m_domain *p2m, unsigned long gfn,
+ p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
+{
+ struct page_info *page = NULL;
+ p2m_access_t _a;
+ p2m_type_t _t;
+ mfn_t mfn;
+
+ /* Allow t or a to be NULL */
+ t = t ?: &_t;
+ a = a ?: &_a;
+
+ if ( likely(!p2m_locked_by_me(p2m)) )
+ {
+ /* Fast path: look up and get out */
+ p2m_read_lock(p2m);
+ mfn = __get_gfn_type_access(p2m, gfn, t, a, 0, NULL, 0);
+ if ( (p2m_is_ram(*t) || p2m_is_grant(*t))
+ && mfn_valid(mfn)
+ && !((q & P2M_UNSHARE) && p2m_is_shared(*t)) )
+ {
+ page = mfn_to_page(mfn);
+ if ( !get_page(page, d)
+ /* Page could be shared */
+ && !get_page(page, dom_cow) )
+ page = NULL;
+ }
+ p2m_read_unlock(p2m);
+
+ if ( page )
+ return page;
+
+ /* Error path: not a suitable GFN at all */
+ if ( !p2m_is_ram(*t) && !p2m_is_paging(*t) && !p2m_is_magic(*t) )
+ return NULL;
+ }
+
+ /* Slow path: take the write lock and do fixups */
+ mfn = get_gfn_type_access(p2m, gfn, t, a, q, NULL);
+ if ( p2m_is_ram(*t) && mfn_valid(mfn) )
+ {
+ page = mfn_to_page(mfn);
+ if ( !get_page(page, d) )
+ page = NULL;
+ }
+ put_gfn(d, gfn);
+
+ return page;
+}
+
+
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
diff -r 4a99c5456e9d -r e03806b10f00 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
+++ b/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
@@ -377,6 +377,33 @@ static inline mfn_t get_gfn_query_unlock
return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
}
+/* Atomically look up a GFN and take a reference count on the backing page.
+ * This makes sure the page doesn't get freed (or shared) underfoot,
+ * and should be used by any path that intends to write to the backing page.
+ * Returns NULL if the page is not backed by RAM.
+ * The caller is responsible for calling put_page() afterwards. */
+struct page_info *get_page_from_gfn_p2m(struct domain *d,
+ struct p2m_domain *p2m,
+ unsigned long gfn,
+ p2m_type_t *t, p2m_access_t *a,
+ p2m_query_t q);
+
+static inline struct page_info *get_page_from_gfn(
+ struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+{
+ struct page_info *page;
+
+ if ( paging_mode_translate(d) )
+ return get_page_from_gfn_p2m(d, p2m_get_hostp2m(d), gfn, t, NULL, q);
+
+ /* Non-translated guests see 1-1 RAM mappings everywhere */
+ if (t)
+ *t = p2m_ram_rw;
+ page = __mfn_to_page(gfn);
+ return get_page(page, d) ? page : NULL;
+}
+
+
/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
* [PATCH 03 of 11] arm: Implement get_page_from_gfn()
From: Tim Deegan @ 2012-05-10 14:59 UTC
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID d774bb5c6326d1a7e88a3bfadc546d154a2ad895
# Parent e03806b10f0026590e7775008f24d2a96051552e
arm: Implement get_page_from_gfn()
We will be calling this from common code, so add a basic
implementation to arch/arm.
After 4.2 we should reshuffle some of the p2m interface out of
arch/x86 into common headers; for now duplicate a little bit of it.
Signed-off-by: Tim Deegan <tim@xen.org>
diff -r e03806b10f00 -r d774bb5c6326 xen/include/asm-arm/p2m.h
--- a/xen/include/asm-arm/p2m.h Thu May 10 15:54:16 2012 +0100
+++ b/xen/include/asm-arm/p2m.h Thu May 10 15:54:16 2012 +0100
@@ -53,6 +53,28 @@ p2m_pod_decrease_reservation(struct doma
xen_pfn_t gpfn,
unsigned int order);
+/* Look up a GFN and take a reference count on the backing page. */
+typedef int p2m_type_t;
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC (1u<<0) /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE (1u<<1) /* Break CoW sharing */
+
+static inline struct page_info *get_page_from_gfn(
+ struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+{
+ struct page_info *page;
+ unsigned long mfn = gmfn_to_mfn(d, gfn);
+
+ ASSERT(t == NULL);
+
+ if (!mfn_valid(mfn))
+ return NULL;
+ page = mfn_to_page(mfn);
+ if ( !get_page(page, d) )
+ return NULL;
+ return page;
+}
+
/* Compatibility function exporting the old untyped interface */
static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
{
* Re: [PATCH 03 of 11] arm: Implement get_page_from_gfn()
From: Ian Campbell @ 2012-05-10 15:21 UTC
To: Tim Deegan; +Cc: Andres Lagar-Cavilla, xen-devel@lists.xen.org
On Thu, 2012-05-10 at 15:59 +0100, Tim Deegan wrote:
> # HG changeset patch
> # User Tim Deegan <tim@xen.org>
> # Date 1336661656 -3600
> # Node ID d774bb5c6326d1a7e88a3bfadc546d154a2ad895
> # Parent e03806b10f0026590e7775008f24d2a96051552e
> arm: Implement get_page_from_gfn()
>
> We will be calling this from common code, so add a basic
> implementation to arch/arm.
>
> After 4.2 we should reshuffle some of the p2m interface out of
> arch/x86 into common headers; for now duplicate a little bit of it.
>
> Signed-off-by: Tim Deegan <tim@xen.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Feel free to commit along with the rest of the series.
Ian.
>
> diff -r e03806b10f00 -r d774bb5c6326 xen/include/asm-arm/p2m.h
> --- a/xen/include/asm-arm/p2m.h Thu May 10 15:54:16 2012 +0100
> +++ b/xen/include/asm-arm/p2m.h Thu May 10 15:54:16 2012 +0100
> @@ -53,6 +53,28 @@ p2m_pod_decrease_reservation(struct doma
> xen_pfn_t gpfn,
> unsigned int order);
>
> +/* Look up a GFN and take a reference count on the backing page. */
> +typedef int p2m_type_t;
> +typedef unsigned int p2m_query_t;
> +#define P2M_ALLOC (1u<<0) /* Populate PoD and paged-out entries */
> +#define P2M_UNSHARE (1u<<1) /* Break CoW sharing */
> +
> +static inline struct page_info *get_page_from_gfn(
> + struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
> +{
> + struct page_info *page;
> + unsigned long mfn = gmfn_to_mfn(d, gfn);
> +
> + ASSERT(t == NULL);
> +
> + if (!mfn_valid(mfn))
> + return NULL;
> + page = mfn_to_page(mfn);
> + if ( !get_page(page, d) )
> + return NULL;
> + return page;
> +}
> +
> /* Compatibility function exporting the old untyped interface */
> static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
> {
>
* [PATCH 04 of 11] x86/hvm: Use get_page_from_gfn() instead of get_gfn()/put_gfn
From: Tim Deegan @ 2012-05-10 14:59 UTC
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID 9da426cdc7e478e426bcbd5391b8deacdc85db1e
# Parent d774bb5c6326d1a7e88a3bfadc546d154a2ad895
x86/hvm: Use get_page_from_gfn() instead of get_gfn()/put_gfn.
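The conversions below all follow the same shape; roughly (a sketch, not
lifted verbatim from any single hunk, and the return values vary by caller):

/* Recurring pattern: handle paging/shared cases first, then use the page. */
struct page_info *page;
p2m_type_t p2mt;

page = get_page_from_gfn(d, gfn, &p2mt, P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
    if ( page )
        put_page(page);
    p2m_mem_paging_populate(d, gfn);   /* ask the pager to bring it back */
    return X86EMUL_RETRY;
}
if ( p2m_is_shared(p2mt) )
{
    if ( page )
        put_page(page);
    return X86EMUL_RETRY;
}
if ( !page )
    return X86EMUL_UNHANDLEABLE;       /* not RAM */
/* ... use the page; put_page(page) when done -- no put_gfn() anywhere ... */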
Signed-off-by: Tim Deegan <tim@xen.org>
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
diff -r d774bb5c6326 -r 9da426cdc7e4 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/hvm/emulate.c Thu May 10 15:54:16 2012 +0100
@@ -60,34 +60,25 @@ static int hvmemul_do_io(
ioreq_t *p = get_ioreq(curr);
unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
p2m_type_t p2mt;
- mfn_t ram_mfn;
+ struct page_info *ram_page;
int rc;
/* Check for paged out page */
- ram_mfn = get_gfn_unshare(curr->domain, ram_gfn, &p2mt);
+ ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
- put_gfn(curr->domain, ram_gfn);
+ if ( ram_page )
+ put_page(ram_page);
p2m_mem_paging_populate(curr->domain, ram_gfn);
return X86EMUL_RETRY;
}
if ( p2m_is_shared(p2mt) )
{
- put_gfn(curr->domain, ram_gfn);
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_RETRY;
}
- /* Maintain a ref on the mfn to ensure liveness. Put the gfn
- * to avoid potential deadlock wrt event channel lock, later. */
- if ( mfn_valid(mfn_x(ram_mfn)) )
- if ( !get_page(mfn_to_page(mfn_x(ram_mfn)),
- curr->domain) )
- {
- put_gfn(curr->domain, ram_gfn);
- return X86EMUL_RETRY;
- }
- put_gfn(curr->domain, ram_gfn);
-
/*
* Weird-sized accesses have undefined behaviour: we discard writes
* and read all-ones.
@@ -98,8 +89,8 @@ static int hvmemul_do_io(
ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
if ( dir == IOREQ_READ )
memset(p_data, ~0, size);
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_UNHANDLEABLE;
}
@@ -120,8 +111,8 @@ static int hvmemul_do_io(
unsigned int bytes = vio->mmio_large_write_bytes;
if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
{
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_OKAY;
}
}
@@ -133,8 +124,8 @@ static int hvmemul_do_io(
{
memcpy(p_data, &vio->mmio_large_read[addr - pa],
size);
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_OKAY;
}
}
@@ -148,8 +139,8 @@ static int hvmemul_do_io(
vio->io_state = HVMIO_none;
if ( p_data == NULL )
{
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_UNHANDLEABLE;
}
goto finish_access;
@@ -159,13 +150,13 @@ static int hvmemul_do_io(
(addr == (vio->mmio_large_write_pa +
vio->mmio_large_write_bytes)) )
{
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_RETRY;
}
default:
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_UNHANDLEABLE;
}
@@ -173,8 +164,8 @@ static int hvmemul_do_io(
{
gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
p->state);
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_UNHANDLEABLE;
}
@@ -226,8 +217,8 @@ static int hvmemul_do_io(
if ( rc != X86EMUL_OKAY )
{
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return rc;
}
@@ -263,8 +254,8 @@ static int hvmemul_do_io(
}
}
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_OKAY;
}
diff -r d774bb5c6326 -r 9da426cdc7e4 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/hvm/hvm.c Thu May 10 15:54:16 2012 +0100
@@ -395,48 +395,41 @@ int prepare_ring_for_helper(
{
struct page_info *page;
p2m_type_t p2mt;
- unsigned long mfn;
void *va;
- mfn = mfn_x(get_gfn_unshare(d, gmfn, &p2mt));
- if ( !p2m_is_ram(p2mt) )
- {
- put_gfn(d, gmfn);
- return -EINVAL;
- }
+ page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
- put_gfn(d, gmfn);
+ if ( page )
+ put_page(page);
p2m_mem_paging_populate(d, gmfn);
return -ENOENT;
}
if ( p2m_is_shared(p2mt) )
{
- put_gfn(d, gmfn);
+ if ( page )
+ put_page(page);
return -ENOENT;
}
- ASSERT(mfn_valid(mfn));
-
- page = mfn_to_page(mfn);
- if ( !get_page_and_type(page, d, PGT_writable_page) )
+ if ( !page )
+ return -EINVAL;
+
+ if ( !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gmfn);
+ put_page(page);
return -EINVAL;
}
- va = map_domain_page_global(mfn);
+ va = __map_domain_page_global(page);
if ( va == NULL )
{
put_page_and_type(page);
- put_gfn(d, gmfn);
return -ENOMEM;
}
*_va = va;
*_page = page;
- put_gfn(d, gmfn);
-
return 0;
}
@@ -1607,8 +1600,8 @@ int hvm_mov_from_cr(unsigned int cr, uns
int hvm_set_cr0(unsigned long value)
{
struct vcpu *v = current;
- p2m_type_t p2mt;
- unsigned long gfn, mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+ unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+ struct page_info *page;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -1647,23 +1640,20 @@ int hvm_set_cr0(unsigned long value)
{
/* The guest CR3 must be pointing to the guest physical. */
gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
- mfn = mfn_x(get_gfn(v->domain, gfn, &p2mt));
- if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
- !get_page(mfn_to_page(mfn), v->domain))
+ page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
+ if ( !page )
{
- put_gfn(v->domain, gfn);
- gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
- v->arch.hvm_vcpu.guest_cr[3], mfn);
+ gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx\n",
+ v->arch.hvm_vcpu.guest_cr[3]);
domain_crash(v->domain);
return X86EMUL_UNHANDLEABLE;
}
/* Now arch.guest_table points to machine physical. */
- v->arch.guest_table = pagetable_from_pfn(mfn);
+ v->arch.guest_table = pagetable_from_page(page);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_vcpu.guest_cr[3], mfn);
- put_gfn(v->domain, gfn);
+ v->arch.hvm_vcpu.guest_cr[3], page_to_mfn(page));
}
}
else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
@@ -1738,26 +1728,21 @@ int hvm_set_cr0(unsigned long value)
int hvm_set_cr3(unsigned long value)
{
- unsigned long mfn;
- p2m_type_t p2mt;
struct vcpu *v = current;
+ struct page_info *page;
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
(value != v->arch.hvm_vcpu.guest_cr[3]) )
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
- mfn = mfn_x(get_gfn(v->domain, value >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
- !get_page(mfn_to_page(mfn), v->domain) )
- {
- put_gfn(v->domain, value >> PAGE_SHIFT);
- goto bad_cr3;
- }
+ page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
+ goto bad_cr3;
put_page(pagetable_get_page(v->arch.guest_table));
- v->arch.guest_table = pagetable_from_pfn(mfn);
- put_gfn(v->domain, value >> PAGE_SHIFT);
+ v->arch.guest_table = pagetable_from_page(page);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
@@ -1914,46 +1899,29 @@ int hvm_virtual_to_linear_addr(
static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
{
void *map;
- unsigned long mfn;
p2m_type_t p2mt;
- struct page_info *pg;
+ struct page_info *page;
struct domain *d = current->domain;
- int rc;
-
- mfn = mfn_x(writable
- ? get_gfn_unshare(d, gfn, &p2mt)
- : get_gfn(d, gfn, &p2mt));
- if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
+
+ page = get_page_from_gfn(d, gfn, &p2mt,
+ writable ? P2M_UNSHARE : P2M_ALLOC);
+ if ( (p2m_is_shared(p2mt) && writable) || !page )
{
- put_gfn(d, gfn);
+ if ( page )
+ put_page(page);
return NULL;
}
if ( p2m_is_paging(p2mt) )
{
- put_gfn(d, gfn);
+ put_page(page);
p2m_mem_paging_populate(d, gfn);
return NULL;
}
- ASSERT(mfn_valid(mfn));
-
if ( writable )
- paging_mark_dirty(d, mfn);
-
- /* Get a ref on the page, considering that it could be shared */
- pg = mfn_to_page(mfn);
- rc = get_page(pg, d);
- if ( !rc && !writable )
- /* Page could be shared */
- rc = get_page(pg, dom_cow);
- if ( !rc )
- {
- put_gfn(d, gfn);
- return NULL;
- }
-
- map = map_domain_page(mfn);
- put_gfn(d, gfn);
+ paging_mark_dirty(d, page_to_mfn(page));
+
+ map = __map_domain_page(page);
return map;
}
@@ -2358,7 +2326,8 @@ static enum hvm_copy_result __hvm_copy(
void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
{
struct vcpu *curr = current;
- unsigned long gfn, mfn;
+ unsigned long gfn;
+ struct page_info *page;
p2m_type_t p2mt;
char *p;
int count, todo = size;
@@ -2402,32 +2371,33 @@ static enum hvm_copy_result __hvm_copy(
gfn = addr >> PAGE_SHIFT;
}
- mfn = mfn_x(get_gfn_unshare(curr->domain, gfn, &p2mt));
+ page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
p2m_mem_paging_populate(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
if ( p2m_is_shared(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_gfn_shared;
}
if ( p2m_is_grant(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_unhandleable;
}
- if ( !p2m_is_ram(p2mt) )
+ if ( !page )
{
- put_gfn(curr->domain, gfn);
return HVMCOPY_bad_gfn_to_mfn;
}
- ASSERT(mfn_valid(mfn));
-
- p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
+
+ p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
if ( flags & HVMCOPY_to_guest )
{
@@ -2437,12 +2407,12 @@ static enum hvm_copy_result __hvm_copy(
if ( xchg(&lastpage, gfn) != gfn )
gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
" memory page. gfn=%#lx, mfn=%#lx\n",
- gfn, mfn);
+ gfn, page_to_mfn(page));
}
else
{
memcpy(p, buf, count);
- paging_mark_dirty(curr->domain, mfn);
+ paging_mark_dirty(curr->domain, page_to_mfn(page));
}
}
else
@@ -2455,7 +2425,7 @@ static enum hvm_copy_result __hvm_copy(
addr += count;
buf += count;
todo -= count;
- put_gfn(curr->domain, gfn);
+ put_page(page);
}
return HVMCOPY_okay;
@@ -2464,7 +2434,8 @@ static enum hvm_copy_result __hvm_copy(
static enum hvm_copy_result __hvm_clear(paddr_t addr, int size)
{
struct vcpu *curr = current;
- unsigned long gfn, mfn;
+ unsigned long gfn;
+ struct page_info *page;
p2m_type_t p2mt;
char *p;
int count, todo = size;
@@ -2500,32 +2471,35 @@ static enum hvm_copy_result __hvm_clear(
return HVMCOPY_bad_gva_to_gfn;
}
- mfn = mfn_x(get_gfn_unshare(curr->domain, gfn, &p2mt));
+ page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
+ if ( page )
+ put_page(page);
p2m_mem_paging_populate(curr->domain, gfn);
- put_gfn(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
if ( p2m_is_shared(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_gfn_shared;
}
if ( p2m_is_grant(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_unhandleable;
}
- if ( !p2m_is_ram(p2mt) )
+ if ( !page )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_bad_gfn_to_mfn;
}
- ASSERT(mfn_valid(mfn));
-
- p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
+
+ p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
if ( p2mt == p2m_ram_ro )
{
@@ -2533,19 +2507,19 @@ static enum hvm_copy_result __hvm_clear(
if ( xchg(&lastpage, gfn) != gfn )
gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
" memory page. gfn=%#lx, mfn=%#lx\n",
- gfn, mfn);
+ gfn, page_to_mfn(page));
}
else
{
memset(p, 0x00, count);
- paging_mark_dirty(curr->domain, mfn);
+ paging_mark_dirty(curr->domain, page_to_mfn(page));
}
unmap_domain_page(p);
addr += count;
todo -= count;
- put_gfn(curr->domain, gfn);
+ put_page(page);
}
return HVMCOPY_okay;
@@ -4000,35 +3974,16 @@ long do_hvm_op(unsigned long op, XEN_GUE
for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
{
- p2m_type_t t;
- mfn_t mfn = get_gfn_unshare(d, pfn, &t);
- if ( p2m_is_paging(t) )
+ struct page_info *page;
+ page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
+ if ( page )
{
- put_gfn(d, pfn);
- p2m_mem_paging_populate(d, pfn);
- rc = -EINVAL;
- goto param_fail3;
- }
- if( p2m_is_shared(t) )
- {
- /* If it insists on not unsharing itself, crash the domain
- * rather than crashing the host down in mark dirty */
- gdprintk(XENLOG_WARNING,
- "shared pfn 0x%lx modified?\n", pfn);
- domain_crash(d);
- put_gfn(d, pfn);
- rc = -EINVAL;
- goto param_fail3;
- }
-
- if ( mfn_x(mfn) != INVALID_MFN )
- {
- paging_mark_dirty(d, mfn_x(mfn));
+ paging_mark_dirty(d, page_to_mfn(page));
/* These are most probably not page tables any more */
/* don't take a long time and don't die either */
- sh_remove_shadows(d->vcpu[0], mfn, 1, 0);
+ sh_remove_shadows(d->vcpu[0], _mfn(page_to_mfn(page)), 1, 0);
+ put_page(page);
}
- put_gfn(d, pfn);
}
param_fail3:
diff -r d774bb5c6326 -r 9da426cdc7e4 xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/hvm/stdvga.c Thu May 10 15:54:16 2012 +0100
@@ -482,7 +482,8 @@ static int mmio_move(struct hvm_hw_stdvg
if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
HVMCOPY_okay )
{
- (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
+ struct page_info *dp = get_page_from_gfn(
+ d, data >> PAGE_SHIFT, &p2mt, P2M_ALLOC);
/*
* The only case we handle is vga_mem <-> vga_mem.
* Anything else disables caching and leaves it to qemu-dm.
@@ -490,11 +491,12 @@ static int mmio_move(struct hvm_hw_stdvg
if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
{
- put_gfn(d, data >> PAGE_SHIFT);
+ if ( dp )
+ put_page(dp);
return 0;
}
+ ASSERT(!dp);
stdvga_mem_write(data, tmp, p->size);
- put_gfn(d, data >> PAGE_SHIFT);
}
data += sign * p->size;
addr += sign * p->size;
@@ -508,15 +510,16 @@ static int mmio_move(struct hvm_hw_stdvg
if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
HVMCOPY_okay )
{
- (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
+ struct page_info *dp = get_page_from_gfn(
+ d, data >> PAGE_SHIFT, &p2mt, P2M_ALLOC);
if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
{
- put_gfn(d, data >> PAGE_SHIFT);
+ if ( dp )
+ put_page(dp);
return 0;
}
tmp = stdvga_mem_read(data, p->size);
- put_gfn(d, data >> PAGE_SHIFT);
}
stdvga_mem_write(addr, tmp, p->size);
data += sign * p->size;
diff -r d774bb5c6326 -r 9da426cdc7e4 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Thu May 10 15:54:16 2012 +0100
@@ -232,8 +232,7 @@ static int svm_vmcb_save(struct vcpu *v,
static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
{
- unsigned long mfn = 0;
- p2m_type_t p2mt;
+ struct page_info *page = NULL;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
@@ -250,10 +249,10 @@ static int svm_vmcb_restore(struct vcpu
{
if ( c->cr0 & X86_CR0_PG )
{
- mfn = mfn_x(get_gfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
+ page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
{
- put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
c->cr3);
return -EINVAL;
@@ -263,9 +262,8 @@ static int svm_vmcb_restore(struct vcpu
if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
put_page(pagetable_get_page(v->arch.guest_table));
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( c->cr0 & X86_CR0_PG )
- put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
+ v->arch.guest_table =
+ page ? pagetable_from_page(page) : pagetable_null();
}
v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
diff -r d774bb5c6326 -r 9da426cdc7e4 xen/arch/x86/hvm/viridian.c
--- a/xen/arch/x86/hvm/viridian.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/hvm/viridian.c Thu May 10 15:54:16 2012 +0100
@@ -134,18 +134,19 @@ void dump_apic_assist(struct vcpu *v)
static void enable_hypercall_page(struct domain *d)
{
unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
- unsigned long mfn = get_gfn_untyped(d, gmfn);
+ struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
uint8_t *p;
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ if ( !page || !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gmfn);
- gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
+ if ( page )
+ put_page(page);
+ gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
+ page_to_mfn(page));
return;
}
- p = map_domain_page(mfn);
+ p = __map_domain_page(page);
/*
* We set the bit 31 in %eax (reserved field in the Viridian hypercall
@@ -162,15 +163,14 @@ static void enable_hypercall_page(struct
unmap_domain_page(p);
- put_page_and_type(mfn_to_page(mfn));
- put_gfn(d, gmfn);
+ put_page_and_type(page);
}
void initialize_apic_assist(struct vcpu *v)
{
struct domain *d = v->domain;
unsigned long gmfn = v->arch.hvm_vcpu.viridian.apic_assist.fields.pfn;
- unsigned long mfn = get_gfn_untyped(d, gmfn);
+ struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
uint8_t *p;
/*
@@ -183,22 +183,22 @@ void initialize_apic_assist(struct vcpu
* details of how Windows uses the page.
*/
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ if ( !page || !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gmfn);
- gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
+ if ( page )
+ put_page(page);
+ gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
+ page_to_mfn(page));
return;
}
- p = map_domain_page(mfn);
+ p = __map_domain_page(page);
*(u32 *)p = 0;
unmap_domain_page(p);
- put_page_and_type(mfn_to_page(mfn));
- put_gfn(d, gmfn);
+ put_page_and_type(page);
}
int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
diff -r d774bb5c6326 -r 9da426cdc7e4 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Thu May 10 15:54:16 2012 +0100
@@ -480,17 +480,16 @@ static void vmx_vmcs_save(struct vcpu *v
static int vmx_restore_cr0_cr3(
struct vcpu *v, unsigned long cr0, unsigned long cr3)
{
- unsigned long mfn = 0;
- p2m_type_t p2mt;
+ struct page_info *page = NULL;
if ( paging_mode_shadow(v->domain) )
{
if ( cr0 & X86_CR0_PG )
{
- mfn = mfn_x(get_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
+ page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
{
- put_gfn(v->domain, cr3 >> PAGE_SHIFT);
gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
return -EINVAL;
}
@@ -499,9 +498,8 @@ static int vmx_restore_cr0_cr3(
if ( hvm_paging_enabled(v) )
put_page(pagetable_get_page(v->arch.guest_table));
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( cr0 & X86_CR0_PG )
- put_gfn(v->domain, cr3 >> PAGE_SHIFT);
+ v->arch.guest_table =
+ page ? pagetable_from_page(page) : pagetable_null();
}
v->arch.hvm_vcpu.guest_cr[0] = cr0 | X86_CR0_ET;
@@ -1035,8 +1033,9 @@ static void vmx_set_interrupt_shadow(str
static void vmx_load_pdptrs(struct vcpu *v)
{
- unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3], mfn;
+ unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
uint64_t *guest_pdptrs;
+ struct page_info *page;
p2m_type_t p2mt;
char *p;
@@ -1047,24 +1046,19 @@ static void vmx_load_pdptrs(struct vcpu
if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
goto crash;
- mfn = mfn_x(get_gfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
- /* If we didn't succeed in unsharing, get_page will fail
- * (page still belongs to dom_cow) */
- !get_page(mfn_to_page(mfn), v->domain) )
+ page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
+ if ( !page )
{
/* Ideally you don't want to crash but rather go into a wait
* queue, but this is the wrong place. We're holding at least
* the paging lock */
gdprintk(XENLOG_ERR,
- "Bad cr3 on load pdptrs gfn %lx mfn %lx type %d\n",
- cr3 >> PAGE_SHIFT, mfn, (int) p2mt);
- put_gfn(v->domain, cr3 >> PAGE_SHIFT);
+ "Bad cr3 on load pdptrs gfn %lx type %d\n",
+ cr3 >> PAGE_SHIFT, (int) p2mt);
goto crash;
}
- put_gfn(v->domain, cr3 >> PAGE_SHIFT);
-
- p = map_domain_page(mfn);
+
+ p = __map_domain_page(page);
guest_pdptrs = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
@@ -1090,7 +1084,7 @@ static void vmx_load_pdptrs(struct vcpu
vmx_vmcs_exit(v);
unmap_domain_page(p);
- put_page(mfn_to_page(mfn));
+ put_page(page);
return;
crash:
^ permalink raw reply [flat|nested] 15+ messages in thread* [PATCH 05 of 11] x86/mm: Use get_page_from_gfn() instead of get_gfn()/put_gfn
2012-05-10 14:59 [PATCH 00 of 11] Use a reader-writer lock for the p2m Tim Deegan
` (3 preceding siblings ...)
2012-05-10 14:59 ` [PATCH 04 of 11] x86/hvm: Use get_page_from_gfn() instead of get_gfn()/put_gfn Tim Deegan
@ 2012-05-10 14:59 ` Tim Deegan
2012-05-10 14:59 ` [PATCH 06 of 11] x86: " Tim Deegan
` (6 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2012-05-10 14:59 UTC
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID c68f83ff7bf403637ade5a6002a7749226602c05
# Parent 9da426cdc7e478e426bcbd5391b8deacdc85db1e
x86/mm: Use get_page_from_gfn() instead of get_gfn()/put_gfn.
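The PV paths below have a slightly different shape: the old
get_gfn_untyped()/mfn_valid()/get_page_and_type() sequence becomes a lookup
that already returns a referenced page, followed by get_page_type().
A sketch (illustrative only):

/* PV-side pattern: the lookup itself holds the page reference. */
struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);

if ( unlikely(!page) )
    return 0;                            /* no usable frame behind gmfn */
if ( !get_page_type(page, PGT_seg_desc_page) )
{
    put_page(page);                      /* drop the ref from the lookup */
    return 0;
}
/* ... work with page_to_mfn(page); later put_page_and_type(page) ... */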
Signed-off-by: Tim Deegan <tim@xen.org>
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
diff -r 9da426cdc7e4 -r c68f83ff7bf4 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/mm.c Thu May 10 15:54:16 2012 +0100
@@ -651,7 +651,8 @@ int map_ldt_shadow_page(unsigned int off
{
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned long gmfn, mfn;
+ unsigned long gmfn;
+ struct page_info *page;
l1_pgentry_t l1e, nl1e;
unsigned long gva = v->arch.pv_vcpu.ldt_base + (off << PAGE_SHIFT);
int okay;
@@ -663,28 +664,24 @@ int map_ldt_shadow_page(unsigned int off
return 0;
gmfn = l1e_get_pfn(l1e);
- mfn = get_gfn_untyped(d, gmfn);
- if ( unlikely(!mfn_valid(mfn)) )
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ if ( unlikely(!page) )
+ return 0;
+
+ okay = get_page_type(page, PGT_seg_desc_page);
+ if ( unlikely(!okay) )
{
- put_gfn(d, gmfn);
+ put_page(page);
return 0;
}
- okay = get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page);
- if ( unlikely(!okay) )
- {
- put_gfn(d, gmfn);
- return 0;
- }
-
- nl1e = l1e_from_pfn(mfn, l1e_get_flags(l1e) | _PAGE_RW);
+ nl1e = l1e_from_pfn(page_to_mfn(page), l1e_get_flags(l1e) | _PAGE_RW);
spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
l1e_write(&v->arch.perdomain_ptes[off + 16], nl1e);
v->arch.pv_vcpu.shadow_ldt_mapcnt++;
spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
- put_gfn(d, gmfn);
return 1;
}
@@ -1819,7 +1816,6 @@ static int mod_l1_entry(l1_pgentry_t *pl
{
l1_pgentry_t ol1e;
struct domain *pt_dom = pt_vcpu->domain;
- p2m_type_t p2mt;
int rc = 0;
if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
@@ -1835,22 +1831,21 @@ static int mod_l1_entry(l1_pgentry_t *pl
if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
{
/* Translate foreign guest addresses. */
- unsigned long mfn, gfn;
- gfn = l1e_get_pfn(nl1e);
- mfn = mfn_x(get_gfn(pg_dom, gfn, &p2mt));
- if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) )
+ struct page_info *page = NULL;
+ if ( paging_mode_translate(pg_dom) )
{
- put_gfn(pg_dom, gfn);
- return -EINVAL;
+ page = get_page_from_gfn(pg_dom, l1e_get_pfn(nl1e), NULL, P2M_ALLOC);
+ if ( !page )
+ return -EINVAL;
+ nl1e = l1e_from_pfn(page_to_mfn(page), l1e_get_flags(nl1e));
}
- ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
- nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));
if ( unlikely(l1e_get_flags(nl1e) & l1_disallow_mask(pt_dom)) )
{
MEM_LOG("Bad L1 flags %x",
l1e_get_flags(nl1e) & l1_disallow_mask(pt_dom));
- put_gfn(pg_dom, gfn);
+ if ( page )
+ put_page(page);
return -EINVAL;
}
@@ -1860,15 +1855,21 @@ static int mod_l1_entry(l1_pgentry_t *pl
adjust_guest_l1e(nl1e, pt_dom);
if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
preserve_ad) )
+ {
+ if ( page )
+ put_page(page);
return 0;
- put_gfn(pg_dom, gfn);
+ }
+ if ( page )
+ put_page(page);
return -EBUSY;
}
switch ( rc = get_page_from_l1e(nl1e, pt_dom, pg_dom) )
{
default:
- put_gfn(pg_dom, gfn);
+ if ( page )
+ put_page(page);
return rc;
case 0:
break;
@@ -1876,7 +1877,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
l1e_remove_flags(nl1e, _PAGE_RW);
break;
}
-
+ if ( page )
+ put_page(page);
+
adjust_guest_l1e(nl1e, pt_dom);
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
preserve_ad)) )
@@ -1884,7 +1887,6 @@ static int mod_l1_entry(l1_pgentry_t *pl
ol1e = nl1e;
rc = -EBUSY;
}
- put_gfn(pg_dom, gfn);
}
else if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
preserve_ad)) )
@@ -3042,7 +3044,6 @@ int do_mmuext_op(
type = PGT_l4_page_table;
pin_page: {
- unsigned long mfn;
struct page_info *page;
/* Ignore pinning of invalid paging levels. */
@@ -3052,25 +3053,28 @@ int do_mmuext_op(
if ( paging_mode_refcounts(pg_owner) )
break;
- mfn = get_gfn_untyped(pg_owner, op.arg1.mfn);
- rc = get_page_and_type_from_pagenr(mfn, type, pg_owner, 0, 1);
+ page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+ if ( unlikely(!page) )
+ {
+ rc = -EINVAL;
+ break;
+ }
+
+ rc = get_page_type_preemptible(page, type);
okay = !rc;
if ( unlikely(!okay) )
{
if ( rc == -EINTR )
rc = -EAGAIN;
else if ( rc != -EAGAIN )
- MEM_LOG("Error while pinning mfn %lx", mfn);
- put_gfn(pg_owner, op.arg1.mfn);
+ MEM_LOG("Error while pinning mfn %lx", page_to_mfn(page));
+ put_page(page);
break;
}
- page = mfn_to_page(mfn);
-
if ( (rc = xsm_memory_pin_page(d, page)) != 0 )
{
put_page_and_type(page);
- put_gfn(pg_owner, op.arg1.mfn);
okay = 0;
break;
}
@@ -3078,16 +3082,15 @@ int do_mmuext_op(
if ( unlikely(test_and_set_bit(_PGT_pinned,
&page->u.inuse.type_info)) )
{
- MEM_LOG("Mfn %lx already pinned", mfn);
+ MEM_LOG("Mfn %lx already pinned", page_to_mfn(page));
put_page_and_type(page);
- put_gfn(pg_owner, op.arg1.mfn);
okay = 0;
break;
}
/* A page is dirtied when its pin status is set. */
- paging_mark_dirty(pg_owner, mfn);
-
+ paging_mark_dirty(pg_owner, page_to_mfn(page));
+
/* We can race domain destruction (domain_relinquish_resources). */
if ( unlikely(pg_owner != d) )
{
@@ -3099,35 +3102,29 @@ int do_mmuext_op(
spin_unlock(&pg_owner->page_alloc_lock);
if ( drop_ref )
put_page_and_type(page);
- put_gfn(pg_owner, op.arg1.mfn);
}
break;
}
case MMUEXT_UNPIN_TABLE: {
- unsigned long mfn;
struct page_info *page;
if ( paging_mode_refcounts(pg_owner) )
break;
- mfn = get_gfn_untyped(pg_owner, op.arg1.mfn);
- if ( unlikely(!(okay = get_page_from_pagenr(mfn, pg_owner))) )
+ page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+ if ( unlikely(!page) )
{
- put_gfn(pg_owner, op.arg1.mfn);
- MEM_LOG("Mfn %lx bad domain", mfn);
+ MEM_LOG("Mfn %lx bad domain", op.arg1.mfn);
break;
}
- page = mfn_to_page(mfn);
-
if ( !test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
{
okay = 0;
put_page(page);
- put_gfn(pg_owner, op.arg1.mfn);
- MEM_LOG("Mfn %lx not pinned", mfn);
+ MEM_LOG("Mfn %lx not pinned", op.arg1.mfn);
break;
}
@@ -3135,40 +3132,43 @@ int do_mmuext_op(
put_page(page);
/* A page is dirtied when its pin status is cleared. */
- paging_mark_dirty(pg_owner, mfn);
-
- put_gfn(pg_owner, op.arg1.mfn);
+ paging_mark_dirty(pg_owner, page_to_mfn(page));
+
break;
}
case MMUEXT_NEW_BASEPTR:
- okay = new_guest_cr3(get_gfn_untyped(d, op.arg1.mfn));
- put_gfn(d, op.arg1.mfn);
+ okay = (!paging_mode_translate(d)
+ && new_guest_cr3(op.arg1.mfn));
break;
+
#ifdef __x86_64__
case MMUEXT_NEW_USER_BASEPTR: {
- unsigned long old_mfn, mfn;
-
- mfn = get_gfn_untyped(d, op.arg1.mfn);
- if ( mfn != 0 )
+ unsigned long old_mfn;
+
+ if ( paging_mode_translate(current->domain) )
+ {
+ okay = 0;
+ break;
+ }
+
+ if ( op.arg1.mfn != 0 )
{
if ( paging_mode_refcounts(d) )
- okay = get_page_from_pagenr(mfn, d);
+ okay = get_page_from_pagenr(op.arg1.mfn, d);
else
okay = !get_page_and_type_from_pagenr(
- mfn, PGT_root_page_table, d, 0, 0);
+ op.arg1.mfn, PGT_root_page_table, d, 0, 0);
if ( unlikely(!okay) )
{
- put_gfn(d, op.arg1.mfn);
- MEM_LOG("Error while installing new mfn %lx", mfn);
+ MEM_LOG("Error while installing new mfn %lx", op.arg1.mfn);
break;
}
}
old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
- curr->arch.guest_table_user = pagetable_from_pfn(mfn);
- put_gfn(d, op.arg1.mfn);
+ curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
if ( old_mfn != 0 )
{
@@ -3283,28 +3283,27 @@ int do_mmuext_op(
}
case MMUEXT_CLEAR_PAGE: {
- unsigned long mfn;
+ struct page_info *page;
unsigned char *ptr;
- mfn = get_gfn_untyped(d, op.arg1.mfn);
- okay = !get_page_and_type_from_pagenr(
- mfn, PGT_writable_page, d, 0, 0);
- if ( unlikely(!okay) )
+ page = get_page_from_gfn(d, op.arg1.mfn, NULL, P2M_ALLOC);
+ if ( !page || !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, op.arg1.mfn);
- MEM_LOG("Error while clearing mfn %lx", mfn);
+ if ( page )
+ put_page(page);
+ MEM_LOG("Error while clearing mfn %lx", op.arg1.mfn);
+ okay = 0;
break;
}
/* A page is dirtied when it's being cleared. */
- paging_mark_dirty(d, mfn);
-
- ptr = fixmap_domain_page(mfn);
+ paging_mark_dirty(d, page_to_mfn(page));
+
+ ptr = fixmap_domain_page(page_to_mfn(page));
clear_page(ptr);
fixunmap_domain_page(ptr);
- put_page_and_type(mfn_to_page(mfn));
- put_gfn(d, op.arg1.mfn);
+ put_page_and_type(page);
break;
}
@@ -3312,42 +3311,38 @@ int do_mmuext_op(
{
const unsigned char *src;
unsigned char *dst;
- unsigned long src_mfn, mfn;
-
- src_mfn = get_gfn_untyped(d, op.arg2.src_mfn);
- okay = get_page_from_pagenr(src_mfn, d);
+ struct page_info *src_page, *dst_page;
+
+ src_page = get_page_from_gfn(d, op.arg2.src_mfn, NULL, P2M_ALLOC);
+ if ( unlikely(!src_page) )
+ {
+ okay = 0;
+ MEM_LOG("Error while copying from mfn %lx", op.arg2.src_mfn);
+ break;
+ }
+
+ dst_page = get_page_from_gfn(d, op.arg1.mfn, NULL, P2M_ALLOC);
+ okay = (dst_page && get_page_type(dst_page, PGT_writable_page));
if ( unlikely(!okay) )
{
- put_gfn(d, op.arg2.src_mfn);
- MEM_LOG("Error while copying from mfn %lx", src_mfn);
+ put_page(src_page);
+ if ( dst_page )
+ put_page(dst_page);
+ MEM_LOG("Error while copying to mfn %lx", op.arg1.mfn);
break;
}
- mfn = get_gfn_untyped(d, op.arg1.mfn);
- okay = !get_page_and_type_from_pagenr(
- mfn, PGT_writable_page, d, 0, 0);
- if ( unlikely(!okay) )
- {
- put_gfn(d, op.arg1.mfn);
- put_page(mfn_to_page(src_mfn));
- put_gfn(d, op.arg2.src_mfn);
- MEM_LOG("Error while copying to mfn %lx", mfn);
- break;
- }
-
/* A page is dirtied when it's being copied to. */
- paging_mark_dirty(d, mfn);
-
- src = map_domain_page(src_mfn);
- dst = fixmap_domain_page(mfn);
+ paging_mark_dirty(d, page_to_mfn(dst_page));
+
+ src = __map_domain_page(src_page);
+ dst = fixmap_domain_page(page_to_mfn(dst_page));
copy_page(dst, src);
fixunmap_domain_page(dst);
unmap_domain_page(src);
- put_page_and_type(mfn_to_page(mfn));
- put_gfn(d, op.arg1.mfn);
- put_page(mfn_to_page(src_mfn));
- put_gfn(d, op.arg2.src_mfn);
+ put_page_and_type(dst_page);
+ put_page(src_page);
break;
}
@@ -3538,29 +3533,26 @@ int do_mmu_update(
req.ptr -= cmd;
gmfn = req.ptr >> PAGE_SHIFT;
- mfn = mfn_x(get_gfn(pt_owner, gmfn, &p2mt));
- if ( !p2m_is_valid(p2mt) )
- mfn = INVALID_MFN;
+ page = get_page_from_gfn(pt_owner, gmfn, &p2mt, P2M_ALLOC);
if ( p2m_is_paged(p2mt) )
{
- put_gfn(pt_owner, gmfn);
+ ASSERT(!page);
p2m_mem_paging_populate(pg_owner, gmfn);
rc = -ENOENT;
break;
}
- if ( unlikely(!get_page_from_pagenr(mfn, pt_owner)) )
+ if ( unlikely(!page) )
{
MEM_LOG("Could not get page for normal update");
- put_gfn(pt_owner, gmfn);
break;
}
+ mfn = page_to_mfn(page);
va = map_domain_page_with_cache(mfn, &mapcache);
va = (void *)((unsigned long)va +
(unsigned long)(req.ptr & ~PAGE_MASK));
- page = mfn_to_page(mfn);
if ( page_lock(page) )
{
@@ -3569,22 +3561,23 @@ int do_mmu_update(
case PGT_l1_page_table:
{
l1_pgentry_t l1e = l1e_from_intpte(req.val);
- p2m_type_t l1e_p2mt;
- unsigned long l1egfn = l1e_get_pfn(l1e), l1emfn;
-
- l1emfn = mfn_x(get_gfn(pg_owner, l1egfn, &l1e_p2mt));
+ p2m_type_t l1e_p2mt = p2m_ram_rw;
+ struct page_info *target = NULL;
+
+ if ( paging_mode_translate(pg_owner) )
+ target = get_page_from_gfn(pg_owner, l1e_get_pfn(l1e),
+ &l1e_p2mt, P2M_ALLOC);
if ( p2m_is_paged(l1e_p2mt) )
{
- put_gfn(pg_owner, l1egfn);
+ if ( target )
+ put_page(target);
p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
rc = -ENOENT;
break;
}
- else if ( p2m_ram_paging_in == l1e_p2mt &&
- !mfn_valid(l1emfn) )
+ else if ( p2m_ram_paging_in == l1e_p2mt && !target )
{
- put_gfn(pg_owner, l1egfn);
rc = -ENOENT;
break;
}
@@ -3601,7 +3594,8 @@ int do_mmu_update(
rc = mem_sharing_unshare_page(pg_owner, gfn, 0);
if ( rc )
{
- put_gfn(pg_owner, l1egfn);
+ if ( target )
+ put_page(target);
/* Notify helper, don't care about errors, will not
* sleep on wq, since we're a foreign domain. */
(void)mem_sharing_notify_enomem(pg_owner, gfn, 0);
@@ -3614,112 +3608,22 @@ int do_mmu_update(
rc = mod_l1_entry(va, l1e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
pg_owner);
- put_gfn(pg_owner, l1egfn);
+ if ( target )
+ put_page(target);
}
break;
case PGT_l2_page_table:
- {
- l2_pgentry_t l2e = l2e_from_intpte(req.val);
- p2m_type_t l2e_p2mt;
- unsigned long l2egfn = l2e_get_pfn(l2e), l2emfn;
-
- l2emfn = mfn_x(get_gfn(pg_owner, l2egfn, &l2e_p2mt));
-
- if ( p2m_is_paged(l2e_p2mt) )
- {
- put_gfn(pg_owner, l2egfn);
- p2m_mem_paging_populate(pg_owner, l2egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in == l2e_p2mt &&
- !mfn_valid(l2emfn) )
- {
- put_gfn(pg_owner, l2egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_shared == l2e_p2mt )
- {
- put_gfn(pg_owner, l2egfn);
- MEM_LOG("Unexpected attempt to map shared page.\n");
- break;
- }
-
-
- rc = mod_l2_entry(va, l2e, mfn,
+ rc = mod_l2_entry(va, l2e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
- put_gfn(pg_owner, l2egfn);
- }
- break;
+ break;
case PGT_l3_page_table:
- {
- l3_pgentry_t l3e = l3e_from_intpte(req.val);
- p2m_type_t l3e_p2mt;
- unsigned long l3egfn = l3e_get_pfn(l3e), l3emfn;
-
- l3emfn = mfn_x(get_gfn(pg_owner, l3egfn, &l3e_p2mt));
-
- if ( p2m_is_paged(l3e_p2mt) )
- {
- put_gfn(pg_owner, l3egfn);
- p2m_mem_paging_populate(pg_owner, l3egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in == l3e_p2mt &&
- !mfn_valid(l3emfn) )
- {
- put_gfn(pg_owner, l3egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_shared == l3e_p2mt )
- {
- put_gfn(pg_owner, l3egfn);
- MEM_LOG("Unexpected attempt to map shared page.\n");
- break;
- }
-
- rc = mod_l3_entry(va, l3e, mfn,
+ rc = mod_l3_entry(va, l3e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
- put_gfn(pg_owner, l3egfn);
- }
- break;
+ break;
#if CONFIG_PAGING_LEVELS >= 4
case PGT_l4_page_table:
- {
- l4_pgentry_t l4e = l4e_from_intpte(req.val);
- p2m_type_t l4e_p2mt;
- unsigned long l4egfn = l4e_get_pfn(l4e), l4emfn;
-
- l4emfn = mfn_x(get_gfn(pg_owner, l4egfn, &l4e_p2mt));
-
- if ( p2m_is_paged(l4e_p2mt) )
- {
- put_gfn(pg_owner, l4egfn);
- p2m_mem_paging_populate(pg_owner, l4egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in == l4e_p2mt &&
- !mfn_valid(l4emfn) )
- {
- put_gfn(pg_owner, l4egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_shared == l4e_p2mt )
- {
- put_gfn(pg_owner, l4egfn);
- MEM_LOG("Unexpected attempt to map shared page.\n");
- break;
- }
-
- rc = mod_l4_entry(va, l4e, mfn,
+ rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
- put_gfn(pg_owner, l4egfn);
- }
break;
#endif
case PGT_writable_page:
@@ -3742,7 +3646,6 @@ int do_mmu_update(
unmap_domain_page_with_cache(va, &mapcache);
put_page(page);
- put_gfn(pt_owner, gmfn);
}
break;
@@ -3829,18 +3732,17 @@ static int create_grant_pte_mapping(
adjust_guest_l1e(nl1e, d);
gmfn = pte_addr >> PAGE_SHIFT;
- mfn = get_gfn_untyped(d, gmfn);
-
- if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+ if ( unlikely(!page) )
{
- put_gfn(d, gmfn);
MEM_LOG("Could not get page for normal update");
return GNTST_general_error;
}
+ mfn = page_to_mfn(page);
va = map_domain_page(mfn);
va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
- page = mfn_to_page(mfn);
if ( !page_lock(page) )
{
@@ -3871,7 +3773,6 @@ static int create_grant_pte_mapping(
failed:
unmap_domain_page(va);
put_page(page);
- put_gfn(d, gmfn);
return rc;
}
@@ -3886,18 +3787,17 @@ static int destroy_grant_pte_mapping(
l1_pgentry_t ol1e;
gmfn = addr >> PAGE_SHIFT;
- mfn = get_gfn_untyped(d, gmfn);
-
- if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+ if ( unlikely(!page) )
{
- put_gfn(d, gmfn);
MEM_LOG("Could not get page for normal update");
return GNTST_general_error;
}
+ mfn = page_to_mfn(page);
va = map_domain_page(mfn);
va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
- page = mfn_to_page(mfn);
if ( !page_lock(page) )
{
@@ -3942,7 +3842,6 @@ static int destroy_grant_pte_mapping(
failed:
unmap_domain_page(va);
put_page(page);
- put_gfn(d, gmfn);
return rc;
}
@@ -4465,11 +4364,17 @@ long set_gdt(struct vcpu *v,
/* Check the pages in the new GDT. */
for ( i = 0; i < nr_pages; i++ )
{
+ struct page_info *page;
pfns[i] = frames[i];
- mfn = frames[i] = get_gfn_untyped(d, frames[i]);
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page) )
+ page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC);
+ if ( !page )
goto fail;
+ if ( !get_page_type(page, PGT_seg_desc_page) )
+ {
+ put_page(page);
+ goto fail;
+ }
+ mfn = frames[i] = page_to_mfn(page);
}
/* Tear down the old GDT. */
@@ -4482,7 +4387,6 @@ long set_gdt(struct vcpu *v,
v->arch.pv_vcpu.gdt_frames[i] = frames[i];
l1e_write(&v->arch.perdomain_ptes[i],
l1e_from_pfn(frames[i], __PAGE_HYPERVISOR));
- put_gfn(d, pfns[i]);
}
xfree(pfns);
@@ -4492,7 +4396,6 @@ long set_gdt(struct vcpu *v,
while ( i-- > 0 )
{
put_page_and_type(mfn_to_page(frames[i]));
- put_gfn(d, pfns[i]);
}
xfree(pfns);
return -EINVAL;
@@ -4538,21 +4441,16 @@ long do_update_descriptor(u64 pa, u64 de
*(u64 *)&d = desc;
- mfn = get_gfn_untyped(dom, gmfn);
+ page = get_page_from_gfn(dom, gmfn, NULL, P2M_ALLOC);
if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
- !mfn_valid(mfn) ||
+ !page ||
!check_descriptor(dom, &d) )
{
- put_gfn(dom, gmfn);
+ if ( page )
+ put_page(page);
return -EINVAL;
}
-
- page = mfn_to_page(mfn);
- if ( unlikely(!get_page(page, dom)) )
- {
- put_gfn(dom, gmfn);
- return -EINVAL;
- }
+ mfn = page_to_mfn(page);
/* Check if the given frame is in use in an unsafe context. */
switch ( page->u.inuse.type_info & PGT_type_mask )
@@ -4580,7 +4478,6 @@ long do_update_descriptor(u64 pa, u64 de
out:
put_page(page);
- put_gfn(dom, gmfn);
return ret;
}
diff -r 9da426cdc7e4 -r c68f83ff7bf4 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/mm/guest_walk.c Thu May 10 15:54:16 2012 +0100
@@ -94,39 +94,37 @@ static inline void *map_domain_gfn(struc
p2m_type_t *p2mt,
uint32_t *rc)
{
- p2m_access_t p2ma;
+ struct page_info *page;
void *map;
/* Translate the gfn, unsharing if shared */
- *mfn = get_gfn_type_access(p2m, gfn_x(gfn), p2mt, &p2ma,
- P2M_ALLOC | P2M_UNSHARE, NULL);
+ page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL,
+ P2M_ALLOC | P2M_UNSHARE);
if ( p2m_is_paging(*p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
- __put_gfn(p2m, gfn_x(gfn));
+ if ( page )
+ put_page(page);
p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
*rc = _PAGE_PAGED;
return NULL;
}
if ( p2m_is_shared(*p2mt) )
{
- __put_gfn(p2m, gfn_x(gfn));
+ if ( page )
+ put_page(page);
*rc = _PAGE_SHARED;
return NULL;
}
- if ( !p2m_is_ram(*p2mt) )
+ if ( !page )
{
- __put_gfn(p2m, gfn_x(gfn));
*rc |= _PAGE_PRESENT;
return NULL;
}
+ *mfn = _mfn(page_to_mfn(page));
ASSERT(mfn_valid(mfn_x(*mfn)));
-
- /* Get an extra ref to the page to ensure liveness of the map.
- * Then we can safely put gfn */
- page_get_owner_and_reference(mfn_to_page(mfn_x(*mfn)));
+
map = map_domain_page(mfn_x(*mfn));
- __put_gfn(p2m, gfn_x(gfn));
return map;
}
diff -r 9da426cdc7e4 -r c68f83ff7bf4 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c Thu May 10 15:54:16 2012 +0100
@@ -54,34 +54,36 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
mfn_t top_mfn;
void *top_map;
p2m_type_t p2mt;
- p2m_access_t p2ma;
walk_t gw;
unsigned long top_gfn;
+ struct page_info *top_page;
/* Get the top-level table's MFN */
top_gfn = cr3 >> PAGE_SHIFT;
- top_mfn = get_gfn_type_access(p2m, top_gfn, &p2mt, &p2ma,
- P2M_ALLOC | P2M_UNSHARE, NULL);
+ top_page = get_page_from_gfn_p2m(p2m->domain, p2m, top_gfn,
+ &p2mt, NULL, P2M_ALLOC | P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
pfec[0] = PFEC_page_paged;
- __put_gfn(p2m, top_gfn);
+ if ( top_page )
+ put_page(top_page);
p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
return INVALID_GFN;
}
if ( p2m_is_shared(p2mt) )
{
pfec[0] = PFEC_page_shared;
- __put_gfn(p2m, top_gfn);
+ if ( top_page )
+ put_page(top_page);
return INVALID_GFN;
}
- if ( !p2m_is_ram(p2mt) )
+ if ( !top_page )
{
pfec[0] &= ~PFEC_page_present;
- __put_gfn(p2m, top_gfn);
return INVALID_GFN;
}
+ top_mfn = _mfn(page_to_mfn(top_page));
/* Map the top-level table and call the tree-walker */
ASSERT(mfn_valid(mfn_x(top_mfn)));
@@ -91,31 +93,30 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
#endif
missing = guest_walk_tables(v, p2m, ga, &gw, pfec[0], top_mfn, top_map);
unmap_domain_page(top_map);
- __put_gfn(p2m, top_gfn);
+ put_page(top_page);
/* Interpret the answer */
if ( missing == 0 )
{
gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
- (void)get_gfn_type_access(p2m, gfn_x(gfn), &p2mt, &p2ma,
- P2M_ALLOC | P2M_UNSHARE, NULL);
+ struct page_info *page;
+ page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), &p2mt,
+ NULL, P2M_ALLOC | P2M_UNSHARE);
+ if ( page )
+ put_page(page);
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
pfec[0] = PFEC_page_paged;
- __put_gfn(p2m, gfn_x(gfn));
p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
return INVALID_GFN;
}
if ( p2m_is_shared(p2mt) )
{
pfec[0] = PFEC_page_shared;
- __put_gfn(p2m, gfn_x(gfn));
return INVALID_GFN;
}
- __put_gfn(p2m, gfn_x(gfn));
-
if ( page_order )
*page_order = guest_walk_to_page_order(&gw);
* [PATCH 06 of 11] x86: Use get_page_from_gfn() instead of get_gfn()/put_gfn
2012-05-10 14:59 [PATCH 00 of 11] Use a reader-writer lock for the p2m Tim Deegan
` (4 preceding siblings ...)
2012-05-10 14:59 ` [PATCH 05 of 11] x86/mm: " Tim Deegan
@ 2012-05-10 14:59 ` Tim Deegan
2012-05-10 14:59 ` [PATCH 07 of 11] common: " Tim Deegan
` (5 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2012-05-10 14:59 UTC (permalink / raw)
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID d19b3ba026fd844d21a250f768f70a1543d6bbd7
# Parent c68f83ff7bf403637ade5a6002a7749226602c05
x86: Use get_page_from_gfn() instead of get_gfn()/put_gfn.
Signed-off-by: Tim Deegan <tim@xen.org>
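For illustration only (this is not an extra hunk, and the wrapper function
names below are made up; the helpers are the ones actually used in the hunks
that follow), the conversions all have the same shape. A caller that needs a
writable page moves from the old pattern
    static int old_shape(struct domain *d, unsigned long gfn)
    {
        unsigned long mfn = get_gfn_untyped(d, gfn);
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            put_gfn(d, gfn);            /* drop the hold on the p2m entry */
            return -EINVAL;
        }
        /* ... use mfn_to_page(mfn) ... */
        put_page_and_type(mfn_to_page(mfn));
        put_gfn(d, gfn);
        return 0;
    }
to the new pattern
    static int new_shape(struct domain *d, unsigned long gfn)
    {
        /* Takes a page reference; NULL also covers the !mfn_valid() case. */
        struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
        if ( !page )
            return -EINVAL;
        if ( !get_page_type(page, PGT_writable_page) )
        {
            put_page(page);
            return -EINVAL;
        }
        /* ... use page ... */
        put_page_and_type(page);
        return 0;
    }
i.e. the page reference taken by get_page_from_gfn() replaces holding the
p2m entry, so no exit path needs a put_gfn() any more.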
diff -r c68f83ff7bf4 -r d19b3ba026fd xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/domain.c Thu May 10 15:54:16 2012 +0100
@@ -716,7 +716,7 @@ int arch_set_info_guest(
{
struct domain *d = v->domain;
unsigned long cr3_gfn;
- unsigned long cr3_pfn = INVALID_MFN;
+ struct page_info *cr3_page;
unsigned long flags, cr4;
unsigned int i;
int rc = 0, compat;
@@ -925,46 +925,45 @@ int arch_set_info_guest(
if ( !compat )
{
cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[3]);
- cr3_pfn = get_gfn_untyped(d, cr3_gfn);
+ cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
- if ( !mfn_valid(cr3_pfn) ||
- (paging_mode_refcounts(d)
- ? !get_page(mfn_to_page(cr3_pfn), d)
- : !get_page_and_type(mfn_to_page(cr3_pfn), d,
- PGT_base_page_table)) )
+ if ( !cr3_page )
{
- put_gfn(d, cr3_gfn);
+ destroy_gdt(v);
+ return -EINVAL;
+ }
+ if ( !paging_mode_refcounts(d)
+ && !get_page_type(cr3_page, PGT_base_page_table) )
+ {
+ put_page(cr3_page);
destroy_gdt(v);
return -EINVAL;
}
- v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
- put_gfn(d, cr3_gfn);
+ v->arch.guest_table = pagetable_from_page(cr3_page);
#ifdef __x86_64__
if ( c.nat->ctrlreg[1] )
{
cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]);
- cr3_pfn = get_gfn_untyped(d, cr3_gfn);
+ cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
- if ( !mfn_valid(cr3_pfn) ||
- (paging_mode_refcounts(d)
- ? !get_page(mfn_to_page(cr3_pfn), d)
- : !get_page_and_type(mfn_to_page(cr3_pfn), d,
- PGT_base_page_table)) )
+ if ( !cr3_page ||
+ (!paging_mode_refcounts(d)
+ && !get_page_type(cr3_page, PGT_base_page_table)) )
{
- cr3_pfn = pagetable_get_pfn(v->arch.guest_table);
+ if (cr3_page)
+ put_page(cr3_page);
+ cr3_page = pagetable_get_page(v->arch.guest_table);
v->arch.guest_table = pagetable_null();
if ( paging_mode_refcounts(d) )
- put_page(mfn_to_page(cr3_pfn));
+ put_page(cr3_page);
else
- put_page_and_type(mfn_to_page(cr3_pfn));
- put_gfn(d, cr3_gfn);
+ put_page_and_type(cr3_page);
destroy_gdt(v);
return -EINVAL;
}
- v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
- put_gfn(d, cr3_gfn);
+ v->arch.guest_table_user = pagetable_from_page(cr3_page);
}
else if ( !(flags & VGCF_in_kernel) )
{
@@ -977,23 +976,25 @@ int arch_set_info_guest(
l4_pgentry_t *l4tab;
cr3_gfn = compat_cr3_to_pfn(c.cmp->ctrlreg[3]);
- cr3_pfn = get_gfn_untyped(d, cr3_gfn);
+ cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
- if ( !mfn_valid(cr3_pfn) ||
- (paging_mode_refcounts(d)
- ? !get_page(mfn_to_page(cr3_pfn), d)
- : !get_page_and_type(mfn_to_page(cr3_pfn), d,
- PGT_l3_page_table)) )
+ if ( !cr3_page)
{
- put_gfn(d, cr3_gfn);
+ destroy_gdt(v);
+ return -EINVAL;
+ }
+
+ if (!paging_mode_refcounts(d)
+ && !get_page_type(cr3_page, PGT_l3_page_table) )
+ {
+ put_page(cr3_page);
destroy_gdt(v);
return -EINVAL;
}
l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
- *l4tab = l4e_from_pfn(
- cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
- put_gfn(d, cr3_gfn);
+ *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
+ _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
#endif
}
@@ -1064,7 +1065,7 @@ map_vcpu_info(struct vcpu *v, unsigned l
struct domain *d = v->domain;
void *mapping;
vcpu_info_t *new_info;
- unsigned long mfn;
+ struct page_info *page;
int i;
if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
@@ -1077,19 +1078,20 @@ map_vcpu_info(struct vcpu *v, unsigned l
if ( (v != current) && !test_bit(_VPF_down, &v->pause_flags) )
return -EINVAL;
- mfn = get_gfn_untyped(d, gfn);
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+ if ( !page )
+ return -EINVAL;
+
+ if ( !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gfn);
+ put_page(page);
return -EINVAL;
}
- mapping = map_domain_page_global(mfn);
+ mapping = __map_domain_page_global(page);
if ( mapping == NULL )
{
- put_page_and_type(mfn_to_page(mfn));
- put_gfn(d, gfn);
+ put_page_and_type(page);
return -ENOMEM;
}
@@ -1106,7 +1108,7 @@ map_vcpu_info(struct vcpu *v, unsigned l
}
v->vcpu_info = new_info;
- v->arch.pv_vcpu.vcpu_info_mfn = mfn;
+ v->arch.pv_vcpu.vcpu_info_mfn = page_to_mfn(page);
/* Set new vcpu_info pointer /before/ setting pending flags. */
wmb();
@@ -1119,7 +1121,6 @@ map_vcpu_info(struct vcpu *v, unsigned l
for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ )
set_bit(i, &vcpu_info(v, evtchn_pending_sel));
- put_gfn(d, gfn);
return 0;
}
diff -r c68f83ff7bf4 -r d19b3ba026fd xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/domctl.c Thu May 10 15:54:16 2012 +0100
@@ -202,16 +202,16 @@ long arch_do_domctl(
for ( j = 0; j < k; j++ )
{
- unsigned long type = 0, mfn = get_gfn_untyped(d, arr[j]);
+ unsigned long type = 0;
- page = mfn_to_page(mfn);
+ page = get_page_from_gfn(d, arr[j], NULL, P2M_ALLOC);
- if ( unlikely(!mfn_valid(mfn)) ||
- unlikely(is_xen_heap_mfn(mfn)) )
+ if ( unlikely(!page) ||
+ unlikely(is_xen_heap_page(page)) )
type = XEN_DOMCTL_PFINFO_XTAB;
else if ( xsm_getpageframeinfo(page) != 0 )
;
- else if ( likely(get_page(page, d)) )
+ else
{
switch( page->u.inuse.type_info & PGT_type_mask )
{
@@ -231,13 +231,10 @@ long arch_do_domctl(
if ( page->u.inuse.type_info & PGT_pinned )
type |= XEN_DOMCTL_PFINFO_LPINTAB;
+ }
+ if ( page )
put_page(page);
- }
- else
- type = XEN_DOMCTL_PFINFO_XTAB;
-
- put_gfn(d, arr[j]);
arr[j] = type;
}
@@ -304,21 +301,21 @@ long arch_do_domctl(
{
struct page_info *page;
unsigned long gfn = arr32[j];
- unsigned long mfn = get_gfn_untyped(d, gfn);
- page = mfn_to_page(mfn);
+ page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( domctl->cmd == XEN_DOMCTL_getpageframeinfo3)
arr32[j] = 0;
- if ( unlikely(!mfn_valid(mfn)) ||
- unlikely(is_xen_heap_mfn(mfn)) )
+ if ( unlikely(!page) ||
+ unlikely(is_xen_heap_page(page)) )
arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
else if ( xsm_getpageframeinfo(page) != 0 )
{
- put_gfn(d, gfn);
+ put_page(page);
continue;
- } else if ( likely(get_page(page, d)) )
+ }
+ else
{
unsigned long type = 0;
@@ -341,12 +338,10 @@ long arch_do_domctl(
if ( page->u.inuse.type_info & PGT_pinned )
type |= XEN_DOMCTL_PFINFO_LPINTAB;
arr32[j] |= type;
+ }
+
+ if ( page )
put_page(page);
- }
- else
- arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
-
- put_gfn(d, gfn);
}
if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
@@ -419,7 +414,7 @@ long arch_do_domctl(
{
struct domain *d = rcu_lock_domain_by_id(domctl->domain);
unsigned long gmfn = domctl->u.hypercall_init.gmfn;
- unsigned long mfn;
+ struct page_info *page;
void *hypercall_page;
ret = -ESRCH;
@@ -433,26 +428,25 @@ long arch_do_domctl(
break;
}
- mfn = get_gfn_untyped(d, gmfn);
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
ret = -EACCES;
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ if ( !page || !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gmfn);
+ if ( page )
+ put_page(page);
rcu_unlock_domain(d);
break;
}
ret = 0;
- hypercall_page = map_domain_page(mfn);
+ hypercall_page = __map_domain_page(page);
hypercall_page_initialise(d, hypercall_page);
unmap_domain_page(hypercall_page);
- put_page_and_type(mfn_to_page(mfn));
+ put_page_and_type(page);
- put_gfn(d, gmfn);
rcu_unlock_domain(d);
}
break;
diff -r c68f83ff7bf4 -r d19b3ba026fd xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/physdev.c Thu May 10 15:54:16 2012 +0100
@@ -306,26 +306,27 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
case PHYSDEVOP_pirq_eoi_gmfn_v1: {
struct physdev_pirq_eoi_gmfn info;
unsigned long mfn;
+ struct page_info *page;
ret = -EFAULT;
if ( copy_from_guest(&info, arg, 1) != 0 )
break;
ret = -EINVAL;
- mfn = get_gfn_untyped(current->domain, info.gmfn);
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), v->domain,
- PGT_writable_page) )
+ page = get_page_from_gfn(current->domain, info.gmfn, NULL, P2M_ALLOC);
+ if ( !page )
+ break;
+ if ( !get_page_type(page, PGT_writable_page) )
{
- put_gfn(current->domain, info.gmfn);
+ put_page(page);
break;
}
+ mfn = page_to_mfn(page);
if ( cmpxchg(&v->domain->arch.pv_domain.pirq_eoi_map_mfn,
0, mfn) != 0 )
{
put_page_and_type(mfn_to_page(mfn));
- put_gfn(current->domain, info.gmfn);
ret = -EBUSY;
break;
}
@@ -335,14 +336,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
{
v->domain->arch.pv_domain.pirq_eoi_map_mfn = 0;
put_page_and_type(mfn_to_page(mfn));
- put_gfn(current->domain, info.gmfn);
ret = -ENOSPC;
break;
}
if ( cmd == PHYSDEVOP_pirq_eoi_gmfn_v1 )
v->domain->arch.pv_domain.auto_unmask = 1;
- put_gfn(current->domain, info.gmfn);
ret = 0;
break;
}
diff -r c68f83ff7bf4 -r d19b3ba026fd xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/traps.c Thu May 10 15:54:16 2012 +0100
@@ -662,9 +662,9 @@ int wrmsr_hypervisor_regs(uint32_t idx,
case 0:
{
void *hypercall_page;
- unsigned long mfn;
unsigned long gmfn = val >> 12;
unsigned int idx = val & 0xfff;
+ struct page_info *page;
if ( idx > 0 )
{
@@ -674,24 +674,23 @@ int wrmsr_hypervisor_regs(uint32_t idx,
return 0;
}
- mfn = get_gfn_untyped(d, gmfn);
-
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+ if ( !page || !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gmfn);
+ if ( page )
+ put_page(page);
gdprintk(XENLOG_WARNING,
"Bad GMFN %lx (MFN %lx) to MSR %08x\n",
- gmfn, mfn, base + idx);
+ gmfn, page_to_mfn(page), base + idx);
return 0;
}
- hypercall_page = map_domain_page(mfn);
+ hypercall_page = __map_domain_page(page);
hypercall_page_initialise(d, hypercall_page);
unmap_domain_page(hypercall_page);
- put_page_and_type(mfn_to_page(mfn));
- put_gfn(d, gmfn);
+ put_page_and_type(page);
break;
}
@@ -2374,7 +2373,8 @@ static int emulate_privileged_op(struct
break;
case 3: {/* Write CR3 */
- unsigned long mfn, gfn;
+ unsigned long gfn;
+ struct page_info *page;
domain_lock(v->domain);
if ( !is_pv_32on64_vcpu(v) )
{
@@ -2384,9 +2384,10 @@ static int emulate_privileged_op(struct
gfn = compat_cr3_to_pfn(*reg);
#endif
}
- mfn = get_gfn_untyped(v->domain, gfn);
- rc = new_guest_cr3(mfn);
- put_gfn(v->domain, gfn);
+ page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
+ rc = page ? new_guest_cr3(page_to_mfn(page)) : 0;
+ if ( page )
+ put_page(page);
domain_unlock(v->domain);
if ( rc == 0 ) /* not okay */
goto fail;
* [PATCH 07 of 11] common: Use get_page_from_gfn() instead of get_gfn()/put_gfn
2012-05-10 14:59 [PATCH 00 of 11] Use a reader-writer lock for the p2m Tim Deegan
` (5 preceding siblings ...)
2012-05-10 14:59 ` [PATCH 06 of 11] x86: " Tim Deegan
@ 2012-05-10 14:59 ` Tim Deegan
2012-05-10 14:59 ` [PATCH 08 of 11] grant-tables: " Tim Deegan
` (4 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2012-05-10 14:59 UTC (permalink / raw)
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID 28d15a29ab59d29a1fd5deb79ceda5d557343f14
# Parent d19b3ba026fd844d21a250f768f70a1543d6bbd7
common: Use get_page_from_gfn() instead of get_gfn()/put_gfn.
Signed-off-by: Tim Deegan <tim@xen.org>
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
diff -r d19b3ba026fd -r 28d15a29ab59 xen/common/memory.c
--- a/xen/common/memory.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/common/memory.c Thu May 10 15:54:16 2012 +0100
@@ -676,7 +676,7 @@ long do_memory_op(unsigned long cmd, XEN
case XENMEM_remove_from_physmap:
{
struct xen_remove_from_physmap xrfp;
- unsigned long mfn;
+ struct page_info *page;
struct domain *d;
if ( copy_from_guest(&xrfp, arg, 1) )
@@ -694,15 +694,15 @@ long do_memory_op(unsigned long cmd, XEN
domain_lock(d);
- mfn = get_gfn_untyped(d, xrfp.gpfn);
-
- if ( mfn_valid(mfn) )
- guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
+ page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+ if ( page )
+ {
+ guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
+ put_page(page);
+ }
else
rc = -ENOENT;
- put_gfn(d, xrfp.gpfn);
-
domain_unlock(d);
rcu_unlock_domain(d);
diff -r d19b3ba026fd -r 28d15a29ab59 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/common/tmem_xen.c Thu May 10 15:54:16 2012 +0100
@@ -107,30 +107,25 @@ static inline void cli_put_page(tmem_cli
static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
pfp_t **pcli_pfp, bool_t cli_write)
{
- unsigned long cli_mfn;
p2m_type_t t;
struct page_info *page;
- int ret;
- cli_mfn = mfn_x(get_gfn(current->domain, cmfn, &t));
- if ( t != p2m_ram_rw || !mfn_valid(cli_mfn) )
+ page = get_page_from_gfn(current->domain, cmfn, &t, P2M_ALLOC);
+ if ( !page || t != p2m_ram_rw )
{
- put_gfn(current->domain, (unsigned long) cmfn);
- return NULL;
+ if ( page )
+ put_page(page);
}
- page = mfn_to_page(cli_mfn);
- if ( cli_write )
- ret = get_page_and_type(page, current->domain, PGT_writable_page);
- else
- ret = get_page(page, current->domain);
- if ( !ret )
+
+ if ( cli_write && !get_page_type(page, PGT_writable_page) )
{
- put_gfn(current->domain, (unsigned long) cmfn);
+ put_page(page);
return NULL;
}
- *pcli_mfn = cli_mfn;
+
+ *pcli_mfn = page_to_mfn(page);
*pcli_pfp = (pfp_t *)page;
- return map_domain_page(cli_mfn);
+ return map_domain_page(*pcli_mfn);
}
static inline void cli_put_page(tmem_cli_mfn_t cmfn, void *cli_va, pfp_t *cli_pfp,
@@ -144,7 +139,6 @@ static inline void cli_put_page(tmem_cli
else
put_page((struct page_info *)cli_pfp);
unmap_domain_page(cli_va);
- put_gfn(current->domain, (unsigned long) cmfn);
}
#endif
diff -r d19b3ba026fd -r 28d15a29ab59 xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/xsm/flask/hooks.c Thu May 10 15:54:16 2012 +0100
@@ -1318,6 +1318,7 @@ static int flask_mmu_normal_update(struc
struct domain_security_struct *dsec;
u32 fsid;
struct avc_audit_data ad;
+ struct page_info *page = NULL;
if (d != t)
rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__REMOTE_REMAP);
@@ -1333,8 +1334,9 @@ static int flask_mmu_normal_update(struc
map_perms |= MMU__MAP_WRITE;
AVC_AUDIT_DATA_INIT(&ad, MEMORY);
- fmfn = get_gfn_untyped(f, l1e_get_pfn(l1e_from_intpte(fpte)));
-
+ page = get_page_from_gfn(f, l1e_get_pfn(l1e_from_intpte(fpte)),
+ NULL, P2M_ALLOC);
+ fmfn = page ? page_to_mfn(page) : INVALID_MFN;
ad.sdom = d;
ad.tdom = f;
ad.memory.pte = fpte;
@@ -1342,7 +1344,8 @@ static int flask_mmu_normal_update(struc
rc = get_mfn_sid(fmfn, &fsid);
- put_gfn(f, fmfn);
+ if ( page )
+ put_page(page);
if ( rc )
return rc;
@@ -1370,7 +1373,7 @@ static int flask_update_va_mapping(struc
int rc = 0;
u32 psid;
u32 map_perms = MMU__MAP_READ;
- unsigned long mfn;
+ struct page_info *page = NULL;
struct domain_security_struct *dsec;
if ( !(l1e_get_flags(pte) & _PAGE_PRESENT) )
@@ -1381,8 +1384,10 @@ static int flask_update_va_mapping(struc
dsec = d->ssid;
- mfn = get_gfn_untyped(f, l1e_get_pfn(pte));
- rc = get_mfn_sid(mfn, &psid);
+ page = get_page_from_gfn(f, l1e_get_pfn(pte), NULL, P2M_ALLOC);
+ rc = get_mfn_sid(page ? page_to_mfn(page) : INVALID_MFN, &psid);
+ if ( page )
+ put_page(page);
if ( rc )
return rc;
* [PATCH 08 of 11] grant-tables: Use get_page_from_gfn() instead of get_gfn()/put_gfn
2012-05-10 14:59 [PATCH 00 of 11] Use a reader-writer lock for the p2m Tim Deegan
` (6 preceding siblings ...)
2012-05-10 14:59 ` [PATCH 07 of 11] common: " Tim Deegan
@ 2012-05-10 14:59 ` Tim Deegan
2012-05-10 14:59 ` [PATCH 09 of 11] x86/hvm: use unlocked p2m lookups in hvmemul_rep_movs() Tim Deegan
` (3 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2012-05-10 14:59 UTC (permalink / raw)
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Andres Lagar-Cavilla <andres@lagarcavilla.org>
# Date 1336661656 -3600
# Node ID ed61cd76e3e3fad3ec4c216669d1e9ee8c0bfa4b
# Parent 28d15a29ab59d29a1fd5deb79ceda5d557343f14
grant-tables: Use get_page_from_gfn() instead of get_gfn()/put_gfn.
This requires some careful re-engineering of __get_paged_frame and its callers.
Functions that previously returned gfns to be put now return pages to be put.
Tested with a Win7 guest running the Citrix PV drivers, using speedtest for
networking (yes!) plus the loginVSI framework to constantly hit the disk.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
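For illustration only (the caller below is made up; the real callers are the
grant mapping and copy paths converted in the hunks that follow), the new
calling convention for __get_paged_frame() looks like:
    static int example_caller(struct domain *rd, unsigned long gfn,
                              int readonly)
    {
        struct page_info *page;
        unsigned long frame;
        int rc = __get_paged_frame(gfn, &frame, &page, readonly, rd);
        if ( rc != GNTST_okay )
            return rc;        /* *page is NULL, no reference to release */
        /* ... use frame; a reference is held on page ... */
        put_page(page);       /* replaces the old put_gfn(rd, gfn) */
        return GNTST_okay;
    }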
diff -r 28d15a29ab59 -r ed61cd76e3e3 xen/common/grant_table.c
--- a/xen/common/grant_table.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/common/grant_table.c Thu May 10 15:54:16 2012 +0100
@@ -107,18 +107,6 @@ static unsigned inline int max_nr_maptra
return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
}
-#ifdef CONFIG_X86
-#define gfn_to_mfn_private(_d, _gfn) ({ \
- p2m_type_t __p2mt; \
- unsigned long __x; \
- __x = mfn_x(get_gfn_unshare((_d), (_gfn), &__p2mt)); \
- if ( p2m_is_shared(__p2mt) || !p2m_is_valid(__p2mt) ) \
- __x = INVALID_MFN; \
- __x; })
-#else
-#define gfn_to_mfn_private(_d, _gfn) gmfn_to_mfn(_d, _gfn)
-#endif
-
#define SHGNT_PER_PAGE_V1 (PAGE_SIZE / sizeof(grant_entry_v1_t))
#define shared_entry_v1(t, e) \
((t)->shared_v1[(e)/SHGNT_PER_PAGE_V1][(e)%SHGNT_PER_PAGE_V1])
@@ -141,41 +129,41 @@ shared_entry_header(struct grant_table *
#define active_entry(t, e) \
((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
-/* Check if the page has been paged out. If rc == GNTST_okay, caller must do put_gfn(rd, gfn) */
-static int __get_paged_frame(unsigned long gfn, unsigned long *frame, int readonly, struct domain *rd)
+/* Check if the page has been paged out, or needs unsharing.
+ If rc == GNTST_okay, *page contains the page struct with a ref taken.
+ Caller must do put_page(*page).
+ If any error, *page = NULL, *frame = INVALID_MFN, no ref taken. */
+static int __get_paged_frame(unsigned long gfn, unsigned long *frame, struct page_info **page,
+ int readonly, struct domain *rd)
{
int rc = GNTST_okay;
#if defined(P2M_PAGED_TYPES) || defined(P2M_SHARED_TYPES)
p2m_type_t p2mt;
- mfn_t mfn;
-
- if ( readonly )
- mfn = get_gfn(rd, gfn, &p2mt);
- else
+
+ *page = get_page_from_gfn(rd, gfn, &p2mt,
+ (readonly) ? P2M_ALLOC : P2M_UNSHARE);
+ if ( !(*page) )
{
- mfn = get_gfn_unshare(rd, gfn, &p2mt);
+ *frame = INVALID_MFN;
if ( p2m_is_shared(p2mt) )
+ return GNTST_eagain;
+ if ( p2m_is_paging(p2mt) )
{
- put_gfn(rd, gfn);
+ p2m_mem_paging_populate(rd, gfn);
return GNTST_eagain;
}
+ return GNTST_bad_page;
}
-
- if ( p2m_is_valid(p2mt) ) {
- *frame = mfn_x(mfn);
- if ( p2m_is_paging(p2mt) )
- {
- put_gfn(rd, gfn);
- p2m_mem_paging_populate(rd, gfn);
- rc = GNTST_eagain;
- }
- } else {
- put_gfn(rd, gfn);
- *frame = INVALID_MFN;
- rc = GNTST_bad_page;
+ *frame = page_to_mfn(*page);
+#else
+ *frame = gmfn_to_mfn(rd, gfn);
+ *page = mfn_valid(*frame) ? mfn_to_page(*frame) : NULL;
+ if ( (!(*page)) || (!get_page(*page, rd)) )
+ {
+ *frame = INVALID_MFN;
+ *page = NULL;
+ rc = GNTST_bad_page;
}
-#else
- *frame = readonly ? gmfn_to_mfn(rd, gfn) : gfn_to_mfn_private(rd, gfn);
#endif
return rc;
@@ -470,12 +458,11 @@ static void
__gnttab_map_grant_ref(
struct gnttab_map_grant_ref *op)
{
- struct domain *ld, *rd, *owner;
+ struct domain *ld, *rd, *owner = NULL;
struct vcpu *led;
int handle;
- unsigned long gfn = INVALID_GFN;
unsigned long frame = 0, nr_gets = 0;
- struct page_info *pg;
+ struct page_info *pg = NULL;
int rc = GNTST_okay;
u32 old_pin;
u32 act_pin;
@@ -573,13 +560,11 @@ static void
{
unsigned long frame;
- gfn = sha1 ? sha1->frame : sha2->full_page.frame;
- rc = __get_paged_frame(gfn, &frame, !!(op->flags & GNTMAP_readonly), rd);
+ unsigned long gfn = sha1 ? sha1->frame : sha2->full_page.frame;
+ rc = __get_paged_frame(gfn, &frame, &pg,
+ !!(op->flags & GNTMAP_readonly), rd);
if ( rc != GNTST_okay )
- {
- gfn = INVALID_GFN;
goto unlock_out_clear;
- }
act->gfn = gfn;
act->domid = ld->domain_id;
act->frame = frame;
@@ -606,9 +591,17 @@ static void
spin_unlock(&rd->grant_table->lock);
- pg = mfn_valid(frame) ? mfn_to_page(frame) : NULL;
-
- if ( !pg || (owner = page_get_owner_and_reference(pg)) == dom_io )
+ /* pg may be set, with a refcount included, from __get_paged_frame */
+ if ( !pg )
+ {
+ pg = mfn_valid(frame) ? mfn_to_page(frame) : NULL;
+ if ( pg )
+ owner = page_get_owner_and_reference(pg);
+ }
+ else
+ owner = page_get_owner(pg);
+
+ if ( !pg || (owner == dom_io) )
{
/* Only needed the reference to confirm dom_io ownership. */
if ( pg )
@@ -708,8 +701,6 @@ static void
op->handle = handle;
op->status = GNTST_okay;
- if ( gfn != INVALID_GFN )
- put_gfn(rd, gfn);
rcu_unlock_domain(rd);
return;
@@ -748,8 +739,6 @@ static void
gnttab_clear_flag(_GTF_reading, status);
unlock_out:
- if ( gfn != INVALID_GFN )
- put_gfn(rd, gfn);
spin_unlock(&rd->grant_table->lock);
op->status = rc;
put_maptrack_handle(ld->grant_table, handle);
@@ -1479,7 +1468,16 @@ gnttab_transfer(
return -EFAULT;
}
- mfn = gfn_to_mfn_private(d, gop.mfn);
+#ifdef CONFIG_X86
+ {
+ p2m_type_t __p2mt;
+ mfn = mfn_x(get_gfn_unshare(d, gop.mfn, &__p2mt));
+ if ( p2m_is_shared(__p2mt) || !p2m_is_valid(__p2mt) )
+ mfn = INVALID_MFN;
+ }
+#else
+ mfn = gmfn_to_mfn(d, gop.mfn);
+#endif
/* Check the passed page frame for basic validity. */
if ( unlikely(!mfn_valid(mfn)) )
@@ -1723,15 +1721,14 @@ static void __fixup_status_for_pin(const
}
/* Grab a frame number from a grant entry and update the flags and pin
- count as appropriate. Note that this does *not* update the page
- type or reference counts, and does not check that the mfn is
- actually valid. If *gfn != INVALID_GFN, and rc == GNTST_okay, then
- we leave this function holding the p2m entry for *gfn in *owning_domain */
+ count as appropriate. If rc == GNTST_okay, note that this *does*
+ take one ref count on the target page, stored in *page.
+ If there is any error, *page = NULL, no ref taken. */
static int
__acquire_grant_for_copy(
struct domain *rd, unsigned long gref, struct domain *ld, int readonly,
- unsigned long *frame, unsigned long *gfn, unsigned *page_off, unsigned *length,
- unsigned allow_transitive, struct domain **owning_domain)
+ unsigned long *frame, struct page_info **page,
+ unsigned *page_off, unsigned *length, unsigned allow_transitive)
{
grant_entry_v1_t *sha1;
grant_entry_v2_t *sha2;
@@ -1746,11 +1743,9 @@ static int
unsigned trans_page_off;
unsigned trans_length;
int is_sub_page;
- struct domain *ignore;
s16 rc = GNTST_okay;
- *owning_domain = NULL;
- *gfn = INVALID_GFN;
+ *page = NULL;
spin_lock(&rd->grant_table->lock);
@@ -1827,14 +1822,13 @@ static int
spin_unlock(&rd->grant_table->lock);
rc = __acquire_grant_for_copy(td, trans_gref, rd,
- readonly, &grant_frame, gfn,
- &trans_page_off, &trans_length,
- 0, &ignore);
+ readonly, &grant_frame, page,
+ &trans_page_off, &trans_length, 0);
spin_lock(&rd->grant_table->lock);
if ( rc != GNTST_okay ) {
__fixup_status_for_pin(act, status);
- rcu_unlock_domain(td);
+ rcu_unlock_domain(td);
spin_unlock(&rd->grant_table->lock);
return rc;
}
@@ -1846,56 +1840,49 @@ static int
if ( act->pin != old_pin )
{
__fixup_status_for_pin(act, status);
- rcu_unlock_domain(td);
+ rcu_unlock_domain(td);
spin_unlock(&rd->grant_table->lock);
+ put_page(*page);
return __acquire_grant_for_copy(rd, gref, ld, readonly,
- frame, gfn, page_off, length,
- allow_transitive,
- owning_domain);
+ frame, page, page_off, length,
+ allow_transitive);
}
/* The actual remote remote grant may or may not be a
sub-page, but we always treat it as one because that
blocks mappings of transitive grants. */
is_sub_page = 1;
- *owning_domain = td;
act->gfn = -1ul;
}
else if ( sha1 )
{
- *gfn = sha1->frame;
- rc = __get_paged_frame(*gfn, &grant_frame, readonly, rd);
+ rc = __get_paged_frame(sha1->frame, &grant_frame, page, readonly, rd);
if ( rc != GNTST_okay )
goto unlock_out;
- act->gfn = *gfn;
+ act->gfn = sha1->frame;
is_sub_page = 0;
trans_page_off = 0;
trans_length = PAGE_SIZE;
- *owning_domain = rd;
}
else if ( !(sha2->hdr.flags & GTF_sub_page) )
{
- *gfn = sha2->full_page.frame;
- rc = __get_paged_frame(*gfn, &grant_frame, readonly, rd);
+ rc = __get_paged_frame(sha2->full_page.frame, &grant_frame, page, readonly, rd);
if ( rc != GNTST_okay )
goto unlock_out;
- act->gfn = *gfn;
+ act->gfn = sha2->full_page.frame;
is_sub_page = 0;
trans_page_off = 0;
trans_length = PAGE_SIZE;
- *owning_domain = rd;
}
else
{
- *gfn = sha2->sub_page.frame;
- rc = __get_paged_frame(*gfn, &grant_frame, readonly, rd);
+ rc = __get_paged_frame(sha2->sub_page.frame, &grant_frame, page, readonly, rd);
if ( rc != GNTST_okay )
goto unlock_out;
- act->gfn = *gfn;
+ act->gfn = sha2->sub_page.frame;
is_sub_page = 1;
trans_page_off = sha2->sub_page.page_off;
trans_length = sha2->sub_page.length;
- *owning_domain = rd;
}
if ( !act->pin )
@@ -1911,7 +1898,9 @@ static int
}
else
{
- *owning_domain = rd;
+ ASSERT(mfn_valid(act->frame));
+ *page = mfn_to_page(act->frame);
+ (void)page_get_owner_and_reference(*page);
}
act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
@@ -1930,11 +1919,11 @@ static void
struct gnttab_copy *op)
{
struct domain *sd = NULL, *dd = NULL;
- struct domain *source_domain = NULL, *dest_domain = NULL;
- unsigned long s_frame, d_frame, s_gfn = INVALID_GFN, d_gfn = INVALID_GFN;
+ unsigned long s_frame, d_frame;
+ struct page_info *s_pg = NULL, *d_pg = NULL;
char *sp, *dp;
s16 rc = GNTST_okay;
- int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
+ int have_d_grant = 0, have_s_grant = 0;
int src_is_gref, dest_is_gref;
if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
@@ -1972,82 +1961,54 @@ static void
{
unsigned source_off, source_len;
rc = __acquire_grant_for_copy(sd, op->source.u.ref, current->domain, 1,
- &s_frame, &s_gfn, &source_off, &source_len, 1,
- &source_domain);
+ &s_frame, &s_pg, &source_off, &source_len, 1);
if ( rc != GNTST_okay )
goto error_out;
have_s_grant = 1;
if ( op->source.offset < source_off ||
op->len > source_len )
- PIN_FAIL(error_put_s_gfn, GNTST_general_error,
+ PIN_FAIL(error_out, GNTST_general_error,
"copy source out of bounds: %d < %d || %d > %d\n",
op->source.offset, source_off,
op->len, source_len);
}
else
{
-#ifdef CONFIG_X86
- s_gfn = op->source.u.gmfn;
- rc = __get_paged_frame(op->source.u.gmfn, &s_frame, 1, sd);
+ rc = __get_paged_frame(op->source.u.gmfn, &s_frame, &s_pg, 1, sd);
if ( rc != GNTST_okay )
- goto error_out;
-#else
- s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
-#endif
- source_domain = sd;
+ PIN_FAIL(error_out, rc,
+ "source frame %lx invalid.\n", s_frame);
}
- if ( unlikely(!mfn_valid(s_frame)) )
- PIN_FAIL(error_put_s_gfn, GNTST_general_error,
- "source frame %lx invalid.\n", s_frame);
- /* For the source frame, the page could still be shared, so
- * don't assume ownership by source_domain */
- if ( !page_get_owner_and_reference(mfn_to_page(s_frame)) )
- {
- if ( !sd->is_dying )
- gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
- rc = GNTST_general_error;
- goto error_put_s_gfn;
- }
- have_s_ref = 1;
if ( dest_is_gref )
{
unsigned dest_off, dest_len;
rc = __acquire_grant_for_copy(dd, op->dest.u.ref, current->domain, 0,
- &d_frame, &d_gfn, &dest_off, &dest_len, 1,
- &dest_domain);
+ &d_frame, &d_pg, &dest_off, &dest_len, 1);
if ( rc != GNTST_okay )
- goto error_put_s_gfn;
+ goto error_out;
have_d_grant = 1;
if ( op->dest.offset < dest_off ||
op->len > dest_len )
- PIN_FAIL(error_put_d_gfn, GNTST_general_error,
+ PIN_FAIL(error_out, GNTST_general_error,
"copy dest out of bounds: %d < %d || %d > %d\n",
op->dest.offset, dest_off,
op->len, dest_len);
}
else
{
-#ifdef CONFIG_X86
- d_gfn = op->dest.u.gmfn;
- rc = __get_paged_frame(op->dest.u.gmfn, &d_frame, 0, dd);
+ rc = __get_paged_frame(op->dest.u.gmfn, &d_frame, &d_pg, 0, dd);
if ( rc != GNTST_okay )
- goto error_put_s_gfn;
-#else
- d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
-#endif
- dest_domain = dd;
+ PIN_FAIL(error_out, rc,
+ "destination frame %lx invalid.\n", d_frame);
}
- if ( unlikely(!mfn_valid(d_frame)) )
- PIN_FAIL(error_put_d_gfn, GNTST_general_error,
- "destination frame %lx invalid.\n", d_frame);
- if ( !get_page_and_type(mfn_to_page(d_frame), dest_domain,
- PGT_writable_page) )
+
+ if ( !get_page_type(d_pg, PGT_writable_page) )
{
if ( !dd->is_dying )
gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
rc = GNTST_general_error;
- goto error_put_d_gfn;
+ goto error_out;
}
sp = map_domain_page(s_frame);
@@ -2060,16 +2021,12 @@ static void
gnttab_mark_dirty(dd, d_frame);
- put_page_and_type(mfn_to_page(d_frame));
- error_put_d_gfn:
- if ( (d_gfn != INVALID_GFN) && (dest_domain) )
- put_gfn(dest_domain, d_gfn);
- error_put_s_gfn:
- if ( (s_gfn != INVALID_GFN) && (source_domain) )
- put_gfn(source_domain, s_gfn);
+ put_page_type(d_pg);
error_out:
- if ( have_s_ref )
- put_page(mfn_to_page(s_frame));
+ if ( d_pg )
+ put_page(d_pg);
+ if ( s_pg )
+ put_page(s_pg);
if ( have_s_grant )
__release_grant_for_copy(sd, op->source.u.ref, 1);
if ( have_d_grant )
* [PATCH 09 of 11] x86/hvm: use unlocked p2m lookups in hvmemul_rep_movs()
2012-05-10 14:59 [PATCH 00 of 11] Use a reader-writer lock for the p2m Tim Deegan
` (7 preceding siblings ...)
2012-05-10 14:59 ` [PATCH 08 of 11] grant-tables: " Tim Deegan
@ 2012-05-10 14:59 ` Tim Deegan
2012-05-10 14:59 ` [PATCH 10 of 11] x86/hvm/svm: use unlocked p2m lookups in trace and error paths Tim Deegan
` (2 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2012-05-10 14:59 UTC (permalink / raw)
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID d514c4cfcd2b18baafa3aa61cb4cc4971cdbeecc
# Parent ed61cd76e3e3fad3ec4c216669d1e9ee8c0bfa4b
x86/hvm: use unlocked p2m lookups in hvmemul_rep_movs()
The eventual hvm_copy or IO emulations will re-check the p2m and DTRT.
Signed-off-by: Tim Deegan <tim@xen.org>
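Condensed (see the hunk below for the code in context; sgpa, dgpa, reps,
bytes_per_rep and df are locals of hvmemul_rep_movs()), the resulting lookup
is just an advisory, unlocked query used to route obvious MMIO accesses:
    p2m_type_t sp2mt, dp2mt;
    /* Unlocked queries: no p2m lock or page reference is held here. */
    (void) get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt);
    (void) get_gfn_query_unlocked(current->domain, dgpa >> PAGE_SHIFT, &dp2mt);
    if ( sp2mt == p2m_mmio_dm )
        return hvmemul_do_mmio(sgpa, reps, bytes_per_rep, dgpa,
                               IOREQ_READ, df, NULL);
    if ( dp2mt == p2m_mmio_dm )
        return hvmemul_do_mmio(dgpa, reps, bytes_per_rep, sgpa,
                               IOREQ_WRITE, df, NULL);
A stale answer here is fine: as above, the eventual hvm_copy or MMIO
emulation re-checks the p2m and does the right thing.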
diff -r ed61cd76e3e3 -r d514c4cfcd2b xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/hvm/emulate.c Thu May 10 15:54:16 2012 +0100
@@ -677,7 +677,6 @@ static int hvmemul_rep_movs(
p2m_type_t sp2mt, dp2mt;
int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
char *buf;
- struct two_gfns tg;
rc = hvmemul_virtual_to_linear(
src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
@@ -705,25 +704,17 @@ static int hvmemul_rep_movs(
if ( rc != X86EMUL_OKAY )
return rc;
- get_two_gfns(current->domain, sgpa >> PAGE_SHIFT, &sp2mt, NULL, NULL,
- current->domain, dgpa >> PAGE_SHIFT, &dp2mt, NULL, NULL,
- P2M_ALLOC, &tg);
+ /* Check for MMIO ops */
+ (void) get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt);
+ (void) get_gfn_query_unlocked(current->domain, dgpa >> PAGE_SHIFT, &dp2mt);
- if ( !p2m_is_ram(sp2mt) && !p2m_is_grant(sp2mt) )
- {
- rc = hvmemul_do_mmio(
+ if ( sp2mt == p2m_mmio_dm )
+ return hvmemul_do_mmio(
sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
- put_two_gfns(&tg);
- return rc;
- }
- if ( !p2m_is_ram(dp2mt) && !p2m_is_grant(dp2mt) )
- {
- rc = hvmemul_do_mmio(
+ if ( dp2mt == p2m_mmio_dm )
+ return hvmemul_do_mmio(
dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
- put_two_gfns(&tg);
- return rc;
- }
/* RAM-to-RAM copy: emulate as equivalent of memmove(dgpa, sgpa, bytes). */
bytes = *reps * bytes_per_rep;
@@ -738,10 +729,7 @@ static int hvmemul_rep_movs(
* can be emulated by a source-to-buffer-to-destination block copy.
*/
if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) )
- {
- put_two_gfns(&tg);
return X86EMUL_UNHANDLEABLE;
- }
/* Adjust destination address for reverse copy. */
if ( df )
@@ -750,10 +738,7 @@ static int hvmemul_rep_movs(
/* Allocate temporary buffer. Fall back to slow emulation if this fails. */
buf = xmalloc_bytes(bytes);
if ( buf == NULL )
- {
- put_two_gfns(&tg);
return X86EMUL_UNHANDLEABLE;
- }
/*
* We do a modicum of checking here, just for paranoia's sake and to
@@ -764,7 +749,6 @@ static int hvmemul_rep_movs(
rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
xfree(buf);
- put_two_gfns(&tg);
if ( rc == HVMCOPY_gfn_paged_out )
return X86EMUL_RETRY;
* [PATCH 10 of 11] x86/hvm/svm: use unlocked p2m lookups in trace and error paths
2012-05-10 14:59 [PATCH 00 of 11] Use a reader-writer lock for the p2m Tim Deegan
` (8 preceding siblings ...)
2012-05-10 14:59 ` [PATCH 09 of 11] x86/hvm: use unlocked p2m lookups in hvmemul_rep_movs() Tim Deegan
@ 2012-05-10 14:59 ` Tim Deegan
2012-05-10 14:59 ` [PATCH 11 of 11] x86/p2m, arm/p2m: remove get_gfn_untyped() Tim Deegan
2012-05-16 13:53 ` [PATCH 00 of 11] Use a reader-writer lock for the p2m Andres Lagar-Cavilla
11 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2012-05-10 14:59 UTC (permalink / raw)
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Andres Lagar-Cavilla <andres@lagarcavilla.org>
# Date 1336661656 -3600
# Node ID 65768e16352c6bb0e11bb6c661da5137e99ecceb
# Parent d514c4cfcd2b18baafa3aa61cb4cc4971cdbeecc
x86/hvm/svm: use unlocked p2m lookups in trace and error paths.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
diff -r d514c4cfcd2b -r 65768e16352c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Thu May 10 15:54:16 2012 +0100
@@ -1321,8 +1321,7 @@ static void svm_do_nested_pgfault(struct
p2m = p2m_get_p2m(v);
_d.gpa = gpa;
_d.qualification = 0;
- mfn = get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL);
- __put_gfn(p2m, gfn);
+ mfn = __get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL, 0);
_d.mfn = mfn_x(mfn);
__trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
@@ -1343,8 +1342,7 @@ static void svm_do_nested_pgfault(struct
if ( p2m == NULL )
p2m = p2m_get_p2m(v);
/* Everything else is an error. */
- mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
- __put_gfn(p2m, gfn);
+ mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
gdprintk(XENLOG_ERR,
"SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
gpa, mfn_x(mfn), p2mt);
* [PATCH 11 of 11] x86/p2m, arm/p2m: remove get_gfn_untyped()
2012-05-10 14:59 [PATCH 00 of 11] Use a reader-writer lock for the p2m Tim Deegan
` (9 preceding siblings ...)
2012-05-10 14:59 ` [PATCH 10 of 11] x86/hvm/svm: use unlocked p2m lookups in trace and error paths Tim Deegan
@ 2012-05-10 14:59 ` Tim Deegan
2012-05-10 15:24 ` Ian Campbell
2012-05-16 13:53 ` [PATCH 00 of 11] Use a reader-writer lock for the p2m Andres Lagar-Cavilla
11 siblings, 1 reply; 15+ messages in thread
From: Tim Deegan @ 2012-05-10 14:59 UTC (permalink / raw)
To: xen-devel; +Cc: Andres Lagar-Cavilla
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1336661656 -3600
# Node ID d95f0fb6c358f7e2624d317da6e405e8d733603a
# Parent 65768e16352c6bb0e11bb6c661da5137e99ecceb
x86/p2m, arm/p2m: remove get_gfn_untyped().
Adjust its only user to use get_gfn.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Signed-off-by: Tim Deegan <tim@xen.org>
diff -r 65768e16352c -r d95f0fb6c358 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu May 10 15:54:16 2012 +0100
+++ b/xen/arch/x86/mm.c Thu May 10 15:54:16 2012 +0100
@@ -4524,6 +4524,7 @@ static int xenmem_add_to_physmap_once(
unsigned long gfn = 0; /* gcc ... */
unsigned long prev_mfn, mfn = 0, gpfn, idx;
int rc;
+ p2m_type_t p2mt;
switch ( xatp->space )
{
@@ -4596,7 +4597,7 @@ static int xenmem_add_to_physmap_once(
put_page(page);
/* Remove previously mapped page if it was present. */
- prev_mfn = get_gfn_untyped(d, xatp->gpfn);
+ prev_mfn = mfn_x(get_gfn(d, xatp->gpfn, &p2mt));
if ( mfn_valid(prev_mfn) )
{
if ( is_xen_heap_mfn(prev_mfn) )
diff -r 65768e16352c -r d95f0fb6c358 xen/include/asm-arm/p2m.h
--- a/xen/include/asm-arm/p2m.h Thu May 10 15:54:16 2012 +0100
+++ b/xen/include/asm-arm/p2m.h Thu May 10 15:54:16 2012 +0100
@@ -75,12 +75,6 @@ static inline struct page_info *get_page
return page;
}
-/* Compatibility function exporting the old untyped interface */
-static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
-{
- return gmfn_to_mfn(d, gpfn);
-}
-
int get_page_type(struct page_info *page, unsigned long type);
int is_iomem_page(unsigned long mfn);
static inline int get_page_and_type(struct page_info *page,
diff -r 65768e16352c -r d95f0fb6c358 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
+++ b/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
@@ -339,17 +339,6 @@ static inline mfn_t get_gfn_type(struct
#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \
P2M_ALLOC | P2M_UNSHARE)
-/* Compatibility function exporting the old untyped interface */
-static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
-{
- mfn_t mfn;
- p2m_type_t t;
- mfn = get_gfn(d, gpfn, &t);
- if ( p2m_is_valid(t) )
- return mfn_x(mfn);
- return INVALID_MFN;
-}
-
/* Will release the p2m_lock for this gfn entry. */
void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
* Re: [PATCH 11 of 11] x86/p2m, arm/p2m: remove get_gfn_untyped()
2012-05-10 14:59 ` [PATCH 11 of 11] x86/p2m, arm/p2m: remove get_gfn_untyped() Tim Deegan
@ 2012-05-10 15:24 ` Ian Campbell
0 siblings, 0 replies; 15+ messages in thread
From: Ian Campbell @ 2012-05-10 15:24 UTC (permalink / raw)
To: Tim Deegan; +Cc: Andres Lagar-Cavilla, xen-devel@lists.xen.org
On Thu, 2012-05-10 at 15:59 +0100, Tim Deegan wrote:
> # HG changeset patch
> # User Tim Deegan <tim@xen.org>
> # Date 1336661656 -3600
> # Node ID d95f0fb6c358f7e2624d317da6e405e8d733603a
> # Parent 65768e16352c6bb0e11bb6c661da5137e99ecceb
> x86/p2m, arm/p2m: remove get_gfn_untyped().
>
> Adjust its only user to use get_gfn.
>
> Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
> Signed-off-by: Tim Deegan <tim@xen.org>
(trivial) ARM bit:
Acked-by: Ian Campbell <ian.campbell@citrix.com>
>
> diff -r 65768e16352c -r d95f0fb6c358 xen/arch/x86/mm.c
> --- a/xen/arch/x86/mm.c Thu May 10 15:54:16 2012 +0100
> +++ b/xen/arch/x86/mm.c Thu May 10 15:54:16 2012 +0100
> @@ -4524,6 +4524,7 @@ static int xenmem_add_to_physmap_once(
> unsigned long gfn = 0; /* gcc ... */
> unsigned long prev_mfn, mfn = 0, gpfn, idx;
> int rc;
> + p2m_type_t p2mt;
>
> switch ( xatp->space )
> {
> @@ -4596,7 +4597,7 @@ static int xenmem_add_to_physmap_once(
> put_page(page);
>
> /* Remove previously mapped page if it was present. */
> - prev_mfn = get_gfn_untyped(d, xatp->gpfn);
> + prev_mfn = mfn_x(get_gfn(d, xatp->gpfn, &p2mt));
> if ( mfn_valid(prev_mfn) )
> {
> if ( is_xen_heap_mfn(prev_mfn) )
> diff -r 65768e16352c -r d95f0fb6c358 xen/include/asm-arm/p2m.h
> --- a/xen/include/asm-arm/p2m.h Thu May 10 15:54:16 2012 +0100
> +++ b/xen/include/asm-arm/p2m.h Thu May 10 15:54:16 2012 +0100
> @@ -75,12 +75,6 @@ static inline struct page_info *get_page
> return page;
> }
>
> -/* Compatibility function exporting the old untyped interface */
> -static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
> -{
> - return gmfn_to_mfn(d, gpfn);
> -}
> -
> int get_page_type(struct page_info *page, unsigned long type);
> int is_iomem_page(unsigned long mfn);
> static inline int get_page_and_type(struct page_info *page,
> diff -r 65768e16352c -r d95f0fb6c358 xen/include/asm-x86/p2m.h
> --- a/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
> +++ b/xen/include/asm-x86/p2m.h Thu May 10 15:54:16 2012 +0100
> @@ -339,17 +339,6 @@ static inline mfn_t get_gfn_type(struct
> #define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \
> P2M_ALLOC | P2M_UNSHARE)
>
> -/* Compatibility function exporting the old untyped interface */
> -static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
> -{
> - mfn_t mfn;
> - p2m_type_t t;
> - mfn = get_gfn(d, gpfn, &t);
> - if ( p2m_is_valid(t) )
> - return mfn_x(mfn);
> - return INVALID_MFN;
> -}
> -
> /* Will release the p2m_lock for this gfn entry. */
> void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
* Re: [PATCH 00 of 11] Use a reader-writer lock for the p2m
2012-05-10 14:59 [PATCH 00 of 11] Use a reader-writer lock for the p2m Tim Deegan
` (10 preceding siblings ...)
2012-05-10 14:59 ` [PATCH 11 of 11] x86/p2m, arm/p2m: remove get_gfn_untyped() Tim Deegan
@ 2012-05-16 13:53 ` Andres Lagar-Cavilla
11 siblings, 0 replies; 15+ messages in thread
From: Andres Lagar-Cavilla @ 2012-05-16 13:53 UTC (permalink / raw)
To: Tim Deegan; +Cc: Andres Lagar-Cavilla, xen-devel
> This is a cleaned-up version of my patch of two weeks ago to make the
> p2m lock into an rwlock, with some updates from Andres folded in.
> With these applied, p2m lookups are no longer serialized in the
> common case (where there are few updates).
>
> I hope to check these in next week, before we branch for 4.2.
We've found that this does not break our testing:
Acked-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Thanks!
Andres
>
> Signed-off-by: Tim Deegan <tim@xen.org>
> Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
>
>