xen-devel.lists.xenproject.org archive mirror
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: linux-kernel@vger.kernel.org, jeremy@goop.org, hpa@zytor.com
Cc: Jan Beulich <JBeulich@novell.com>,
	xen-devel@lists.xensource.com,
	Konrad Rzeszutek Wilk <konrad@kernel.org>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Subject: [PATCH 02/10] xen/p2m: change p2m_missing_* to p2m_identity_*
Date: Tue, 21 Dec 2010 16:37:32 -0500	[thread overview]
Message-ID: <1292967460-15709-3-git-send-email-konrad.wilk@oracle.com> (raw)
In-Reply-To: <1292967460-15709-1-git-send-email-konrad.wilk@oracle.com>

From: Konrad Rzeszutek Wilk <konrad@kernel.org>

We are going to alter how we think about the P2M. Most of the
P2M contains MFNs, and areas that are not populated are
considered to be "missing". Missing means that the PFN is
either not set for this guest (the guest does not have that
much memory allocated) or is under the balloon driver's
ownership.

We are instead now going to think of those unpopulated areas
as "identity": for a PFN whose lookup would land in
p2m_identity we will hand back the PFN value itself instead of
P2M_MISSING, essentially treating those regions as PFN == MFN.
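
To illustrate the idea, here is a simplified sketch (not part of
this patch): the single-level table and the helper name below are
made up for illustration only, whereas the real code uses the
three-level p2m_top/p2m_mid/leaf structure in arch/x86/xen/mmu.c.

	/* Illustrative sketch only, not the actual implementation. */
	#define SKETCH_P2M_PER_PAGE	512UL	/* entries per leaf page (illustrative) */

	static unsigned long *p2m_identity;	/* shared placeholder leaf page */
	static unsigned long **p2m_table;	/* pfn -> leaf page (simplified, one level) */

	static unsigned long sketch_pfn_to_mfn(unsigned long pfn)
	{
		unsigned long *leaf = p2m_table[pfn / SKETCH_P2M_PER_PAGE];

		/* Unpopulated: instead of "missing", hand back the PFN itself. */
		if (leaf == p2m_identity)
			return pfn;		/* treated as PFN == MFN */

		return leaf[pfn % SKETCH_P2M_PER_PAGE];	/* populated: stored MFN */
	}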

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 arch/x86/xen/mmu.c |   59 ++++++++++++++++++++++++++-------------------------
 1 files changed, 30 insertions(+), 29 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 44924e5..d6d0276 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -209,9 +209,9 @@ unsigned long xen_max_p2m_pfn __read_mostly;
 #define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
 
 /* Placeholders for holes in the address space */
-static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE);
 
 static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
 static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
@@ -241,7 +241,7 @@ static void p2m_top_init(unsigned long ***top)
 	unsigned i;
 
 	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = p2m_mid_missing;
+		top[i] = p2m_mid_identity;
 }
 
 static void p2m_top_mfn_init(unsigned long *top)
@@ -249,7 +249,7 @@ static void p2m_top_mfn_init(unsigned long *top)
 	unsigned i;
 
 	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+		top[i] = virt_to_mfn(p2m_mid_identity_mfn);
 }
 
 static void p2m_top_mfn_p_init(unsigned long **top)
@@ -257,7 +257,7 @@ static void p2m_top_mfn_p_init(unsigned long **top)
 	unsigned i;
 
 	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = p2m_mid_missing_mfn;
+		top[i] = p2m_mid_identity_mfn;
 }
 
 static void p2m_mid_init(unsigned long **mid)
@@ -265,7 +265,7 @@ static void p2m_mid_init(unsigned long **mid)
 	unsigned i;
 
 	for (i = 0; i < P2M_MID_PER_PAGE; i++)
-		mid[i] = p2m_missing;
+		mid[i] = p2m_identity;
 }
 
 static void p2m_mid_mfn_init(unsigned long *mid)
@@ -273,7 +273,7 @@ static void p2m_mid_mfn_init(unsigned long *mid)
 	unsigned i;
 
 	for (i = 0; i < P2M_MID_PER_PAGE; i++)
-		mid[i] = virt_to_mfn(p2m_missing);
+		mid[i] = virt_to_mfn(p2m_identity);
 }
 
 static void p2m_init(unsigned long *p2m)
@@ -300,8 +300,8 @@ void xen_build_mfn_list_list(void)
 
 	/* Pre-initialize p2m_top_mfn to be completely missing */
 	if (p2m_top_mfn == NULL) {
-		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		p2m_mid_mfn_init(p2m_mid_missing_mfn);
+		p2m_mid_identity_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_mid_mfn_init(p2m_mid_identity_mfn);
 
 		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
 		p2m_top_mfn_p_init(p2m_top_mfn_p);
@@ -310,7 +310,7 @@ void xen_build_mfn_list_list(void)
 		p2m_top_mfn_init(p2m_top_mfn);
 	} else {
 		/* Reinitialise, mfn's all change after migration */
-		p2m_mid_mfn_init(p2m_mid_missing_mfn);
+		p2m_mid_mfn_init(p2m_mid_identity_mfn);
 	}
 
 	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
@@ -326,15 +326,15 @@ void xen_build_mfn_list_list(void)
 		 * they're just missing, just update the stored mfn,
 		 * since all could have changed over a migrate.
 		 */
-		if (mid == p2m_mid_missing) {
+		if (mid == p2m_mid_identity) {
 			BUG_ON(mididx);
-			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+			BUG_ON(mid_mfn_p != p2m_mid_identity_mfn);
+			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_identity_mfn);
 			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
 			continue;
 		}
 
-		if (mid_mfn_p == p2m_mid_missing_mfn) {
+		if (mid_mfn_p == p2m_mid_identity_mfn) {
 			/*
 			 * XXX boot-time only!  We should never find
 			 * missing parts of the mfn tree after
@@ -370,11 +370,11 @@ void __init xen_build_dynamic_phys_to_machine(void)
 
 	xen_max_p2m_pfn = max_pfn;
 
-	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
-	p2m_init(p2m_missing);
+	p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_init(p2m_identity);
 
-	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
-	p2m_mid_init(p2m_mid_missing);
+	p2m_mid_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_mid_init(p2m_mid_identity);
 
 	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
 	p2m_top_init(p2m_top);
@@ -388,7 +388,7 @@ void __init xen_build_dynamic_phys_to_machine(void)
 		unsigned topidx = p2m_top_index(pfn);
 		unsigned mididx = p2m_mid_index(pfn);
 
-		if (p2m_top[topidx] == p2m_mid_missing) {
+		if (p2m_top[topidx] == p2m_mid_identity) {
 			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
 			p2m_mid_init(mid);
 
@@ -443,7 +443,7 @@ static bool alloc_p2m(unsigned long pfn)
 	top_p = &p2m_top[topidx];
 	mid = *top_p;
 
-	if (mid == p2m_mid_missing) {
+	if (mid == p2m_mid_identity) {
 		/* Mid level is missing, allocate a new one */
 		mid = alloc_p2m_page();
 		if (!mid)
@@ -451,7 +451,7 @@ static bool alloc_p2m(unsigned long pfn)
 
 		p2m_mid_init(mid);
 
-		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+		if (cmpxchg(top_p, p2m_mid_identity, mid) != p2m_mid_identity)
 			free_p2m_page(mid);
 	}
 
@@ -460,9 +460,9 @@ static bool alloc_p2m(unsigned long pfn)
 
 	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
 
-	if (mid_mfn == p2m_mid_missing_mfn) {
+	if (mid_mfn == p2m_mid_identity_mfn) {
 		/* Separately check the mid mfn level */
-		unsigned long missing_mfn;
+		unsigned long identity_mfn;
 		unsigned long mid_mfn_mfn;
 
 		mid_mfn = alloc_p2m_page();
@@ -471,15 +471,16 @@ static bool alloc_p2m(unsigned long pfn)
 
 		p2m_mid_mfn_init(mid_mfn);
 
-		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+		identity_mfn = virt_to_mfn(p2m_mid_identity_mfn);
 		mid_mfn_mfn = virt_to_mfn(mid_mfn);
-		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+		if (cmpxchg(top_mfn_p, identity_mfn, mid_mfn_mfn) !=
+		    identity_mfn)
 			free_p2m_page(mid_mfn);
 		else
 			p2m_top_mfn_p[topidx] = mid_mfn;
 	}
 
-	if (p2m_top[topidx][mididx] == p2m_missing) {
+	if (p2m_top[topidx][mididx] == p2m_identity) {
 		/* p2m leaf page is missing */
 		unsigned long *p2m;
 
@@ -489,7 +490,7 @@ static bool alloc_p2m(unsigned long pfn)
 
 		p2m_init(p2m);
 
-		if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+		if (cmpxchg(&mid[mididx], p2m_identity, p2m) != p2m_identity)
 			free_p2m_page(p2m);
 		else
 			mid_mfn[mididx] = virt_to_mfn(p2m);
@@ -512,7 +513,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	mididx = p2m_mid_index(pfn);
 	idx = p2m_index(pfn);
 
-	if (p2m_top[topidx][mididx] == p2m_missing)
+	if (p2m_top[topidx][mididx] == p2m_identity)
 		return mfn == INVALID_P2M_ENTRY;
 
 	p2m_top[topidx][mididx][idx] = mfn;
-- 
1.7.1


Thread overview: 38+ messages
2010-12-21 21:37 [RFC PATCH v1] Consider void entries in the P2M as 1-1 mapping Konrad Rzeszutek Wilk
2010-12-21 21:37 ` [PATCH 01/10] xen: Make all reserved pages for the balloon be INVALID_P2M_ENTRY Konrad Rzeszutek Wilk
2010-12-21 22:19   ` Jeremy Fitzhardinge
2010-12-21 23:22     ` H. Peter Anvin
2010-12-22  8:47     ` Ian Campbell
2010-12-22 14:53     ` Konrad Rzeszutek Wilk
2010-12-22 15:46       ` Jeremy Fitzhardinge
2010-12-21 21:37 ` Konrad Rzeszutek Wilk [this message]
2010-12-21 22:41   ` [PATCH 02/10] xen/p2m: change p2m_missing_* to p2m_identity_* Jeremy Fitzhardinge
2010-12-22 14:59     ` Konrad Rzeszutek Wilk
2010-12-22 20:36       ` [SPAM] " Jeremy Fitzhardinge
2010-12-21 21:37 ` [PATCH 03/10] xen/mmu: Add the notion of IDENTITY_P2M_ENTRY Konrad Rzeszutek Wilk
2010-12-22  8:44   ` Ian Campbell
2010-12-21 21:37 ` [PATCH 04/10] xen/mmu: For 1-1 mapping, automatically set _PAGE_IOMAP Konrad Rzeszutek Wilk
2010-12-21 22:29   ` Jeremy Fitzhardinge
2010-12-22 15:02     ` Konrad Rzeszutek Wilk
2010-12-22 16:27       ` [Xen-devel] " Ian Campbell
2010-12-21 21:37 ` [PATCH 05/10] xen/setup: Set identity mapping for non-RAM E820 and E820 gaps Konrad Rzeszutek Wilk
2010-12-21 22:34   ` Jeremy Fitzhardinge
2010-12-22 15:04     ` Konrad Rzeszutek Wilk
2010-12-22  8:49   ` [Xen-devel] " Ian Campbell
2010-12-21 21:37 ` [PATCH 06/10] xen/setup: Only set identity mapping in E820 regions when privileged Konrad Rzeszutek Wilk
2010-12-21 22:37   ` Jeremy Fitzhardinge
2010-12-22 15:07     ` Konrad Rzeszutek Wilk
2010-12-21 21:37 ` [PATCH 07/10] xen/mmu: Work with 1-1 mappings when allocating new top/middle entries Konrad Rzeszutek Wilk
2010-12-21 22:37   ` Jeremy Fitzhardinge
2010-12-22 15:10     ` Konrad Rzeszutek Wilk
2010-12-22  8:54   ` [Xen-devel] " Ian Campbell
2010-12-22 17:47     ` Konrad Rzeszutek Wilk
2010-12-21 21:37 ` [PATCH 08/10] xen/mmu: Bugfix. Fill the top entry page with appropriate middle layer pointers Konrad Rzeszutek Wilk
2010-12-21 22:38   ` Jeremy Fitzhardinge
2010-12-22 15:11     ` Konrad Rzeszutek Wilk
2010-12-21 21:37 ` [PATCH 09/10] xen/mmu: Be aware of p2m_[mid_|]missing when saving/restore Konrad Rzeszutek Wilk
2010-12-21 21:37 ` [PATCH 10/10] xen/mmu: Warn against races Konrad Rzeszutek Wilk
2010-12-22  8:36 ` [Xen-devel] [RFC PATCH v1] Consider void entries in the P2M as 1-1 mapping Ian Campbell
2010-12-22 15:06   ` Konrad Rzeszutek Wilk
2010-12-22 16:26     ` Ian Campbell
2010-12-22 18:01       ` Konrad Rzeszutek Wilk
