From: Jeremy Fitzhardinge <jeremy@goop.org>
To: Yinghai Lu <yinghai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	the arch/x86 maintainers <x86@kernel.org>
Subject: memblock vs early_res
Date: Fri, 17 Sep 2010 13:47:44 -0700
Message-ID: <4C93D3F0.3030307@goop.org>

[-- Attachment #1: Type: text/plain, Size: 371 bytes --]

 Hi Yinghai,

I have the patch below floating around in my tree to make sure that
early-reserved highmem is honoured when freeing unreserved memory.  I
was trying to rebase it to current linux-next and noticed that all the
early_res stuff has been replaced with memblock.

Is this still an issue?  What would the memblock version of this patch
look like?
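
For concreteness, I'd naively expect the two helpers to become walks
over memblock.reserved, something like the completely untested sketch
below (memblock_next_free/memblock_next_reserved are made-up names,
and I'm assuming the memblock.reserved.cnt/.regions layout in current
linux-next):

/*
 * Hypothetical memblock analogues of the early_res helpers in the
 * attached patch.  Untested sketch only.
 */
static phys_addr_t __init memblock_next_free(phys_addr_t addr)
{
	unsigned long i;

	for (i = 0; i < memblock.reserved.cnt; i++) {
		struct memblock_region *r = &memblock.reserved.regions[i];

		/* If addr falls inside a reserved region, skip past it. */
		if (addr >= r->base && addr < r->base + r->size)
			return r->base + r->size;
	}
	return addr;	/* addr is already free */
}

static phys_addr_t __init memblock_next_reserved(phys_addr_t addr,
						 phys_addr_t max)
{
	phys_addr_t next = max;
	unsigned long i;

	/* Lowest reserved base at or above addr, capped at max. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		struct memblock_region *r = &memblock.reserved.regions[i];

		if (r->base >= addr && r->base < next)
			next = r->base;
	}
	return next;
}

But if the new API has a more idiomatic way to iterate free vs
reserved ranges, I'd rather use that - which is really what I'm
asking.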

Thanks,
    J


[-- Attachment #2: early_res-highmem.patch --]
[-- Type: text/plain, Size: 3760 bytes --]

From 0a1c234a9fabcc2e71dc7a6da7ae1cb073207281 Mon Sep 17 00:00:00 2001
From: Gianluca Guida <gianluca.guida@citrix.com>
Date: Sun, 2 Aug 2009 01:25:48 +0100
Subject: [PATCH] x86/32: honor reservations of high memory

Make high memory initialization honor early reserved ranges.

Signed-off-by: Gianluca Guida <gianluca.guida@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bca7909..573bc7f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -432,22 +432,45 @@ static int __init add_highpages_work_fn(unsigned long start_pfn,
 {
 	int node_pfn;
 	struct page *page;
+	phys_addr_t chunk_end, chunk_max;
 	unsigned long final_start_pfn, final_end_pfn;
-	struct add_highpages_data *data;
-
-	data = (struct add_highpages_data *)datax;
+	struct add_highpages_data *data = (struct add_highpages_data *)datax;
 
 	final_start_pfn = max(start_pfn, data->start_pfn);
 	final_end_pfn = min(end_pfn, data->end_pfn);
 	if (final_start_pfn >= final_end_pfn)
 		return 0;
 
-	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
-	     node_pfn++) {
-		if (!pfn_valid(node_pfn))
-			continue;
-		page = pfn_to_page(node_pfn);
-		add_one_highpage_init(page);
+	chunk_end = PFN_PHYS(final_start_pfn);
+	chunk_max = PFN_PHYS(final_end_pfn);
+
+	/*
+	 * Walk the range, alternating between reserved and free chunks.
+	 */
+	for (;;) {
+		phys_addr_t chunk_start;
+		chunk_start = min(early_res_next_free(chunk_end), chunk_max);
+
+		/*
+	 * [chunk_end, chunk_start) is reserved; just count the high pages.
+		 */
+		for (node_pfn = PFN_DOWN(chunk_end);
+		     node_pfn < PFN_DOWN(chunk_start); node_pfn++) {
+			if (pfn_valid(node_pfn))
+				totalhigh_pages++;
+		}
+
+		if (chunk_start >= chunk_max)
+			break;
+
+		chunk_end = early_res_next_reserved(chunk_start, chunk_max);
+		for (node_pfn = PFN_DOWN(chunk_start);
+		     node_pfn < PFN_DOWN(chunk_end); node_pfn++) {
+			if (!pfn_valid(node_pfn))
+				continue;
+			page = pfn_to_page(node_pfn);
+			add_one_highpage_init(page);
+		}
 	}
 
 	return 0;
@@ -461,7 +484,6 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
 
 	data.start_pfn = start_pfn;
 	data.end_pfn = end_pfn;
-
 	work_with_active_regions(nid, add_highpages_work_fn, &data);
 }
 
diff --git a/include/linux/early_res.h b/include/linux/early_res.h
index 29c09f5..37317e1 100644
--- a/include/linux/early_res.h
+++ b/include/linux/early_res.h
@@ -8,6 +8,9 @@ extern void free_early(u64 start, u64 end);
 void free_early_partial(u64 start, u64 end);
 extern void early_res_to_bootmem(u64 start, u64 end);
 
+extern u64 early_res_next_free(u64 start);
+extern u64 early_res_next_reserved(u64 addr, u64 max);
+
 void reserve_early_without_check(u64 start, u64 end, char *name);
 u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
 			 u64 size, u64 align);
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 7bfae88..b663c62 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -44,6 +44,36 @@ static int __init find_overlapped_early(u64 start, u64 end)
 	return i;
 }
 
+u64 __init early_res_next_free(u64 addr)
+{
+	int i;
+	u64 end = addr;
+	struct early_res *r;
+
+	for (i = 0; i < max_early_res; i++) {
+		r = &early_res[i];
+		if (addr >= r->start && addr < r->end) {
+			end = r->end;
+			break;
+		}
+	}
+	return end;
+}
+
+u64 __init early_res_next_reserved(u64 addr, u64 max)
+{
+	int i;
+	struct early_res *r;
+	u64 next_res = max;
+
+	for (i = 0; i < max_early_res && early_res[i].end; i++) {
+		r = &early_res[i];
+		if ((r->start >= addr) && (r->start < next_res))
+			next_res = r->start;
+	}
+	return next_res;
+}
+
 /*
  * Drop the i-th range from the early reservation map,
  * by copying any higher ranges down one over it, and

Thread overview: 6+ messages
2010-09-17 20:47 Jeremy Fitzhardinge [this message]
2010-09-17 22:47 ` memblock vs early_res Yinghai Lu
2010-09-17 23:11   ` Jeremy Fitzhardinge
2010-09-18  6:10     ` Yinghai Lu
2010-09-18  6:21       ` Jeremy Fitzhardinge
2010-09-22  0:09       ` Jeremy Fitzhardinge
