linux-arch.vger.kernel.org archive mirror
From: Yinghai Lu <yinghai@kernel.org>
To: Ingo Molnar <mingo@elte.hu>, Thomas Gleixner <tglx@linutronix.de>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	David Miller <davem@davemloft.net>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Linus Torvalds <torvalds@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
	linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	Yinghai Lu <yinghai@kernel.org>
Subject: [PATCH 07/31] lmb: Add reserve_lmb/free_lmb
Date: Sun, 28 Mar 2010 19:43:00 -0700	[thread overview]
Message-ID: <1269830604-26214-8-git-send-email-yinghai@kernel.org> (raw)
In-Reply-To: <1269830604-26214-1-git-send-email-yinghai@kernel.org>

reserve_lmb()/free_lmb() check whether the region array is big enough before
adding the new range.

__check_and_double_region_array() doubles the region array when too few spare
slots are left (no more than max(nr_regions/8, 2)); for example, a 128-entry
array is doubled once 16 or fewer spare slots remain.
find_lmb_area() is used to find a good position for the new region array.
The old array is then copied into the new one.

Arch code should provide get_max_mapped() (a __weak default based on
max_low_pfn is included), so the new array is placed at an already mapped,
accessible address.
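
A minimal usage sketch (illustrative only, not part of this patch) of how arch
setup code could call the new helpers; example_arch_setup() and the address
values are made up for the example:

	#include <linux/init.h>
	#include <linux/lmb.h>

	void __init example_arch_setup(void)
	{
		/* Register a RAM range; the memory array is grown first if needed */
		add_lmb_memory(0x100000ULL, 0x80000000ULL);

		/* Reserve the kernel image; reserve_lmb() checks the reserved
		 * array and doubles it before adding the new range */
		reserve_lmb(0x1000000ULL, 0x2000000ULL, "KERNEL");

		/* Return part of that reservation; punching holes can consume
		 * slots too, so free_lmb() re-checks the array as well */
		free_lmb(0x1800000ULL, 0x2000000ULL);
	}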

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
---
 include/linux/lmb.h |    4 ++
 mm/lmb.c            |   89 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 93 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 05234bd..95ae3f4 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -83,9 +83,13 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
 	       lmb_size_pages(type, region_nr);
 }
 
+void reserve_lmb(u64 start, u64 end, char *name);
+void free_lmb(u64 start, u64 end);
+void add_lmb_memory(u64 start, u64 end);
 u64 __find_lmb_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
 			 u64 size, u64 align);
 u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
+u64 get_max_mapped(void);
 
 #include <asm/lmb.h>
 
diff --git a/mm/lmb.c b/mm/lmb.c
index d5d5dc4..9798458 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -551,6 +551,95 @@ int lmb_find(struct lmb_property *res)
 	return -1;
 }
 
+u64 __weak __init get_max_mapped(void)
+{
+	u64 end = max_low_pfn;
+
+	end <<= PAGE_SHIFT;
+
+	return end;
+}
+
+static void __init __check_and_double_region_array(struct lmb_region *type,
+			 struct lmb_property *static_region,
+			 u64 ex_start, u64 ex_end)
+{
+	u64 start, end, size, mem;
+	struct lmb_property *new, *old;
+	unsigned long rgnsz = type->nr_regions;
+
+	/* Do we have enough slots left ? */
+	if ((rgnsz - type->cnt) > max_t(unsigned long, rgnsz/8, 2))
+		return;
+
+	old = type->region;
+	/* Double the array size */
+	size = sizeof(struct lmb_property) * rgnsz * 2;
+	if (old == static_region)
+		start = 0;
+	else
+		start = __pa(old) + sizeof(struct lmb_property) * rgnsz;
+	end = ex_start;
+	mem = -1ULL;
+	if (start + size < end)
+		mem = find_lmb_area(start, end, size, sizeof(struct lmb_property));
+	if (mem == -1ULL) {
+		start = ex_end;
+		end = get_max_mapped();
+		if (start + size < end)
+			mem = find_lmb_area(start, end, size, sizeof(struct lmb_property));
+	}
+	if (mem == -1ULL)
+		panic("can not find more space for lmb.reserved.region array");
+
+	new = __va(mem);
+	/* Copy old to new */
+	memcpy(&new[0], &old[0], sizeof(struct lmb_property) * rgnsz);
+	memset(&new[rgnsz], 0, sizeof(struct lmb_property) * rgnsz);
+
+	memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
+	type->region = new;
+	type->nr_regions = rgnsz * 2;
+	printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
+		type->nr_regions, mem, mem + size - 1);
+
+	/* Reserve new array and free old one */
+	lmb_reserve(mem, sizeof(struct lmb_property) * rgnsz * 2);
+	if (old != static_region)
+		lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
+}
+
+void __init add_lmb_memory(u64 start, u64 end)
+{
+	__check_and_double_region_array(&lmb.memory, &lmb_memory_region[0], start, end);
+	lmb_add(start, end - start);
+}
+
+void __init reserve_lmb(u64 start, u64 end, char *name)
+{
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, "reserve_lmb: wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
+	__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0], start, end);
+	lmb_reserve(start, end - start);
+}
+
+void __init free_lmb(u64 start, u64 end)
+{
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, "free_lmb: wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
+	/* keep punching hole, could run out of slots too */
+	__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0], start, end);
+	lmb_free(start, end - start);
+}
+
 static int __init find_overlapped_early(u64 start, u64 end)
 {
 	int i;
-- 
1.6.4.2

Thread overview: 104+ messages
2010-03-29  2:42 [PATCH -v9 00/31] use lmb with x86 Yinghai Lu
2010-03-29  2:42 ` Yinghai Lu
2010-03-29  2:42 ` [PATCH 01/31] x86: Make smp_locks end with page alignment Yinghai Lu
2010-03-29  2:42   ` Yinghai Lu
2010-03-29  2:42 ` [PATCH 02/31] x86: Make sure free_init_pages() free pages in boundary Yinghai Lu
2010-03-29  2:42   ` Yinghai Lu
2010-03-29 16:57   ` Ingo Molnar
2010-03-29 16:59     ` Yinghai Lu
2010-03-29  2:42 ` [PATCH 03/31] x86: Do not free zero sized per cpu areas Yinghai Lu
2010-03-29  2:42   ` Yinghai Lu
2010-03-29  2:42 ` [PATCH 04/31] lmb: Move lmb.c to mm/ Yinghai Lu
2010-03-29  2:42   ` Yinghai Lu
2010-03-29  2:42 ` [PATCH 05/31] lmb: Seperate region array from lmb_region struct Yinghai Lu
2010-03-29  2:42   ` Yinghai Lu
2010-03-29  2:42 ` [PATCH 06/31] lmb: Add find_lmb_area() Yinghai Lu
2010-03-29  2:42   ` Yinghai Lu
2010-03-29  2:43 ` Yinghai Lu [this message]
2010-03-29  2:43   ` [PATCH 07/31] lmb: Add reserve_lmb/free_lmb Yinghai Lu
2010-03-29 12:22   ` Michael Ellerman
2010-03-29 16:45     ` Yinghai Lu
2010-03-29 22:20       ` Michael Ellerman
2010-03-29 22:37         ` Yinghai Lu
2010-03-29 23:34           ` Benjamin Herrenschmidt
2010-03-29 23:53             ` Yinghai Lu
2010-03-30  4:13               ` Michael Ellerman
2010-03-30  4:21                 ` Yinghai Lu
2010-03-30  5:29                   ` Benjamin Herrenschmidt
2010-03-30  5:40                     ` Yinghai Lu
2010-03-30  5:24               ` Benjamin Herrenschmidt
2010-03-29 23:31         ` Benjamin Herrenschmidt
2010-03-30  0:03           ` Yinghai Lu
2010-03-30  5:26             ` Benjamin Herrenschmidt
2010-03-30  6:12               ` Yinghai Lu
2010-03-30  6:46                 ` Michael Ellerman
2010-03-30  6:57                   ` Yinghai Lu
2010-03-30 21:30                 ` Benjamin Herrenschmidt
2010-03-30 22:42                   ` Yinghai Lu
2010-03-29 21:49     ` Benjamin Herrenschmidt
2010-03-29  2:43 ` [PATCH 08/31] lmb: Add find_lmb_area_size() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 09/31] bootmem, x86: Add weak version of reserve_bootmem_generic Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 10/31] lmb: Add lmb_to_bootmem() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 11/31] lmb: Add get_free_all_memory_range() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 12/31] lmb: Add lmb_register_active_regions() and lmb_hole_size() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 13/31] lmb: Prepare to include linux/lmb.h in core file Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 14/31] lmb: Add find_memory_core_early() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 15/31] lmb: Add find_lmb_area_node() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 16/31] lmb: Add lmb_free_memory_size() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 17/31] lmb: Add lmb_memory_size() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 18/31] lmb: Add reserve_lmb_overlap_ok() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 19/31] lmb: Use lmb_debug to control debug message print out Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 20/31] lmb: Add __NOT_KEEP_LMB to put lmb code to .init Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29 12:07   ` Michael Ellerman
2010-03-29 16:20     ` Yinghai Lu
2010-03-29 18:34       ` David Miller
2010-03-29 18:39         ` Yinghai Lu
2010-03-29 19:11           ` David Miller
2010-03-29 21:44             ` Benjamin Herrenschmidt
2010-03-29  2:43 ` [PATCH 21/31] x86: Add sanitize_e820_map() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 22/31] x86: Use lmb to replace early_res Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 23/31] x86: Replace e820_/_early string with lmb_ Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 24/31] x86: Remove not used early_res code Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 25/31] x86, lmb: Use lmb_memory_size()/lmb_free_memory_size() to get correct dma_reserve Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 26/31] x86: Align e820 ram range to page Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 27/31] x86: Use wake_system_ram_range instead of e820_any_mapped in agp path Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 28/31] x86: Add get_centaur_ram_top() Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 29/31] x86: Make e820_any_mapped to __init Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 30/31] x86: Use walk_system_ream_range()instead of e820.map directly Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29  2:43 ` [PATCH 31/31] x86: make e820 to be __initdata Yinghai Lu
2010-03-29  2:43   ` Yinghai Lu
2010-03-29 12:22 ` [PATCH -v9 00/31] use lmb with x86 Michael Ellerman
2010-03-29 12:22   ` Michael Ellerman
2010-03-29 16:52   ` Yinghai Lu
2010-03-29 20:39     ` Yinghai Lu
2010-03-29 22:10     ` Michael Ellerman
2010-03-29 22:17       ` Yinghai Lu
2010-03-29 22:32         ` Michael Ellerman
2010-03-29 22:32           ` Michael Ellerman
2010-03-29 22:41           ` Yinghai Lu
2010-03-29 23:33           ` Benjamin Herrenschmidt
2010-03-29 23:29         ` Benjamin Herrenschmidt
2010-03-29 23:47           ` Yinghai Lu
