linux-arch.vger.kernel.org archive mirror
From: Yinghai Lu <yinghai@kernel.org>
To: Ingo Molnar <mingo@elte.hu>, Thomas Gleixner <tglx@linutronix.de>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	David Miller <davem@davemloft.net>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Linus Torvalds <torvalds@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
	linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	Yinghai Lu <yinghai@kernel.org>
Subject: [PATCH 06/24] lmb: Add find_lmb_area()
Date: Fri, 26 Mar 2010 15:21:36 -0700	[thread overview]
Message-ID: <1269642114-16588-7-git-send-email-yinghai@kernel.org> (raw)
In-Reply-To: <1269642114-16588-1-git-send-email-yinghai@kernel.org>

find_lmb_area() tries to find a free area of the given size and alignment
within the specified range [start, end).

We need it to find a proper buffer for a new lmb.reserved.region array.

It also makes it easier for x86 to use lmb: the x86 early_res code uses a
find/reserve pattern instead of alloc.

find_lmb_area() will honor the goal, i.e. the preferred start of the range.

When we need a temporary buffer for a range array during range work, using
lmb_alloc() would force us to add fixup code for that buffer afterwards,
because it would already be recorded in lmb.reserved.  With find_lmb_area()
the caller decides when (and whether) to reserve the area; see the caller
sketch below.
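
A minimal caller sketch of the find/reserve pattern (hypothetical sizes and
caller; assumes the existing lmb_reserve()/lmb_free() interfaces):

	/*
	 * Sketch only: look for a 16 KiB, 4 KiB-aligned free area below
	 * 1 MiB.  Unlike lmb_alloc(), nothing is put into lmb.reserved
	 * until the caller asks for it, so a temporary buffer can be
	 * dropped again without any fixup of lmb.reserved.
	 */
	u64 addr = find_lmb_area(0, 1ULL << 20, 16 * 1024, 4096);

	if (addr == -1ULL)
		panic("cannot find space for temporary range array");

	lmb_reserve(addr, 16 * 1024);
	/* ... use the buffer as a temporary range array ... */
	lmb_free(addr, 16 * 1024);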

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
---
 include/linux/lmb.h |    4 ++
 mm/lmb.c            |   81 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 85 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index e14ea8d..05234bd 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -83,6 +83,10 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
 	       lmb_size_pages(type, region_nr);
 }
 
+u64 __find_lmb_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+			 u64 size, u64 align);
+u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
+
 #include <asm/lmb.h>
 
 #endif /* __KERNEL__ */
diff --git a/mm/lmb.c b/mm/lmb.c
index 65b62dc..d5d5dc4 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -11,9 +11,13 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/types.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/range.h>
 
 #define LMB_ALLOC_ANYWHERE	0
 
@@ -546,3 +550,80 @@ int lmb_find(struct lmb_property *res)
 	}
 	return -1;
 }
+
+static int __init find_overlapped_early(u64 start, u64 end)
+{
+	int i;
+	struct lmb_property *r;
+
+	for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+		r = &lmb.reserved.region[i];
+		if (end > r->base && start < (r->base + r->size))
+			break;
+	}
+
+	return i;
+}
+
+/* Check for already reserved areas */
+static inline bool __init bad_addr(u64 *addrp, u64 size, u64 align)
+{
+	int i;
+	u64 addr = *addrp;
+	bool changed = false;
+	struct lmb_property *r;
+again:
+	i = find_overlapped_early(addr, addr + size);
+	r = &lmb.reserved.region[i];
+	if (i < lmb.reserved.cnt && r->size) {
+		*addrp = addr = round_up(r->base + r->size, align);
+		changed = true;
+		goto again;
+	}
+	return changed;
+}
+
+u64 __init __find_lmb_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+				 u64 size, u64 align)
+{
+	u64 addr, last;
+
+	addr = round_up(ei_start, align);
+	if (addr < start)
+		addr = round_up(start, align);
+	if (addr >= ei_last)
+		goto out;
+	while (bad_addr(&addr, size, align) && addr+size <= ei_last)
+		;
+	last = addr + size;
+	if (last > ei_last)
+		goto out;
+	if (last > end)
+		goto out;
+
+	return addr;
+
+out:
+	return -1ULL;
+}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ */
+u64 __init find_lmb_area(u64 start, u64 end, u64 size, u64 align)
+{
+	int i;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		u64 ei_start = lmb.memory.region[i].base;
+		u64 ei_last = ei_start + lmb.memory.region[i].size;
+		u64 addr;
+
+		addr = __find_lmb_area(ei_start, ei_last, start, end,
+					 size, align);
+
+		if (addr != -1ULL)
+			return addr;
+	}
+	return -1ULL;
+}
-- 
1.6.4.2

Thread overview: 72+ messages
2010-03-26 22:21 [PATCH -v8 00/24] use lmb with x86 Yinghai Lu
2010-03-26 22:21 ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 01/24] x86: Make smp_locks end with page alignment Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 23:33   ` Johannes Weiner
2010-03-26 22:21 ` [PATCH 02/24] x86: Make sure free_init_pages() free pages in boundary Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 23:06   ` Johannes Weiner
2010-03-26 23:06     ` Johannes Weiner
2010-03-26 23:45     ` Yinghai Lu
2010-03-27  0:07       ` Johannes Weiner
2010-03-27  0:07         ` Johannes Weiner
2010-03-27  0:17         ` Yinghai Lu
2010-03-27  0:17           ` Yinghai Lu
2010-03-27  1:19         ` [PATCH -v3] " Yinghai Lu
2010-03-28  0:03           ` Johannes Weiner
2010-03-28  0:50             ` Yinghai Lu
2010-03-28  1:01               ` Johannes Weiner
2010-03-28  1:58                 ` Yinghai Lu
2010-03-28 23:35                   ` [patch v5] x86: page-alin initrd area size Johannes Weiner
2010-03-29  0:41                     ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 03/24] x86: Do not free zero sized per cpu areas Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 23:42   ` Johannes Weiner
2010-03-26 23:42     ` Johannes Weiner
2010-03-26 23:49     ` Yinghai Lu
2010-03-26 23:49       ` Yinghai Lu
2010-03-26 23:54       ` Johannes Weiner
2010-03-26 23:56         ` Yinghai Lu
2010-03-27  1:18   ` [PATCH -v8] " Yinghai Lu
2010-03-26 22:21 ` [PATCH 04/24] lmb: Move lmb.c to mm/ Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 05/24] lmb: Seperate region array from lmb_region struct Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` Yinghai Lu [this message]
2010-03-26 22:21   ` [PATCH 06/24] lmb: Add find_lmb_area() Yinghai Lu
2010-03-26 22:21 ` [PATCH 07/24] lmb: Add reserve_lmb/free_lmb Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 08/24] lmb: Add find_lmb_area_size() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 09/24] bootmem, x86: Add weak version of reserve_bootmem_generic Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 10/24] lmb: Add lmb_to_bootmem() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 11/24] lmb: Add get_free_all_memory_range() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 12/24] lmb: Add lmb_register_active_regions() and lmb_hole_size() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 13/24] lmb: Prepare to include linux/lmb.h in core file Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 14/24] lmb: Add find_memory_core_early() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 15/24] lmb: Add find_lmb_area_node() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 16/24] lmb: Add lmb_free_memory_size() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 17/24] lmb: Add lmb_memory_size() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 18/24] lmb: Add reserve_lmb_overlap_ok() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 19/24] x86: Add sanitize_e820_map() Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 20/24] x86: Use lmb to replace early_res Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 21/24] x86: Replace e820_/_early string with lmb_ Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 22/24] x86: Remove not used early_res code Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 23/24] x86, lmb: Use lmb_memory_size()/lmb_free_memory_size() to get correct dma_reserve Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
2010-03-26 22:21 ` [PATCH 24/24] x86: Align e820 ram range to page Yinghai Lu
2010-03-26 22:21   ` Yinghai Lu
