From mboxrd@z Thu Jan 1 00:00:00 1970
From: Yinghai Lu
Subject: [PATCH 08/37] x86, lmb: Add lmb_find_area_size()
Date: Fri, 14 May 2010 12:45:34 -0700
Message-ID: <1273866363-14249-9-git-send-email-yinghai@kernel.org>
References: <1273866363-14249-1-git-send-email-yinghai@kernel.org>
Return-path:
Received: from rcsinet10.oracle.com ([148.87.113.121]:59870 "EHLO rcsinet10.oracle.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1759685Ab0ENTsj (ORCPT ); Fri, 14 May 2010 15:48:39 -0400
In-Reply-To: <1273866363-14249-1-git-send-email-yinghai@kernel.org>
Sender: linux-arch-owner@vger.kernel.org
List-ID:
To: Ingo Molnar, Thomas Gleixner, "H. Peter Anvin", Andrew Morton,
	David Miller, Benjamin Herrenschmidt
Cc: Linus Torvalds, Johannes Weiner, linux-kernel@vger.kernel.org,
	linux-arch@vger.kernel.org, Yinghai Lu

lmb_find_area_size() returns the start of a free range and passes its
size back through *sizep.  It will be used to find free ranges for
early_memtest and the memory corruption check.

Do not mix it into the generic lib/lmb.c yet.

Signed-off-by: Yinghai Lu
---
 arch/x86/include/asm/lmb.h |    8 ++++
 arch/x86/mm/Makefile       |    2 +
 arch/x86/mm/lmb.c          |   88 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 98 insertions(+), 0 deletions(-)
 create mode 100644 arch/x86/include/asm/lmb.h
 create mode 100644 arch/x86/mm/lmb.c

diff --git a/arch/x86/include/asm/lmb.h b/arch/x86/include/asm/lmb.h
new file mode 100644
index 0000000..aa3a66e
--- /dev/null
+++ b/arch/x86/include/asm/lmb.h
@@ -0,0 +1,8 @@
+#ifndef _X86_LMB_H
+#define _X86_LMB_H
+
+#define ARCH_DISCARD_LMB
+
+u64 lmb_find_area_size(u64 start, u64 *sizep, u64 align);
+
+#endif
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index a4c7683..8ab0505 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA)		+= numa.o numa_$(BITS).o
 obj-$(CONFIG_K8_NUMA)		+= k8topology_64.o
 obj-$(CONFIG_ACPI_NUMA)		+= srat_$(BITS).o
 
+obj-$(CONFIG_HAVE_LMB)		+= lmb.o
+
 obj-$(CONFIG_MEMTEST)		+= memtest.o
diff --git a/arch/x86/mm/lmb.c b/arch/x86/mm/lmb.c
new file mode 100644
index 0000000..9d26eed
--- /dev/null
+++ b/arch/x86/mm/lmb.c
@@ -0,0 +1,88 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Check for already reserved areas */
+static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
+{
+	int i;
+	u64 addr = *addrp, last;
+	u64 size = *sizep;
+	bool changed = false;
+again:
+	last = addr + size;
+	for (i = 0; i < lmb.reserved.cnt && lmb.reserved.regions[i].size; i++) {
+		struct lmb_region *r = &lmb.reserved.regions[i];
+		if (last > r->base && addr < r->base) {
+			size = r->base - addr;
+			changed = true;
+			goto again;
+		}
+		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
+			addr = round_up(r->base + r->size, align);
+			size = last - addr;
+			changed = true;
+			goto again;
+		}
+		if (last <= (r->base + r->size) && addr >= r->base) {
+			(*sizep)++;
+			return false;
+		}
+	}
+	if (changed) {
+		*addrp = addr;
+		*sizep = size;
+	}
+	return changed;
+}
+
+static u64 __init __lmb_find_area_size(u64 ei_start, u64 ei_last, u64 start,
+			 u64 *sizep, u64 align)
+{
+	u64 addr, last;
+
+	addr = round_up(ei_start, align);
+	if (addr < start)
+		addr = round_up(start, align);
+	if (addr >= ei_last)
+		goto out;
+	*sizep = ei_last - addr;
+	while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
+		;
+	last = addr + *sizep;
+	if (last > ei_last)
+		goto out;
+
+	return addr;
+
+out:
+	return LMB_ERROR;
+}
+
+/*
+ * Find next free range after *start
+ */
+u64 __init lmb_find_area_size(u64 start, u64 *sizep, u64 align)
+{
+	int i;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		u64 ei_start = lmb.memory.regions[i].base;
+		u64 ei_last = ei_start + lmb.memory.regions[i].size;
+		u64 addr;
+
+		addr = __lmb_find_area_size(ei_start, ei_last, start,
+					 sizep, align);
+
+		if (addr != LMB_ERROR)
+			return addr;
+	}
+
+	return LMB_ERROR;
+}
+
-- 
1.6.4.2
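
For readers of the series, here is a minimal sketch of how a caller such as
early_memtest could walk every free range with the new helper. It assumes
only the interface added above (lmb_find_area_size() and LMB_ERROR); the
walk_free_ranges() wrapper and the memtest_one_range() callback are
hypothetical names used for illustration, not part of this patch.

/*
 * Illustrative sketch only: visit each free range in [start, end) by
 * repeatedly asking lmb_find_area_size() for the next free block.
 * memtest_one_range() stands in for whatever per-range work the
 * caller performs.
 */
static void __init walk_free_ranges(u64 start, u64 end)
{
	u64 size = 0;

	while (start < end) {
		u64 addr = lmb_find_area_size(start, &size, PAGE_SIZE);

		if (addr == LMB_ERROR || addr >= end || !size)
			break;

		if (addr + size > end)
			size = end - addr;

		memtest_one_range(addr, size);		/* hypothetical */

		/* resume the search just past this range */
		start = addr + size;
	}
}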
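As a concrete illustration of the clipping done by bad_addr_size(), consider
a single reserved region and a candidate window; the numbers below are made
up for the example and do not come from the patch.

/*
 * Assumed state: one reserved region [0x100000, 0x180000), candidate
 * window addr = 0x0, size = 0x400000, align = 0x1000.
 *
 * Pass 1: last = 0x400000 extends past the reserved base while addr
 *         is below it (last > r->base && addr < r->base), so the
 *         window is trimmed to end at the region: size = 0x100000,
 *         then goto again.
 * Pass 2: the trimmed window [0x0, 0x100000) no longer intersects
 *         the reserved region, so the loop falls through, the clipped
 *         addr/size are written back, and the function returns true.
 *
 * __lmb_find_area_size() keeps calling bad_addr_size() until it
 * returns false, i.e. until the window is stable, and then reports
 * addr = 0x0 with *sizep = 0x100000 as the free range.
 */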