From mboxrd@z Thu Jan 1 00:00:00 1970 From: Yinghai Lu Subject: [PATCH 08/35] x86,lmb: Add lmb_reserve_area/lmb_free_area Date: Thu, 13 May 2010 17:19:29 -0700 Message-ID: <1273796396-29649-9-git-send-email-yinghai@kernel.org> References: <1273796396-29649-1-git-send-email-yinghai@kernel.org> Return-path: In-Reply-To: <1273796396-29649-1-git-send-email-yinghai@kernel.org> Sender: linux-kernel-owner@vger.kernel.org To: Ingo Molnar , Thomas Gleixner , "H. Peter Anvin" , Andrew Morton Cc: David Miller , Benjamin Herrenschmidt , Linus Torvalds , Johannes Weiner , linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org, Yinghai Lu List-Id: linux-arch.vger.kernel.org These are wrappers for the core versions; they take start/end/name instead of base/size. We could add more debug printout later. -v2: changed get_max_mapped() to lmb.default_alloc_limit, per Michael Ellerman and Ben; renamed to lmb_reserve_area and lmb_free_area, per Michael Ellerman. -v3: call check_and_double after reserve/free, so we can avoid using find_lmb_area. 
Suggested by Michael Ellerman Signed-off-by: Yinghai Lu --- arch/x86/include/asm/lmb.h | 4 ++++ arch/x86/mm/lmb.c | 27 +++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 0 deletions(-) diff --git a/arch/x86/include/asm/lmb.h b/arch/x86/include/asm/lmb.h index 2f06714..bc85c1e 100644 --- a/arch/x86/include/asm/lmb.h +++ b/arch/x86/include/asm/lmb.h @@ -6,4 +6,8 @@ u64 lmb_find_area_size(u64 start, u64 *sizep, u64 align); void lmb_to_bootmem(u64 start, u64 end); +void lmb_reserve_area(u64 start, u64 end, char *name); +void lmb_free_area(u64 start, u64 end); +void lmb_add_memory(u64 start, u64 end); + #endif diff --git a/arch/x86/mm/lmb.c b/arch/x86/mm/lmb.c index 37a05e2..0dbe05b 100644 --- a/arch/x86/mm/lmb.c +++ b/arch/x86/mm/lmb.c @@ -117,3 +117,30 @@ void __init lmb_to_bootmem(u64 start, u64 end) lmb.reserved.cnt = 0; } #endif + +void __init lmb_add_memory(u64 start, u64 end) +{ + lmb_add_region(&lmb.memory, start, end - start); +} + +void __init lmb_reserve_area(u64 start, u64 end, char *name) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "lmb_reserve_area: wrong range [%#llx, %#llx]\n", start, end)) + return; + + lmb_add_region(&lmb.reserved, start, end - start); +} + +void __init lmb_free_area(u64 start, u64 end) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "lmb_free_area: wrong range [%#llx, %#llx]\n", start, end)) + return; + + lmb_free(start, end - start); +} -- 1.6.4.2 From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from rcsinet10.oracle.com ([148.87.113.121]:53030 "EHLO rcsinet10.oracle.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753832Ab0ENAYJ (ORCPT ); Thu, 13 May 2010 20:24:09 -0400 From: Yinghai Lu Subject: [PATCH 08/35] x86,lmb: Add lmb_reserve_area/lmb_free_area Date: Thu, 13 May 2010 17:19:29 -0700 Message-ID: <1273796396-29649-9-git-send-email-yinghai@kernel.org> In-Reply-To: <1273796396-29649-1-git-send-email-yinghai@kernel.org> References: 
<1273796396-29649-1-git-send-email-yinghai@kernel.org> Sender: linux-arch-owner@vger.kernel.org List-ID: To: Ingo Molnar , Thomas Gleixner , "H. Peter Anvin" , Andrew Morton Cc: David Miller , Benjamin Herrenschmidt , Linus Torvalds , Johannes Weiner , linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org, Yinghai Lu Message-ID: <20100514001929.vNaA8AEKOf01CadcVHxpT9RjoaEMVwDSKYf1dmtSCAI@z> they are wrappers for core versions. they are taking start/end/name instead of base/size. could add more debug print out -v2: change get_max_mapped() to lmb.default_alloc_limit according to Michael Ellerman and Ben change to lmb_reserve_area and lmb_free_area according to Michael Ellerman -v3: call check_and_double after reserve/free, so could avoid to use find_lmb_area. Suggested by Michael Ellerman Signed-off-by: Yinghai Lu --- arch/x86/include/asm/lmb.h | 4 ++++ arch/x86/mm/lmb.c | 27 +++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 0 deletions(-) diff --git a/arch/x86/include/asm/lmb.h b/arch/x86/include/asm/lmb.h index 2f06714..bc85c1e 100644 --- a/arch/x86/include/asm/lmb.h +++ b/arch/x86/include/asm/lmb.h @@ -6,4 +6,8 @@ u64 lmb_find_area_size(u64 start, u64 *sizep, u64 align); void lmb_to_bootmem(u64 start, u64 end); +void lmb_reserve_area(u64 start, u64 end, char *name); +void lmb_free_area(u64 start, u64 end); +void lmb_add_memory(u64 start, u64 end); + #endif diff --git a/arch/x86/mm/lmb.c b/arch/x86/mm/lmb.c index 37a05e2..0dbe05b 100644 --- a/arch/x86/mm/lmb.c +++ b/arch/x86/mm/lmb.c @@ -117,3 +117,30 @@ void __init lmb_to_bootmem(u64 start, u64 end) lmb.reserved.cnt = 0; } #endif + +void __init lmb_add_memory(u64 start, u64 end) +{ + lmb_add_region(&lmb.memory, start, end - start); +} + +void __init lmb_reserve_area(u64 start, u64 end, char *name) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "lmb_reserve_area: wrong range [%#llx, %#llx]\n", start, end)) + return; + + lmb_add_region(&lmb.reserved, start, end - start); +} + +void 
__init lmb_free_area(u64 start, u64 end) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "lmb_free_area: wrong range [%#llx, %#llx]\n", start, end)) + return; + + lmb_free(start, end - start); +} -- 1.6.4.2