* [PATCH v2, part3 01/12] mm: enhance free_reserved_area() to support poisoning memory with zero
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:37 ` Geert Uytterhoeven
2013-03-16 17:03 ` [PATCH v2, part3 02/12] mm/ARM64: kill poison_init_mem() Jiang Liu
` (10 subsequent siblings)
11 siblings, 1 reply; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, Geert Uytterhoeven
Address comments from the last round of code review:
1) Enhance free_reserved_area() to support poisoning memory with zero.
This could be used to get rid of poison_init_mem() on ARM64; see the
sketch after this list.
2) Other minor fixes.
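For illustration, the calling convention assumed by this series can be
sketched as below (illustrative only, not part of the diff; any value
outside [0, UCHAR_MAX], e.g. -1, disables poisoning):

	/*
	 * poison in [0, UCHAR_MAX]: freed pages are memset() with that byte;
	 * any other value (e.g. -1): pages are freed without poisoning.
	 */
	free_initmem_default(-1);			/* no poisoning */
	free_initmem_default(0);			/* poison with zero */
	free_initmem_default(POISON_FREE_INITMEM);	/* debug pattern */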
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
---
arch/alpha/kernel/sys_nautilus.c | 2 +-
arch/alpha/mm/init.c | 4 ++--
arch/arm/mm/init.c | 8 ++++----
arch/arm64/mm/init.c | 4 ++--
arch/avr32/mm/init.c | 4 ++--
arch/blackfin/mm/init.c | 4 ++--
arch/c6x/mm/init.c | 4 ++--
arch/cris/mm/init.c | 2 +-
arch/frv/mm/init.c | 4 ++--
arch/h8300/mm/init.c | 4 ++--
arch/ia64/mm/init.c | 2 +-
arch/m32r/mm/init.c | 4 ++--
arch/m68k/mm/init.c | 4 ++--
arch/microblaze/mm/init.c | 4 ++--
arch/openrisc/mm/init.c | 4 ++--
arch/parisc/mm/init.c | 4 ++--
arch/powerpc/kernel/kvm.c | 2 +-
arch/powerpc/mm/mem.c | 2 +-
arch/s390/mm/init.c | 2 +-
arch/sh/mm/init.c | 4 ++--
arch/um/kernel/mem.c | 2 +-
arch/unicore32/mm/init.c | 4 ++--
arch/xtensa/mm/init.c | 4 ++--
include/linux/mm.h | 11 ++++++-----
mm/page_alloc.c | 4 ++--
25 files changed, 49 insertions(+), 48 deletions(-)
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index a8b9d66..7f4e7bf 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -234,7 +234,7 @@ nautilus_init_pci(void)
memtop = pci_mem;
if (memtop > alpha_mv.min_mem_address) {
free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address),
- (unsigned long)__va(memtop), 0, NULL);
+ (unsigned long)__va(memtop), -1, NULL);
printk("nautilus_init_pci: %ldk freed\n",
(memtop - alpha_mv.min_mem_address) >> 10);
}
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 0ba85ee..9930837 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -319,13 +319,13 @@ mem_init(void)
void
free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
void
free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9a5cdc0..e922456 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -600,7 +600,7 @@ void __init mem_init(void)
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
- free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
+ free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, -1, NULL);
#endif
free_highpages();
@@ -728,12 +728,12 @@ void free_initmem(void)
extern char __tcm_start, __tcm_end;
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
- free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
+ free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())
- free_initmem_default(0);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -744,7 +744,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f497ca7..e58dd7f 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -387,7 +387,7 @@ void __init mem_init(void)
void free_initmem(void)
{
poison_init_mem(__init_begin, __init_end - __init_begin);
- free_initmem_default(0);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -398,7 +398,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
}
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index e66e840..871f98a 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -148,12 +148,12 @@ void __init mem_init(void)
void free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 82d01a7..e64286b 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -133,7 +133,7 @@ void __init mem_init(void)
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifndef CONFIG_MPU
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
#endif
}
#endif
@@ -141,7 +141,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
void __init_refok free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
- free_initmem_default(0);
+ free_initmem_default(-1);
if (memory_start == (unsigned long)(&__init_end))
memory_start = (unsigned long)(&__init_begin);
#endif
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index a9fcd89..ce39b48 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -77,11 +77,11 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
void __init free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index 9ac8094..8fec263 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -65,5 +65,5 @@ mem_init(void)
void
free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index dee354f..a421948 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -162,7 +162,7 @@ void __init mem_init(void)
void free_initmem(void)
{
#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
- free_initmem_default(0);
+ free_initmem_default(-1);
#endif
} /* end free_initmem() */
@@ -173,6 +173,6 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
} /* end free_initrd_mem() */
#endif
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index ff349d7..488e2a3 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -161,7 +161,7 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
@@ -169,7 +169,7 @@ void
free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
- free_initmem_default(0);
+ free_initmem_default(-1);
#endif
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d1fe4b4..941568a 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@ free_initmem (void)
{
free_reserved_area((unsigned long)ia64_imva(__init_begin),
(unsigned long)ia64_imva(__init_end),
- 0, "unused kernel");
+ -1, "unused kernel");
}
void __init
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index ab4cbce..58ea4d6 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -181,7 +181,7 @@ void __init mem_init(void)
*======================================================================*/
void free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -191,6 +191,6 @@ void free_initmem(void)
*======================================================================*/
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index b5c1ab1..291ca0f 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -110,7 +110,7 @@ void __init paging_init(void)
void free_initmem(void)
{
#ifndef CONFIG_MMU_SUN3
- free_initmem_default(0);
+ free_initmem_default(-1);
#endif /* CONFIG_MMU_SUN3 */
}
@@ -202,6 +202,6 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 170aacd..6b8711d 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -235,13 +235,13 @@ void __init setup_memory(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
void free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
void __init mem_init(void)
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index b0139a7..3b9f017 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -250,11 +250,11 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
void free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 157b931..27f3f88 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -532,7 +532,7 @@ void free_initmem(void)
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
- num_physpages += free_initmem_default(0);
+ num_physpages += free_initmem_default(-1);
/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -1099,6 +1099,6 @@ void flush_tlb_all(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- num_physpages += free_reserved_area(start, end, 0, "initrd");
+ num_physpages += free_reserved_area(start, end, -1, "initrd");
}
#endif
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 6782221..4d3e37d 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -756,7 +756,7 @@ static __init void kvm_free_tmp(void)
end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
/* Free the tmp space we don't need */
- free_reserved_area(start, end, 0, NULL);
+ free_reserved_area(start, end, -1, NULL);
}
static int __init kvm_guest_init(void)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 740c835..3974615 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -411,7 +411,7 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 70bda9e..554b3e1 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -156,7 +156,7 @@ void __init mem_init(void)
void free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 20f9ead..31294f1 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -499,13 +499,13 @@ void __init mem_init(void)
void free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 9df292b..1e84189 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -244,7 +244,7 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 63df12d..5614b05 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -476,7 +476,7 @@ void __init mem_init(void)
void free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -486,7 +486,7 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index bba125b..6f70647 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -214,11 +214,11 @@ extern int initrd_is_mapped;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (initrd_is_mapped)
- free_reserved_area(start, end, 0, "initrd");
+ free_reserved_area(start, end, -1, "initrd");
}
#endif
void free_initmem(void)
{
- free_initmem_default(0);
+ free_initmem_default(-1);
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a1aeaec..add5f0a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1297,7 +1297,7 @@ extern void free_initmem(void);
/*
* Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
* into the buddy system. The freed pages will be poisoned with pattern
- * "poison" if it's non-zero.
+ * "poison" if it's within range [0, UCHAR_MAX].
* Return pages freed into the buddy system.
*/
extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
@@ -1337,15 +1337,16 @@ static inline void mark_page_reserved(struct page *page)
/*
* Default method to free all the __init memory into the buddy system.
- * The freed pages will be poisoned with pattern "poison" if it is
- * non-zero. Return pages freed into the buddy system.
+ * The freed pages will be poisoned with pattern "poison" if it's within
+ * range [0, UCHAR_MAX].
+ * Return pages freed into the buddy system.
*/
static inline unsigned long free_initmem_default(int poison)
{
extern char __init_begin[], __init_end[];
- return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin) ,
- ((unsigned long)&__init_end) & PAGE_MASK,
+ return free_reserved_area((unsigned long)&__init_begin ,
+ (unsigned long)&__init_end,
poison, "unused kernel");
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aea4b9b..8c7b366 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5130,13 +5130,13 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end,
pos = start = PAGE_ALIGN(start);
end &= PAGE_MASK;
for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
- if (poison)
+ if ((unsigned int)poison <= 0xFF)
memset((void *)pos, poison, PAGE_SIZE);
free_reserved_page(virt_to_page(pos));
}
if (pages && s)
- pr_info("Freeing %s memory: %ldK (%lx - %lx)\n",
+ pr_info("Freeing %s memory: %ldKiB (%lx - %lx)\n",
s, pages << (PAGE_SHIFT - 10), start, end);
return pages;
--
1.7.9.5
* Re: [PATCH v2, part3 01/12] mm: enhance free_reserved_area() to support poisoning memory with zero
2013-03-16 17:03 ` [PATCH v2, part3 01/12] mm: enhance free_reserved_area() to support poisoning memory with zero Jiang Liu
@ 2013-03-16 17:37 ` Geert Uytterhoeven
2013-03-16 17:48 ` Jiang Liu
0 siblings, 1 reply; 17+ messages in thread
From: Geert Uytterhoeven @ 2013-03-16 17:37 UTC (permalink / raw)
To: Jiang Liu
Cc: Andrew Morton, David Rientjes, Jiang Liu, Wen Congyang,
Mel Gorman, Minchan Kim, KAMEZAWA Hiroyuki, Michal Hocko,
Jianguo Wu, linux-mm, linux-kernel
On Sat, Mar 16, 2013 at 6:03 PM, Jiang Liu <liuj97@gmail.com> wrote:
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -5130,13 +5130,13 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end,
> pos = start = PAGE_ALIGN(start);
> end &= PAGE_MASK;
> for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
> - if (poison)
> + if ((unsigned int)poison <= 0xFF)
"if (poison >= 0)"? No cast needed.
> memset((void *)pos, poison, PAGE_SIZE);
Gr{oetje,eeting}s,
Geert
--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org
In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds
* Re: [PATCH v2, part3 01/12] mm: enhance free_reserved_area() to support poisoning memory with zero
2013-03-16 17:37 ` Geert Uytterhoeven
@ 2013-03-16 17:48 ` Jiang Liu
0 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:48 UTC (permalink / raw)
To: Geert Uytterhoeven
Cc: Andrew Morton, David Rientjes, Jiang Liu, Wen Congyang,
Mel Gorman, Minchan Kim, KAMEZAWA Hiroyuki, Michal Hocko,
Jianguo Wu, linux-mm, linux-kernel
On 03/17/2013 01:37 AM, Geert Uytterhoeven wrote:
> On Sat, Mar 16, 2013 at 6:03 PM, Jiang Liu <liuj97@gmail.com> wrote:
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -5130,13 +5130,13 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end,
>> pos = start = PAGE_ALIGN(start);
>> end &= PAGE_MASK;
>> for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
>> - if (poison)
>> + if ((unsigned int)poison <= 0xFF)
>
> "if (poison >= 0)"? No cast needed.
Hi Geert,
We constrain valid inputs to [0, UCHAR_MAX], so the code follows the comment below.
/*
* Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
* into the buddy system. The freed pages will be poisoned with pattern
* "poison" if it's within range [0, UCHAR_MAX].
* Return pages freed into the buddy system.
*/
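Note that the single unsigned comparison is equivalent to the
two-comparison form below, so one test rejects both negative values and
values above UCHAR_MAX (illustrative sketch only):

	/* same effect as: if ((unsigned int)poison <= 0xFF) */
	if (poison >= 0 && poison <= 0xFF)
		memset((void *)pos, poison, PAGE_SIZE);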
Regards!
Gerry
>
>> memset((void *)pos, poison, PAGE_SIZE);
>
> Gr{oetje,eeting}s,
>
> Geert
>
> --
> Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org
>
> In personal conversations with technical people, I call myself a hacker. But
> when I'm talking to journalists I just say "programmer" or something like that.
> -- Linus Torvalds
>
* [PATCH v2, part3 02/12] mm/ARM64: kill poison_init_mem()
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 01/12] mm: enhance free_reserved_area() to support poisoning memory with zero Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-17 21:46 ` Will Deacon
2013-03-16 17:03 ` [PATCH v2, part3 03/12] mm/x86: use common helper functions to further simplify code Jiang Liu
` (9 subsequent siblings)
11 siblings, 1 reply; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, Catalin Marinas, Will Deacon, linux-arm-kernel
Use free_reserved_area() to kill poison_init_mem() on ARM64.
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
---
arch/arm64/mm/init.c | 17 +++--------------
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index e58dd7f..b87bdb8 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -197,14 +197,6 @@ void __init bootmem_init(void)
max_pfn = max_low_pfn = max;
}
-/*
- * Poison init memory with an undefined instruction (0x0).
- */
-static inline void poison_init_mem(void *s, size_t count)
-{
- memset(s, 0, count);
-}
-
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
@@ -386,8 +378,7 @@ void __init mem_init(void)
void free_initmem(void)
{
- poison_init_mem(__init_begin, __init_end - __init_begin);
- free_initmem_default(-1);
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -396,10 +387,8 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- free_reserved_area(start, end, -1, "initrd");
- }
+ if (!keep_initrd)
+ free_reserved_area(start, end, 0, "initrd");
}
static int __init keepinitrd_setup(char *__unused)
--
1.7.9.5
* Re: [PATCH v2, part3 02/12] mm/ARM64: kill poison_init_mem()
2013-03-16 17:03 ` [PATCH v2, part3 02/12] mm/ARM64: kill poison_init_mem() Jiang Liu
@ 2013-03-17 21:46 ` Will Deacon
2013-03-18 17:21 ` Jiang Liu
0 siblings, 1 reply; 17+ messages in thread
From: Will Deacon @ 2013-03-17 21:46 UTC (permalink / raw)
To: Jiang Liu
Cc: Andrew Morton, David Rientjes, Jiang Liu, Wen Congyang,
Mel Gorman, Minchan Kim, KAMEZAWA Hiroyuki, Michal Hocko,
Jianguo Wu, linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Catalin Marinas, linux-arm-kernel@lists.infradead.org
On Sat, Mar 16, 2013 at 05:03:23PM +0000, Jiang Liu wrote:
> Use free_reserved_area() to kill poison_init_mem() on ARM64.
>
> Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will.deacon@arm.com>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: linux-kernel@vger.kernel.org
> ---
> arch/arm64/mm/init.c | 17 +++--------------
> 1 file changed, 3 insertions(+), 14 deletions(-)
>
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index e58dd7f..b87bdb8 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -197,14 +197,6 @@ void __init bootmem_init(void)
> max_pfn = max_low_pfn = max;
> }
>
> -/*
> - * Poison init memory with an undefined instruction (0x0).
> - */
> -static inline void poison_init_mem(void *s, size_t count)
> -{
> - memset(s, 0, count);
> -}
> -
> #ifndef CONFIG_SPARSEMEM_VMEMMAP
> static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
> {
> @@ -386,8 +378,7 @@ void __init mem_init(void)
>
> void free_initmem(void)
> {
> - poison_init_mem(__init_begin, __init_end - __init_begin);
> - free_initmem_default(-1);
> + free_initmem_default(0);
This change looks unrelated to $subject. We should probably just poison with
0 from the outset, when free_initmem_default is introduced.
Will
* Re: [PATCH v2, part3 02/12] mm/ARM64: kill poison_init_mem()
2013-03-17 21:46 ` Will Deacon
@ 2013-03-18 17:21 ` Jiang Liu
0 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-18 17:21 UTC (permalink / raw)
To: Will Deacon
Cc: Andrew Morton, David Rientjes, Jiang Liu, Wen Congyang,
Mel Gorman, Minchan Kim, KAMEZAWA Hiroyuki, Michal Hocko,
Jianguo Wu, linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Catalin Marinas, linux-arm-kernel@lists.infradead.org
On 03/18/2013 05:46 AM, Will Deacon wrote:
> On Sat, Mar 16, 2013 at 05:03:23PM +0000, Jiang Liu wrote:
>> Use free_reserved_area() to kill poison_init_mem() on ARM64.
>>
>> Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
>> Cc: Catalin Marinas <catalin.marinas@arm.com>
>> Cc: Will Deacon <will.deacon@arm.com>
>> Cc: linux-arm-kernel@lists.infradead.org
>> Cc: linux-kernel@vger.kernel.org
>> ---
>> arch/arm64/mm/init.c | 17 +++--------------
>> 1 file changed, 3 insertions(+), 14 deletions(-)
>>
>> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
>> index e58dd7f..b87bdb8 100644
>> --- a/arch/arm64/mm/init.c
>> +++ b/arch/arm64/mm/init.c
>> @@ -197,14 +197,6 @@ void __init bootmem_init(void)
>> max_pfn = max_low_pfn = max;
>> }
>>
>> -/*
>> - * Poison init memory with an undefined instruction (0x0).
>> - */
>> -static inline void poison_init_mem(void *s, size_t count)
>> -{
>> - memset(s, 0, count);
>> -}
>> -
>> #ifndef CONFIG_SPARSEMEM_VMEMMAP
>> static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
>> {
>> @@ -386,8 +378,7 @@ void __init mem_init(void)
>>
>> void free_initmem(void)
>> {
>> - poison_init_mem(__init_begin, __init_end - __init_begin);
>> - free_initmem_default(-1);
>> + free_initmem_default(0);
>
> This change looks unrelated to $subject. We should probably just poison with
> 0 from the outset, when free_initmem_default is introduced.
Hi Will,
As you suggested, this patch should be merged into the patchset which
introduces free_initmem_default(). I plan to merge it in v3, but the v2
patchset has already been merged into the -mm tree, so I generated another
patch against the -mm tree.
free_initmem_default(-1) doesn't poison the freed memory, while
free_initmem_default(0) poisons it with 0, so the latter is needed to
kill poison_init_mem().
Regards!
Gerry
>
> Will
* [PATCH v2, part3 03/12] mm/x86: use common helper functions to further simplify code
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 01/12] mm: enhance free_reserved_area() to support poisoning memory with zero Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 02/12] mm/ARM64: kill poison_init_mem() Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 04/12] mm/tile: use common help functions to free reserved pages Jiang Liu
` (8 subsequent siblings)
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, Thomas Gleixner, Ingo Molnar, H. Peter Anvin, x86,
Yinghai Lu, Tang Chen
Use common helper functions to free reserved pages.
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: linux-kernel@vger.kernel.org
---
arch/x86/mm/init.c | 14 +++-----------
arch/x86/mm/init_64.c | 4 ++--
2 files changed, 5 insertions(+), 13 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 1120b82..de63100 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -334,7 +334,6 @@ int devmem_is_allowed(unsigned long pagenr)
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
- unsigned long addr;
unsigned long begin_aligned, end_aligned;
/* Make sure boundaries are page aligned */
@@ -349,8 +348,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
if (begin >= end)
return;
- addr = begin;
-
/*
* If debugging page accesses then do not free this memory but
* mark them not present - any buggy init-section access will
@@ -369,18 +366,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
- printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-
- for (; addr < end; addr += PAGE_SIZE) {
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- free_reserved_page(virt_to_page(addr));
- }
+ free_reserved_area(begin, end, POISON_FREE_INITMEM, what);
#endif
}
void free_initmem(void)
{
- free_init_pages("unused kernel memory",
+ free_init_pages("unused kernel",
(unsigned long)(&__init_begin),
(unsigned long)(&__init_end));
}
@@ -397,7 +389,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
* - relocate_initrd()
* So here We can do PAGE_ALIGN() safely to get partial page to be freed
*/
- free_init_pages("initrd memory", start, PAGE_ALIGN(end));
+ free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 6087e02..05ef3ff 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1152,11 +1152,11 @@ void mark_rodata_ro(void)
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
- free_init_pages("unused kernel memory",
+ free_init_pages("unused kernel",
(unsigned long) page_address(virt_to_page(text_end)),
(unsigned long)
page_address(virt_to_page(rodata_start)));
- free_init_pages("unused kernel memory",
+ free_init_pages("unused kernel",
(unsigned long) page_address(virt_to_page(rodata_end)),
(unsigned long) page_address(virt_to_page(data_start)));
}
--
1.7.9.5
* [PATCH v2, part3 04/12] mm/tile: use common helper functions to free reserved pages
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (2 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 03/12] mm/x86: use common helper functions to further simplify code Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 05/12] mm/powertv: " Jiang Liu
` (7 subsequent siblings)
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, Chris Metcalf
Use common helper functions to free reserved pages.
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: linux-kernel@vger.kernel.org
---
arch/tile/mm/init.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 2749515..ccfeb3f 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -720,7 +720,7 @@ static void __init init_free_pfn_range(unsigned long start, unsigned long end)
}
init_page_count(page);
__free_pages(page, order);
- totalram_pages += count;
+ adjust_managed_page_count(page, count);
page += count;
pfn += count;
@@ -1024,16 +1024,13 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
pte_clear(&init_mm, addr, ptep);
continue;
}
- __ClearPageReserved(page);
- init_page_count(page);
if (pte_huge(*ptep))
BUG_ON(!kdata_huge);
else
set_pte_at(&init_mm, addr, ptep,
pfn_pte(pfn, PAGE_KERNEL));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
+ free_reserved_page(page);
}
pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
--
1.7.9.5
* [PATCH v2, part3 05/12] mm/powertv: use common helper functions to free reserved pages
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (3 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 04/12] mm/tile: use common helper functions to free reserved pages Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 06/12] mm/acornfb: " Jiang Liu
` (6 subsequent siblings)
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, Ralf Baechle, linux-mips
Use common helper functions to free reserved pages.
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
---
arch/mips/powertv/asic/asic_devices.c | 13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index bce1872..746b06d 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -529,17 +529,8 @@ EXPORT_SYMBOL(asic_resource_get);
*/
void platform_release_memory(void *ptr, int size)
{
- unsigned long addr;
- unsigned long end;
-
- addr = ((unsigned long)ptr + (PAGE_SIZE - 1)) & PAGE_MASK;
- end = ((unsigned long)ptr + size) & PAGE_MASK;
-
- for (; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(__va(addr)));
- init_page_count(virt_to_page(__va(addr)));
- free_page((unsigned long)__va(addr));
- }
+ free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size),
+ -1, NULL);
}
EXPORT_SYMBOL(platform_release_memory);
--
1.7.9.5
* [PATCH v2, part3 06/12] mm/acornfb: use common helper functions to free reserved pages
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (4 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 05/12] mm/powertv: " Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 07/12] mm: accurately calculate zone->managed_pages for highmem zones Jiang Liu
` (5 subsequent siblings)
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, linux-fbdev
Use common helper functions to free reserved pages.
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Florian Tobias Schandinat
Cc: linux-fbdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
---
drivers/video/acornfb.c | 28 ++--------------------------
1 file changed, 2 insertions(+), 26 deletions(-)
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 6488a73..344f2bb 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -1188,32 +1188,8 @@ static int acornfb_detect_monitortype(void)
static inline void
free_unused_pages(unsigned int virtual_start, unsigned int virtual_end)
{
- int mb_freed = 0;
-
- /*
- * Align addresses
- */
- virtual_start = PAGE_ALIGN(virtual_start);
- virtual_end = PAGE_ALIGN(virtual_end);
-
- while (virtual_start < virtual_end) {
- struct page *page;
-
- /*
- * Clear page reserved bit,
- * set count to 1, and free
- * the page.
- */
- page = virt_to_page(virtual_start);
- ClearPageReserved(page);
- init_page_count(page);
- free_page(virtual_start);
-
- virtual_start += PAGE_SIZE;
- mb_freed += PAGE_SIZE / 1024;
- }
-
- printk("acornfb: freed %dK memory\n", mb_freed);
+ free_reserved_area(virtual_start, PAGE_ALIGN(virtual_end),
+ -1, "acornfb");
}
static int acornfb_probe(struct platform_device *dev)
--
1.7.9.5
* [PATCH v2, part3 07/12] mm: accurately calculate zone->managed_pages for highmem zones
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (5 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 06/12] mm/acornfb: " Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 08/12] mm: use a dedicated lock to protect totalram_pages and zone->managed_pages Jiang Liu
` (4 subsequent siblings)
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, H. Peter Anvin, x86, Tejun Heo, Joonsoo Kim,
Yinghai Lu, Marek Szyprowski
Commit "mm: introduce new field 'managed_pages' to struct zone" assumes
that all highmem pages will be freed into the buddy system by function
mem_init(). But that's not always true: some architectures may reserve
some highmem pages during boot. For example, PPC may allocate highmem
pages for gigantic HugeTLB pages, and several architectures have code to
check the PageReserved flag to exclude highmem pages allocated during
boot when freeing highmem pages into the buddy system.
So do the same thing for highmem zones as for normal zones, which is to:
1) reset all zones' managed_pages to zero in mem_init()
2) recalculate managed_pages for each zone when freeing pages into the
buddy system, as sketched below.
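Sketched (illustrative only, not part of the diff), the two steps amount
to:

	/* 1) in mem_init(), before freeing pages into the buddy system */
	for_each_zone(zone)
		zone->managed_pages = 0;

	/* 2) for every page handed to the buddy system afterwards */
	page_zone(page)->managed_pages++;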
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Cc: Tejun Heo <tj@kernel.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
---
arch/x86/mm/highmem_32.c | 6 ++++++
include/linux/bootmem.h | 1 +
mm/bootmem.c | 32 ++++++++++++++++++--------------
mm/nobootmem.c | 32 +++++++++++++++++---------------
mm/page_alloc.c | 1 +
5 files changed, 43 insertions(+), 29 deletions(-)
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 252b8f5..4500142 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,6 +1,7 @@
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
+#include <linux/bootmem.h>
void *kmap(struct page *page)
{
@@ -121,6 +122,11 @@ void __init set_highmem_pages_init(void)
struct zone *zone;
int nid;
+ /*
+ * Explicitly reset zone->managed_pages because set_highmem_pages_init()
+ * is invoked before free_all_bootmem()
+ */
+ reset_all_zones_managed_pages();
for_each_zone(zone) {
unsigned long zone_start_pfn, zone_end_pfn;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 190ff06..b0806c9 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -47,6 +47,7 @@ extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
extern unsigned long free_low_memory_core_early(int nodeid);
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
extern unsigned long free_all_bootmem(void);
+extern void reset_all_zones_managed_pages(void);
extern void free_bootmem_node(pg_data_t *pgdat,
unsigned long addr,
diff --git a/mm/bootmem.c b/mm/bootmem.c
index b93376c..7f71b31 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -241,20 +241,26 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
return count;
}
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
{
struct zone *z;
- /*
- * In free_area_init_core(), highmem zone's managed_pages is set to
- * present_pages, and bootmem allocator doesn't allocate from highmem
- * zones. So there's no need to recalculate managed_pages because all
- * highmem pages will be managed by the buddy system. Here highmem
- * zone also includes highmem movable zone.
- */
+ if (reset_managed_pages_done)
+ return;
+
for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
- if (!is_highmem(z))
- z->managed_pages = 0;
+ z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_managed_pages(pgdat);
+ reset_managed_pages_done = 1;
}
/**
@@ -266,7 +272,7 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
- reset_node_lowmem_managed_pages(pgdat);
+ reset_node_managed_pages(pgdat);
return free_all_bootmem_core(pgdat->bdata);
}
@@ -279,10 +285,8 @@ unsigned long __init free_all_bootmem(void)
{
unsigned long total_pages = 0;
bootmem_data_t *bdata;
- struct pglist_data *pgdat;
- for_each_online_pgdat(pgdat)
- reset_node_lowmem_managed_pages(pgdat);
+ reset_all_zones_managed_pages();
list_for_each_entry(bdata, &bdata_list, list)
total_pages += free_all_bootmem_core(bdata);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index b8294fc..3db0f67 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -137,20 +137,25 @@ unsigned long __init free_low_memory_core_early(int nodeid)
return count;
}
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
{
struct zone *z;
- /*
- * In free_area_init_core(), highmem zone's managed_pages is set to
- * present_pages, and bootmem allocator doesn't allocate from highmem
- * zones. So there's no need to recalculate managed_pages because all
- * highmem pages will be managed by the buddy system. Here highmem
- * zone also includes highmem movable zone.
- */
+ if (reset_managed_pages_done)
+ return;
for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
- if (!is_highmem(z))
- z->managed_pages = 0;
+ z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_managed_pages(pgdat);
+ reset_managed_pages_done = 1;
}
/**
@@ -162,7 +167,7 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
- reset_node_lowmem_managed_pages(pgdat);
+ reset_node_managed_pages(pgdat);
/* free_low_memory_core_early(MAX_NUMNODES) will be called later */
return 0;
@@ -175,10 +180,7 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
*/
unsigned long __init free_all_bootmem(void)
{
- struct pglist_data *pgdat;
-
- for_each_online_pgdat(pgdat)
- reset_node_lowmem_managed_pages(pgdat);
+ reset_all_zones_managed_pages();
/*
* We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8c7b366..23bb4d7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5147,6 +5147,7 @@ void free_highmem_page(struct page *page)
{
__free_reserved_page(page);
totalram_pages++;
+ page_zone(page)->managed_pages++;
totalhigh_pages++;
}
#endif
--
1.7.9.5
* [PATCH v2, part3 08/12] mm: use a dedicated lock to protect totalram_pages and zone->managed_pages
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (6 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 07/12] mm: accurately calculate zone->managed_pages for highmem zones Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 09/12] mm: avoid using __free_pages_bootmem() at runtime Jiang Liu
` (3 subsequent siblings)
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, Michel Lespinasse, Rik van Riel
Currently lock_memory_hotplug()/unlock_memory_hotplug() are used to
protect totalram_pages and zone->managed_pages. Other than the memory
hotplug driver, totalram_pages and zone->managed_pages may be modified
by the Xen balloon and virtio_balloon drivers etc. at runtime. For those
cases, the memory hotplug lock is a little too heavy, so introduce a
dedicated lock to protect them.
Now the locking rules for totalram_pages and zone->managed_pages have
been simplified to:
1) no locking for read accesses because they are unsigned long.
2) no locking for write accesses at boot time in single-threaded context.
3) serialize write accesses at run time by managed_page_count_lock.
Also adjust zone->managed_pages when dealing with reserved pages.
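As a sketch (illustrative; the calling context is hypothetical), a
runtime caller then only needs:

	/* a page leaves or re-enters the buddy system at runtime */
	adjust_managed_page_count(page, -1);	/* reserve one page */
	adjust_managed_page_count(page, 1);	/* give it back */

Both zone->managed_pages and totalram_pages are updated under
managed_page_count_lock, so such callers need no extra locking.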
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: linux-mm@kvack.org (open list:MEMORY MANAGEMENT)
Cc: linux-kernel@vger.kernel.org (open list)
---
include/linux/mm.h | 6 ++----
include/linux/mmzone.h | 14 ++++++++++----
mm/page_alloc.c | 19 +++++++++++++++++++
3 files changed, 31 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index add5f0a..f1c0827 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1302,6 +1302,7 @@ extern void free_initmem(void);
*/
extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
int poison, char *s);
+
#ifdef CONFIG_HIGHMEM
/*
* Free a highmem page into the buddy system, adjusting totalhigh_pages
@@ -1310,10 +1311,7 @@ extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
extern void free_highmem_page(struct page *page);
#endif
-static inline void adjust_managed_page_count(struct page *page, long count)
-{
- totalram_pages += count;
-}
+extern void adjust_managed_page_count(struct page *page, long count);
/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ab20a60..deb7377 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -474,10 +474,16 @@ struct zone {
* frequently read in proximity to zone->lock. It's good to
* give them a chance of being in the same cacheline.
*
- * Write access to present_pages and managed_pages at runtime should
- * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
- * Any reader who can't tolerant drift of present_pages and
- * managed_pages should hold memory hotplug lock to get a stable value.
+ * Write access to present_pages at runtime should be protected by
+ * lock_memory_hotplug()/unlock_memory_hotplug(). Any reader who can't
+ * tolerant drift of present_pages should hold memory hotplug lock to
+ * get a stable value.
+ *
+ * Read access to managed_pages should be safe because it's unsigned
+ * long. Write access to zone->managed_pages and totalram_pages are
+ * protected by managed_page_count_lock at runtime. Basically only
+ * adjust_managed_page_count() should be used instead of directly
+ * touching zone->managed_pages and totalram_pages.
*/
unsigned long spanned_pages;
unsigned long present_pages;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 23bb4d7..9d08d06 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -98,6 +98,9 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
};
EXPORT_SYMBOL(node_states);
+/* Protect totalram_pages and zone->managed_pages */
+static DEFINE_SPINLOCK(managed_page_count_lock);
+
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
@@ -5122,6 +5125,22 @@ early_param("movablecore", cmdline_parse_movablecore);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+void adjust_managed_page_count(struct page *page, long count)
+{
+ bool lock = (system_state != SYSTEM_BOOTING);
+
+ /* No need to acquire the lock during boot */
+ if (lock)
+ spin_lock(&managed_page_count_lock);
+
+ page_zone(page)->managed_pages += count;
+ totalram_pages += count;
+
+ if (lock)
+ spin_unlock(&managed_page_count_lock);
+}
+EXPORT_SYMBOL(adjust_managed_page_count);
+
unsigned long free_reserved_area(unsigned long start, unsigned long end,
int poison, char *s)
{
--
1.7.9.5
* [PATCH v2, part3 09/12] mm: avoid using __free_pages_bootmem() at runtime
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (7 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 08/12] mm: use a dedicated lock to protect totalram_pages and zone->managed_pages Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 10/12] mm: correctly update zone->managed_pages Jiang Liu
` (2 subsequent siblings)
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
Yinghai Lu, x86, Tang Chen, Yasuaki Ishimatsu
Avoid using __free_pages_bootmem() at runtime, so we can easily
manage totalram_pages and zone->managed_pages. With this change applied,
__free_pages_bootmem() is only used by bootmem.c and nobootmem.c at
boot time, so mark it as __init. And other callers of
__free_pages_bootmem() have been switched to free_reserved_page(),
which handles totalram_pages and zone->managed_pages in a safe way.
This patch also fixes a bug in free_pagetable() for x86_64, which should
increase zone->managed_pages instead of zone->present_pages when freeing
reserved pages.
And free_reserved_page() protects totalram_pages and zone->managed_pages
with managed_page_count_lock, so remove the redundant ppb_lock in
put_page_bootmem(). This makes the locking rules much clearer.
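For reference, the helper these callers are switched to boils down to
the shape below (a sketch inferred from the description above, not part
of this diff):

	/* Free a reserved page into the buddy system, so it gets managed. */
	static inline void free_reserved_page(struct page *page)
	{
		__free_reserved_page(page);		/* clear Reserved, free it */
		adjust_managed_page_count(page, 1);	/* safe counter update */
	}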
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: x86@kernel.org
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
---
arch/x86/mm/init_64.c | 18 ++----------------
mm/memory_hotplug.c | 16 ++--------------
mm/page_alloc.c | 9 +--------
3 files changed, 5 insertions(+), 38 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 05ef3ff..5e19126 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -694,36 +694,22 @@ EXPORT_SYMBOL_GPL(arch_add_memory);
static void __meminit free_pagetable(struct page *page, int order)
{
- struct zone *zone;
- bool bootmem = false;
unsigned long magic;
unsigned int nr_pages = 1 << order;
/* bootmem page has reserved flag */
if (PageReserved(page)) {
__ClearPageReserved(page);
- bootmem = true;
magic = (unsigned long)page->lru.next;
if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
while (nr_pages--)
put_page_bootmem(page++);
} else
- __free_pages_bootmem(page, order);
+ while (nr_pages--)
+ free_reserved_page(page++);
} else
free_pages((unsigned long)page_address(page), order);
-
- /*
- * SECTION_INFO pages and MIX_SECTION_INFO pages
- * are all allocated by bootmem.
- */
- if (bootmem) {
- zone = page_zone(page);
- zone_span_writelock(zone);
- zone->present_pages += nr_pages;
- zone_span_writeunlock(zone);
- totalram_pages += nr_pages;
- }
}
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 40c3c78..a1debd0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -101,12 +101,9 @@ void get_page_bootmem(unsigned long info, struct page *page,
atomic_inc(&page->_count);
}
-/* reference to __meminit __free_pages_bootmem is valid
- * so use __ref to tell modpost not to generate a warning */
-void __ref put_page_bootmem(struct page *page)
+void put_page_bootmem(struct page *page)
{
unsigned long type;
- static DEFINE_MUTEX(ppb_lock);
type = (unsigned long) page->lru.next;
BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
@@ -116,17 +113,8 @@ void __ref put_page_bootmem(struct page *page)
ClearPagePrivate(page);
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
-
- /*
- * Please refer to comment for __free_pages_bootmem()
- * for why we serialize here.
- */
- mutex_lock(&ppb_lock);
- __free_pages_bootmem(page, 0);
- mutex_unlock(&ppb_lock);
- totalram_pages++;
+ free_reserved_page(page);
}
-
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9d08d06..b0ca2b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -740,14 +740,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}
-/*
- * Read access to zone->managed_pages is safe because it's unsigned long,
- * but we still need to serialize writers. Currently all callers of
- * __free_pages_bootmem() except put_page_bootmem() should only be used
- * at boot time. So for shorter boot time, we shift the burden to
- * put_page_bootmem() to serialize writers.
- */
-void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
unsigned int loop;
--
1.7.9.5
* [PATCH v2, part3 10/12] mm: correctly update zone->managed_pages
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (8 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 09/12] mm: avoid using __free_pages_bootmem() at runtime Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 11/12] mm: report available pages as "MemTotal" for each NUMA node Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 12/12] mm: concentrate adjusting of totalram_pages Jiang Liu
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel, Chris Metcalf, Rusty Russell, Michael S. Tsirkin,
Konrad Rzeszutek Wilk, Jeremy Fitzhardinge, Tang Chen,
Yasuaki Ishimatsu, virtualization, xen-devel
Enhance adjust_managed_page_count() to also adjust totalhigh_pages for
highmem pages, and convert code that directly modifies totalram_pages
to use adjust_managed_page_count() instead, because it adjusts
totalram_pages, totalhigh_pages and zone->managed_pages together in a
safe way.
Remove inc_totalhigh_pages() and dec_totalhigh_pages() from the
xen/balloon driver because adjust_managed_page_count() now adjusts
totalhigh_pages itself.
This patch also fixes two bugs:
1) the virtio_balloon driver now adjusts totalhigh_pages when
reserving/unreserving pages.
2) memory_hotplug.c now adjusts totalhigh_pages when hot-removing
memory.
The modifications of totalram_pages in arch/powerpc/platforms/pseries/cmm.c
still need to be dealt with, but that requires help from PPC experts.
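For context, a minimal sketch of adjust_managed_page_count() after this
change, assuming the 'lock' flag visible in the mm/page_alloc.c hunk
below skips locking while the system is still booting single-threaded:
void adjust_managed_page_count(struct page *page, long count)
{
	bool lock = (system_state != SYSTEM_BOOTING);	/* assumption */
	if (lock)
		spin_lock(&managed_page_count_lock);
	page_zone(page)->managed_pages += count;
	totalram_pages += count;
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages += count;
#endif
	if (lock)
		spin_unlock(&managed_page_count_lock);
}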
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: linux-kernel@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xensource.com
Cc: linux-mm@kvack.org
---
drivers/virtio/virtio_balloon.c | 8 +++++---
drivers/xen/balloon.c | 23 +++++------------------
mm/hugetlb.c | 2 +-
mm/memory_hotplug.c | 15 +++------------
mm/page_alloc.c | 10 +++++-----
5 files changed, 19 insertions(+), 39 deletions(-)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 797e1c7..4828c90 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -148,7 +148,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
}
set_page_pfns(vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
- totalram_pages--;
+ adjust_managed_page_count(page, -1);
}
/* Did we get any? */
@@ -160,11 +160,13 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
{
unsigned int i;
+ struct page *page;
/* Find pfns pointing at start of each page, get pages and free them. */
for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- balloon_page_free(balloon_pfn_to_page(pfns[i]));
- totalram_pages++;
+ page = balloon_pfn_to_page(pfns[i]);
+ balloon_page_free(page);
+ adjust_managed_page_count(page, 1);
}
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a56776d..eefba39 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -89,14 +89,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
-#ifdef CONFIG_HIGHMEM
-#define inc_totalhigh_pages() (totalhigh_pages++)
-#define dec_totalhigh_pages() (totalhigh_pages--)
-#else
-#define inc_totalhigh_pages() do {} while (0)
-#define dec_totalhigh_pages() do {} while (0)
-#endif
-
/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
@@ -132,9 +124,7 @@ static void __balloon_append(struct page *page)
static void balloon_append(struct page *page)
{
__balloon_append(page);
- if (PageHighMem(page))
- dec_totalhigh_pages();
- totalram_pages--;
+ adjust_managed_page_count(page, -1);
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -151,13 +141,12 @@ static struct page *balloon_retrieve(bool prefer_highmem)
page = list_entry(ballooned_pages.next, struct page, lru);
list_del(&page->lru);
- if (PageHighMem(page)) {
+ if (PageHighMem(page))
balloon_stats.balloon_high--;
- inc_totalhigh_pages();
- } else
+ else
balloon_stats.balloon_low--;
- totalram_pages++;
+ adjust_managed_page_count(page, 1);
return page;
}
@@ -372,9 +361,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
#endif
/* Relinquish the page back to the allocator. */
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
+ __free_reserved_page(page);
}
balloon_stats.current_pages += rc;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 344e7fe..4fb9a04 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1246,7 +1246,7 @@ static void __init gather_bootmem_prealloc(void)
* side-effects, like CommitLimit going negative.
*/
if (h->order > (MAX_ORDER - 1))
- totalram_pages += 1 << h->order;
+ adjust_managed_page_count(page, 1 << h->order);
}
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a1debd0..97454b3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -760,20 +760,13 @@ EXPORT_SYMBOL_GPL(__online_page_set_limits);
void __online_page_increment_counters(struct page *page)
{
- totalram_pages++;
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page))
- totalhigh_pages++;
-#endif
+ adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);
void __online_page_free(struct page *page)
{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
+ __free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);
@@ -970,7 +963,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
return ret;
}
- zone->managed_pages += onlined_pages;
zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
if (onlined_pages) {
@@ -1554,10 +1546,9 @@ repeat:
/* reset pagetype flags and makes migrate type to be MOVABLE */
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
/* removal success */
- zone->managed_pages -= offlined_pages;
+ adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
zone->present_pages -= offlined_pages;
zone->zone_pgdat->node_present_pages -= offlined_pages;
- totalram_pages -= offlined_pages;
init_per_zone_wmark_min();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b0ca2b7..6834104 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -775,11 +775,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
set_page_refcounted(page);
set_pageblock_migratetype(page, MIGRATE_CMA);
__free_pages(page, pageblock_order);
- totalram_pages += pageblock_nr_pages;
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page))
- totalhigh_pages += pageblock_nr_pages;
-#endif
+ adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif
@@ -5128,6 +5124,10 @@ void adjust_managed_page_count(struct page *page, long count)
page_zone(page)->managed_pages += count;
totalram_pages += count;
+#ifdef CONFIG_HIGHMEM
+ if (PageHighMem(page))
+ totalhigh_pages += count;
+#endif
if (lock)
spin_unlock(&managed_page_count_lock);
--
1.7.9.5
* [PATCH v2, part3 11/12] mm: report available pages as "MemTotal" for each NUMA node
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (9 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 10/12] mm: correctly update zone->managed_pages Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
2013-03-16 17:03 ` [PATCH v2, part3 12/12] mm: concentrate adjusting of totalram_pages Jiang Liu
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel
As reported in https://bugzilla.kernel.org/show_bug.cgi?id=53501,
"MemTotal" from /proc/meminfo reports memory pages managed by the buddy
system (managed_pages), but "MemTotal" from /sys/.../node/nodex/meminfo
reports physical pages present (present_pages) within the NUMA node.
managed_pages and present_pages differ because of pages held by the
bootmem allocator and other reserved pages.
And Documentation/filesystems/proc.txt says
MemTotal: Total usable ram (i.e. physical ram minus a few reserved
bits and the kernel binary code)
So change /sys/.../node/nodex/meminfo to report available pages within
the node as "MemTotal".
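A minimal sketch of the intended accounting, mirroring the
mm/page_alloc.c hunk below; an illustrative helper, not part of the
patch itself:
/* Sum buddy-managed pages over all zones of a NUMA node; this is the
 * value "MemTotal" in /sys/.../node/nodeX/meminfo should report. */
static unsigned long node_managed_pages(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long managed = 0;
	int zone_type;
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed += pgdat->node_zones[zone_type].managed_pages;
	return managed;
}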
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Reported-by: sworddragon2@aol.com
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
---
mm/page_alloc.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6834104..ebfb042 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2878,9 +2878,13 @@ EXPORT_SYMBOL(si_meminfo);
#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
+ int zone_type; /* needs to be signed */
+ unsigned long managed_pages = 0;
pg_data_t *pgdat = NODE_DATA(nid);
- val->totalram = pgdat->node_present_pages;
+ for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+ managed_pages += pgdat->node_zones[zone_type].managed_pages;
+ val->totalram = managed_pages;
val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
--
1.7.9.5
* [PATCH v2, part3 12/12] mm: concentrate adjusting of totalram_pages
2013-03-16 17:03 [PATCH v2, part3 00/12] accurately calculate zone->managed_pages Jiang Liu
` (10 preceding siblings ...)
2013-03-16 17:03 ` [PATCH v2, part3 11/12] mm: report available pages as "MemTotal" for each NUMA node Jiang Liu
@ 2013-03-16 17:03 ` Jiang Liu
11 siblings, 0 replies; 17+ messages in thread
From: Jiang Liu @ 2013-03-16 17:03 UTC (permalink / raw)
To: Andrew Morton, David Rientjes
Cc: Jiang Liu, Wen Congyang, Mel Gorman, Minchan Kim,
KAMEZAWA Hiroyuki, Michal Hocko, Jianguo Wu, linux-mm,
linux-kernel
Concentrate the code that modifies totalram_pages into the mm core, so
arch memory initialization code doesn't need to take care of it. With
these changes applied, only the following mm core functions modify the
global variable totalram_pages:
free_bootmem_late(), free_all_bootmem(), free_all_bootmem_node(),
adjust_managed_page_count().
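After this change the pattern in a typical arch mem_init() reduces to
the following sketch (hypothetical arch, details elided), since the core
now updates totalram_pages inside free_all_bootmem() itself:
void __init mem_init(void)
{
	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
	/* The mm core accounts the freed pages in totalram_pages now,
	 * so the return value can simply be ignored. */
	free_all_bootmem();
}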
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
---
arch/alpha/mm/init.c | 2 +-
arch/alpha/mm/numa.c | 2 +-
arch/arm/mm/init.c | 3 +--
arch/arm64/mm/init.c | 2 +-
arch/avr32/mm/init.c | 2 --
arch/blackfin/mm/init.c | 2 +-
arch/c6x/mm/init.c | 2 +-
arch/cris/mm/init.c | 2 +-
arch/frv/mm/init.c | 2 +-
arch/h8300/mm/init.c | 2 +-
arch/hexagon/mm/init.c | 3 +--
arch/ia64/mm/init.c | 2 +-
arch/m32r/mm/init.c | 2 +-
arch/m68k/mm/init.c | 4 ++--
arch/microblaze/mm/init.c | 2 +-
arch/mips/mm/init.c | 2 +-
arch/mips/sgi-ip27/ip27-memory.c | 2 +-
arch/mn10300/mm/init.c | 2 +-
arch/openrisc/mm/init.c | 2 +-
arch/parisc/mm/init.c | 4 ++--
arch/powerpc/mm/mem.c | 5 ++---
arch/s390/mm/init.c | 2 +-
arch/score/mm/init.c | 2 +-
arch/sh/mm/init.c | 2 +-
arch/sparc/mm/init_32.c | 3 +--
arch/sparc/mm/init_64.c | 10 ++++------
arch/tile/mm/init.c | 2 +-
arch/um/kernel/mem.c | 2 +-
arch/unicore32/mm/init.c | 2 +-
arch/x86/mm/init_32.c | 2 +-
arch/x86/mm/init_64.c | 2 +-
arch/xtensa/mm/init.c | 2 +-
mm/bootmem.c | 9 ++++++++-
mm/nobootmem.c | 7 ++++++-
34 files changed, 51 insertions(+), 47 deletions(-)
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 9930837..ca07a97 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -309,7 +309,7 @@ void __init
mem_init(void)
{
max_mapnr = num_physpages = max_low_pfn;
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
printk_memory_info();
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 3388504..857452c 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -334,7 +334,7 @@ void __init mem_init(void)
/*
* This will free up the bootmem, ie, slot 0 memory
*/
- totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
+ free_all_bootmem_node(NODE_DATA(nid));
pfn = NODE_DATA(nid)->node_start_pfn;
for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e922456..5925861 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -595,8 +595,7 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
free_unused_memmap(&meminfo);
-
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b87bdb8..0f2cf5d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -284,7 +284,7 @@ void __init mem_init(void)
free_unused_memmap();
#endif
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
reserved_pages = free_pages = 0;
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 871f98a..7e8d55a 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -117,8 +117,6 @@ void __init mem_init(void)
if (pgdat->node_spanned_pages != 0)
node_pages = free_all_bootmem_node(pgdat);
- totalram_pages += node_pages;
-
for (i = 0; i < node_pages; i++)
if (PageReserved(pgdat->node_mem_map + i))
reservedpages++;
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index e64286b..1cc8607 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -104,7 +104,7 @@ void __init mem_init(void)
printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);
/* This will put all low memory onto the freelists. */
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
reservedpages = 0;
for (tmp = ARCH_PFN_OFFSET; tmp < max_mapnr; tmp++)
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index ce39b48..2c51474 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -64,7 +64,7 @@ void __init mem_init(void)
high_memory = (void *)(memory_end & PAGE_MASK);
/* this will put all memory onto the freelists */
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
codek = (_etext - _stext) >> 10;
datak = (_end - _sdata) >> 10;
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index 8fec263..52b8b56 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -33,7 +33,7 @@ mem_init(void)
max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
/* this will put all memory onto the freelists */
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
reservedpages = 0;
for (tmp = 0; tmp < max_mapnr; tmp++) {
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index a421948..4215822 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -123,7 +123,7 @@ void __init mem_init(void)
int codek = 0, datak = 0;
/* this will put all low memory onto the freelists */
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
#ifdef CONFIG_MMU
for (loop = 0 ; loop < npages ; loop++)
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 488e2a3..22fd869 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -140,7 +140,7 @@ void __init mem_init(void)
max_mapnr = num_physpages = MAP_NR(high_memory);
/* this will put all low memory onto the freelists */
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
codek = (_etext - _stext) >> 10;
datak = (__bss_stop - _sdata) >> 10;
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 69ffcfd..c048d06e 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -69,8 +69,7 @@ unsigned long long kmap_generation;
*/
void __init mem_init(void)
{
- /* No idea where this is actually declared. Seems to evade LXR. */
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
num_physpages = bootmem_lastpg; /* seriously, what? */
printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 941568a..b5b71e8 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -623,7 +623,7 @@ mem_init (void)
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
- totalram_pages += free_all_bootmem_node(pgdat);
+ free_all_bootmem_node(pgdat);
reserved_pages = 0;
efi_memmap_walk(count_reserved_pages, &reserved_pages);
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 58ea4d6..c421c31 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -158,7 +158,7 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
for_each_online_node(nid)
- totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
+ free_all_bootmem_node(NODE_DATA(nid));
reservedpages = reservedpages_count() - hole_pages;
codesize = (unsigned long) &_etext - (unsigned long)&_text;
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 291ca0f..0450989 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -155,11 +155,11 @@ void __init mem_init(void)
int i;
/* this will put all memory onto the freelists */
- totalram_pages = num_physpages = 0;
+ num_physpages = 0;
for_each_online_pgdat(pgdat) {
num_physpages += pgdat->node_present_pages;
- totalram_pages += free_all_bootmem_node(pgdat);
+ free_all_bootmem_node(pgdat);
for (i = 0; i < pgdat->node_spanned_pages; i++) {
struct page *page = pgdat->node_mem_map + i;
char *addr = page_to_virt(page);
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 6b8711d..3a434fd 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -252,7 +252,7 @@ void __init mem_init(void)
high_memory = (void *)__va(memory_start + lowmem_size - 1);
/* this will put all memory onto the freelists */
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
for_each_online_pgdat(pgdat) {
unsigned long i;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2e446a7..c1d7b9f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -373,7 +373,7 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = ram = 0;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index b5ef807..4042e06 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -489,7 +489,7 @@ void __init mem_init(void)
/*
* This will free up the bootmem, ie, slot 0 memory.
*/
- totalram_pages += free_all_bootmem_node(NODE_DATA(node));
+ free_all_bootmem_node(NODE_DATA(node));
}
setup_zero_pages(); /* This comes from node 0 */
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 5a8ace6..d7312aa 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -114,7 +114,7 @@ void __init mem_init(void)
memset(empty_zero_page, 0, PAGE_SIZE);
/* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
reservedpages = 0;
for (tmp = 0; tmp < num_physpages; tmp++)
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 3b9f017..71d6b40 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -196,7 +196,7 @@ static int __init free_pages_init(void)
int reservedpages, pfn;
/* this will put all low memory onto the freelists */
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
reservedpages = 0;
for (pfn = 0; pfn < max_low_pfn; pfn++) {
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 27f3f88..1fe9d841 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -593,13 +593,13 @@ void __init mem_init(void)
#ifndef CONFIG_DISCONTIGMEM
max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
#else
{
int i;
for (i = 0; i < npmem_ranges; i++)
- totalram_pages += free_all_bootmem_node(NODE_DATA(i));
+ free_all_bootmem_node(NODE_DATA(i));
}
#endif
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 3974615..0e154a9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -322,13 +322,12 @@ void __init mem_init(void)
for_each_online_node(nid) {
if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %d\n", nid);
- totalram_pages +=
- free_all_bootmem_node(NODE_DATA(nid));
+ free_all_bootmem_node(NODE_DATA(nid));
}
}
#else
max_mapnr = max_pfn;
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
#endif
for_each_online_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 554b3e1..4a72888 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -134,7 +134,7 @@ void __init mem_init(void)
cmma_init();
/* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = 0;
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 1592aad..579fc4e 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -81,7 +81,7 @@ void __init mem_init(void)
unsigned long tmp, ram = 0;
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
setup_zero_page(); /* Setup zeroed pages. */
reservedpages = 0;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 31294f1..aecd913 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -422,7 +422,7 @@ void __init mem_init(void)
num_physpages += pgdat->node_present_pages;
if (pgdat->node_spanned_pages)
- totalram_pages += free_all_bootmem_node(pgdat);
+ free_all_bootmem_node(pgdat);
node_high_memory = (void *)__va((pgdat->node_start_pfn +
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index af472cf..e96afed 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -323,8 +323,7 @@ void __init mem_init(void)
max_mapnr = last_valid_pfn - pfn_base;
high_memory = __va(max_low_pfn << PAGE_SHIFT);
-
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 8f1715ffd..fde310e 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2042,15 +2042,13 @@ void __init mem_init(void)
{
int i;
for_each_online_node(i) {
- if (NODE_DATA(i)->node_spanned_pages != 0) {
- totalram_pages +=
- free_all_bootmem_node(NODE_DATA(i));
- }
+ if (NODE_DATA(i)->node_spanned_pages != 0)
+ free_all_bootmem_node(NODE_DATA(i));
}
- totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
+ free_low_memory_core_early(MAX_NUMNODES);
}
#else
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
#endif
/* We subtract one to account for the mem_map_zero page
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index ccfeb3f..45ce26d 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -846,7 +846,7 @@ void __init mem_init(void)
set_max_mapnr_init();
/* this will put all bootmem onto the freelists */
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
#ifndef CONFIG_64BIT
/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 1e84189..a7dc6c1 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -65,7 +65,7 @@ void __init mem_init(void)
uml_reserved = brk_end;
/* this will put all low memory onto the freelists */
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
setup_highmem(end_iomem, highmem);
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 5614b05..119b9e8 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -392,7 +392,7 @@ void __init mem_init(void)
free_unused_memmap(&meminfo);
/* this will put all unused low memory onto the freelists */
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
reserved_pages = free_pages = 0;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4b3b659..857032c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -735,7 +735,7 @@ void __init mem_init(void)
set_highmem_pages_init();
/* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
reservedpages = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5e19126..f524138 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1039,7 +1039,7 @@ void __init mem_init(void)
register_page_bootmem_info();
/* this will put all memory onto the freelists */
- totalram_pages = free_all_bootmem();
+ free_all_bootmem();
absent_pages = absent_pages_in_range(0, max_pfn);
reservedpages = max_pfn - totalram_pages - absent_pages;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 6f70647..dc6e009 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -184,7 +184,7 @@ void __init mem_init(void)
#error HIGHGMEM not implemented in init.c
#endif
- totalram_pages += free_all_bootmem();
+ free_all_bootmem();
reservedpages = ram = 0;
for (tmp = 0; tmp < max_mapnr; tmp++) {
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 7f71b31..a054fc4 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -271,9 +271,14 @@ void __init reset_all_zones_managed_pages(void)
*/
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
+ unsigned long pages;
+
register_page_bootmem_info_node(pgdat);
reset_node_managed_pages(pgdat);
- return free_all_bootmem_core(pgdat->bdata);
+ pages = free_all_bootmem_core(pgdat->bdata);
+ totalram_pages += pages;
+
+ return pages;
}
/**
@@ -291,6 +296,8 @@ unsigned long __init free_all_bootmem(void)
list_for_each_entry(bdata, &bdata_list, list)
total_pages += free_all_bootmem_core(bdata);
+ totalram_pages += total_pages;
+
return total_pages;
}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 3db0f67..915b0ea 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -180,6 +180,8 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
*/
unsigned long __init free_all_bootmem(void)
{
+ unsigned long pages;
+
reset_all_zones_managed_pages();
/*
@@ -187,7 +189,10 @@ unsigned long __init free_all_bootmem(void)
* because in some case like Node0 doesn't have RAM installed
* low ram will be on Node1
*/
- return free_low_memory_core_early(MAX_NUMNODES);
+ pages = free_low_memory_core_early(MAX_NUMNODES);
+ totalram_pages += pages;
+
+ return pages;
}
/**
--
1.7.9.5