* [Patch] New zone ZONE_EASY_RECLAIM take 2[1/5]
2005-11-28 11:36 [Patch] New zone ZONE_EASY_RECLAIM take 2[0/5] Yasunori Goto
@ 2005-11-28 11:36 ` Yasunori Goto
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[2/5] Yasunori Goto
` (3 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Yasunori Goto @ 2005-11-28 11:36 UTC (permalink / raw)
To: linux-mm, Linux Hotplug Memory Support; +Cc: Joel Schopp, linux-ia64
This defines __GFP flag for new zone with GFP_DMA32.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Index: new_zone_mm/include/linux/gfp.h
===================================================================
--- new_zone_mm.orig/include/linux/gfp.h 2005-11-17 16:47:04.000000000 +0900
+++ new_zone_mm/include/linux/gfp.h 2005-11-17 17:29:16.000000000 +0900
@@ -16,10 +16,13 @@ struct vm_area_struct;
#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
#ifdef CONFIG_DMA_IS_DMA32
#define __GFP_DMA32 ((__force gfp_t)0x01) /* ZONE_DMA is ZONE_DMA32 */
+#define __GFP_EASY_RECLAIM ((__force gfp_t)0x04u)
#elif BITS_PER_LONG < 64
#define __GFP_DMA32 ((__force gfp_t)0x00) /* ZONE_NORMAL is ZONE_DMA32 */
+#define __GFP_EASY_RECLAIM ((__force gfp_t)0x04u)
#else
#define __GFP_DMA32 ((__force gfp_t)0x04) /* Has own ZONE_DMA32 */
+#define __GFP_EASY_RECLAIM ((__force gfp_t)0x08u)
#endif
/*
@@ -66,7 +69,7 @@ struct vm_area_struct;
#define GFP_USER (__GFP_VALID | __GFP_WAIT | __GFP_IO | __GFP_FS | \
__GFP_HARDWALL)
#define GFP_HIGHUSER (__GFP_VALID | __GFP_WAIT | __GFP_IO | __GFP_FS | \
- __GFP_HIGHMEM | __GFP_HARDWALL)
+ __GFP_HIGHMEM | __GFP_HARDWALL | __GFP_EASY_RECLAIM)
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
Index: new_zone_mm/include/linux/mmzone.h
===================================================================
--- new_zone_mm.orig/include/linux/mmzone.h 2005-11-17 16:47:04.000000000 +0900
+++ new_zone_mm/include/linux/mmzone.h 2005-11-17 17:29:01.000000000 +0900
@@ -92,7 +92,7 @@ struct per_cpu_pageset {
* be 8 (2 ** 3) zonelists. GFP_ZONETYPES defines the number of possible
* combinations of zone modifiers in "zone modifier space".
*/
-#define GFP_ZONEMASK 0x03
+#define GFP_ZONEMASK 0x0f
/*
* As an optimisation any zone modifier bits which are only valid when
--
Yasunori Goto
^ permalink raw reply [flat|nested] 6+ messages in thread

* [Patch] New zone ZONE_EASY_RECLAIM take 2[2/5]
2005-11-28 11:36 [Patch] New zone ZONE_EASY_RECLAIM take 2[0/5] Yasunori Goto
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[1/5] Yasunori Goto
@ 2005-11-28 11:36 ` Yasunori Goto
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[3/5] Yasunori Goto
` (2 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Yasunori Goto @ 2005-11-28 11:36 UTC (permalink / raw)
To: linux-mm, Linux Hotplug Memory Support; +Cc: Joel Schopp, linux-ia64
This defines new zone ZONE_EASY_RECLAIM.
ZONES_SHIFT becomes 3.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Index: new_zone_mm/include/linux/mmzone.h
===================================================================
--- new_zone_mm.orig/include/linux/mmzone.h 2005-11-17 16:57:15.000000000 +0900
+++ new_zone_mm/include/linux/mmzone.h 2005-11-17 17:07:30.000000000 +0900
@@ -74,9 +74,10 @@ struct per_cpu_pageset {
#define ZONE_DMA32 1
#define ZONE_NORMAL 2
#define ZONE_HIGHMEM 3
+#define ZONE_EASY_RECLAIM 4
-#define MAX_NR_ZONES 4 /* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT 2 /* ceil(log2(MAX_NR_ZONES)) */
+#define MAX_NR_ZONES 5 /* Sync this with ZONES_SHIFT */
+#define ZONES_SHIFT 3 /* ceil(log2(MAX_NR_ZONES)) */
/*
Index: new_zone_mm/mm/page_alloc.c
===================================================================
--- new_zone_mm.orig/mm/page_alloc.c 2005-11-17 17:05:12.000000000 +0900
+++ new_zone_mm/mm/page_alloc.c 2005-11-17 17:08:17.000000000 +0900
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(totalram_pages);
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
+static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem", "Easy Reclaim"};
int min_free_kbytes = 1024;
unsigned long __initdata nr_kernel_pages;
--
Yasunori Goto
^ permalink raw reply [flat|nested] 6+ messages in thread

* [Patch] New zone ZONE_EASY_RECLAIM take 2[3/5]
2005-11-28 11:36 [Patch] New zone ZONE_EASY_RECLAIM take 2[0/5] Yasunori Goto
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[1/5] Yasunori Goto
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[2/5] Yasunori Goto
@ 2005-11-28 11:36 ` Yasunori Goto
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[4/5] Yasunori Goto
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[5/5] Yasunori Goto
4 siblings, 0 replies; 6+ messages in thread
From: Yasunori Goto @ 2005-11-28 11:36 UTC (permalink / raw)
To: linux-mm, Linux Hotplug Memory Support; +Cc: Joel Schopp, linux-ia64
This is changing build_zonelists for new zone.
The __GFP_xxx flags express which zone is preferred for a page
allocation request, but they are also used as an index into zonelists[].
After my patch, more than one __GFP_xxx bit might be set at the same
time. So, with this patch the number of the last (highest) set __GFP
bit is used as the zonelists' index.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Index: new_zone_mm/mm/page_alloc.c
===================================================================
--- new_zone_mm.orig/mm/page_alloc.c 2005-11-28 16:12:53.000000000 +0900
+++ new_zone_mm/mm/page_alloc.c 2005-11-28 16:15:55.000000000 +0900
@@ -1498,6 +1498,10 @@ static int __init build_zonelists_node(p
struct zone *zone;
default:
BUG();
+ case ZONE_EASY_RECLAIM:
+ zone = pgdat->node_zones + ZONE_EASY_RECLAIM;
+ if (zone->present_pages)
+ zonelist->zones[j++] = zone;
case ZONE_HIGHMEM:
zone = pgdat->node_zones + ZONE_HIGHMEM;
if (zone->present_pages) {
@@ -1526,11 +1530,14 @@ static int __init build_zonelists_node(p
static inline int highest_zone(int zone_bits)
{
int res = ZONE_NORMAL;
- if (zone_bits & (__force int)__GFP_HIGHMEM)
+
+ if (zone_bits == fls((__force int)__GFP_EASY_RECLAIM))
+ res = ZONE_EASY_RECLAIM;
+ else if (zone_bits == fls((__force int)__GFP_HIGHMEM))
res = ZONE_HIGHMEM;
- if (zone_bits & (__force int)__GFP_DMA32)
+ else if (zone_bits == fls((__force int)__GFP_DMA32))
res = ZONE_DMA32;
- if (zone_bits & (__force int)__GFP_DMA)
+ else if (zone_bits == fls((__force int)__GFP_DMA))
res = ZONE_DMA;
return res;
}
Index: new_zone_mm/include/linux/gfp.h
===================================================================
--- new_zone_mm.orig/include/linux/gfp.h 2005-11-28 16:15:48.000000000 +0900
+++ new_zone_mm/include/linux/gfp.h 2005-11-28 16:15:55.000000000 +0900
@@ -79,8 +79,10 @@ struct vm_area_struct;
/* 4GB DMA on some platforms */
#define GFP_DMA32 __GFP_DMA32
-
-#define gfp_zone(mask) ((__force int)((mask) & (__force gfp_t)GFP_ZONEMASK))
+static inline unsigned int gfp_zone(unsigned int mask)
+{
+ return fls(mask & GFP_ZONEMASK);
+}
/*
* There is only one page-allocator function, and two main namespaces to
--
Yasunori Goto
^ permalink raw reply [flat|nested] 6+ messages in thread

* [Patch] New zone ZONE_EASY_RECLAIM take 2[4/5]
2005-11-28 11:36 [Patch] New zone ZONE_EASY_RECLAIM take 2[0/5] Yasunori Goto
` (2 preceding siblings ...)
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[3/5] Yasunori Goto
@ 2005-11-28 11:36 ` Yasunori Goto
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[5/5] Yasunori Goto
4 siblings, 0 replies; 6+ messages in thread
From: Yasunori Goto @ 2005-11-28 11:36 UTC (permalink / raw)
To: linux-mm, Linux Hotplug Memory Support; +Cc: Joel Schopp, linux-ia64
This is for calculation of the watermark zone->pages_min/low/high.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Index: new_zone_mm/include/linux/mmzone.h
===================================================================
--- new_zone_mm.orig/include/linux/mmzone.h 2005-11-17 17:07:30.000000000 +0900
+++ new_zone_mm/include/linux/mmzone.h 2005-11-17 17:17:51.000000000 +0900
@@ -401,6 +401,11 @@ static inline struct zone *next_zone(str
#define for_each_zone(zone) \
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
+static inline int is_easy_reclaim_idx(int idx)
+{
+ return (idx == ZONE_EASY_RECLAIM);
+}
+
static inline int is_highmem_idx(int idx)
{
return (idx == ZONE_HIGHMEM);
@@ -416,6 +421,11 @@ static inline int is_normal_idx(int idx)
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
* @zone - pointer to struct zone variable
*/
+static inline int is_easy_reclaim(struct zone *zone)
+{
+ return zone == zone->zone_pgdat->node_zones + ZONE_EASY_RECLAIM;
+}
+
static inline int is_highmem(struct zone *zone)
{
return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
Index: new_zone_mm/mm/page_alloc.c
===================================================================
--- new_zone_mm.orig/mm/page_alloc.c 2005-11-17 17:09:05.000000000 +0900
+++ new_zone_mm/mm/page_alloc.c 2005-11-17 17:17:51.000000000 +0900
@@ -2495,13 +2495,13 @@ void setup_per_zone_pages_min(void)
/* Calculate total number of !ZONE_HIGHMEM pages */
for_each_zone(zone) {
- if (!is_highmem(zone))
+ if (!is_highmem(zone) && !is_easy_reclaim(zone))
lowmem_pages += zone->present_pages;
}
for_each_zone(zone) {
spin_lock_irqsave(&zone->lru_lock, flags);
- if (is_highmem(zone)) {
+ if (is_highmem(zone) || is_easy_reclaim(zone)) {
/*
* Often, highmem doesn't need to reserve any pages.
* But the pages_min/low/high values are also used for
--
Yasunori Goto
^ permalink raw reply [flat|nested] 6+ messages in thread

* [Patch] New zone ZONE_EASY_RECLAIM take 2[5/5]
2005-11-28 11:36 [Patch] New zone ZONE_EASY_RECLAIM take 2[0/5] Yasunori Goto
` (3 preceding siblings ...)
2005-11-28 11:36 ` [Patch] New zone ZONE_EASY_RECLAIM take 2[4/5] Yasunori Goto
@ 2005-11-28 11:36 ` Yasunori Goto
4 siblings, 0 replies; 6+ messages in thread
From: Yasunori Goto @ 2005-11-28 11:36 UTC (permalink / raw)
To: linux-mm, Linux Hotplug Memory Support; +Cc: Joel Schopp, linux-ia64
This is to disable __GFP_EASY_RECLAIM flag at add_to_page_cache().
If this patch is not applied, cache_grow() checks and call BUG(),
at here.
if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
BUG();
This patch is to solve it.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Index: new_zone_mm/mm/filemap.c
===================================================================
--- new_zone_mm.orig/mm/filemap.c 2005-11-22 15:21:23.000000000 +0900
+++ new_zone_mm/mm/filemap.c 2005-11-22 15:21:27.000000000 +0900
@@ -381,7 +381,7 @@ int filemap_write_and_wait_range(struct
int add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
- int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ int error = radix_tree_preload(gfp_mask & ~(__GFP_HIGHMEM | __GFP_EASY_RECLAIM));
if (error == 0) {
write_lock_irq(&mapping->tree_lock);
--
Yasunori Goto
^ permalink raw reply [flat|nested] 6+ messages in thread