* [PATCH 1/4] lumpy reclaim v2
2006-11-23 16:48 [PATCH 0/4] Lumpy Reclaim V3 Andy Whitcroft
@ 2006-11-23 16:49 ` Andy Whitcroft
2006-11-23 16:49 ` [PATCH 2/4] lumpy cleanup a misplaced comment and simplify some code Andy Whitcroft
` (3 subsequent siblings)
4 siblings, 0 replies; 9+ messages in thread
From: Andy Whitcroft @ 2006-11-23 16:49 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Peter Zijlstra, Mel Gorman, Andy Whitcroft,
linux-kernel
lumpy reclaim v2
When trying to reclaim pages for a higher order allocation, make reclaim
try to move lumps of pages (fitting the requested order) about, instead
of single pages. This should significantly reduce the number of
reclaimed pages for higher order allocations.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
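To illustrate the core calculation, here is a minimal standalone sketch
(not part of the diff below; the helper name is made up for
illustration) of the order-aligned pfn window that isolate_lru_pages()
scans around the tag page:

        /*
         * Given the pfn of the tag page and the requested allocation
         * order, compute the order-aligned window [start_pfn, end_pfn)
         * that lumpy reclaim will scan for additional candidates.
         */
        static inline void lumpy_pfn_window(unsigned long page_pfn, int order,
                                            unsigned long *start_pfn,
                                            unsigned long *end_pfn)
        {
                /* round the tag page's pfn down to an aligned boundary */
                *start_pfn = page_pfn & ~((1UL << order) - 1);
                /* the window covers exactly 1 << order pages */
                *end_pfn = *start_pfn + (1UL << order);
        }

For example, with order = 2 and a tag page at pfn 1027 the window is
pfns [1024, 1028).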
diff --git a/fs/buffer.c b/fs/buffer.c
index 64ea099..c73acb7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -424,7 +424,7 @@ static void free_more_memory(void)
for_each_online_pgdat(pgdat) {
zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
- try_to_free_pages(zones, GFP_NOFS);
+ try_to_free_pages(zones, 0, GFP_NOFS);
}
}
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 439f9a8..5c26736 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -187,7 +187,7 @@ extern int rotate_reclaimable_page(struc
extern void swap_setup(void);
/* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long try_to_free_pages(struct zone **, int, gfp_t);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 19ab611..39f48a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1368,7 +1368,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
+ did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e9caff..4645a3f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -67,6 +67,8 @@ struct scan_control {
int swappiness;
int all_unreclaimable;
+
+ int order;
};
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -623,35 +625,86 @@ keep:
*
* returns how many pages were moved onto *@dst.
*/
+int __isolate_lru_page(struct page *page, int active)
+{
+ int ret = -EINVAL;
+
+ if (PageLRU(page) && (PageActive(page) == active)) {
+ ret = -EBUSY;
+ if (likely(get_page_unless_zero(page))) {
+ /*
+ * Be careful not to clear PageLRU until after we're
+ * sure the page is not being freed elsewhere -- the
+ * page release code relies on it.
+ */
+ ClearPageLRU(page);
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct list_head *src, struct list_head *dst,
- unsigned long *scanned)
+ unsigned long *scanned, int order)
{
unsigned long nr_taken = 0;
- struct page *page;
- unsigned long scan;
+ struct page *page, *tmp;
+ unsigned long scan, pfn, end_pfn, page_pfn;
+ int active;
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
- struct list_head *target;
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
VM_BUG_ON(!PageLRU(page));
- list_del(&page->lru);
- target = src;
- if (likely(get_page_unless_zero(page))) {
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- ClearPageLRU(page);
- target = dst;
- nr_taken++;
- } /* else it is being freed elsewhere */
+ active = PageActive(page);
+ switch (__isolate_lru_page(page, active)) {
+ case 0:
+ list_move(&page->lru, dst);
+ nr_taken++;
+ break;
+
+ case -EBUSY:
+ /* else it is being freed elsewhere */
+ list_move(&page->lru, src);
+ continue;
+
+ default:
+ BUG();
+ }
- list_add(&page->lru, target);
+ if (!order)
+ continue;
+
+ page_pfn = pfn = __page_to_pfn(page);
+ end_pfn = pfn &= ~((1 << order) - 1);
+ end_pfn += 1 << order;
+ for (; pfn < end_pfn; pfn++) {
+ if (unlikely(pfn == page_pfn))
+ continue;
+ if (unlikely(!pfn_valid(pfn)))
+ break;
+
+ scan++;
+ tmp = __pfn_to_page(pfn);
+ switch (__isolate_lru_page(tmp, active)) {
+ case 0:
+ list_move(&tmp->lru, dst);
+ nr_taken++;
+ continue;
+
+ case -EBUSY:
+ /* else it is being freed elsewhere */
+ list_move(&tmp->lru, src);
+ default:
+ break;
+
+ }
+ break;
+ }
}
*scanned = scan;
@@ -682,7 +735,7 @@ static unsigned long shrink_inactive_lis
nr_taken = isolate_lru_pages(sc->swap_cluster_max,
&zone->inactive_list,
- &page_list, &nr_scan);
+ &page_list, &nr_scan, sc->order);
zone->nr_inactive -= nr_taken;
zone->pages_scanned += nr_scan;
zone->aging_total += nr_scan;
@@ -828,7 +881,7 @@ force_reclaim_mapped:
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
- &l_hold, &pgscanned);
+ &l_hold, &pgscanned, sc->order);
zone->pages_scanned += pgscanned;
zone->nr_active -= pgmoved;
spin_unlock_irq(&zone->lru_lock);
@@ -1017,7 +1070,7 @@ static unsigned long shrink_zones(int pr
* holds filesystem locks which prevent writeout this might not work, and the
* allocation attempt will fail.
*/
-unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
{
int priority;
int ret = 0;
@@ -1032,6 +1085,7 @@ unsigned long try_to_free_pages(struct z
.swap_cluster_max = SWAP_CLUSTER_MAX,
.may_swap = 1,
.swappiness = vm_swappiness,
+ .order = order,
};
delay_swap_prefetch();
--
* [PATCH 2/4] lumpy cleanup a misplaced comment and simplify some code
2006-11-23 16:48 [PATCH 0/4] Lumpy Reclaim V3 Andy Whitcroft
2006-11-23 16:49 ` [PATCH 1/4] lumpy reclaim v2 Andy Whitcroft
@ 2006-11-23 16:49 ` Andy Whitcroft
2006-11-23 16:50 ` [PATCH 3/4] lumpy ensure we respect zone boundaries Andy Whitcroft
` (2 subsequent siblings)
4 siblings, 0 replies; 9+ messages in thread
From: Andy Whitcroft @ 2006-11-23 16:49 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Peter Zijlstra, Mel Gorman, Andy Whitcroft,
linux-kernel
Move the comment for isolate_lru_pages() back to its function
and add a comment for the new function. Add some running commentary
on the area scan. Clean up the switch indentation to match the
majority view in mm/*. Finally, clarify the boundary pfn calculations.
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
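As a quick sanity check of the clarified boundary calculation (a sketch
with made-up numbers, not code from the patch): with order = 2 and a
tag page at pfn 1027 the old and new forms produce the same window.

        /* old form: the embedded assignment also modified pfn */
        page_pfn = pfn = 1027;
        end_pfn = pfn &= ~((1 << 2) - 1);       /* pfn = end_pfn = 1024 */
        end_pfn += 1 << 2;                      /* end_pfn = 1028 */

        /* new form: each step is explicit */
        page_pfn = 1027;
        pfn = page_pfn & ~((1 << 2) - 1);       /* pfn = 1024 */
        end_pfn = pfn + (1 << 2);               /* end_pfn = 1028 */

Both give the aligned window of pfns [1024, 1028).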
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4645a3f..3b6ef79 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -609,21 +609,14 @@ keep:
}
/*
- * zone->lru_lock is heavily contended. Some of the functions that
- * shrink the lists perform better by taking out a batch of pages
- * and working on them outside the LRU lock.
+ * Attempt to remove the specified page from its LRU. Only take this
+ * page if it is of the appropriate PageActive status. Pages which
+ * are being freed elsewhere are also ignored.
*
- * For pagecache intensive workloads, this function is the hottest
- * spot in the kernel (apart from copy_*_user functions).
- *
- * Appropriate locks must be held before calling this function.
+ * @page: page to consider
+ * @active: active/inactive flag only take pages of this type
*
- * @nr_to_scan: The number of pages to look through on the list.
- * @src: The LRU list to pull pages off.
- * @dst: The temp list to put pages on to.
- * @scanned: The number of pages that were scanned.
- *
- * returns how many pages were moved onto *@dst.
+ * returns 0 on success, -ve errno on failure.
*/
int __isolate_lru_page(struct page *page, int active)
{
@@ -645,6 +638,23 @@ int __isolate_lru_page(struct page *page
return ret;
}
+/*
+ * zone->lru_lock is heavily contended. Some of the functions that
+ * shrink the lists perform better by taking out a batch of pages
+ * and working on them outside the LRU lock.
+ *
+ * For pagecache intensive workloads, this function is the hottest
+ * spot in the kernel (apart from copy_*_user functions).
+ *
+ * Appropriate locks must be held before calling this function.
+ *
+ * @nr_to_scan: The number of pages to look through on the list.
+ * @src: The LRU list to pull pages off.
+ * @dst: The temp list to put pages on to.
+ * @scanned: The number of pages that were scanned.
+ *
+ * returns how many pages were moved onto *@dst.
+ */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct list_head *src, struct list_head *dst,
unsigned long *scanned, int order)
@@ -662,26 +672,31 @@ static unsigned long isolate_lru_pages(u
active = PageActive(page);
switch (__isolate_lru_page(page, active)) {
- case 0:
- list_move(&page->lru, dst);
- nr_taken++;
- break;
+ case 0:
+ list_move(&page->lru, dst);
+ nr_taken++;
+ break;
- case -EBUSY:
- /* else it is being freed elsewhere */
- list_move(&page->lru, src);
- continue;
+ case -EBUSY:
+ /* else it is being freed elsewhere */
+ list_move(&page->lru, src);
+ continue;
- default:
- BUG();
+ default:
+ BUG();
}
if (!order)
continue;
- page_pfn = pfn = __page_to_pfn(page);
- end_pfn = pfn &= ~((1 << order) - 1);
- end_pfn += 1 << order;
+ /*
+ * Attempt to take all pages in the order aligned region
+ * surrounding the tag page. Only take those pages of
+ * the same active state as that tag page.
+ */
+ page_pfn = __page_to_pfn(page);
+ pfn = page_pfn & ~((1 << order) - 1);
+ end_pfn = pfn + (1 << order);
for (; pfn < end_pfn; pfn++) {
if (unlikely(pfn == page_pfn))
continue;
@@ -691,17 +706,16 @@ static unsigned long isolate_lru_pages(u
scan++;
tmp = __pfn_to_page(pfn);
switch (__isolate_lru_page(tmp, active)) {
- case 0:
- list_move(&tmp->lru, dst);
- nr_taken++;
- continue;
-
- case -EBUSY:
- /* else it is being freed elsewhere */
- list_move(&tmp->lru, src);
- default:
- break;
+ case 0:
+ list_move(&tmp->lru, dst);
+ nr_taken++;
+ continue;
+ case -EBUSY:
+ /* else it is being freed elsewhere */
+ list_move(&tmp->lru, src);
+ default:
+ break;
}
break;
}
--
* [PATCH 3/4] lumpy ensure we respect zone boundaries
2006-11-23 16:48 [PATCH 0/4] Lumpy Reclaim V3 Andy Whitcroft
2006-11-23 16:49 ` [PATCH 1/4] lumpy reclaim v2 Andy Whitcroft
2006-11-23 16:49 ` [PATCH 2/4] lumpy cleanup a misplaced comment and simplify some code Andy Whitcroft
@ 2006-11-23 16:50 ` Andy Whitcroft
2006-11-23 16:50 ` [PATCH 4/4] lumpy take the other active inactive pages in the area Andy Whitcroft
2006-11-23 19:02 ` [PATCH 0/4] Lumpy Reclaim V3 Peter Zijlstra
4 siblings, 0 replies; 9+ messages in thread
From: Andy Whitcroft @ 2006-11-23 16:50 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Peter Zijlstra, Mel Gorman, Andy Whitcroft,
linux-kernel
When scanning an aligned order-N area, ensure we only pull out pages
in the same zone as our tag page; otherwise we will be manipulating
those pages' LRU state under the wrong zone's lru_lock. Bad.
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
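The hazard in sketch form (the helper below is hypothetical; only the
page_zone_id() comparison comes from the patch): an order-aligned pfn
window can straddle a zone boundary, and a page in another zone is
protected by that other zone's lru_lock, which we do not hold here.

        /*
         * A candidate page may only be isolated if it lives in the same
         * zone as the tag page, since isolate_lru_pages() runs under
         * that zone's lru_lock only.  page_zone_id() compares the zone
         * id bits packed into page->flags, so the check is cheap.
         */
        static inline int lumpy_same_zone(struct page *tag, struct page *candidate)
        {
                return page_zone_id(tag) == page_zone_id(candidate);
        }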
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3b6ef79..e3be888 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -663,6 +663,7 @@ static unsigned long isolate_lru_pages(u
struct page *page, *tmp;
unsigned long scan, pfn, end_pfn, page_pfn;
int active;
+ int zone_id;
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
page = lru_to_page(src);
@@ -694,6 +695,7 @@ static unsigned long isolate_lru_pages(u
* surrounding the tag page. Only take those pages of
* the same active state as that tag page.
*/
+ zone_id = page_zone_id(page);
page_pfn = __page_to_pfn(page);
pfn = page_pfn & ~((1 << order) - 1);
end_pfn = pfn + (1 << order);
@@ -703,8 +705,10 @@ static unsigned long isolate_lru_pages(u
if (unlikely(!pfn_valid(pfn)))
break;
- scan++;
tmp = __pfn_to_page(pfn);
+ if (unlikely(page_zone_id(tmp) != zone_id))
+ continue;
+ scan++;
switch (__isolate_lru_page(tmp, active)) {
case 0:
list_move(&tmp->lru, dst);
--
* [PATCH 4/4] lumpy take the other active inactive pages in the area
2006-11-23 16:48 [PATCH 0/4] Lumpy Reclaim V3 Andy Whitcroft
` (2 preceding siblings ...)
2006-11-23 16:50 ` [PATCH 3/4] lumpy ensure we respect zone boundaries Andy Whitcroft
@ 2006-11-23 16:50 ` Andy Whitcroft
2006-11-23 19:02 ` [PATCH 0/4] Lumpy Reclaim V3 Peter Zijlstra
4 siblings, 0 replies; 9+ messages in thread
From: Andy Whitcroft @ 2006-11-23 16:50 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Peter Zijlstra, Mel Gorman, Andy Whitcroft,
linux-kernel
When we scan an order-N aligned area around our tag page, take any
other pages whose active state matches that of the tag page.
This will tend to demote areas of the order we are interested in from
the active list to the inactive list, and off the end of the inactive
list, increasing the chances of such areas coming free together.
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
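For reference, this is roughly what the window scan looks like with the
whole series applied (a sketch assembled from the patches, not a
literal excerpt): the scan no longer gives up at the first page it
cannot isolate, it keeps walking the aligned window and takes every
page whose active state matches the tag page.

        for (; pfn < end_pfn; pfn++) {
                if (unlikely(pfn == page_pfn))
                        continue;       /* the tag page is already taken */
                if (unlikely(!pfn_valid(pfn)))
                        break;          /* ran off the end of the memory map */

                tmp = __pfn_to_page(pfn);
                if (unlikely(page_zone_id(tmp) != zone_id))
                        continue;       /* other zone, other lru_lock: skip */

                scan++;
                switch (__isolate_lru_page(tmp, active)) {
                case 0:
                        list_move(&tmp->lru, dst);
                        nr_taken++;
                        break;
                case -EBUSY:
                        /* being freed elsewhere, rotate it back onto src */
                        list_move(&tmp->lru, src);
                        break;
                default:
                        break;          /* not a candidate, keep scanning */
                }
        }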
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e3be888..50e95ed 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -713,7 +713,7 @@ static unsigned long isolate_lru_pages(u
case 0:
list_move(&tmp->lru, dst);
nr_taken++;
- continue;
+ break;
case -EBUSY:
/* else it is being freed elsewhere */
@@ -721,7 +721,6 @@ static unsigned long isolate_lru_pages(u
default:
break;
}
- break;
}
}
--
* Re: [PATCH 0/4] Lumpy Reclaim V3
2006-11-23 16:48 [PATCH 0/4] Lumpy Reclaim V3 Andy Whitcroft
` (3 preceding siblings ...)
2006-11-23 16:50 ` [PATCH 4/4] lumpy take the other active inactive pages in the area Andy Whitcroft
@ 2006-11-23 19:02 ` Peter Zijlstra
4 siblings, 0 replies; 9+ messages in thread
From: Peter Zijlstra @ 2006-11-23 19:02 UTC (permalink / raw)
To: Andy Whitcroft; +Cc: linux-mm, Andrew Morton, Mel Gorman, linux-kernel
On Thu, 2006-11-23 at 16:48 +0000, Andy Whitcroft wrote:
> lumpy-reclaim-v2 -- Peter Zijlstra's lumpy reclaim prototype,
>
> lumpy-cleanup-a-missplaced-comment-and-simplify-some-code --
> cleanups to move a comment back to where it came from, to make
> the area edge selection more comprehensible, and to clean up
> the switch coding style to match the consensus in mm/*.c,
Sure looks better.
> lumpy-ensure-we-respect-zone-boundaries -- bug fix to ensure we do
> not attempt to take pages from adjacent zones, and
Valid case I guess :-)
> lumpy-take-the-other-active-inactive-pages-in-the-area -- patch to
> increase aggression over the targeted order.
Yeah, I see how this will help.
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
for all 3
--
* [PATCH 1/4] lumpy reclaim v2
2006-12-06 16:59 Andy Whitcroft
@ 2006-12-06 16:59 ` Andy Whitcroft
2006-12-15 4:57 ` Andrew Morton
0 siblings, 1 reply; 9+ messages in thread
From: Andy Whitcroft @ 2006-12-06 16:59 UTC (permalink / raw)
To: Andrew Morton, linux-mm
Cc: Peter Zijlstra, Mel Gorman, Andy Whitcroft, linux-kernel
lumpy reclaim v2
When trying to reclaim pages for a higher order allocation, make reclaim
try to move lumps of pages (fitting the requested order) about, instead
of single pages. This should significantly reduce the number of
reclaimed pages for higher order allocations.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
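In outline, the plumbing works like this (a sketch; only the fields
visible in the hunks below are shown, the rest of the initializer is
elided): the caller's allocation order is stored in scan_control by
try_to_free_pages() and reaches isolate_lru_pages() as sc->order.

        struct scan_control sc = {
                /* ... existing fields ... */
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .may_swap         = 1,
                .swappiness       = vm_swappiness,
                .order            = order, /* new: requested allocation order */
        };

shrink_inactive_list() and the active-list scan then hand sc->order to
isolate_lru_pages(), which uses it to size the aligned window.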
diff --git a/fs/buffer.c b/fs/buffer.c
index c953c15..2f8b073 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -374,7 +374,7 @@ static void free_more_memory(void)
for_each_online_pgdat(pgdat) {
zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
- try_to_free_pages(zones, GFP_NOFS);
+ try_to_free_pages(zones, 0, GFP_NOFS);
}
}
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 439f9a8..5c26736 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -187,7 +187,7 @@ extern int rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long try_to_free_pages(struct zone **, int, gfp_t);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7938e46..78801c2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1371,7 +1371,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
+ did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
p->reclaim_state = NULL;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 80f4444..0f2d961 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -67,6 +67,8 @@ struct scan_control {
int swappiness;
int all_unreclaimable;
+
+ int order;
};
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -620,35 +622,86 @@ keep:
*
* returns how many pages were moved onto *@dst.
*/
+int __isolate_lru_page(struct page *page, int active)
+{
+ int ret = -EINVAL;
+
+ if (PageLRU(page) && (PageActive(page) == active)) {
+ ret = -EBUSY;
+ if (likely(get_page_unless_zero(page))) {
+ /*
+ * Be careful not to clear PageLRU until after we're
+ * sure the page is not being freed elsewhere -- the
+ * page release code relies on it.
+ */
+ ClearPageLRU(page);
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct list_head *src, struct list_head *dst,
- unsigned long *scanned)
+ unsigned long *scanned, int order)
{
unsigned long nr_taken = 0;
- struct page *page;
- unsigned long scan;
+ struct page *page, *tmp;
+ unsigned long scan, pfn, end_pfn, page_pfn;
+ int active;
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
- struct list_head *target;
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
VM_BUG_ON(!PageLRU(page));
- list_del(&page->lru);
- target = src;
- if (likely(get_page_unless_zero(page))) {
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- ClearPageLRU(page);
- target = dst;
- nr_taken++;
- } /* else it is being freed elsewhere */
+ active = PageActive(page);
+ switch (__isolate_lru_page(page, active)) {
+ case 0:
+ list_move(&page->lru, dst);
+ nr_taken++;
+ break;
+
+ case -EBUSY:
+ /* else it is being freed elsewhere */
+ list_move(&page->lru, src);
+ continue;
+
+ default:
+ BUG();
+ }
- list_add(&page->lru, target);
+ if (!order)
+ continue;
+
+ page_pfn = pfn = __page_to_pfn(page);
+ end_pfn = pfn &= ~((1 << order) - 1);
+ end_pfn += 1 << order;
+ for (; pfn < end_pfn; pfn++) {
+ if (unlikely(pfn == page_pfn))
+ continue;
+ if (unlikely(!pfn_valid(pfn)))
+ break;
+
+ scan++;
+ tmp = __pfn_to_page(pfn);
+ switch (__isolate_lru_page(tmp, active)) {
+ case 0:
+ list_move(&tmp->lru, dst);
+ nr_taken++;
+ continue;
+
+ case -EBUSY:
+ /* else it is being freed elsewhere */
+ list_move(&tmp->lru, src);
+ default:
+ break;
+
+ }
+ break;
+ }
}
*scanned = scan;
@@ -679,7 +732,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
nr_taken = isolate_lru_pages(sc->swap_cluster_max,
&zone->inactive_list,
- &page_list, &nr_scan);
+ &page_list, &nr_scan, sc->order);
zone->nr_inactive -= nr_taken;
zone->pages_scanned += nr_scan;
zone->total_scanned += nr_scan;
@@ -825,7 +878,7 @@ force_reclaim_mapped:
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
- &l_hold, &pgscanned);
+ &l_hold, &pgscanned, sc->order);
zone->pages_scanned += pgscanned;
zone->nr_active -= pgmoved;
spin_unlock_irq(&zone->lru_lock);
@@ -1014,7 +1067,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
* holds filesystem locks which prevent writeout this might not work, and the
* allocation attempt will fail.
*/
-unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
{
int priority;
int ret = 0;
@@ -1029,6 +1082,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
.swap_cluster_max = SWAP_CLUSTER_MAX,
.may_swap = 1,
.swappiness = vm_swappiness,
+ .order = order,
};
delay_swap_prefetch();
--
* Re: [PATCH 1/4] lumpy reclaim v2
2006-12-06 16:59 ` [PATCH 1/4] lumpy reclaim v2 Andy Whitcroft
@ 2006-12-15 4:57 ` Andrew Morton
2007-01-26 11:00 ` Andrew Morton
0 siblings, 1 reply; 9+ messages in thread
From: Andrew Morton @ 2006-12-15 4:57 UTC (permalink / raw)
To: Andy Whitcroft; +Cc: linux-mm, Peter Zijlstra, Mel Gorman, linux-kernel
On Wed, 6 Dec 2006 16:59:35 +0000
Andy Whitcroft <apw@shadowen.org> wrote:
> + tmp = __pfn_to_page(pfn);
ia64 doesn't implement __page_to_pfn. Why did you not use page_to_pfn()?
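The portable spelling being asked for here is the un-prefixed wrapper
form, i.e. (sketch):

        page_pfn = page_to_pfn(page);   /* rather than __page_to_pfn(page) */
        tmp = pfn_to_page(pfn);         /* rather than __pfn_to_page(pfn)  */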
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 1/4] lumpy reclaim v2
2006-12-15 4:57 ` Andrew Morton
@ 2007-01-26 11:00 ` Andrew Morton
0 siblings, 0 replies; 9+ messages in thread
From: Andrew Morton @ 2007-01-26 11:00 UTC (permalink / raw)
To: Andy Whitcroft, linux-mm, Peter Zijlstra, Mel Gorman,
linux-kernel
On Thu, 14 Dec 2006 20:57:34 -0800
Andrew Morton <akpm@osdl.org> wrote:
> On Wed, 6 Dec 2006 16:59:35 +0000
> Andy Whitcroft <apw@shadowen.org> wrote:
>
> > + tmp = __pfn_to_page(pfn);
>
> ia64 doesn't implement __page_to_pfn. Why did you not use page_to_pfn()?
Poke. I'm still a no-compile on ia64.
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 9+ messages in thread