From: clameter@sgi.com
To: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: Christoph Hellwig <hch@lst.de>, Mel Gorman <mel@skynet.ie>
Cc: William Lee Irwin III <wli@holomorphy.com>, David Chinner <dgc@sgi.com>
Cc: Jens Axboe <jens.axboe@oracle.com>, Badari Pulavarty <pbadari@gmail.com>
Cc: Maxim Levitsky <maximlevitsky@gmail.com>
Subject: [29/37] Large blocksize support: Fix up reclaim counters
Date: Wed, 20 Jun 2007 11:29:36 -0700
Message-ID: <20070620183012.751140302@sgi.com>
In-Reply-To: <20070620182907.506775016@sgi.com>
[-- Attachment #1: vps_higher_order_reclaim --]
[-- Type: text/plain, Size: 5324 bytes --]
We now have to reclaim compound pages of arbitrary order.
Adjust the counting in vmscan.c to count the number of base
pages.

Also change the active and inactive list accounting to do the same.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
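For reference: compound_pages() comes from patch 24/37 ("compound
pages: Add new support functions") earlier in this series. A minimal
sketch of the assumed semantics, written in terms of a
compound_order()-style helper; the real definition is in that patch:

	/*
	 * Number of base pages backing @page: 1 for an ordinary page,
	 * 1 << order for the head page of a compound page.
	 * Sketch only -- see patch 24/37 for the actual helper.
	 */
	static inline int compound_pages(struct page *page)
	{
		if (!PageHead(page))
			return 1;
		return 1 << compound_order(page);
	}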
include/linux/mm_inline.h |   41 +++++++++++++++++++++++++++++++----------
mm/vmscan.c               |   22 ++++++++++++----------
2 files changed, 43 insertions(+), 20 deletions(-)
Index: linux-2.6.22-rc4-mm2/mm/vmscan.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/mm/vmscan.c 2007-06-19 23:27:02.000000000 -0700
+++ linux-2.6.22-rc4-mm2/mm/vmscan.c 2007-06-19 23:27:29.000000000 -0700
@@ -474,14 +474,14 @@ static unsigned long shrink_page_list(st
VM_BUG_ON(PageActive(page));
- sc->nr_scanned++;
+ sc->nr_scanned += compound_pages(page);
if (!sc->may_swap && page_mapped(page))
goto keep_locked;
/* Double the slab pressure for mapped and swapcache pages */
if (page_mapped(page) || PageSwapCache(page))
- sc->nr_scanned++;
+ sc->nr_scanned += compound_pages(page);
if (PageWriteback(page))
goto keep_locked;
@@ -585,7 +585,7 @@ static unsigned long shrink_page_list(st
free_it:
unlock_page(page);
- nr_reclaimed++;
+ nr_reclaimed += compound_pages(page);
if (!pagevec_add(&freed_pvec, page))
__pagevec_release_nonlru(&freed_pvec);
continue;
@@ -677,22 +677,23 @@ static unsigned long isolate_lru_pages(u
unsigned long nr_taken = 0;
unsigned long scan;
- for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
+ for (scan = 0; scan < nr_to_scan && !list_empty(src); ) {
struct page *page;
unsigned long pfn;
unsigned long end_pfn;
unsigned long page_pfn;
+ int pages;
int zone_id;
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
-
+ pages = compound_pages(page);
VM_BUG_ON(!PageLRU(page));
switch (__isolate_lru_page(page, mode)) {
case 0:
list_move(&page->lru, dst);
- nr_taken++;
+ nr_taken += pages;
break;
case -EBUSY:
@@ -738,8 +739,8 @@ static unsigned long isolate_lru_pages(u
switch (__isolate_lru_page(cursor_page, mode)) {
case 0:
list_move(&cursor_page->lru, dst);
- nr_taken++;
- scan++;
+ nr_taken += compound_pages(cursor_page);
+ scan += compound_pages(cursor_page);
break;
case -EBUSY:
@@ -749,6 +750,7 @@ static unsigned long isolate_lru_pages(u
break;
}
}
+ scan += pages;
}
*scanned = scan;
@@ -985,7 +987,7 @@ force_reclaim_mapped:
ClearPageActive(page);
list_move(&page->lru, &zone->inactive_list);
- pgmoved++;
+ pgmoved += compound_pages(page);
if (!pagevec_add(&pvec, page)) {
__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
spin_unlock_irq(&zone->lru_lock);
@@ -1013,7 +1015,7 @@ force_reclaim_mapped:
SetPageLRU(page);
VM_BUG_ON(!PageActive(page));
list_move(&page->lru, &zone->active_list);
- pgmoved++;
+ pgmoved += compound_pages(page);
if (!pagevec_add(&pvec, page)) {
__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
pgmoved = 0;
Index: linux-2.6.22-rc4-mm2/include/linux/mm_inline.h
===================================================================
--- linux-2.6.22-rc4-mm2.orig/include/linux/mm_inline.h 2007-06-19 23:27:02.000000000 -0700
+++ linux-2.6.22-rc4-mm2/include/linux/mm_inline.h 2007-06-20 00:22:16.000000000 -0700
@@ -2,46 +2,67 @@ static inline void
add_page_to_active_list(struct zone *zone, struct page *page)
{
list_add(&page->lru, &zone->active_list);
- __inc_zone_state(zone, NR_ACTIVE);
+ if (!PageHead(page))
+ __inc_zone_state(zone, NR_ACTIVE);
+ else
+ __inc_zone_page_state(page, NR_ACTIVE);
}
static inline void
add_page_to_inactive_list(struct zone *zone, struct page *page)
{
list_add(&page->lru, &zone->inactive_list);
- __inc_zone_state(zone, NR_INACTIVE);
+ if (!PageHead(page))
+ __inc_zone_state(zone, NR_INACTIVE);
+ else
+ __inc_zone_page_state(page, NR_INACTIVE);
}
static inline void
add_page_to_inactive_list_tail(struct zone *zone, struct page *page)
{
list_add_tail(&page->lru, &zone->inactive_list);
- __inc_zone_state(zone, NR_INACTIVE);
+ if (!PageHead(page))
+ __inc_zone_state(zone, NR_INACTIVE);
+ else
+ __inc_zone_page_state(page, NR_INACTIVE);
}
static inline void
del_page_from_active_list(struct zone *zone, struct page *page)
{
list_del(&page->lru);
- __dec_zone_state(zone, NR_ACTIVE);
+ if (!PageHead(page))
+ __dec_zone_state(zone, NR_ACTIVE);
+ else
+ __dec_zone_page_state(page, NR_ACTIVE);
}
static inline void
del_page_from_inactive_list(struct zone *zone, struct page *page)
{
list_del(&page->lru);
- __dec_zone_state(zone, NR_INACTIVE);
+ if (!PageHead(page))
+ __dec_zone_state(zone, NR_INACTIVE);
+ else
+ __dec_zone_page_state(page, NR_INACTIVE);
}
static inline void
del_page_from_lru(struct zone *zone, struct page *page)
{
+ enum zone_stat_item counter = NR_ACTIVE;
+
list_del(&page->lru);
- if (PageActive(page)) {
+ if (PageActive(page))
__ClearPageActive(page);
- __dec_zone_state(zone, NR_ACTIVE);
- } else {
- __dec_zone_state(zone, NR_INACTIVE);
- }
+ else
+ counter = NR_INACTIVE;
+
+ if (!PageHead(page))
+ __dec_zone_state(zone, counter);
+ else
+ __dec_zone_page_state(page, counter);
}
+
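
A note on the PageHead() tests above: __inc_zone_state() and
__dec_zone_state() move the zone counter by exactly one, which is
correct for ordinary order-0 pages. For compound head pages the
page-based variants are used instead; with the vmstat support added in
patch 25/37 these are assumed to account for all base pages of the
compound page, roughly:

	/*
	 * Assumed behavior of the compound-aware page variant
	 * (sketch only; the actual change is in patch 25/37):
	 */
	static inline void __inc_zone_page_state(struct page *page,
					enum zone_stat_item item)
	{
		__mod_zone_page_state(page_zone(page), item,
					compound_pages(page));
	}

With that in place NR_ACTIVE and NR_INACTIVE stay in units of base
pages, matching the nr_scanned/nr_taken changes in vmscan.c above.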
--