From: Mel Gorman <mel@csn.ul.ie>
To: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
Andrew Morton <akpm@linux-foundation.org>,
Rik van Riel <riel@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>, Mel Gorman <mel@csn.ul.ie>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH 2/8] mm: vmscan: Convert lumpy_mode into a bitmask
Date: Wed, 17 Nov 2010 16:22:43 +0000 [thread overview]
Message-ID: <1290010969-26721-3-git-send-email-mel@csn.ul.ie> (raw)
In-Reply-To: <1290010969-26721-1-git-send-email-mel@csn.ul.ie>
Currently lumpy_mode is an enum and determines if lumpy reclaim is off,
synchronous or asynchronous. In preparation for using compaction instead of
lumpy reclaim, this patch converts the flags into a bitmask.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
---
include/trace/events/vmscan.h | 6 ++--
mm/vmscan.c | 46 +++++++++++++++++++++++++----------------
2 files changed, 31 insertions(+), 21 deletions(-)
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index c255fcc..be76429 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -25,13 +25,13 @@
#define trace_reclaim_flags(page, sync) ( \
(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
- (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
)
#define trace_shrink_flags(file, sync) ( \
- (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
+ (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
- (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
)
TRACE_EVENT(mm_vmscan_kswapd_sleep,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b8a6fdc..37d4f0e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -51,11 +51,20 @@
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
-enum lumpy_mode {
- LUMPY_MODE_NONE,
- LUMPY_MODE_ASYNC,
- LUMPY_MODE_SYNC,
-};
+/*
+ * lumpy_mode determines how the inactive list is shrunk
+ * LUMPY_MODE_SINGLE: Reclaim only order-0 pages
+ * LUMPY_MODE_ASYNC: Do not block
+ * LUMPY_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
+ * LUMPY_MODE_CONTIGRECLAIM: For high-order allocations, take a reference
+ * page from the LRU and reclaim all pages within a
+ * naturally aligned range
+ */
+typedef unsigned __bitwise__ lumpy_mode;
+#define LUMPY_MODE_SINGLE ((__force lumpy_mode)0x01u)
+#define LUMPY_MODE_ASYNC ((__force lumpy_mode)0x02u)
+#define LUMPY_MODE_SYNC ((__force lumpy_mode)0x04u)
+#define LUMPY_MODE_CONTIGRECLAIM ((__force lumpy_mode)0x08u)
struct scan_control {
/* Incremented by the number of inactive pages that were scanned */
@@ -88,7 +97,7 @@ struct scan_control {
* Intend to reclaim enough continuous memory rather than reclaim
* enough amount of memory. i.e, mode for high order allocation.
*/
- enum lumpy_mode lumpy_reclaim_mode;
+ lumpy_mode lumpy_reclaim_mode;
/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
@@ -274,13 +283,13 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
bool sync)
{
- enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
+ lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
/*
* Some reclaim have alredy been failed. No worth to try synchronous
* lumpy reclaim.
*/
- if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+ if (sync && sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
return;
/*
@@ -288,17 +297,18 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
* trouble getting a small set of contiguous pages, we
* will reclaim both active and inactive pages.
*/
+ sc->lumpy_reclaim_mode = LUMPY_MODE_CONTIGRECLAIM;
if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- sc->lumpy_reclaim_mode = mode;
+ sc->lumpy_reclaim_mode |= mode;
else if (sc->order && priority < DEF_PRIORITY - 2)
- sc->lumpy_reclaim_mode = mode;
+ sc->lumpy_reclaim_mode |= mode;
else
- sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+ sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
}
static void disable_lumpy_reclaim_mode(struct scan_control *sc)
{
- sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+ sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
}
static inline int is_page_cache_freeable(struct page *page)
@@ -429,7 +439,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* first attempt to free a range of pages fails.
*/
if (PageWriteback(page) &&
- sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
+ (sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC))
wait_on_page_writeback(page);
if (!PageWriteback(page)) {
@@ -615,7 +625,7 @@ static enum page_references page_check_references(struct page *page,
referenced_page = TestClearPageReferenced(page);
/* Lumpy reclaim - ignore references */
- if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
+ if (sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM)
return PAGEREF_RECLAIM;
/*
@@ -732,7 +742,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* for any page for which writeback has already
* started.
*/
- if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
+ if ((sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC) &&
may_enter_fs)
wait_on_page_writeback(page);
else {
@@ -1317,7 +1327,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
return false;
/* Only stall on lumpy reclaim */
- if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+ if (sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
return false;
/* If we have relaimed everything on the isolated list, no stall */
@@ -1368,7 +1378,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
if (scanning_global_lru(sc)) {
nr_taken = isolate_pages_global(nr_to_scan,
&page_list, &nr_scanned, sc->order,
- sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+ sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
ISOLATE_INACTIVE : ISOLATE_BOTH,
zone, 0, file);
zone->pages_scanned += nr_scanned;
@@ -1381,7 +1391,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
} else {
nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
&page_list, &nr_scanned, sc->order,
- sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+ sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
ISOLATE_INACTIVE : ISOLATE_BOTH,
zone, sc->mem_cgroup,
0, file);
--
1.7.1
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Fight unfair telecom policy in Canada: sign http://dissolvethecrtc.ca/
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2010-11-17 16:23 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-11-17 16:22 [PATCH 0/8] Use memory compaction instead of lumpy reclaim during high-order allocations Mel Gorman
2010-11-17 16:22 ` [PATCH 1/8] mm: compaction: Add trace events for memory compaction activity Mel Gorman
2010-11-17 16:22 ` Mel Gorman [this message]
2010-11-17 16:22 ` [PATCH 3/8] mm: vmscan: Reclaim order-0 and use compaction instead of lumpy reclaim Mel Gorman
2010-11-18 18:09 ` Andrea Arcangeli
2010-11-18 18:30 ` Mel Gorman
2010-11-17 16:22 ` [PATCH 4/8] mm: migration: Allow migration to operate asynchronously and avoid synchronous compaction in the faster path Mel Gorman
2010-11-18 18:21 ` Andrea Arcangeli
2010-11-18 18:34 ` Mel Gorman
2010-11-18 19:00 ` Andrea Arcangeli
2010-11-17 16:22 ` [PATCH 5/8] mm: migration: Cleanup migrate_pages API by matching types for offlining and sync Mel Gorman
2010-11-17 16:22 ` [PATCH 6/8] mm: compaction: Perform a faster scan in try_to_compact_pages() Mel Gorman
2010-11-18 18:34 ` Andrea Arcangeli
2010-11-18 18:50 ` Mel Gorman
2010-11-18 19:08 ` Andrea Arcangeli
2010-11-19 11:16 ` Mel Gorman
2010-11-17 16:22 ` [PATCH 7/8] mm: compaction: Use the LRU to get a hint on where compaction should start Mel Gorman
2010-11-18 9:10 ` KAMEZAWA Hiroyuki
2010-11-18 9:28 ` Mel Gorman
2010-11-18 18:46 ` Andrea Arcangeli
2010-11-19 11:08 ` Mel Gorman
2010-11-17 16:22 ` [PATCH 8/8] mm: vmscan: Rename lumpy_mode to reclaim_mode Mel Gorman
2010-11-17 23:46 ` [PATCH 0/8] Use memory compaction instead of lumpy reclaim during high-order allocations Andrew Morton
2010-11-18 2:03 ` Rik van Riel
2010-11-18 8:12 ` Mel Gorman
2010-11-18 8:26 ` KAMEZAWA Hiroyuki
2010-11-18 8:38 ` Johannes Weiner
2010-11-18 9:20 ` Mel Gorman
2010-11-18 19:49 ` Andrew Morton
2010-11-19 10:48 ` Mel Gorman
2010-11-19 12:43 ` Theodore Tso
2010-11-19 14:05 ` Mel Gorman
2010-11-19 15:45 ` Ted Ts'o
2010-11-18 8:44 ` Mel Gorman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1290010969-26721-3-git-send-email-mel@csn.ul.ie \
--to=mel@csn.ul.ie \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=hannes@cmpxchg.org \
--cc=kosaki.motohiro@jp.fujitsu.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=riel@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).