From: Konstantin Khlebnikov <khlebnikov@openvz.org>
To: Andrew Morton <akpm@linux-foundation.org>,
Hugh Dickins <hughd@google.com>,
Johannes Weiner <jweiner@redhat.com>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH 3/7 v2] mm: rework __isolate_lru_page() file/anon filter
Date: Sat, 03 Mar 2012 13:16:48 +0400
Message-ID: <20120303091327.17599.80336.stgit@zurg>
In-Reply-To: <20120229091547.29236.28230.stgit@zurg>
This patch adds file/anon filter bits to isolate_mode_t, which allows the
checks in __isolate_lru_page() to be simplified.
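
For illustration only (not part of this patch): a minimal sketch of how a
caller might compose the new filter bits. build_isolate_mode() is a
hypothetical helper; it mirrors the shrink_inactive_list() change below.

	/*
	 * Hypothetical helper, illustration only: pick one LRU list and
	 * one page type by OR-ing the isolate_mode_t filter bits.
	 */
	static isolate_mode_t build_isolate_mode(bool active, bool file)
	{
		isolate_mode_t mode = active ? ISOLATE_ACTIVE : ISOLATE_INACTIVE;

		mode |= file ? ISOLATE_FILE : ISOLATE_ANON;
		return mode;
	}
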
v2:
* use switch () instead of if ()
* fix lumpy-reclaim isolation mode
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
---
include/linux/mmzone.h | 4 ++++
include/linux/swap.h | 2 +-
mm/compaction.c | 5 +++--
mm/vmscan.c | 49 +++++++++++++++++++++++++++++++-----------------
4 files changed, 40 insertions(+), 20 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5f1e4ee..e60dcbd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -192,6 +192,10 @@ struct lruvec {
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10)
+/* Isolate swap-backed pages */
+#define ISOLATE_ANON ((__force isolate_mode_t)0x20)
+/* Isolate file-backed pages */
+#define ISOLATE_FILE ((__force isolate_mode_t)0x40)
/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ba2c8d7..dc6e6a3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -254,7 +254,7 @@ static inline void lru_cache_add_file(struct page *page)
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
diff --git a/mm/compaction.c b/mm/compaction.c
index 74a8c82..cc054f7 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -261,7 +261,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = &cc->migratepages;
- isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
+ isolate_mode_t mode = ISOLATE_ACTIVE | ISOLATE_INACTIVE |
+ ISOLATE_FILE | ISOLATE_ANON;
/* Do not scan outside zone boundaries */
low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
@@ -375,7 +376,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
mode |= ISOLATE_ASYNC_MIGRATE;
/* Try isolate the page */
- if (__isolate_lru_page(page, mode, 0) != 0)
+ if (__isolate_lru_page(page, mode) != 0)
continue;
VM_BUG_ON(PageTransCompound(page));
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7de3acc..cce1e14 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1029,28 +1029,35 @@ keep_lumpy:
*
* returns 0 on success, -ve errno on failure.
*/
-int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
- bool all_lru_mode;
int ret = -EINVAL;
/* Only take pages on the LRU. */
if (!PageLRU(page))
return ret;
- all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
- (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
-
- /*
- * When checking the active state, we need to be sure we are
- * dealing with comparible boolean values. Take the logical not
- * of each.
- */
- if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
- return ret;
+ switch (mode & (ISOLATE_ACTIVE | ISOLATE_INACTIVE)) {
+ case ISOLATE_ACTIVE:
+ if (!PageActive(page))
+ return ret;
+ break;
+ case ISOLATE_INACTIVE:
+ if (PageActive(page))
+ return ret;
+ break;
+ }
- if (!all_lru_mode && !!page_is_file_cache(page) != file)
- return ret;
+ switch (mode & (ISOLATE_FILE | ISOLATE_ANON)) {
+ case ISOLATE_FILE:
+ if (!page_is_file_cache(page))
+ return ret;
+ break;
+ case ISOLATE_ANON:
+ if (page_is_file_cache(page))
+ return ret;
+ break;
+ }
/*
* When this function is being called for lumpy reclaim, we
@@ -1160,7 +1167,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
VM_BUG_ON(!PageLRU(page));
- switch (__isolate_lru_page(page, mode, file)) {
+ switch (__isolate_lru_page(page, mode)) {
case 0:
mem_cgroup_lru_del(page);
list_move(&page->lru, dst);
@@ -1218,7 +1225,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
!PageSwapCache(cursor_page))
break;
- if (__isolate_lru_page(cursor_page, mode, file) == 0) {
+ if (__isolate_lru_page(cursor_page, mode) == 0) {
unsigned int isolated_pages;
mem_cgroup_lru_del(cursor_page);
@@ -1503,7 +1510,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
set_reclaim_mode(priority, sc, false);
if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
- isolate_mode |= ISOLATE_ACTIVE;
+ isolate_mode |= ISOLATE_ACTIVE | ISOLATE_FILE | ISOLATE_ANON;
lru_add_drain();
@@ -1511,6 +1518,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
isolate_mode |= ISOLATE_UNMAPPED;
if (!sc->may_writepage)
isolate_mode |= ISOLATE_CLEAN;
+ if (file)
+ isolate_mode |= ISOLATE_FILE;
+ else
+ isolate_mode |= ISOLATE_ANON;
spin_lock_irq(&zone->lru_lock);
@@ -1677,6 +1688,10 @@ static void shrink_active_list(unsigned long nr_to_scan,
isolate_mode |= ISOLATE_UNMAPPED;
if (!sc->may_writepage)
isolate_mode |= ISOLATE_CLEAN;
+ if (file)
+ isolate_mode |= ISOLATE_FILE;
+ else
+ isolate_mode |= ISOLATE_ANON;
spin_lock_irq(&zone->lru_lock);
--
Thread overview: 41+ messages
2012-02-29 9:15 [PATCH v4 ch1 0/7] mm: some cleanup/rework before lru_lock splitting Konstantin Khlebnikov
2012-02-29 9:15 ` [PATCH 1/7] mm/memcg: scanning_global_lru means mem_cgroup_disabled Konstantin Khlebnikov
2012-03-02 5:12 ` KAMEZAWA Hiroyuki
2012-03-06 11:46 ` Glauber Costa
2012-02-29 9:15 ` [PATCH 2/7] mm/memcg: move reclaim_stat into lruvec Konstantin Khlebnikov
2012-03-02 5:14 ` KAMEZAWA Hiroyuki
2012-02-29 9:15 ` [PATCH 3/7] mm: rework __isolate_lru_page() file/anon filter Konstantin Khlebnikov
2012-03-02 5:17 ` KAMEZAWA Hiroyuki
2012-03-02 5:51 ` Konstantin Khlebnikov
2012-03-02 8:17 ` KAMEZAWA Hiroyuki
2012-03-02 8:53 ` Konstantin Khlebnikov
2012-03-06 11:57 ` Glauber Costa
2012-03-06 12:53 ` Konstantin Khlebnikov
2012-03-03 0:22 ` Hugh Dickins
2012-03-03 8:27 ` Konstantin Khlebnikov
2012-03-03 9:20 ` Konstantin Khlebnikov
2012-03-03 9:16 ` Konstantin Khlebnikov [this message]
2012-03-05 0:27 ` [PATCH 3/7 v2] " KAMEZAWA Hiroyuki
2012-03-07 3:22 ` Hugh Dickins
2012-03-08 5:30 ` KAMEZAWA Hiroyuki
2012-03-09 2:06 ` Hugh Dickins
2012-03-09 7:16 ` Konstantin Khlebnikov
2012-03-10 0:04 ` Hugh Dickins
2012-03-10 6:55 ` Konstantin Khlebnikov
2012-03-10 9:46 ` Konstantin Khlebnikov
2012-03-15 1:47 ` Hugh Dickins
2012-03-15 6:03 ` Konstantin Khlebnikov
2012-03-15 23:58 ` Hugh Dickins
2012-02-29 9:15 ` [PATCH 4/7] mm: push lru index into shrink_[in]active_list() Konstantin Khlebnikov
2012-03-02 5:21 ` KAMEZAWA Hiroyuki
2012-03-03 0:24 ` Hugh Dickins
2012-02-29 9:15 ` [PATCH 5/7] mm: rework reclaim_stat counters Konstantin Khlebnikov
2012-03-02 5:28 ` KAMEZAWA Hiroyuki
2012-03-02 6:11 ` Konstantin Khlebnikov
2012-03-02 8:03 ` KAMEZAWA Hiroyuki
2012-02-29 9:16 ` [PATCH 6/7] mm/memcg: rework inactive_ratio calculation Konstantin Khlebnikov
2012-03-02 5:31 ` KAMEZAWA Hiroyuki
2012-03-02 6:24 ` Konstantin Khlebnikov
2012-03-08 5:36 ` KAMEZAWA Hiroyuki
2012-02-29 9:16 ` [PATCH 7/7] mm/memcg: use vm_swappiness from target memory cgroup Konstantin Khlebnikov
2012-03-02 5:32 ` KAMEZAWA Hiroyuki