From: Glauber Costa <glommer@openvz.org>
To: <linux-mm@kvack.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
Mel Gorman <mgorman@suse.de>, <cgroups@vger.kernel.org>,
<kamezawa.hiroyu@jp.fujitsu.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@suse.cz>,
hughd@google.com, Greg Thelen <gthelen@google.com>,
<linux-fsdevel@vger.kernel.org>,
Dave Chinner <dchinner@redhat.com>,
Glauber Costa <glommer@parallels.com>
Subject: [PATCH v5 13/31] fs: convert inode and dentry shrinking to be node aware
Date: Thu, 9 May 2013 10:06:30 +0400
Message-ID: <1368079608-5611-14-git-send-email-glommer@openvz.org>
In-Reply-To: <1368079608-5611-1-git-send-email-glommer@openvz.org>

From: Dave Chinner <dchinner@redhat.com>

Now that the shrinker is passing a nodemask in the scan control
structure, we can pass this to the generic LRU list code to isolate
reclaim to the lists on matching nodes.

This requires a small amount of refactoring of the LRU list API,
which might be best split out into a separate patch.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Mel Gorman <mgorman@suse.de>
---
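For review convenience, a minimal sketch (not part of the patch) of a
shrinker restricting its count and scan to the nodes requested by the
shrink control. struct example_cache, example_isolate(), example_count()
and example_scan() are illustrative names; the list_lru_*_nodemask()
calls, the list_lru_walk_cb callback type and the shrink_control fields
are the ones this patch introduces or uses:

	#include <linux/list_lru.h>
	#include <linux/shrinker.h>
	#include <linux/nodemask.h>

	struct example_cache {
		struct list_lru	lru;
		struct shrinker	shrinker;
	};

	static enum lru_status example_isolate(struct list_head *item,
					       spinlock_t *lru_lock, void *cb_arg)
	{
		struct list_head *dispose = cb_arg;

		/* called with the per-node lru lock held */
		list_move(item, dispose);
		return LRU_REMOVED;
	}

	static long example_count(struct shrinker *shrink,
				  struct shrink_control *sc)
	{
		struct example_cache *cache =
			container_of(shrink, struct example_cache, shrinker);

		/* count only the objects on the nodes being reclaimed */
		return list_lru_count_nodemask(&cache->lru, &sc->nodes_to_scan);
	}

	static long example_scan(struct shrinker *shrink,
				 struct shrink_control *sc)
	{
		struct example_cache *cache =
			container_of(shrink, struct example_cache, shrinker);
		LIST_HEAD(dispose);
		long freed;

		/* walk only the per-node lists for sc->nodes_to_scan */
		freed = list_lru_walk_nodemask(&cache->lru, example_isolate,
					       &dispose, sc->nr_to_scan,
					       &sc->nodes_to_scan);
		/* free the isolated objects here, outside the lru locks */
		return freed;
	}

The existing list_lru_count()/list_lru_walk() calls become inline
wrappers that pass &lru->active_nodes, so callers that are not node
aware keep their current behaviour.
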
fs/dcache.c | 8 +++++---
fs/inode.c | 7 ++++---
fs/internal.h | 6 ++++--
fs/super.c | 22 +++++++++++++---------
fs/xfs/xfs_super.c | 6 ++++--
include/linux/fs.h | 4 ++--
include/linux/list_lru.h | 21 ++++++++++++++++++---
lib/list_lru.c | 18 ++++++++++--------
8 files changed, 60 insertions(+), 32 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index a2062ef..e83a8c2 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -889,6 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
* prune_dcache_sb - shrink the dcache
* @sb: superblock
* @nr_to_scan : number of entries to try to free
+ * @nodes_to_walk: which nodes to scan for freeable entities
*
* Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
* done when we need more memory and called from the superblock shrinker
@@ -897,13 +898,14 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
* This function may fail to free any resources if all the dentries are in
* use.
*/
-long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan)
+long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
+ nodemask_t *nodes_to_walk)
{
LIST_HEAD(dispose);
long freed;
- freed = list_lru_walk(&sb->s_dentry_lru, dentry_lru_isolate,
- &dispose, nr_to_scan);
+ freed = list_lru_walk_nodemask(&sb->s_dentry_lru, dentry_lru_isolate,
+ &dispose, nr_to_scan, nodes_to_walk);
shrink_dentry_list(&dispose);
return freed;
}
diff --git a/fs/inode.c b/fs/inode.c
index ff66f49..3cf4cb0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -746,13 +746,14 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
* to trim from the LRU. Inodes to be freed are moved to a temporary list and
* then are freed outside inode_lock by dispose_list().
*/
-long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan)
+long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
+ nodemask_t *nodes_to_walk)
{
LIST_HEAD(freeable);
long freed;
- freed = list_lru_walk(&sb->s_inode_lru, inode_lru_isolate,
- &freeable, nr_to_scan);
+ freed = list_lru_walk_nodemask(&sb->s_inode_lru, inode_lru_isolate,
+ &freeable, nr_to_scan, nodes_to_walk);
dispose_list(&freeable);
return freed;
}
diff --git a/fs/internal.h b/fs/internal.h
index bb7b6e4..91900f2 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -110,7 +110,8 @@ extern int open_check_o_direct(struct file *f);
* inode.c
*/
extern spinlock_t inode_sb_list_lock;
-extern long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan);
+extern long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
+ nodemask_t *nodes_to_scan);
extern void inode_add_lru(struct inode *inode);
/*
@@ -126,7 +127,8 @@ extern int invalidate_inodes(struct super_block *, bool);
* dcache.c
*/
extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
-extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan);
+extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
+ nodemask_t *nodes_to_scan);
/*
* read_write.c
diff --git a/fs/super.c b/fs/super.c
index 66f5cde..5c7b879 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -75,10 +75,10 @@ static long super_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
return -1;
if (sb->s_op && sb->s_op->nr_cached_objects)
- fs_objects = sb->s_op->nr_cached_objects(sb);
+ fs_objects = sb->s_op->nr_cached_objects(sb, &sc->nodes_to_scan);
- inodes = list_lru_count(&sb->s_inode_lru);
- dentries = list_lru_count(&sb->s_dentry_lru);
+ inodes = list_lru_count_nodemask(&sb->s_inode_lru, &sc->nodes_to_scan);
+ dentries = list_lru_count_nodemask(&sb->s_dentry_lru, &sc->nodes_to_scan);
total_objects = dentries + inodes + fs_objects + 1;
/* proportion the scan between the caches */
@@ -89,13 +89,14 @@ static long super_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
* prune the dcache first as the icache is pinned by it, then
* prune the icache, followed by the filesystem specific caches
*/
- freed = prune_dcache_sb(sb, dentries);
- freed += prune_icache_sb(sb, inodes);
+ freed = prune_dcache_sb(sb, dentries, &sc->nodes_to_scan);
+ freed += prune_icache_sb(sb, inodes, &sc->nodes_to_scan);
if (fs_objects) {
fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
total_objects);
- freed += sb->s_op->free_cached_objects(sb, fs_objects);
+ freed += sb->s_op->free_cached_objects(sb, fs_objects,
+ &sc->nodes_to_scan);
}
drop_super(sb);
@@ -113,10 +114,13 @@ static long super_cache_count(struct shrinker *shrink, struct shrink_control *sc
return -1;
if (sb->s_op && sb->s_op->nr_cached_objects)
- total_objects = sb->s_op->nr_cached_objects(sb);
+ total_objects = sb->s_op->nr_cached_objects(sb,
+ &sc->nodes_to_scan);
- total_objects += list_lru_count(&sb->s_dentry_lru);
- total_objects += list_lru_count(&sb->s_inode_lru);
+ total_objects += list_lru_count_nodemask(&sb->s_dentry_lru,
+ &sc->nodes_to_scan);
+ total_objects += list_lru_count_nodemask(&sb->s_inode_lru,
+ &sc->nodes_to_scan);
total_objects = vfs_pressure_ratio(total_objects);
drop_super(sb);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 1ff991b..7fa60214 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1525,7 +1525,8 @@ xfs_fs_mount(
static long
xfs_fs_nr_cached_objects(
- struct super_block *sb)
+ struct super_block *sb,
+ nodemask_t *nodes_to_count)
{
return xfs_reclaim_inodes_count(XFS_M(sb));
}
@@ -1533,7 +1534,8 @@ xfs_fs_nr_cached_objects(
static long
xfs_fs_free_cached_objects(
struct super_block *sb,
- long nr_to_scan)
+ long nr_to_scan,
+ nodemask_t *nodes_to_scan)
{
return xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index fc47371..28eb053 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1609,8 +1609,8 @@ struct super_operations {
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
#endif
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
- long (*nr_cached_objects)(struct super_block *);
- long (*free_cached_objects)(struct super_block *, long);
+ long (*nr_cached_objects)(struct super_block *, nodemask_t *);
+ long (*free_cached_objects)(struct super_block *, long, nodemask_t *);
};
/*
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 262d9d8..88c3f0e 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -32,15 +32,30 @@ struct list_lru {
int list_lru_init(struct list_lru *lru);
int list_lru_add(struct list_lru *lru, struct list_head *item);
int list_lru_del(struct list_lru *lru, struct list_head *item);
-unsigned long list_lru_count(struct list_lru *lru);
+unsigned long
+list_lru_count_nodemask(struct list_lru *lru, nodemask_t *nodes_to_count);
+
+static inline unsigned long list_lru_count(struct list_lru *lru)
+{
+ return list_lru_count_nodemask(lru, &lru->active_nodes);
+}
+
typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
typedef void (*list_lru_dispose_cb)(struct list_head *dispose_list);
-unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
- void *cb_arg, long nr_to_walk);
+unsigned long list_lru_walk_nodemask(struct list_lru *lru, list_lru_walk_cb isolate,
+ void *cb_arg, long nr_to_walk, nodemask_t *nodes_to_walk);
+
+static inline unsigned long
+list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
+ void *cb_arg, long nr_to_walk)
+{
+ return list_lru_walk_nodemask(lru, isolate, cb_arg, nr_to_walk,
+ &lru->active_nodes);
+}
unsigned long
list_lru_dispose_all(struct list_lru *lru, list_lru_dispose_cb dispose);
diff --git a/lib/list_lru.c b/lib/list_lru.c
index 6a2ad81..319c4ba 100644
--- a/lib/list_lru.c
+++ b/lib/list_lru.c
@@ -54,13 +54,14 @@ list_lru_del(
EXPORT_SYMBOL_GPL(list_lru_del);
unsigned long
-list_lru_count(
- struct list_lru *lru)
+list_lru_count_nodemask(
+ struct list_lru *lru,
+ nodemask_t *nodes_to_count)
{
long count = 0;
int nid;
- for_each_node_mask(nid, lru->active_nodes) {
+ for_each_node_mask(nid, *nodes_to_count) {
struct list_lru_node *nlru = &lru->node[nid];
spin_lock(&nlru->lock);
@@ -71,7 +72,7 @@ list_lru_count(
return count;
}
-EXPORT_SYMBOL_GPL(list_lru_count);
+EXPORT_SYMBOL_GPL(list_lru_count_nodemask);
static unsigned long
list_lru_walk_node(
@@ -117,16 +118,17 @@ restart:
}
unsigned long
-list_lru_walk(
+list_lru_walk_nodemask(
struct list_lru *lru,
list_lru_walk_cb isolate,
void *cb_arg,
- long nr_to_walk)
+ long nr_to_walk,
+ nodemask_t *nodes_to_walk)
{
long isolated = 0;
int nid;
- for_each_node_mask(nid, lru->active_nodes) {
+ for_each_node_mask(nid, *nodes_to_walk) {
isolated += list_lru_walk_node(lru, nid, isolate,
cb_arg, &nr_to_walk);
if (nr_to_walk <= 0)
@@ -134,7 +136,7 @@ list_lru_walk(
}
return isolated;
}
-EXPORT_SYMBOL_GPL(list_lru_walk);
+EXPORT_SYMBOL_GPL(list_lru_walk_nodemask);
static unsigned long
list_lru_dispose_all_node(
--
1.8.1.4