From: Glauber Costa <glommer@openvz.org>
To: <linux-mm@kvack.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
Mel Gorman <mgorman@suse.de>, <cgroups@vger.kernel.org>,
<kamezawa.hiroyu@jp.fujitsu.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@suse.cz>,
hughd@google.com, Greg Thelen <gthelen@google.com>,
<linux-fsdevel@vger.kernel.org>,
Glauber Costa <glommer@openvz.org>,
Dave Chinner <dchinner@redhat.com>,
Rik van Riel <riel@redhat.com>
Subject: [PATCH v5 24/31] list_lru: per-memcg walks
Date: Thu, 9 May 2013 10:06:41 +0400 [thread overview]
Message-ID: <1368079608-5611-25-git-send-email-glommer@openvz.org> (raw)
In-Reply-To: <1368079608-5611-1-git-send-email-glommer@openvz.org>
This patch extends the list_lru interfaces to allow for a memcg
parameter. Because most of its users won't need it, instead of
modifying the function signatures we create a new set of _memcg()
functions and write the old API on top of that.
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/list_lru.h | 26 +++++++++---
include/linux/memcontrol.h | 2 +
lib/list_lru.c | 102 +++++++++++++++++++++++++++++++++++----------
3 files changed, 102 insertions(+), 28 deletions(-)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 1d2a618..50147c9 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -89,22 +89,36 @@ static inline int list_lru_init_memcg(struct list_lru *lru)
int list_lru_add(struct list_lru *lru, struct list_head *item);
int list_lru_del(struct list_lru *lru, struct list_head *item);
-unsigned long
-list_lru_count_nodemask(struct list_lru *lru, nodemask_t *nodes_to_count);
+
+unsigned long list_lru_count_nodemask_memcg(struct list_lru *lru,
+ nodemask_t *nodes_to_count, struct mem_cgroup *memcg);
+
+static inline unsigned long
+list_lru_count_nodemask(struct list_lru *lru, nodemask_t *nodes_to_count)
+{
+ return list_lru_count_nodemask_memcg(lru, nodes_to_count, NULL);
+}
static inline unsigned long list_lru_count(struct list_lru *lru)
{
return list_lru_count_nodemask(lru, &lru->active_nodes);
}
-
typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
-
typedef void (*list_lru_dispose_cb)(struct list_head *dispose_list);
-unsigned long list_lru_walk_nodemask(struct list_lru *lru, list_lru_walk_cb isolate,
- void *cb_arg, long nr_to_walk, nodemask_t *nodes_to_walk);
+unsigned long list_lru_walk_nodemask_memcg(struct list_lru *lru,
+ list_lru_walk_cb isolate, void *cb_arg, long nr_to_walk,
+ nodemask_t *nodes_to_walk, struct mem_cgroup *memcg);
+
+static inline unsigned long list_lru_walk_nodemask(struct list_lru *lru,
+	list_lru_walk_cb isolate, void *cb_arg, long nr_to_walk,
+	nodemask_t *nodes_to_walk)
+{
+	/* Forward the caller's nodemask; NULL memcg selects the global LRU. */
+	return list_lru_walk_nodemask_memcg(lru, isolate, cb_arg, nr_to_walk,
+					    nodes_to_walk, NULL);
+}
static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1e74610..6dc1d7a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -592,6 +592,8 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
+#define memcg_limited_groups_array_size 0
+
static inline bool memcg_kmem_enabled(void)
{
return false;
diff --git a/lib/list_lru.c b/lib/list_lru.c
index b65e48d..da9b837 100644
--- a/lib/list_lru.c
+++ b/lib/list_lru.c
@@ -72,6 +72,23 @@ memcg_kmem_lru_of_page(struct list_lru *lru, struct page *page)
return lru_node_of_index(lru, memcg_id, nid);
}
+/*
+ * This helper will loop through all node-data in the LRU, either global or
+ * per-memcg. If memcg is either not present or not used,
+ * memcg_limited_groups_array_size will be 0. _idx starts at -1, and it will
+ * still be allowed to execute once.
+ *
+ * By convention, for _idx = -1 the global node info should be used.
+ * After that, we will go through each of the memcgs, starting at 0.
+ *
+ * We don't need any kind of locking for the loop because
+ * memcg_limited_groups_array_size can only grow, gaining new fields at the
+ * end. The old ones are just copied, and any interesting manipulations happen
+ * in the node list itself, and we already lock the list.
+ */
+#define for_each_memcg_lru_index(_idx) \
+ for ((_idx) = -1; ((_idx) < memcg_limited_groups_array_size); (_idx)++)
+
int
list_lru_add(
struct list_lru *lru,
@@ -131,15 +148,29 @@ list_lru_del(
EXPORT_SYMBOL_GPL(list_lru_del);
unsigned long
-list_lru_count_nodemask(
+list_lru_count_nodemask_memcg(
struct list_lru *lru,
- nodemask_t *nodes_to_count)
+ nodemask_t *nodes_to_count,
+ struct mem_cgroup *memcg)
{
long count = 0;
int nid;
+ nodemask_t nodes;
+ int memcg_id = -1;
- for_each_node_mask(nid, *nodes_to_count) {
- struct list_lru_node *nlru = &lru->node[nid];
+ if (memcg && memcg_kmem_is_active(memcg))
+ memcg_id = memcg_cache_id(memcg);
+ /*
+ * Conservative code can call this setting nodes with node_setall.
+ * This will generate an out of bound access for memcg.
+ */
+ nodes_and(nodes, *nodes_to_count, node_online_map);
+
+ for_each_node_mask(nid, nodes) {
+ struct list_lru_node *nlru;
+ nlru = lru_node_of_index(lru, memcg_id, nid);
+ if (!nlru)
+ continue;
spin_lock(&nlru->lock);
BUG_ON(nlru->nr_items < 0);
@@ -149,17 +180,17 @@ list_lru_count_nodemask(
return count;
}
-EXPORT_SYMBOL_GPL(list_lru_count_nodemask);
+EXPORT_SYMBOL_GPL(list_lru_count_nodemask_memcg);
static unsigned long
list_lru_walk_node(
struct list_lru *lru,
+ struct list_lru_node *nlru,
int nid,
list_lru_walk_cb isolate,
void *cb_arg,
long *nr_to_walk)
{
- struct list_lru_node *nlru = &lru->node[nid];
struct list_head *item, *n;
unsigned long isolated = 0;
@@ -196,25 +227,41 @@ restart:
}
unsigned long
-list_lru_walk_nodemask(
+list_lru_walk_nodemask_memcg(
struct list_lru *lru,
list_lru_walk_cb isolate,
void *cb_arg,
long nr_to_walk,
- nodemask_t *nodes_to_walk)
+ nodemask_t *nodes_to_walk,
+ struct mem_cgroup *memcg)
{
long isolated = 0;
int nid;
+ nodemask_t nodes;
+ int memcg_id = -1;
+ struct list_lru_node *nlru;
+
+ if (memcg && memcg_kmem_is_active(memcg))
+ memcg_id = memcg_cache_id(memcg);
+ /*
+ * Conservative code can call this setting nodes with node_setall.
+ * This will generate an out of bound access for memcg.
+ */
+ nodes_and(nodes, *nodes_to_walk, node_online_map);
+
+ for_each_node_mask(nid, nodes) {
+ nlru = lru_node_of_index(lru, memcg_id, nid);
+ if (!nlru)
+ continue;
- for_each_node_mask(nid, *nodes_to_walk) {
- isolated += list_lru_walk_node(lru, nid, isolate,
+ isolated += list_lru_walk_node(lru, nlru, nid, isolate,
cb_arg, &nr_to_walk);
if (nr_to_walk <= 0)
break;
}
return isolated;
}
-EXPORT_SYMBOL_GPL(list_lru_walk_nodemask);
+EXPORT_SYMBOL_GPL(list_lru_walk_nodemask_memcg);
static unsigned long
list_lru_dispose_all_node(
@@ -222,23 +269,34 @@ list_lru_dispose_all_node(
int nid,
list_lru_dispose_cb dispose)
{
- struct list_lru_node *nlru = &lru->node[nid];
+ struct list_lru_node *nlru;
LIST_HEAD(dispose_list);
unsigned long disposed = 0;
+ int idx;
- spin_lock(&nlru->lock);
- while (!list_empty(&nlru->list)) {
- list_splice_init(&nlru->list, &dispose_list);
- disposed += nlru->nr_items;
- nlru->nr_items = 0;
- node_clear(nid, lru->active_nodes);
- spin_unlock(&nlru->lock);
-
- dispose(&dispose_list);
+ for_each_memcg_lru_index(idx) {
+ nlru = lru_node_of_index(lru, idx, nid);
+ if (!nlru)
+ continue;
spin_lock(&nlru->lock);
+ while (!list_empty(&nlru->list)) {
+ list_splice_init(&nlru->list, &dispose_list);
+
+ if (atomic_long_sub_and_test(nlru->nr_items,
+ &lru->node_totals[nid]))
+ node_clear(nid, lru->active_nodes);
+ disposed += nlru->nr_items;
+ nlru->nr_items = 0;
+ spin_unlock(&nlru->lock);
+
+ dispose(&dispose_list);
+
+ spin_lock(&nlru->lock);
+ }
+ spin_unlock(&nlru->lock);
}
- spin_unlock(&nlru->lock);
+
return disposed;
}
--
1.8.1.4
next prev parent reply other threads:[~2013-05-09 6:07 UTC|newest]
Thread overview: 57+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-05-09 6:06 [PATCH v5 00/31] kmemcg shrinkers Glauber Costa
2013-05-09 6:06 ` [PATCH v5 01/31] super: fix calculation of shrinkable objects for small numbers Glauber Costa
2013-05-09 6:06 ` [PATCH v5 02/31] vmscan: take at least one pass with shrinkers Glauber Costa
2013-05-09 11:12 ` Mel Gorman
[not found] ` <20130509111226.GR11497-l3A5Bk7waGM@public.gmane.org>
2013-05-09 11:28 ` Glauber Costa
[not found] ` <518B884C.9090704-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org>
2013-05-09 11:35 ` Glauber Costa
2013-05-09 6:06 ` [PATCH v5 03/31] dcache: convert dentry_stat.nr_unused to per-cpu counters Glauber Costa
2013-05-09 6:06 ` [PATCH v5 04/31] dentry: move to per-sb LRU locks Glauber Costa
[not found] ` <1368079608-5611-5-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
2013-05-10 5:29 ` Dave Chinner
2013-05-10 8:16 ` Dave Chinner
2013-05-09 6:06 ` [PATCH v5 05/31] dcache: remove dentries from LRU before putting on dispose list Glauber Costa
2013-05-09 6:06 ` [PATCH v5 06/31] mm: new shrinker API Glauber Costa
2013-05-09 13:30 ` Mel Gorman
[not found] ` <1368079608-5611-1-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
2013-05-09 6:06 ` [PATCH v5 07/31] shrinker: convert superblock shrinkers to new API Glauber Costa
2013-05-09 13:33 ` Mel Gorman
2013-05-09 6:06 ` [PATCH v5 08/31] list: add a new LRU list type Glauber Costa
[not found] ` <1368079608-5611-9-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
2013-05-09 13:37 ` Mel Gorman
[not found] ` <20130509133742.GW11497-l3A5Bk7waGM@public.gmane.org>
2013-05-09 21:02 ` Glauber Costa
2013-05-10 9:21 ` Mel Gorman
2013-05-10 9:56 ` Glauber Costa
[not found] ` <518CC44D.1020409-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org>
2013-05-10 10:01 ` Mel Gorman
2013-05-09 6:06 ` [PATCH v5 09/31] inode: convert inode lru list to generic lru list code Glauber Costa
2013-05-09 6:06 ` [PATCH v5 10/31] dcache: convert to use new lru list infrastructure Glauber Costa
2013-05-09 6:06 ` [PATCH v5 11/31] list_lru: per-node " Glauber Costa
2013-05-09 13:42 ` Mel Gorman
[not found] ` <20130509134246.GX11497-l3A5Bk7waGM@public.gmane.org>
2013-05-09 21:05 ` Glauber Costa
2013-05-09 6:06 ` [PATCH v5 12/31] shrinker: add node awareness Glauber Costa
2013-05-09 6:06 ` [PATCH v5 13/31] fs: convert inode and dentry shrinking to be node aware Glauber Costa
2013-05-09 6:06 ` [PATCH v5 14/31] xfs: convert buftarg LRU to generic code Glauber Costa
[not found] ` <1368079608-5611-15-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
2013-05-09 13:43 ` Mel Gorman
2013-05-09 6:06 ` [PATCH v5 15/31] xfs: convert dquot cache lru to list_lru Glauber Costa
2013-05-09 6:06 ` [PATCH v5 16/31] fs: convert fs shrinkers to new scan/count API Glauber Costa
2013-05-09 6:06 ` [PATCH v5 17/31] drivers: convert shrinkers to new count/scan API Glauber Costa
2013-05-09 13:52 ` Mel Gorman
[not found] ` <20130509135209.GZ11497-l3A5Bk7waGM@public.gmane.org>
2013-05-09 21:19 ` Glauber Costa
2013-05-10 9:00 ` Mel Gorman
2013-05-09 6:06 ` [PATCH v5 18/31] shrinker: convert remaining shrinkers to " Glauber Costa
2013-05-09 6:06 ` [PATCH v5 19/31] hugepage: convert huge zero page shrinker to new shrinker API Glauber Costa
[not found] ` <1368079608-5611-20-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
2013-05-10 1:24 ` Kirill A. Shutemov
2013-05-09 6:06 ` [PATCH v5 20/31] shrinker: Kill old ->shrink API Glauber Costa
2013-05-09 13:53 ` Mel Gorman
2013-05-09 6:06 ` [PATCH v5 21/31] vmscan: also shrink slab in memcg pressure Glauber Costa
2013-05-09 6:06 ` [PATCH v5 22/31] memcg,list_lru: duplicate LRUs upon kmemcg creation Glauber Costa
2013-05-09 6:06 ` [PATCH v5 23/31] lru: add an element to a memcg list Glauber Costa
2013-05-09 6:06 ` Glauber Costa [this message]
2013-05-09 6:06 ` [PATCH v5 25/31] memcg: per-memcg kmem shrinking Glauber Costa
2013-05-09 6:06 ` [PATCH v5 26/31] memcg: scan cache objects hierarchically Glauber Costa
2013-05-09 6:06 ` [PATCH v5 27/31] super: targeted memcg reclaim Glauber Costa
2013-05-09 6:06 ` [PATCH v5 28/31] memcg: move initialization to memcg creation Glauber Costa
2013-05-09 6:06 ` [PATCH v5 29/31] vmpressure: in-kernel notifications Glauber Costa
2013-05-09 6:06 ` [PATCH v5 30/31] memcg: reap dead memcgs upon global memory pressure Glauber Costa
2013-05-09 6:06 ` [PATCH v5 31/31] memcg: debugging facility to access dangling memcgs Glauber Costa
2013-05-09 10:55 ` [PATCH v5 00/31] kmemcg shrinkers Mel Gorman
[not found] ` <20130509105519.GQ11497-l3A5Bk7waGM@public.gmane.org>
2013-05-09 11:34 ` Glauber Costa
2013-05-09 13:18 ` Dave Chinner
2013-05-09 14:03 ` Mel Gorman
[not found] ` <20130509140311.GB11497-l3A5Bk7waGM@public.gmane.org>
2013-05-09 21:24 ` Glauber Costa
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1368079608-5611-25-git-send-email-glommer@openvz.org \
--to=glommer@openvz.org \
--cc=akpm@linux-foundation.org \
--cc=cgroups@vger.kernel.org \
--cc=dchinner@redhat.com \
--cc=gthelen@google.com \
--cc=hannes@cmpxchg.org \
--cc=hughd@google.com \
--cc=kamezawa.hiroyu@jp.fujitsu.com \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mgorman@suse.de \
--cc=mhocko@suse.cz \
--cc=riel@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).