linux-fsdevel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Glauber Costa <glommer@openvz.org>
To: <linux-mm@kvack.org>
Cc: <cgroups@vger.kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Greg Thelen <gthelen@google.com>,
	<kamezawa.hiroyu@jp.fujitsu.com>, Michal Hocko <mhocko@suse.cz>,
	Johannes Weiner <hannes@cmpxchg.org>,
	<linux-fsdevel@vger.kernel.org>,
	Dave Chinner <david@fromorbit.com>,
	Glauber Costa <glommer@openvz.org>,
	Dave Chinner <dchinner@redhat.com>, Mel Gorman <mgorman@suse.de>,
	Rik van Riel <riel@redhat.com>, Hugh Dickins <hughd@google.com>
Subject: [PATCH v6 26/31] memcg: scan cache objects hierarchically
Date: Sun, 12 May 2013 22:13:47 +0400	[thread overview]
Message-ID: <1368382432-25462-27-git-send-email-glommer@openvz.org> (raw)
In-Reply-To: <1368382432-25462-1-git-send-email-glommer@openvz.org>

When reaching shrink_slab, we should descend into the children of the
memcg, searching for objects that could be shrunk. This is true even if
the memcg does not have kmem limits on, since the kmem res_counter will
also be billed against the user res_counter of the parent.

It is possible that we will free objects without freeing any pages,
which will just harm the child groups without helping the parent group
at all. But at this point, we are basically prepared to pay that price.

Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/memcontrol.h |   6 ++
 mm/memcontrol.c            |  13 +++
 mm/vmscan.c                | 206 ++++++++++++++++++++++++++-------------------
 3 files changed, 139 insertions(+), 86 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3eeece8..c8b1412 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -441,6 +441,7 @@ static inline bool memcg_kmem_enabled(void)
 	return static_key_false(&memcg_kmem_enabled_key);
 }
 
+bool memcg_kmem_should_reclaim(struct mem_cgroup *memcg);
 bool memcg_kmem_is_active(struct mem_cgroup *memcg);
 
 /*
@@ -585,6 +586,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 }
 #else
 
+static inline bool memcg_kmem_should_reclaim(struct mem_cgroup *memcg)
+{
+	return false;
+}
+
 static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 {
 	return false;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c563550..b8980d1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3043,6 +3043,19 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
+bool memcg_kmem_should_reclaim(struct mem_cgroup *memcg)
+{
+	struct mem_cgroup *iter;
+
+	for_each_mem_cgroup_tree(iter, memcg) {
+		if (memcg_kmem_is_active(iter)) {
+			mem_cgroup_iter_break(memcg, iter);
+			return true;
+		}
+	}
+	return false;
+}
+
 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
 {
 	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 731e798..9c45e5c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -148,7 +148,7 @@ static bool global_reclaim(struct scan_control *sc)
 static bool has_kmem_reclaim(struct scan_control *sc)
 {
 	return !sc->target_mem_cgroup ||
-		memcg_kmem_is_active(sc->target_mem_cgroup);
+		memcg_kmem_should_reclaim(sc->target_mem_cgroup);
 }
 
 static unsigned long
@@ -209,6 +209,101 @@ void unregister_shrinker(struct shrinker *shrinker)
 EXPORT_SYMBOL(unregister_shrinker);
 
 #define SHRINK_BATCH 128
+unsigned long
+shrink_slab_one(struct shrinker *shrinker, struct shrink_control *shrinkctl,
+		unsigned long nr_pages_scanned, unsigned long lru_pages)
+{
+	unsigned long freed = 0;
+	unsigned long long delta;
+	long total_scan;
+	long max_pass;
+	long nr;
+	long new_nr;
+	long batch_size = shrinker->batch ? shrinker->batch
+					  : SHRINK_BATCH;
+
+	max_pass = shrinker->count_objects(shrinker, shrinkctl);
+	WARN_ON(max_pass < 0);
+	if (max_pass <= 0)
+		return 0;
+
+	/*
+	 * copy the current shrinker scan count into a local variable
+	 * and zero it so that other concurrent shrinker invocations
+	 * don't also do this scanning work.
+	 */
+	nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
+
+	total_scan = nr;
+	delta = (4 * nr_pages_scanned) / shrinker->seeks;
+	delta *= max_pass;
+	do_div(delta, lru_pages + 1);
+	total_scan += delta;
+	if (total_scan < 0) {
+		printk(KERN_ERR
+		"shrink_slab: %pF negative objects to delete nr=%ld\n",
+		       shrinker->scan_objects, total_scan);
+		total_scan = max_pass;
+	}
+
+	/*
+	 * We need to avoid excessive windup on filesystem shrinkers
+	 * due to large numbers of GFP_NOFS allocations causing the
+	 * shrinkers to return -1 all the time. This results in a large
+	 * nr being built up so when a shrink that can do some work
+	 * comes along it empties the entire cache due to nr >>>
+	 * max_pass.  This is bad for sustaining a working set in
+	 * memory.
+	 *
+	 * Hence only allow the shrinker to scan the entire cache when
+	 * a large delta change is calculated directly.
+	 */
+	if (delta < max_pass / 4)
+		total_scan = min(total_scan, max_pass / 2);
+
+	/*
+	 * Avoid risking looping forever due to too large nr value:
+	 * never try to free more than twice the estimate number of
+	 * freeable entries.
+	 */
+	if (total_scan > max_pass * 2)
+		total_scan = max_pass * 2;
+
+	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
+				nr_pages_scanned, lru_pages,
+				max_pass, delta, total_scan);
+
+	while (total_scan >= batch_size) {
+		long ret;
+
+		shrinkctl->nr_to_scan = batch_size;
+		ret = shrinker->scan_objects(shrinker, shrinkctl);
+		if (ret == -1)
+			break;
+		freed += ret;
+
+		count_vm_events(SLABS_SCANNED, batch_size);
+		total_scan -= batch_size;
+
+		cond_resched();
+	}
+
+	/*
+	 * move the unused scan count back into the shrinker in a
+	 * manner that handles concurrent updates. If we exhausted the
+	 * scan, there is no need to do an update.
+	 */
+	if (total_scan > 0)
+		new_nr = atomic_long_add_return(total_scan,
+				&shrinker->nr_in_batch);
+	else
+		new_nr = atomic_long_read(&shrinker->nr_in_batch);
+
+	trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
+
+	return freed;
+}
+
 /*
  * Call the shrink functions to age shrinkable caches
  *
@@ -234,6 +329,7 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl,
 {
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
+	struct mem_cgroup *root = shrinkctl->target_mem_cgroup;
 
 	if (nr_pages_scanned == 0)
 		nr_pages_scanned = SWAP_CLUSTER_MAX;
@@ -245,101 +341,39 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl,
 	}
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
-		unsigned long long delta;
-		long total_scan;
-		long max_pass;
-		long nr;
-		long new_nr;
-		long batch_size = shrinker->batch ? shrinker->batch
-						  : SHRINK_BATCH;
+		struct mem_cgroup *memcg;
 
 		/*
 		 * If we don't have a target mem cgroup, we scan them all.
 		 * Otherwise we will limit our scan to shrinkers marked as
 		 * memcg aware
 		 */
-		if (shrinkctl->target_mem_cgroup && !shrinker->memcg_shrinker)
-			continue;
-
-		max_pass = shrinker->count_objects(shrinker, shrinkctl);
-		WARN_ON(max_pass < 0);
-		if (max_pass <= 0)
+		if (root && !shrinker->memcg_shrinker)
 			continue;
 
-		/*
-		 * copy the current shrinker scan count into a local variable
-		 * and zero it so that other concurrent shrinker invocations
-		 * don't also do this scanning work.
-		 */
-		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
-
-		total_scan = nr;
-		delta = (4 * nr_pages_scanned) / shrinker->seeks;
-		delta *= max_pass;
-		do_div(delta, lru_pages + 1);
-		total_scan += delta;
-		if (total_scan < 0) {
-			printk(KERN_ERR
-			"shrink_slab: %pF negative objects to delete nr=%ld\n",
-			       shrinker->scan_objects, total_scan);
-			total_scan = max_pass;
-		}
-
-		/*
-		 * We need to avoid excessive windup on filesystem shrinkers
-		 * due to large numbers of GFP_NOFS allocations causing the
-		 * shrinkers to return -1 all the time. This results in a large
-		 * nr being built up so when a shrink that can do some work
-		 * comes along it empties the entire cache due to nr >>>
-		 * max_pass.  This is bad for sustaining a working set in
-		 * memory.
-		 *
-		 * Hence only allow the shrinker to scan the entire cache when
-		 * a large delta change is calculated directly.
-		 */
-		if (delta < max_pass / 4)
-			total_scan = min(total_scan, max_pass / 2);
-
-		/*
-		 * Avoid risking looping forever due to too large nr value:
-		 * never try to free more than twice the estimate number of
-		 * freeable entries.
-		 */
-		if (total_scan > max_pass * 2)
-			total_scan = max_pass * 2;
-
-		trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
-					nr_pages_scanned, lru_pages,
-					max_pass, delta, total_scan);
-
-		while (total_scan >= batch_size) {
-			long ret;
-
-			shrinkctl->nr_to_scan = batch_size;
-			ret = shrinker->scan_objects(shrinker, shrinkctl);
-
-			if (ret == -1)
-				break;
-			freed += ret;
-
-			count_vm_events(SLABS_SCANNED, batch_size);
-			total_scan -= batch_size;
-
-			cond_resched();
-		}
+		memcg = mem_cgroup_iter(root, NULL, NULL);
+		do {
+			/*
+			 * In a hierarchical chain, it might be that not all
+			 * memcgs are kmem active. kmemcg design mandates that
+			 * when one memcg is active, its children will be
+			 * active as well. But it is perfectly possible that
+			 * its parent is not.
+			 *
+			 * We also need to make sure we scan at least once, for
+			 * the global case. So if we don't have a target memcg
+			 * (saved in root), we proceed normally and expect to
+			 * break in the next round.
+			 */
 
-		/*
-		 * move the unused scan count back into the shrinker in a
-		 * manner that handles concurrent updates. If we exhausted the
-		 * scan, there is no need to do an update.
-		 */
-		if (total_scan > 0)
-			new_nr = atomic_long_add_return(total_scan,
-					&shrinker->nr_in_batch);
-		else
-			new_nr = atomic_long_read(&shrinker->nr_in_batch);
+			if (!root || memcg_kmem_is_active(memcg))
+				freed += shrink_slab_one(shrinker, shrinkctl,
+						 nr_pages_scanned, lru_pages);
+			memcg = mem_cgroup_iter(root, memcg, NULL);
+		} while (memcg);
 
-		trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
+		/* restore original state */
+		shrinkctl->target_mem_cgroup = root;
 	}
 	up_read(&shrinker_rwsem);
 out:
-- 
1.8.1.4


  parent reply	other threads:[~2013-05-12 18:14 UTC|newest]

Thread overview: 63+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-05-12 18:13 [PATCH v6 00/31] kmemcg shrinkers Glauber Costa
2013-05-12 18:13 ` [PATCH v6 01/31] super: fix calculation of shrinkable objects for small numbers Glauber Costa
2013-05-12 18:13 ` [PATCH v6 02/31] dcache: convert dentry_stat.nr_unused to per-cpu counters Glauber Costa
2013-05-12 18:13 ` [PATCH v6 03/31] dentry: move to per-sb LRU locks Glauber Costa
2013-05-12 18:13 ` [PATCH v6 04/31] dcache: remove dentries from LRU before putting on dispose list Glauber Costa
2013-05-14  2:02   ` Dave Chinner
2013-05-14  5:46   ` [PATCH v7 " Dave Chinner
2013-05-14  7:10     ` Dave Chinner
2013-05-14 12:43     ` Glauber Costa
     [not found]       ` <51923158.7040002-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org>
2013-05-14 20:32         ` Dave Chinner
2013-05-12 18:13 ` [PATCH v6 05/31] mm: new shrinker API Glauber Costa
2013-05-12 18:13 ` [PATCH v6 06/31] shrinker: convert superblock shrinkers to new API Glauber Costa
2013-05-12 18:13 ` [PATCH v6 07/31] list: add a new LRU list type Glauber Costa
2013-05-13  9:25   ` Mel Gorman
2013-05-12 18:13 ` [PATCH v6 08/31] inode: convert inode lru list to generic lru list code Glauber Costa
2013-05-12 18:13 ` [PATCH v6 09/31] dcache: convert to use new lru list infrastructure Glauber Costa
     [not found]   ` <1368382432-25462-10-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
2013-05-14  6:59     ` Dave Chinner
2013-05-14  7:50       ` Glauber Costa
2013-05-14 14:01       ` Glauber Costa
2013-05-12 18:13 ` [PATCH v6 10/31] list_lru: per-node " Glauber Costa
2013-05-12 18:13 ` [PATCH v6 11/31] shrinker: add node awareness Glauber Costa
2013-05-12 18:13 ` [PATCH v6 12/31] fs: convert inode and dentry shrinking to be node aware Glauber Costa
     [not found]   ` <1368382432-25462-13-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
2013-05-14  9:52     ` Dave Chinner
2013-05-15 15:27       ` Glauber Costa
2013-05-16  0:02         ` Dave Chinner
2013-05-16  8:03           ` Glauber Costa
2013-05-16 19:14           ` Glauber Costa
2013-05-17  0:51             ` Dave Chinner
2013-05-17  7:29               ` Glauber Costa
     [not found]                 ` <5195DC59.8000205-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org>
2013-05-17 14:49                   ` Glauber Costa
     [not found]                     ` <51964381.8010406-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org>
2013-05-17 22:54                       ` Glauber Costa
2013-05-18  3:39                     ` Dave Chinner
2013-05-18  7:20                       ` Glauber Costa
2013-05-12 18:13 ` [PATCH v6 13/31] xfs: convert buftarg LRU to generic code Glauber Costa
2013-05-12 18:13 ` [PATCH v6 14/31] xfs: convert dquot cache lru to list_lru Glauber Costa
2013-05-12 18:13 ` [PATCH v6 15/31] fs: convert fs shrinkers to new scan/count API Glauber Costa
2013-05-13  6:12   ` Artem Bityutskiy
     [not found]     ` <1368425530.3208.13.camel-Bxnoe/o8FG+Ef9UqXRslZEEOCMrvLtNR@public.gmane.org>
2013-05-13  7:28       ` Glauber Costa
     [not found]         ` <51909610.1010801-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org>
2013-05-13  7:43           ` Artem Bityutskiy
2013-05-13 10:36   ` Jan Kara
2013-05-12 18:13 ` [PATCH v6 16/31] drivers: convert shrinkers to new count/scan API Glauber Costa
2013-05-12 18:13 ` [PATCH v6 17/31] i915: bail out earlier when shrinker cannot acquire mutex Glauber Costa
2013-05-12 18:13 ` [PATCH v6 18/31] shrinker: convert remaining shrinkers to count/scan API Glauber Costa
2013-05-12 18:13 ` [PATCH v6 19/31] hugepage: convert huge zero page shrinker to new shrinker API Glauber Costa
2013-05-12 18:13 ` [PATCH v6 20/31] shrinker: Kill old ->shrink API Glauber Costa
2013-05-12 18:13 ` [PATCH v6 21/31] vmscan: also shrink slab in memcg pressure Glauber Costa
2013-05-12 18:13 ` [PATCH v6 22/31] memcg,list_lru: duplicate LRUs upon kmemcg creation Glauber Costa
2013-05-12 18:13 ` [PATCH v6 23/31] lru: add an element to a memcg list Glauber Costa
2013-05-12 18:13 ` [PATCH v6 24/31] list_lru: per-memcg walks Glauber Costa
2013-05-12 18:13 ` [PATCH v6 25/31] memcg: per-memcg kmem shrinking Glauber Costa
2013-05-12 18:13 ` Glauber Costa [this message]
2013-05-12 18:13 ` [PATCH v6 27/31] vmscan: take at least one pass with shrinkers Glauber Costa
2013-05-12 18:13 ` [PATCH v6 28/31] super: targeted memcg reclaim Glauber Costa
2013-05-12 18:13 ` [PATCH v6 29/31] memcg: move initialization to memcg creation Glauber Costa
2013-05-12 18:13 ` [PATCH v6 30/31] vmpressure: in-kernel notifications Glauber Costa
2013-05-12 18:13 ` [PATCH v6 31/31] memcg: reap dead memcgs upon global memory pressure Glauber Costa
2013-05-13  7:14 ` [PATCH v6 00/31] kmemcg shrinkers Dave Chinner
2013-05-13  7:21   ` Dave Chinner
2013-05-13  8:00   ` Glauber Costa
     [not found]     ` <51909D84.7040800-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org>
2013-05-14  1:48       ` Dave Chinner
2013-05-14  5:22         ` Dave Chinner
2013-05-14  5:45           ` Dave Chinner
2013-05-14  7:38           ` Glauber Costa

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1368382432-25462-27-git-send-email-glommer@openvz.org \
    --to=glommer@openvz.org \
    --cc=akpm@linux-foundation.org \
    --cc=cgroups@vger.kernel.org \
    --cc=david@fromorbit.com \
    --cc=dchinner@redhat.com \
    --cc=gthelen@google.com \
    --cc=hannes@cmpxchg.org \
    --cc=hughd@google.com \
    --cc=kamezawa.hiroyu@jp.fujitsu.com \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mgorman@suse.de \
    --cc=mhocko@suse.cz \
    --cc=riel@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).