From mboxrd@z Thu Jan  1 00:00:00 1970
From: Glauber Costa
Subject: [PATCH v6 21/31] vmscan: also shrink slab in memcg pressure
Date: Sun, 12 May 2013 22:13:42 +0400
Message-ID: <1368382432-25462-22-git-send-email-glommer@openvz.org>
References: <1368382432-25462-1-git-send-email-glommer@openvz.org>
In-Reply-To: <1368382432-25462-1-git-send-email-glommer@openvz.org>
Cc: Andrew Morton, Greg Thelen, Michal Hocko, Johannes Weiner,
	Dave Chinner, Glauber Costa, Mel Gorman, Rik van Riel,
	Hugh Dickins
Sender: linux-fsdevel-owner@vger.kernel.org

Without the surrounding infrastructure, this patch is a bit of a hammer:
it will basically shrink objects from all memcgs under memcg pressure.
At least, however, we will keep the scan limited to the shrinkers marked
as memcg-aware. Future patches will implement the in-shrinker logic to
filter objects based on their memcg association.

Signed-off-by: Glauber Costa
Cc: Dave Chinner
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Hugh Dickins
Cc: Kamezawa Hiroyuki
Cc: Andrew Morton
---
 include/linux/memcontrol.h | 17 +++++++++++++++++
 include/linux/shrinker.h   |  4 ++++
 mm/memcontrol.c            | 16 +++++++++++++++-
 mm/vmscan.c                | 46 +++++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 79 insertions(+), 4 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7b4d9d7..489c6d7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -200,6 +200,9 @@ void mem_cgroup_split_huge_fixup(struct page *head);
 bool mem_cgroup_bad_page_check(struct page *page);
 void mem_cgroup_print_bad_page(struct page *page);
 #endif
+
+unsigned long
+memcg_zone_reclaimable_pages(struct mem_cgroup *memcg, struct zone *zone);
 #else /* CONFIG_MEMCG */
 struct mem_cgroup;
 
@@ -378,6 +381,12 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
 				struct page *newpage)
 {
 }
+
+static inline unsigned long
+memcg_zone_reclaimable_pages(struct mem_cgroup *memcg, struct zone *zone)
+{
+	return 0;
+}
 #endif /* CONFIG_MEMCG */
 
 #if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
@@ -430,6 +439,8 @@ static inline bool memcg_kmem_enabled(void)
 	return static_key_false(&memcg_kmem_enabled_key);
 }
 
+bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+
 /*
  * In general, we'll do everything in our power to not incur in any overhead
  * for non-memcg users for the kmem functions. Not even a function call, if we
@@ -563,6 +574,12 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 	return __memcg_kmem_get_cache(cachep, gfp);
 }
 #else
+
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+	return false;
+}
+
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
 
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 00a3e57..3b08869 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -20,6 +20,9 @@ struct shrink_control {
 
 	/* shrink from these nodes */
 	nodemask_t nodes_to_scan;
+
+	/* reclaim from this memcg only (if not NULL) */
+	struct mem_cgroup *target_mem_cgroup;
 };
 
 /*
@@ -45,6 +48,7 @@ struct shrinker {
 
 	int seeks;	/* seeks to recreate an obj */
 	long batch;	/* reclaim batch size, 0 = default */
+	bool memcg_shrinker; /* memcg-aware shrinker */
 
 	/* These are for internal use */
 	struct list_head list;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e34da3c..980bd2d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -465,7 +465,7 @@ static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
 	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
 }
 
-static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 {
 	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
 }
@@ -1044,6 +1044,20 @@ mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 	return ret;
 }
 
+unsigned long
+memcg_zone_reclaimable_pages(struct mem_cgroup *memcg, struct zone *zone)
+{
+	int nid = zone_to_nid(zone);
+	int zid = zone_idx(zone);
+	unsigned long val;
+
+	val = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, LRU_ALL_FILE);
+	if (do_swap_account)
+		val += mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
+						    LRU_ALL_ANON);
+	return val;
+}
+
 static unsigned long
 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, int nid,
 			unsigned int lru_mask)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64f66f6..eb5e67c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -139,11 +139,42 @@ static bool global_reclaim(struct scan_control *sc)
 {
 	return !sc->target_mem_cgroup;
 }
+
+/*
+ * kmem reclaim should usually not be triggered when we are doing targeted
+ * reclaim. It is only valid when global reclaim is triggered, or when the
+ * underlying memcg has kmem objects.
+ */
+static bool has_kmem_reclaim(struct scan_control *sc)
+{
+	return !sc->target_mem_cgroup ||
+		memcg_kmem_is_active(sc->target_mem_cgroup);
+}
+
+static unsigned long
+zone_nr_reclaimable_pages(struct scan_control *sc, struct zone *zone)
+{
+	if (global_reclaim(sc))
+		return zone_reclaimable_pages(zone);
+	return memcg_zone_reclaimable_pages(sc->target_mem_cgroup, zone);
+}
+
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
 	return true;
 }
+
+static bool has_kmem_reclaim(struct scan_control *sc)
+{
+	return true;
+}
+
+static unsigned long
+zone_nr_reclaimable_pages(struct scan_control *sc, struct zone *zone)
+{
+	return zone_reclaimable_pages(zone);
+}
 #endif
 
 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
@@ -222,6 +253,14 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl,
 		long batch_size = shrinker->batch ? shrinker->batch
 						  : SHRINK_BATCH;
 
+		/*
+		 * If we don't have a target mem cgroup, we scan them all.
+		 * Otherwise we will limit our scan to shrinkers marked as
+		 * memcg aware.
+		 */
+		if (shrinkctl->target_mem_cgroup && !shrinker->memcg_shrinker)
+			continue;
+
 		max_pass = shrinker->count_objects(shrinker, shrinkctl);
 		WARN_ON(max_pass < 0);
 		if (max_pass <= 0)
@@ -2172,9 +2211,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
 		/*
 		 * Don't shrink slabs when reclaiming memory from
-		 * over limit cgroups
+		 * over limit cgroups, unless we know they have kmem objects
 		 */
-		if (global_reclaim(sc)) {
+		if (has_kmem_reclaim(sc)) {
 			unsigned long lru_pages = 0;
 
 			nodes_clear(shrink->nodes_to_scan);
@@ -2183,7 +2222,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 					continue;
 
-				lru_pages += zone_reclaimable_pages(zone);
+				lru_pages += zone_nr_reclaimable_pages(sc, zone);
 				node_set(zone_to_nid(zone),
 					 shrink->nodes_to_scan);
 			}
@@ -2452,6 +2491,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 	};
 	struct shrink_control shrink = {
 		.gfp_mask = sc.gfp_mask,
+		.target_mem_cgroup = memcg,
 	};
 
 	/*
-- 
1.8.1.4
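For illustration, this is roughly how a cache would opt in to being
scanned under memcg pressure once this patch is applied. The sketch is
not part of the patch: the demo_* names are hypothetical, and it assumes
the count_objects/scan_objects shrinker API introduced earlier in this
series. Only .memcg_shrinker and sc->target_mem_cgroup come from this
patch.

	/*
	 * Hypothetical example, not from this series: a "demo" cache
	 * registering a memcg-aware shrinker.
	 */
	static long demo_count_objects(struct shrinker *shrink,
				       struct shrink_control *sc)
	{
		/*
		 * Until later patches add per-memcg filtering inside
		 * the shrinker, the whole cache is counted even when
		 * sc->target_mem_cgroup is non-NULL.
		 */
		return demo_nr_cached_objects();	/* hypothetical helper */
	}

	static long demo_scan_objects(struct shrinker *shrink,
				      struct shrink_control *sc)
	{
		/* returns the number of objects actually freed */
		return demo_free_cached_objects(sc->nr_to_scan);
	}

	static struct shrinker demo_shrinker = {
		.count_objects	= demo_count_objects,
		.scan_objects	= demo_scan_objects,
		.seeks		= DEFAULT_SEEKS,
		.memcg_shrinker	= true,	/* opt in to memcg-targeted reclaim */
	};

	/* e.g. from the cache's init path */
	register_shrinker(&demo_shrinker);

Without .memcg_shrinker set, the new check in shrink_slab() skips the
shrinker whenever shrinkctl->target_mem_cgroup is non-NULL, so only
global reclaim would ever call into it.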