From: Mel Gorman <mgorman@suse.de>
To: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Vlastimil Babka <vbabka@suse.cz>, Jan Kara <jack@suse.cz>,
Michal Hocko <mhocko@suse.cz>, Hugh Dickins <hughd@google.com>,
Dave Hansen <dave.hansen@intel.com>,
Linux Kernel <linux-kernel@vger.kernel.org>,
Linux-MM <linux-mm@kvack.org>,
Linux-FSDevel <linux-fsdevel@vger.kernel.org>
Subject: Re: [PATCH 04/19] mm: page_alloc: Use jump labels to avoid checking number_of_cpusets
Date: Tue, 13 May 2014 13:28:51 +0100
Message-ID: <20140513122851.GP23991@suse.de>
In-Reply-To: <20140513105851.GA30445@twins.programming.kicks-ass.net>
On Tue, May 13, 2014 at 12:58:51PM +0200, Peter Zijlstra wrote:
> On Tue, May 13, 2014 at 10:45:35AM +0100, Mel Gorman wrote:
> > +#ifdef HAVE_JUMP_LABEL
> > +extern struct static_key cpusets_enabled_key;
> > +static inline bool cpusets_enabled(void)
> > +{
> > + return static_key_false(&cpusets_enabled_key);
> > +}
> > +
> > +/* jump label reference count + the top-level cpuset */
> > +#define number_of_cpusets (static_key_count(&cpusets_enabled_key) + 1)
> > +
> > +static inline void cpuset_inc(void)
> > +{
> > + static_key_slow_inc(&cpusets_enabled_key);
> > +}
> > +
> > +static inline void cpuset_dec(void)
> > +{
> > + static_key_slow_dec(&cpusets_enabled_key);
> > +}
> > +
> > +static inline void cpuset_init_count(void) { }
> > +
> > +#else
> > extern int number_of_cpusets; /* How many cpusets are defined in system? */
> >
> > +static inline bool cpusets_enabled(void)
> > +{
> > + return number_of_cpusets > 1;
> > +}
> > +
> > +static inline void cpuset_inc(void)
> > +{
> > + number_of_cpusets++;
> > +}
> > +
> > +static inline void cpuset_dec(void)
> > +{
> > + number_of_cpusets--;
> > +}
> > +
> > +static inline void cpuset_init_count(void)
> > +{
> > + number_of_cpusets = 1;
> > +}
> > +#endif /* HAVE_JUMP_LABEL */
>
> I'm still puzzled by the whole #else branch here, why not
> unconditionally use the jump-label one? Without HAVE_JUMP_LABEL we'll
> revert to a simple atomic_t counter, which should be perfectly fine, no?
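Yes, that would be fine. Without HAVE_JUMP_LABEL the generic
<linux/jump_label.h> implementation is just an atomic_t counter anyway --
roughly the following, as a from-memory sketch rather than a paste from the
tree:

	struct static_key {
		atomic_t enabled;
	};

	/* Fast path: plain read of the counter, no code patching */
	static __always_inline bool static_key_false(struct static_key *key)
	{
		if (unlikely(atomic_read(&key->enabled) > 0))
			return true;
		return false;
	}

	static inline void static_key_slow_inc(struct static_key *key)
	{
		atomic_inc(&key->enabled);
	}

	static inline void static_key_slow_dec(struct static_key *key)
	{
		atomic_dec(&key->enabled);
	}

so static_key_count() from the earlier patch just reads that counter and the
fast path costs a single read-and-compare, no worse than the open-coded
number_of_cpusets check was.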
There was no good reason for the #else branch -- the intent was to preserve
the old behaviour when jump labels are not available, but nothing is gained
by it. I'll delete the alternative implementation, make number_of_cpusets an
inline function (nr_cpusets()) and move cpusets_enabled_key into the
__read_mostly section. It's untested, but the patch now looks like this:
---8<---
mm: page_alloc: Use jump labels to avoid checking number_of_cpusets
If cpusets are not in use then we still check a global variable on every
page allocation. Use jump labels to avoid the overhead.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
---
include/linux/cpuset.h | 28 +++++++++++++++++++++++++---
kernel/cpuset.c | 14 ++++----------
mm/page_alloc.c | 3 ++-
3 files changed, 31 insertions(+), 14 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index b19d3dc..a94af76 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -15,7 +15,27 @@
#ifdef CONFIG_CPUSETS
-extern int number_of_cpusets; /* How many cpusets are defined in system? */
+extern struct static_key cpusets_enabled_key;
+static inline bool cpusets_enabled(void)
+{
+ return static_key_false(&cpusets_enabled_key);
+}
+
+static inline int nr_cpusets(void)
+{
+ /* jump label reference count + the top-level cpuset */
+ return static_key_count(&cpusets_enabled_key) + 1;
+}
+
+static inline void cpuset_inc(void)
+{
+ static_key_slow_inc(&cpusets_enabled_key);
+}
+
+static inline void cpuset_dec(void)
+{
+ static_key_slow_dec(&cpusets_enabled_key);
+}
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
@@ -32,13 +52,13 @@ extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
- return number_of_cpusets <= 1 ||
+ return nr_cpusets() <= 1 ||
__cpuset_node_allowed_softwall(node, gfp_mask);
}
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
- return number_of_cpusets <= 1 ||
+ return nr_cpusets() <= 1 ||
__cpuset_node_allowed_hardwall(node, gfp_mask);
}
@@ -124,6 +144,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
#else /* !CONFIG_CPUSETS */
+static inline bool cpusets_enabled(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3d54c41..1300178 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -61,12 +61,7 @@
#include <linux/cgroup.h>
#include <linux/wait.h>
-/*
- * Tracks how many cpusets are currently defined in system.
- * When there is only one cpuset (the root cpuset) we can
- * short circuit some hooks.
- */
-int number_of_cpusets __read_mostly;
+struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
/* See "Frequency meter" comments, below. */
@@ -611,7 +606,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
goto done;
}
- csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
+ csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
if (!csa)
goto done;
csn = 0;
@@ -1888,7 +1883,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
if (is_spread_slab(parent))
set_bit(CS_SPREAD_SLAB, &cs->flags);
- number_of_cpusets++;
+ cpuset_inc();
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
@@ -1939,7 +1934,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
if (is_sched_load_balance(cs))
update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
- number_of_cpusets--;
+ cpuset_dec();
clear_bit(CS_ONLINE, &cs->flags);
mutex_unlock(&cpuset_mutex);
@@ -1992,7 +1987,6 @@ int __init cpuset_init(void)
if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
BUG();
- number_of_cpusets = 1;
return 0;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c559e3..cb12b9a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1930,7 +1930,8 @@ zonelist_scan:
if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
- if ((alloc_flags & ALLOC_CPUSET) &&
+ if (cpusets_enabled() &&
+ (alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed_softwall(zone, gfp_mask))
continue;
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
--