From: Peter Zijlstra <peterz@infradead.org>
To: Oleg Nesterov <oleg@redhat.com>,
Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>, Rik van Riel <riel@redhat.com>,
Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
Ingo Molnar <mingo@kernel.org>,
Andrea Arcangeli <aarcange@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Thomas Gleixner <tglx@linutronix.de>,
Steven Rostedt <rostedt@goodmis.org>,
Linus Torvalds <torvalds@linux-foundation.org>,
linux-kernel@vger.kernel.org,
Peter Zijlstra <peterz@infradead.org>
Subject: [PATCH 3/3] hotplug: Optimize cpu_hotplug_{begin,done}() using rcu_sync
Date: Wed, 02 Oct 2013 16:56:58 +0200 [thread overview]
Message-ID: <20131002150518.748974359@infradead.org> (raw)
In-Reply-To: <20131002145655.361606532@infradead.org>
[-- Attachment #1: peterz-cpu-hotplug-2.patch --]
[-- Type: text/plain, Size: 5593 bytes --]
Use the fancy new rcu_sync bits from Oleg to optimize the fancy new
hotplug lock implementation.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
---
include/linux/cpu.h | 7 ++++---
kernel/cpu.c | 52 +++++++++++++++++++++++-----------------------------
2 files changed, 27 insertions(+), 32 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/sched.h>
+#include <linux/rcusync.h>
struct device;
@@ -180,7 +181,7 @@ extern void cpu_hotplug_init_task(struct
extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
-extern int __cpuhp_state;
+extern struct rcu_sync_struct __cpuhp_rss;
DECLARE_PER_CPU(unsigned int, __cpuhp_refcount);
extern void __get_online_cpus(void);
@@ -204,7 +205,7 @@ static inline void get_online_cpus(void)
* writer will see anything we did within this RCU-sched read-side
* critical section.
*/
- if (likely(!__cpuhp_state))
+ if (likely(rcu_sync_is_idle(&__cpuhp_rss)))
__this_cpu_inc(__cpuhp_refcount);
else
__get_online_cpus(); /* Unconditional memory barrier. */
@@ -231,7 +232,7 @@ static inline void put_online_cpus(void)
/*
* Same as in get_online_cpus().
*/
- if (likely(!__cpuhp_state))
+ if (likely(rcu_sync_is_idle(&__cpuhp_rss)))
__this_cpu_dec(__cpuhp_refcount);
else
__put_online_cpus(); /* Unconditional memory barrier. */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -49,14 +49,15 @@ static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
-enum { readers_fast = 0, readers_slow, readers_block };
+enum { readers_slow, readers_block };
-int __cpuhp_state;
-EXPORT_SYMBOL_GPL(__cpuhp_state);
+DEFINE_RCU_SCHED_SYNC(__cpuhp_rss);
+EXPORT_SYMBOL_GPL(__cpuhp_rss);
DEFINE_PER_CPU(unsigned int, __cpuhp_refcount);
EXPORT_PER_CPU_SYMBOL_GPL(__cpuhp_refcount);
+static int cpuhp_state = readers_slow;
static atomic_t cpuhp_waitcount;
static DECLARE_WAIT_QUEUE_HEAD(cpuhp_readers);
static DECLARE_WAIT_QUEUE_HEAD(cpuhp_writer);
@@ -68,7 +69,6 @@ void cpu_hotplug_init_task(struct task_s
void __get_online_cpus(void)
{
-again:
__this_cpu_inc(__cpuhp_refcount);
/*
@@ -77,7 +77,7 @@ void __get_online_cpus(void)
* increment-on-one-CPU-and-decrement-on-another problem.
*
* And yes, if the reader misses the writer's assignment of
- * readers_block to __cpuhp_state, then the writer is
+ * readers_block to cpuhp_state, then the writer is
* guaranteed to see the reader's increment. Conversely, any
* readers that increment their __cpuhp_refcount after the
* writer looks are guaranteed to see the readers_block value,
@@ -88,7 +88,7 @@ void __get_online_cpus(void)
smp_mb(); /* A matches D */
- if (likely(__cpuhp_state != readers_block))
+ if (likely(cpuhp_state != readers_block))
return;
/*
@@ -108,19 +108,19 @@ void __get_online_cpus(void)
* and reschedule on the preempt_enable() in get_online_cpus().
*/
preempt_enable_no_resched();
- __wait_event(cpuhp_readers, __cpuhp_state != readers_block);
+ __wait_event(cpuhp_readers, cpuhp_state != readers_block);
preempt_disable();
+ __this_cpu_inc(__cpuhp_refcount);
+
/*
- * Given we've still got preempt_disabled and new cpu_hotplug_begin()
- * must do a synchronize_sched() we're guaranteed a successfull
- * acquisition this time -- even if we wake the current
- * cpu_hotplug_end() now.
+ * cpu_hotplug_done() waits until all pending readers are gone;
+ * this means that a new cpu_hotplug_begin() must observe our
+ * refcount increment and wait for it to go away.
*/
- if (atomic_dec_and_test(&cpuhp_waitcount))
- wake_up(&cpuhp_writer);
- goto again;
+ if (atomic_dec_and_test(&cpuhp_waitcount)) /* A */
+ wake_up(&cpuhp_writer);
}
EXPORT_SYMBOL_GPL(__get_online_cpus);
@@ -183,21 +183,18 @@ void cpu_hotplug_begin(void)
current->cpuhp_ref++;
/* Notify readers to take the slow path. */
- __cpuhp_state = readers_slow;
-
- /* See percpu_down_write(); guarantees all readers take the slow path */
- synchronize_sched();
+ rcu_sync_enter(&__cpuhp_rss);
/*
* Notify new readers to block; up until now, and thus throughout the
* longish synchronize_sched() above, new readers could still come in.
*/
- __cpuhp_state = readers_block;
+ cpuhp_state = readers_block;
smp_mb(); /* D matches A */
/*
- * If they don't see our writer of readers_block to __cpuhp_state,
+ * If they don't see our writer of readers_block to cpuhp_state,
* then we are guaranteed to see their __cpuhp_refcount increment, and
* therefore will wait for them.
*/
@@ -215,26 +212,23 @@ void cpu_hotplug_done(void)
* that new readers might fail to see the results of this writer's
* critical section.
*/
- __cpuhp_state = readers_slow;
+ cpuhp_state = readers_slow;
wake_up_all(&cpuhp_readers);
/*
* The wait_event()/wake_up_all() prevents the race where the readers
- * are delayed between fetching __cpuhp_state and blocking.
+ * are delayed between fetching cpuhp_state and blocking.
*/
- /* See percpu_up_write(); readers will no longer attempt to block. */
- synchronize_sched();
-
- /* Let 'em rip */
- __cpuhp_state = readers_fast;
current->cpuhp_ref--;
/*
- * Wait for any pending readers to be running. This ensures readers
- * after writer and avoids writers starving readers.
+ * Wait for any pending readers to be running. This avoids writers
+ * starving readers.
*/
wait_event(cpuhp_writer, !atomic_read(&cpuhp_waitcount));
+
+ rcu_sync_exit(&__cpuhp_rss);
}
/*
next prev parent reply other threads:[~2013-10-02 15:08 UTC|newest]
Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-10-02 14:56 [PATCH 0/3] Optimize the cpu hotplug locking Peter Zijlstra
2013-10-02 14:56 ` [PATCH 1/3] hotplug: Optimize {get,put}_online_cpus() Peter Zijlstra
2013-10-03 14:01 ` Peter Zijlstra
2013-10-03 16:27 ` Paul E. McKenney
2013-10-03 16:26 ` Paul E. McKenney
2013-10-02 14:56 ` [PATCH 2/3] rcu: Create rcu_sync infrastructure Peter Zijlstra
2013-10-02 15:49 ` Oleg Nesterov
2013-10-03 16:42 ` Paul E. McKenney
2013-10-08 8:18 ` Peter Zijlstra
2013-10-03 16:41 ` Paul E. McKenney
2013-10-03 17:00 ` Oleg Nesterov
2013-10-03 17:15 ` Paul E. McKenney
2013-10-03 18:40 ` Peter Zijlstra
2013-10-03 18:45 ` Paul E. McKenney
2013-10-03 18:47 ` Oleg Nesterov
2013-10-03 19:21 ` Paul E. McKenney
2013-10-03 19:32 ` Oleg Nesterov
2013-10-03 19:33 ` Oleg Nesterov
2013-10-03 19:50 ` Paul E. McKenney
2013-10-03 20:00 ` Oleg Nesterov
2013-10-03 21:10 ` Oleg Nesterov
2013-10-03 22:00 ` Paul E. McKenney
2013-10-04 11:29 ` Oleg Nesterov
2013-10-04 16:22 ` Paul E. McKenney
2013-10-04 7:18 ` Peter Zijlstra
2013-10-04 11:15 ` Oleg Nesterov
2013-10-04 11:36 ` Peter Zijlstra
2013-10-04 11:50 ` Oleg Nesterov
2013-10-04 11:44 ` Peter Zijlstra
2013-10-04 12:13 ` Oleg Nesterov
2013-10-04 12:38 ` Peter Zijlstra
2013-10-04 13:31 ` Oleg Nesterov
2013-10-04 14:43 ` Peter Zijlstra
2013-10-04 15:13 ` Oleg Nesterov
2013-10-04 16:25 ` Peter Zijlstra
2013-10-04 19:06 ` Oleg Nesterov
2013-10-04 19:41 ` Peter Zijlstra
2013-10-05 17:31 ` Oleg Nesterov
2013-10-04 7:00 ` Peter Zijlstra
2013-10-03 20:14 ` Paolo Bonzini
2013-10-04 7:01 ` Peter Zijlstra
2013-10-02 14:56 ` Peter Zijlstra [this message]
2013-10-03 16:48 ` [PATCH 3/3] hotplug: Optimize cpu_hotplug_{begin,done}() using rcu_sync Paul E. McKenney
2013-10-03 18:41 ` Peter Zijlstra
2013-10-03 18:46 ` Paul E. McKenney
2013-10-03 19:05 ` Oleg Nesterov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20131002150518.748974359@infradead.org \
--to=peterz@infradead.org \
--cc=aarcange@redhat.com \
--cc=hannes@cmpxchg.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mgorman@suse.de \
--cc=mingo@kernel.org \
--cc=oleg@redhat.com \
--cc=paulmck@linux.vnet.ibm.com \
--cc=riel@redhat.com \
--cc=rostedt@goodmis.org \
--cc=srikar@linux.vnet.ibm.com \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).