From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: mingo@kernel.org, jiangshanlai@gmail.com, dipankar@in.ibm.com,
akpm@linux-foundation.org, mathieu.desnoyers@efficios.com,
josh@joshtriplett.org, tglx@linutronix.de, peterz@infradead.org,
rostedt@goodmis.org, dhowells@redhat.com, edumazet@google.com,
dvhart@linux.intel.com, fweisbec@gmail.com, oleg@redhat.com,
bobby.prani@gmail.com,
"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 12/19] rcu: Apply rcu_seq operations to _rcu_barrier()
Date: Fri, 17 Jul 2015 16:29:17 -0700
Message-ID: <1437175764-24096-12-git-send-email-paulmck@linux.vnet.ibm.com>
In-Reply-To: <1437175764-24096-1-git-send-email-paulmck@linux.vnet.ibm.com>
The rcu_seq operations were open-coded in _rcu_barrier(), so this commit
replaces the open-coding with the shiny new rcu_seq operations.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
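[ Note for reviewers: the rcu_seq operations applied here were factored
  out of synchronize_sched_expedited() earlier in this series (patch
  07/19, with a type fix for "s" in 10/19).  For reference, they look
  roughly like the sketch below; see kernel/rcu/tree.c for the
  authoritative versions.

	/* Adjust sequence number for start of update-side operation. */
	static void rcu_seq_start(unsigned long *sp)
	{
		WRITE_ONCE(*sp, *sp + 1);
		smp_mb(); /* Ensure update-side operation after counter increment. */
		WARN_ON_ONCE(!(*sp & 0x1));
	}

	/* Adjust sequence number for end of update-side operation. */
	static void rcu_seq_end(unsigned long *sp)
	{
		smp_mb(); /* Ensure update-side operation before counter increment. */
		WRITE_ONCE(*sp, *sp + 1);
		WARN_ON_ONCE(*sp & 0x1);
	}

	/* Take a snapshot of the update side's sequence number. */
	static unsigned long rcu_seq_snap(unsigned long *sp)
	{
		unsigned long s;

		smp_mb(); /* Caller's modifications seen first by other CPUs. */
		s = (READ_ONCE(*sp) + 3) & ~0x1;
		smp_mb(); /* Above access must not bleed into critical section. */
		return s;
	}

	/*
	 * Given a snapshot from rcu_seq_snap(), determine whether or not
	 * a full update-side operation has occurred since then.
	 */
	static bool rcu_seq_done(unsigned long *sp, unsigned long s)
	{
		return ULONG_CMP_GE(READ_ONCE(*sp), s);
	}

  The snapshot/done pair encapsulates the same even/odd convention that
  _rcu_barrier() previously maintained by hand in ->n_barrier_done, which
  is what makes the substitution below mostly mechanical. ]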
include/trace/events/rcu.h | 1 -
kernel/rcu/tree.c | 72 ++++++++++++----------------------------------
kernel/rcu/tree.h | 2 +-
kernel/rcu/tree_trace.c | 4 +--
4 files changed, 22 insertions(+), 57 deletions(-)
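[ The comment block deleted below explains the old piggybacking test,
  "(snap + 3) & ~0x1", which rcu_seq_snap()/rcu_seq_done() now hide.
  A stand-alone worked example of that arithmetic, illustrative only
  and not kernel code:

	#include <stdio.h>

	int main(void)
	{
		/* Even snapshot: no barrier running; odd: one in flight. */
		unsigned long snaps[] = { 4, 5 };

		for (int i = 0; i < 2; i++) {
			unsigned long snap = snaps[i];
			/* Round snap up to the next even number, then add two. */
			unsigned long need = (snap + 3) & ~0x1UL;

			printf("snap=%lu: our work already done once counter reaches %lu\n",
			       snap, need);
		}
		return 0; /* Prints 6 for snap=4 and 8 for snap=5. */
	}

  That is, an even snapshot needs one full even-to-odd-to-even transition
  (snap + 2), while an odd snapshot must additionally wait for the
  in-flight barrier to finish (snap + 3). ]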
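[ Also relevant to reading the later hunks, though unchanged by this
  patch: the "initialize the count to one" trick mentioned in the
  comment there keeps ->barrier_completion from firing while callbacks
  are still being posted.  A paraphrased sketch of the idiom, not the
  exact code:

	init_completion(&rsp->barrier_completion);
	atomic_set(&rsp->barrier_cpu_count, 1);	/* Extra count: our own "hold". */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		/* ... atomic_inc(&rsp->barrier_cpu_count) once per callback posted ... */
	}
	put_online_cpus();

	/*
	 * Drop the initial count.  If all posted callbacks have already
	 * been invoked, this completes; otherwise the last callback does.
	 */
	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
		complete(&rsp->barrier_completion);
	wait_for_completion(&rsp->barrier_completion);
]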
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index c78e88ce5ea3..ef72c4aada56 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -661,7 +661,6 @@ TRACE_EVENT(rcu_torture_read,
* Tracepoint for _rcu_barrier() execution. The string "s" describes
* the _rcu_barrier phase:
* "Begin": _rcu_barrier() started.
- * "Check": _rcu_barrier() checking for piggybacking.
* "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
* "Inc1": _rcu_barrier() piggyback check counter incremented.
* "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 338ea61929bd..44245ae4c1c2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3568,10 +3568,10 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
struct rcu_state *rsp = rdp->rsp;
if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
- _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
+ _rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
complete(&rsp->barrier_completion);
} else {
- _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
+ _rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
}
}
@@ -3583,7 +3583,7 @@ static void rcu_barrier_func(void *type)
struct rcu_state *rsp = type;
struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
- _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
+ _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
atomic_inc(&rsp->barrier_cpu_count);
rsp->call(&rdp->barrier_head, rcu_barrier_callback);
}
@@ -3596,55 +3596,24 @@ static void _rcu_barrier(struct rcu_state *rsp)
{
int cpu;
struct rcu_data *rdp;
- unsigned long snap = READ_ONCE(rsp->n_barrier_done);
- unsigned long snap_done;
+ unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
- _rcu_barrier_trace(rsp, "Begin", -1, snap);
+ _rcu_barrier_trace(rsp, "Begin", -1, s);
/* Take mutex to serialize concurrent rcu_barrier() requests. */
mutex_lock(&rsp->barrier_mutex);
- /*
- * Ensure that all prior references, including to ->n_barrier_done,
- * are ordered before the _rcu_barrier() machinery.
- */
- smp_mb(); /* See above block comment. */
-
- /*
- * Recheck ->n_barrier_done to see if others did our work for us.
- * This means checking ->n_barrier_done for an even-to-odd-to-even
- * transition. The "if" expression below therefore rounds the old
- * value up to the next even number and adds two before comparing.
- */
- snap_done = rsp->n_barrier_done;
- _rcu_barrier_trace(rsp, "Check", -1, snap_done);
-
- /*
- * If the value in snap is odd, we needed to wait for the current
- * rcu_barrier() to complete, then wait for the next one, in other
- * words, we need the value of snap_done to be three larger than
- * the value of snap. On the other hand, if the value in snap is
- * even, we only had to wait for the next rcu_barrier() to complete,
- * in other words, we need the value of snap_done to be only two
- * greater than the value of snap. The "(snap + 3) & ~0x1" computes
- * this for us (thank you, Linus!).
- */
- if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
- _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
+ /* Did someone else do our work for us? */
+ if (rcu_seq_done(&rsp->barrier_sequence, s)) {
+ _rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
smp_mb(); /* caller's subsequent code after above check. */
mutex_unlock(&rsp->barrier_mutex);
return;
}
- /*
- * Increment ->n_barrier_done to avoid duplicate work. Use
- * WRITE_ONCE() to prevent the compiler from speculating
- * the increment to precede the early-exit check.
- */
- WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
- WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
- _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
- smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
+ /* Mark the start of the barrier operation. */
+ rcu_seq_start(&rsp->barrier_sequence);
+ _rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);
/*
* Initialize the count to one rather than to zero in order to
@@ -3668,10 +3637,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
if (rcu_is_nocb_cpu(cpu)) {
if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
- rsp->n_barrier_done);
+ rsp->barrier_sequence);
} else {
_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
- rsp->n_barrier_done);
+ rsp->barrier_sequence);
smp_mb__before_atomic();
atomic_inc(&rsp->barrier_cpu_count);
__call_rcu(&rdp->barrier_head,
@@ -3679,11 +3648,11 @@ static void _rcu_barrier(struct rcu_state *rsp)
}
} else if (READ_ONCE(rdp->qlen)) {
_rcu_barrier_trace(rsp, "OnlineQ", cpu,
- rsp->n_barrier_done);
+ rsp->barrier_sequence);
smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
} else {
_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
- rsp->n_barrier_done);
+ rsp->barrier_sequence);
}
}
put_online_cpus();
@@ -3695,16 +3664,13 @@ static void _rcu_barrier(struct rcu_state *rsp)
if (atomic_dec_and_test(&rsp->barrier_cpu_count))
complete(&rsp->barrier_completion);
- /* Increment ->n_barrier_done to prevent duplicate work. */
- smp_mb(); /* Keep increment after above mechanism. */
- WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
- WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
- _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
- smp_mb(); /* Keep increment before caller's subsequent code. */
-
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
wait_for_completion(&rsp->barrier_completion);
+ /* Mark the end of the barrier operation. */
+ _rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
+ rcu_seq_end(&rsp->barrier_sequence);
+
/* Other rcu_barrier() invocations can now safely proceed. */
mutex_unlock(&rsp->barrier_mutex);
}
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 4edc277d08eb..5c1042d9c310 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -486,7 +486,7 @@ struct rcu_state {
struct mutex barrier_mutex; /* Guards barrier fields. */
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
struct completion barrier_completion; /* Wake at barrier end. */
- unsigned long n_barrier_done; /* ++ at start and end of */
+ unsigned long barrier_sequence; /* ++ at start and end of */
/* _rcu_barrier(). */
/* End of fields guarded by barrier_mutex. */
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 36c04b46d3b8..d9982a2ce305 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -81,9 +81,9 @@ static void r_stop(struct seq_file *m, void *v)
static int show_rcubarrier(struct seq_file *m, void *v)
{
struct rcu_state *rsp = (struct rcu_state *)m->private;
- seq_printf(m, "bcc: %d nbd: %lu\n",
+ seq_printf(m, "bcc: %d bseq: %lu\n",
atomic_read(&rsp->barrier_cpu_count),
- rsp->n_barrier_done);
+ rsp->barrier_sequence);
return 0;
}
--
1.8.1.5