From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: mingo@kernel.org, jiangshanlai@gmail.com, dipankar@in.ibm.com,
akpm@linux-foundation.org, mathieu.desnoyers@efficios.com,
josh@joshtriplett.org, tglx@linutronix.de, peterz@infradead.org,
rostedt@goodmis.org, dhowells@redhat.com, edumazet@google.com,
fweisbec@gmail.com, oleg@redhat.com, bobby.prani@gmail.com,
"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 73/88] srcu: Use rnp->lock wrappers to replace explicit memory barriers
Date: Thu, 25 May 2017 14:59:46 -0700
Message-ID: <1495749601-21574-73-git-send-email-paulmck@linux.vnet.ibm.com>
In-Reply-To: <20170525215934.GA11578@linux.vnet.ibm.com>
This commit uses Tree RCU's rnp->lock wrappers to replace a few explicit
memory barriers.  This change also has the advantage of making SRCU's
memory ordering be implemented in roughly the same way as Tree RCU's.
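For reference, the acquisition-side wrappers (moved into kernel/rcu/rcu.h
by the previous two patches in this series) pair each lock acquisition
with smp_mb__after_unlock_lock(), so an unlock followed by a wrapped lock
provides the full ordering that the explicit smp_mb() calls removed below
used to supply.  A simplified sketch, for illustration only (the
authoritative definitions live in kernel/rcu/rcu.h):

	/* Acquire the structure's ->lock and provide full ordering	*/
	/* against the critical section preceding the matching unlock,	*/
	/* even when that unlock ran on some other CPU (sketch only).	*/
	#define raw_spin_lock_irq_rcu_node(p)				\
	do {								\
		raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));		\
		smp_mb__after_unlock_lock();				\
	} while (0)

	#define raw_spin_lock_irqsave_rcu_node(p, flags)		\
	do {								\
		raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);\
		smp_mb__after_unlock_lock();				\
	} while (0)

Because the callback-scheduling and callback-invocation paths changed
below now acquire the relevant ->lock through these wrappers, the
ordering formerly provided by the removed smp_mb() calls is supplied by
the wrappers instead.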
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
include/linux/srcutree.h | 8 ++---
kernel/rcu/srcutree.c | 91 +++++++++++++++++++++++-------------------------
2 files changed, 47 insertions(+), 52 deletions(-)
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 24e949bda12a..42973f787e7e 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
/* Update-side state. */
- spinlock_t lock ____cacheline_internodealigned_in_smp;
+ raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
* Node in SRCU combining tree, similar in function to rcu_data.
*/
struct srcu_node {
- spinlock_t lock;
+ raw_spinlock_t __private lock;
unsigned long srcu_have_cbs[4]; /* GP seq for children */
/* having CBs, but only */
/* is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- spinlock_t gp_lock; /* protect ->srcu_cblist */
+ raw_spinlock_t __private lock; /* Protect counters */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
@@ -109,7 +109,7 @@ void process_srcu(struct work_struct *work);
#define __SRCU_STRUCT_INIT(name) \
{ \
.sda = &name##_srcu_data, \
- .gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq_needed = 0 - 1, \
__SRCU_DEP_MAP_INIT(name) \
}
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 72e89e81a420..34785bdf7c62 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -76,7 +76,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
/* Each pass through this loop initializes one srcu_node structure. */
rcu_for_each_node_breadth_first(sp, snp) {
- spin_lock_init(&snp->lock);
+ raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
@@ -110,7 +110,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
snp_first = sp->level[level];
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
- spin_lock_init(&sdp->lock);
+ raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
@@ -169,7 +169,7 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
/* Don't re-initialize a lock while it is held. */
debug_check_no_locks_freed((void *)sp, sizeof(*sp));
lockdep_init_map(&sp->dep_map, name, key, 0);
- spin_lock_init(&sp->gp_lock);
+ raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
*/
int init_srcu_struct(struct srcu_struct *sp)
{
- spin_lock_init(&sp->gp_lock);
+ raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -197,7 +197,7 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
* First-use initialization of statically allocated srcu_struct
* structure. Wiring up the combining tree is more than can be
* done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive. Use ->gp_lock, which -is-
+ * to each update-side SRCU primitive. Use sp->lock, which -is-
* compile-time initialized, to resolve races involving multiple
* CPUs trying to garner first-use privileges.
*/
@@ -209,13 +209,13 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
/* The smp_load_acquire() pairs with the smp_store_release(). */
if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
- spin_lock_irqsave(&sp->gp_lock, flags);
+ raw_spin_lock_irqsave_rcu_node(sp, flags);
if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
- spin_unlock_irqrestore(&sp->gp_lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(sp, flags);
return;
}
init_srcu_struct_fields(sp, true);
- spin_unlock_irqrestore(&sp->gp_lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -412,8 +412,7 @@ static void srcu_gp_start(struct srcu_struct *sp)
struct srcu_data *sdp = this_cpu_ptr(sp->sda);
int state;
- RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
- "Invoked srcu_gp_start() without ->gp_lock!");
+ lockdep_assert_held(&sp->lock);
WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
@@ -514,7 +513,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
mutex_lock(&sp->srcu_cb_mutex);
/* End the current grace period. */
- spin_lock_irq(&sp->gp_lock);
+ raw_spin_lock_irq_rcu_node(sp);
idx = rcu_seq_state(sp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
cbdelay = srcu_get_delay(sp);
@@ -523,7 +522,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
sp->srcu_gp_seq_needed_exp = gpseq;
- spin_unlock_irq(&sp->gp_lock);
+ raw_spin_unlock_irq_rcu_node(sp);
mutex_unlock(&sp->srcu_gp_mutex);
/* A new grace period can start at this point. But only one. */
@@ -531,7 +530,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
rcu_for_each_node_breadth_first(sp, snp) {
- spin_lock_irq(&snp->lock);
+ raw_spin_lock_irq_rcu_node(snp);
cbs = false;
if (snp >= sp->level[rcu_num_lvls - 1])
cbs = snp->srcu_have_cbs[idx] == gpseq;
@@ -541,21 +540,19 @@ static void srcu_gp_end(struct srcu_struct *sp)
snp->srcu_gp_seq_needed_exp = gpseq;
mask = snp->srcu_data_have_cbs[idx];
snp->srcu_data_have_cbs[idx] = 0;
- spin_unlock_irq(&snp->lock);
- if (cbs) {
- smp_mb(); /* GP end before CB invocation. */
+ raw_spin_unlock_irq_rcu_node(snp);
+ if (cbs)
srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
- }
/* Occasionally prevent srcu_data counter wrap. */
if (!(gpseq & counter_wrap_check))
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
sdp = per_cpu_ptr(sp->sda, cpu);
- spin_lock_irqsave(&sdp->lock, flags);
+ raw_spin_lock_irqsave_rcu_node(sdp, flags);
if (ULONG_CMP_GE(gpseq,
sdp->srcu_gp_seq_needed + 100))
sdp->srcu_gp_seq_needed = gpseq;
- spin_unlock_irqrestore(&sdp->lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
}
}
@@ -563,17 +560,17 @@ static void srcu_gp_end(struct srcu_struct *sp)
mutex_unlock(&sp->srcu_cb_mutex);
/* Start a new grace period if needed. */
- spin_lock_irq(&sp->gp_lock);
+ raw_spin_lock_irq_rcu_node(sp);
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (!rcu_seq_state(gpseq) &&
ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
srcu_gp_start(sp);
- spin_unlock_irq(&sp->gp_lock);
+ raw_spin_unlock_irq_rcu_node(sp);
/* Throttle expedited grace periods: Should be rare! */
srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
? 0 : SRCU_INTERVAL);
} else {
- spin_unlock_irq(&sp->gp_lock);
+ raw_spin_unlock_irq_rcu_node(sp);
}
}
@@ -593,18 +590,18 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
return;
- spin_lock_irqsave(&snp->lock, flags);
+ raw_spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
- spin_unlock_irqrestore(&snp->lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(snp, flags);
return;
}
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
- spin_unlock_irqrestore(&snp->lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(snp, flags);
}
- spin_lock_irqsave(&sp->gp_lock, flags);
+ raw_spin_lock_irqsave_rcu_node(sp, flags);
if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
sp->srcu_gp_seq_needed_exp = s;
- spin_unlock_irqrestore(&sp->gp_lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -626,14 +623,13 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
for (; snp != NULL; snp = snp->srcu_parent) {
if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
return; /* GP already done and CBs recorded. */
- spin_lock_irqsave(&snp->lock, flags);
+ raw_spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
snp_seq = snp->srcu_have_cbs[idx];
if (snp == sdp->mynode && snp_seq == s)
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
- spin_unlock_irqrestore(&snp->lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(snp, flags);
if (snp == sdp->mynode && snp_seq != s) {
- smp_mb(); /* CBs after GP! */
srcu_schedule_cbs_sdp(sdp, do_norm
? SRCU_INTERVAL
: 0);
@@ -648,11 +644,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
snp->srcu_gp_seq_needed_exp = s;
- spin_unlock_irqrestore(&snp->lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(snp, flags);
}
/* Top of tree, must ensure the grace period will be started. */
- spin_lock_irqsave(&sp->gp_lock, flags);
+ raw_spin_lock_irqsave_rcu_node(sp, flags);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
/*
* Record need for grace period s. Pair with load
@@ -671,7 +667,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
queue_delayed_work(system_power_efficient_wq, &sp->work,
srcu_get_delay(sp));
}
- spin_unlock_irqrestore(&sp->gp_lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -834,7 +830,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
rhp->func = func;
local_irq_save(flags);
sdp = this_cpu_ptr(sp->sda);
- spin_lock(&sdp->lock);
+ raw_spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
@@ -848,7 +844,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
sdp->srcu_gp_seq_needed_exp = s;
needexp = true;
}
- spin_unlock_irqrestore(&sdp->lock, flags);
+ raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
if (needgp)
srcu_funnel_gp_start(sp, sdp, s, do_norm);
else if (needexp)
@@ -1019,7 +1015,7 @@ void srcu_barrier(struct srcu_struct *sp)
*/
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
- spin_lock_irq(&sdp->lock);
+ raw_spin_lock_irq_rcu_node(sdp);
atomic_inc(&sp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
@@ -1028,7 +1024,7 @@ void srcu_barrier(struct srcu_struct *sp)
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
atomic_dec(&sp->srcu_barrier_cpu_cnt);
}
- spin_unlock_irq(&sdp->lock);
+ raw_spin_unlock_irq_rcu_node(sdp);
}
/* Remove the initial count, at which point reaching zero can happen. */
@@ -1077,17 +1073,17 @@ static void srcu_advance_state(struct srcu_struct *sp)
*/
idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
if (idx == SRCU_STATE_IDLE) {
- spin_lock_irq(&sp->gp_lock);
+ raw_spin_lock_irq_rcu_node(sp);
if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
- spin_unlock_irq(&sp->gp_lock);
+ raw_spin_unlock_irq_rcu_node(sp);
mutex_unlock(&sp->srcu_gp_mutex);
return;
}
idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
if (idx == SRCU_STATE_IDLE)
srcu_gp_start(sp);
- spin_unlock_irq(&sp->gp_lock);
+ raw_spin_unlock_irq_rcu_node(sp);
if (idx != SRCU_STATE_IDLE) {
mutex_unlock(&sp->srcu_gp_mutex);
return; /* Someone else started the grace period. */
@@ -1136,20 +1132,19 @@ static void srcu_invoke_callbacks(struct work_struct *work)
sdp = container_of(work, struct srcu_data, work.work);
sp = sdp->sp;
rcu_cblist_init(&ready_cbs);
- spin_lock_irq(&sdp->lock);
- smp_mb(); /* Old grace periods before callback invocation! */
+ raw_spin_lock_irq_rcu_node(sdp);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
if (sdp->srcu_cblist_invoking ||
!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
- spin_unlock_irq(&sdp->lock);
+ raw_spin_unlock_irq_rcu_node(sdp);
return; /* Someone else on the job or nothing to do. */
}
/* We are on the job! Extract and invoke ready callbacks. */
sdp->srcu_cblist_invoking = true;
rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
- spin_unlock_irq(&sdp->lock);
+ raw_spin_unlock_irq_rcu_node(sdp);
rhp = rcu_cblist_dequeue(&ready_cbs);
for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
debug_rcu_head_unqueue(rhp);
@@ -1162,13 +1157,13 @@ static void srcu_invoke_callbacks(struct work_struct *work)
* Update counts, accelerate new callbacks, and if needed,
* schedule another round of callback invocation.
*/
- spin_lock_irq(&sdp->lock);
+ raw_spin_lock_irq_rcu_node(sdp);
rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
rcu_seq_snap(&sp->srcu_gp_seq));
sdp->srcu_cblist_invoking = false;
more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
- spin_unlock_irq(&sdp->lock);
+ raw_spin_unlock_irq_rcu_node(sdp);
if (more)
srcu_schedule_cbs_sdp(sdp, 0);
}
@@ -1181,7 +1176,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
bool pushgp = true;
- spin_lock_irq(&sp->gp_lock);
+ raw_spin_lock_irq_rcu_node(sp);
if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
/* All requests fulfilled, time to go idle. */
@@ -1191,7 +1186,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
/* Outstanding request and no GP. Start one. */
srcu_gp_start(sp);
}
- spin_unlock_irq(&sp->gp_lock);
+ raw_spin_unlock_irq_rcu_node(sp);
if (pushgp)
queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
--
2.5.2