On 00:02 Tue 23 Mar 2021, Ingo Molnar wrote: > >Hi Paul, > >Was working on automation to make it a bit more straightforward to fix >typos within comments (which we tend to reintroduce during >development), and here are the ones it found in the RCU code. > >Thanks, > > Ingo > >=========> >From: Ingo Molnar >Date: Mon, 22 Mar 2021 23:57:26 +0100 >Subject: [PATCH] rcu: Fix various typos in comments > >Fix ~12 single-word typos in RCU code comments. > >Signed-off-by: Ingo Molnar >Cc: Paul E. McKenney >Cc: linux-kernel@vger.kernel.org >--- > kernel/rcu/srcutree.c | 4 ++-- > kernel/rcu/sync.c | 2 +- > kernel/rcu/tasks.h | 8 ++++---- > kernel/rcu/tree.c | 4 ++-- > kernel/rcu/tree.h | 2 +- > kernel/rcu/tree_plugin.h | 2 +- > tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h | 2 +- > 7 files changed, 12 insertions(+), 12 deletions(-) > >diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c >index e26547b34ad3..036ff5499ad5 100644 >--- a/kernel/rcu/srcutree.c >+++ b/kernel/rcu/srcutree.c >@@ -777,9 +777,9 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp) > spin_unlock_irqrestore_rcu_node(sdp, flags); > > /* >- * No local callbacks, so probabalistically probe global state. >+ * No local callbacks, so probabilistically probe global state. > * Exact information would require acquiring locks, which would >- * kill scalability, hence the probabalistic nature of the probe. >+ * kill scalability, hence the probabilistic nature of the probe. > */ > > /* First, see if enough time has passed since the last GP. */ >diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c >index d4558ab7a07d..3eeb871cf0de 100644 >--- a/kernel/rcu/sync.c >+++ b/kernel/rcu/sync.c >@@ -94,7 +94,7 @@ static void rcu_sync_func(struct rcu_head *rhp) > rcu_sync_call(rsp); > } else { > /* >- * We're at least a GP after the last rcu_sync_exit(); eveybody >+ * We're at least a GP after the last rcu_sync_exit(); everybody > * will now have observed the write side critical section. 
> * Let 'em rip!. > */ >diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h >index af7c19439f4e..ac3c362e08a3 100644 >--- a/kernel/rcu/tasks.h >+++ b/kernel/rcu/tasks.h >@@ -23,7 +23,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); > * Definition for a Tasks-RCU-like mechanism. > * @cbs_head: Head of callback list. > * @cbs_tail: Tail pointer for callback list. >- * @cbs_wq: Wait queue allowning new callback to get kthread's attention. >+ * @cbs_wq: Wait queue allowing new callback to get kthread's attention. > * @cbs_lock: Lock protecting callback list. > * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. > * @gp_func: This flavor's grace-period-wait function. >@@ -504,7 +504,7 @@ DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); > * or transition to usermode execution. As such, there are no read-side > * primitives analogous to rcu_read_lock() and rcu_read_unlock() because > * this primitive is intended to determine that all tasks have passed >- * through a safe state, not so much for data-strcuture synchronization. >+ * through a safe state, not so much for data-structure synchronization. > * > * See the description of call_rcu() for more detailed information on > * memory ordering guarantees. >@@ -637,7 +637,7 @@ DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, > * there are no read-side primitives analogous to rcu_read_lock() and > * rcu_read_unlock() because this primitive is intended to determine > * that all tasks have passed through a safe state, not so much for >- * data-strcuture synchronization. >+ * data-structure synchronization. > * Is the hyphen in the middle of "data-structure" required, kept by convention, or does it have some significance? > * See the description of call_rcu() for more detailed information on > * memory ordering guarantees. 
>@@ -1127,7 +1127,7 @@ static void exit_tasks_rcu_finish_trace(struct task_struct *t) > * there are no read-side primitives analogous to rcu_read_lock() and > * rcu_read_unlock() because this primitive is intended to determine > * that all tasks have passed through a safe state, not so much for >- * data-strcuture synchronization. >+ * data-structure synchronization. > * Same question as above. > * See the description of call_rcu() for more detailed information on > * memory ordering guarantees. >diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c >index da6f5213fb74..ab5bd5b391e6 100644 >--- a/kernel/rcu/tree.c >+++ b/kernel/rcu/tree.c >@@ -2490,7 +2490,7 @@ int rcutree_dead_cpu(unsigned int cpu) > > /* > * Invoke any RCU callbacks that have made it to the end of their grace >- * period. Thottle as specified by rdp->blimit. >+ * period. Throttle as specified by rdp->blimit. > */ > static void rcu_do_batch(struct rcu_data *rdp) > { >@@ -4013,7 +4013,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier); > /* > * Propagate ->qsinitmask bits up the rcu_node tree to account for the > * first CPU in a given leaf rcu_node structure coming online. The caller >- * must hold the corresponding leaf rcu_node ->lock with interrrupts >+ * must hold the corresponding leaf rcu_node ->lock with interrupts > * disabled. > */ > static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) >diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h >index 71821d59d95c..abff7abd59ee 100644 >--- a/kernel/rcu/tree.h >+++ b/kernel/rcu/tree.h >@@ -153,7 +153,7 @@ struct rcu_data { > unsigned long gp_seq; /* Track rsp->gp_seq counter. */ > unsigned long gp_seq_needed; /* Track furthest future GP request. */ > union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */ >- bool core_needs_qs; /* Core waits for quiesc state. */ >+ bool core_needs_qs; /* Core waits for quiescent state. */ > bool beenonline; /* CPU online at least once. */ > bool gpwrap; /* Possible ->gp_seq wrap. 
*/ > bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */ >diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h >index 2d603771c7dc..2a28f05cf467 100644 >--- a/kernel/rcu/tree_plugin.h >+++ b/kernel/rcu/tree_plugin.h >@@ -2772,7 +2772,7 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) > wastimer = timer_pending(&rdp->nocb_bypass_timer); > wassleep = swait_active(&rdp->nocb_gp_wq); > if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep) >- return; /* Nothing untowards. */ >+ return; /* Nothing untoward. */ > > pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n", > "lL"[waslocked], >diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h >index cf6938d679d7..1e24827f96f1 100644 >--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h >+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h >@@ -174,7 +174,7 @@ static inline bool spin_trylock(spinlock_t *lock) > } > > struct completion { >- /* Hopefuly this won't overflow. */ >+ /* Hopefully this won't overflow. */ > unsigned int count; > }; >