public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Frederic Weisbecker <frederic@kernel.org>
To: "Paul E. McKenney" <paulmck@kernel.org>
Cc: rcu@vger.kernel.org, linux-kernel@vger.kernel.org,
	kernel-team@fb.com, rostedt@goodmis.org,
	David Woodhouse <dwmw@amazon.co.uk>
Subject: Re: [PATCH rcu 3/9] rcu: Add mutex for rcu boost kthread spawning and affinity setting
Date: Fri, 11 Feb 2022 16:11:48 +0100	[thread overview]
Message-ID: <20220211151148.GA588079@lothringen> (raw)
In-Reply-To: <20220211145757.GA587320@lothringen>

On Fri, Feb 11, 2022 at 03:57:57PM +0100, Frederic Weisbecker wrote:
> On Fri, Feb 04, 2022 at 03:07:59PM -0800, Paul E. McKenney wrote:
> > From: David Woodhouse <dwmw@amazon.co.uk>
> > 
> > As we handle parallel CPU bringup, we will need to take care to avoid
> > spawning multiple boost threads, or race conditions when setting their
> > affinity. Spotted by Paul McKenney.
> > 
> > Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
> > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> 
> Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
> 
> Speaking of, we have:
> 
> rcu_init()
>    for_each_online_cpu(cpu) // should be boot CPU only at this stage ?
>        rcutree_prepare_cpu(cpu)
>            rcu_spawn_one_boost_kthread(cpu)
> 
> 
> early_initcall()
>     rcu_spawn_gp_kthread()
>         rcu_spawn_boost_kthreads()
> 	    rcu_for_each_leaf_node(rnp)
> 	        rcu_rnp_online_cpus(rnp) // as above, only boot CPU at this stage.
>                     rcu_spawn_one_boost_kthread(cpu)
> 
> cpu_up()
>     rcutree_prepare_cpu(cpu)
>         rcu_spawn_one_boost_kthread(cpu)
> 
> 
> My guess is that we could remove rcu_spawn_boost_kthreads() and simplify
> rcu_init(). Something like this (untested yet):
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 86eec6a0f1a1..da8ac2b6f8cc 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -4526,7 +4526,6 @@ static int __init rcu_spawn_gp_kthread(void)
>  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
>  	wake_up_process(t);
>  	rcu_spawn_nocb_kthreads();
> -	rcu_spawn_boost_kthreads();
>  	rcu_spawn_core_kthreads();
>  	return 0;
>  }
> @@ -4813,7 +4812,7 @@ static void __init kfree_rcu_batch_init(void)
>  
>  void __init rcu_init(void)
>  {
> -	int cpu;
> +	int cpu = smp_processor_id();
>  
>  	rcu_early_boot_tests();
>  
> @@ -4833,11 +4832,10 @@ void __init rcu_init(void)
>  	 * or the scheduler are operational.
>  	 */
>  	pm_notifier(rcu_pm_notify, 0);
> -	for_each_online_cpu(cpu) {
> -		rcutree_prepare_cpu(cpu);
> -		rcu_cpu_starting(cpu);
> -		rcutree_online_cpu(cpu);
> -	}
> +
> +	rcutree_prepare_cpu(cpu);
> +	rcu_cpu_starting(cpu);
> +	rcutree_online_cpu(cpu);
>  
>  	/* Create workqueue for Tree SRCU and for expedited GPs. */
>  	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
> diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
> index 6082dd23408f..90925a589774 100644
> --- a/kernel/rcu/tree_plugin.h
> +++ b/kernel/rcu/tree_plugin.h
> @@ -1226,18 +1226,6 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
>  	free_cpumask_var(cm);
>  }
>  
> -/*
> - * Spawn boost kthreads -- called as soon as the scheduler is running.
> - */
> -static void __init rcu_spawn_boost_kthreads(void)
> -{
> -	struct rcu_node *rnp;
> -
> -	rcu_for_each_leaf_node(rnp)
> -		if (rcu_rnp_online_cpus(rnp))
> -			rcu_spawn_one_boost_kthread(rnp);
> -}
> -
>  #else /* #ifdef CONFIG_RCU_BOOST */
>  
>  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
> @@ -1263,10 +1251,6 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
>  {
>  }
>  
> -static void __init rcu_spawn_boost_kthreads(void)
> -{
> -}
> -
>  #endif /* #else #ifdef CONFIG_RCU_BOOST */
>  
>  /*

nocb kthread creation is similar but it depends on the gp kthread.
So we can't rely on rcu_init() -> rcutree_prepare_cpu() and we must keep
the early_initcall() -> rcu_spawn_gp_kthread().

That would become (untested again):

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index da8ac2b6f8cc..9284625a9a50 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4525,7 +4525,7 @@ static int __init rcu_spawn_gp_kthread(void)
 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	wake_up_process(t);
-	rcu_spawn_nocb_kthreads();
+	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
 	rcu_spawn_core_kthreads();
 	return 0;
 }
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 636d0546a4e9..711f6eb7f7e1 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1277,22 +1277,6 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
 }
 
-/*
- * Once the scheduler is running, spawn rcuo kthreads for all online
- * no-CBs CPUs.  This assumes that the early_initcall()s happen before
- * non-boot CPUs come online -- if this changes, we will need to add
- * some mutual exclusion.
- */
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-	int cpu;
-
-	if (rcu_nocb_is_setup) {
-		for_each_online_cpu(cpu)
-			rcu_spawn_cpu_nocb_kthread(cpu);
-	}
-}
-
 /* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
 static int rcu_nocb_gp_stride = -1;
 module_param(rcu_nocb_gp_stride, int, 0444);
@@ -1549,10 +1533,6 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 {
 }
 
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-}
-
 static void show_rcu_nocb_state(struct rcu_data *rdp)
 {
 }

  reply	other threads:[~2022-02-11 15:11 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-02-04 23:07 [PATCH rcu 0/9] Miscellaneous fixes for v5.18 Paul E. McKenney
2022-02-04 23:07 ` [PATCH rcu 1/9] MAINTAINERS: Add Frederic and Neeraj to their RCU files Paul E. McKenney
2022-02-11 15:15   ` Frederic Weisbecker
2022-02-11 16:40     ` Paul E. McKenney
2022-02-04 23:07 ` [PATCH rcu 2/9] rcu: Fix description of kvfree_rcu() Paul E. McKenney
2022-02-04 23:07 ` [PATCH rcu 3/9] rcu: Add mutex for rcu boost kthread spawning and affinity setting Paul E. McKenney
2022-02-11 14:57   ` Frederic Weisbecker
2022-02-11 15:11     ` Frederic Weisbecker [this message]
2022-02-11 15:42       ` Paul E. McKenney
2022-02-11 15:43         ` Frederic Weisbecker
2022-02-04 23:08 ` [PATCH rcu 4/9] rcu: Inline __call_rcu() into call_rcu() Paul E. McKenney
2022-02-04 23:08 ` [PATCH rcu 5/9] kasan: Record work creation stack trace with interrupts enabled Paul E. McKenney
2022-02-04 23:08 ` [PATCH rcu 6/9] rcu: Mark writes to the rcu_segcblist structure's ->flags field Paul E. McKenney
2022-02-04 23:08 ` [PATCH rcu 7/9] rcu: Uninline multi-use function: finish_rcuwait() Paul E. McKenney
2022-02-04 23:08 ` [PATCH rcu 8/9] rcu: Remove __read_mostly annotations from rcu_scheduler_active externs Paul E. McKenney
2022-02-04 23:08 ` [PATCH rcu 9/9] rcu: Replace cpumask_weight with cpumask_empty where appropriate Paul E. McKenney
2022-02-11 15:17   ` Frederic Weisbecker

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220211151148.GA588079@lothringen \
    --to=frederic@kernel.org \
    --cc=dwmw@amazon.co.uk \
    --cc=kernel-team@fb.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=paulmck@kernel.org \
    --cc=rcu@vger.kernel.org \
    --cc=rostedt@goodmis.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox