From: Frederic Weisbecker <frederic@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Frederic Weisbecker <frederic@kernel.org>,
Boqun Feng <boqun.feng@gmail.com>,
Joel Fernandes <joel@joelfernandes.org>,
Neeraj Upadhyay <neeraj.upadhyay@amd.com>,
"Paul E . McKenney" <paulmck@kernel.org>,
Uladzislau Rezki <urezki@gmail.com>,
Zqiang <qiang.zhang1211@gmail.com>, rcu <rcu@vger.kernel.org>,
Anna-Maria Behnsen <anna-maria.behnsen@linutronix.de>,
Thomas Gleixner <tglx@linutronix.de>
Subject: [PATCH 7/8] rcu/exp: Handle parallel exp gp kworkers affinity
Date: Fri, 8 Dec 2023 23:05:44 +0100
Message-ID: <20231208220545.7452-8-frederic@kernel.org>
In-Reply-To: <20231208220545.7452-1-frederic@kernel.org>
Affine the parallel expedited gp kworkers to their respective RCU node,
in order to keep them close to the cache lines they are working on.
This reuses the boost kthreads machinery that probes CPU hotplug
operations, such that the kthreads become/stay affine to their
respective node as soon as, and as long as, it contains online CPUs.
Otherwise, if the CPU going down was the last one online on the leaf
node, the related kthread is affined to the housekeeping CPUs.
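
For illustration, the per-node affinity choice made on each hotplug
event boils down to the following (a simplified sketch of the
rcutree_affinity_setting() logic added below; cpumask allocation, the
NULL-task checks and ->kthread_mutex locking are omitted, and task
stands for either of the node's kthreads):

	/*
	 * Allowed mask: online CPUs of the leaf node, minus the
	 * outgoing CPU, intersected with the RCU housekeeping CPUs.
	 * If that intersection is empty, fall back to all housekeeping
	 * CPUs (minus the outgoing one).
	 */
	mask = rcu_rnp_online_cpus(rnp);
	for_each_leaf_node_possible_cpu(rnp, cpu)
		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
		    cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
	if (cpumask_empty(cm)) {
		cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
		if (outgoingcpu >= 0)
			cpumask_clear_cpu(outgoingcpu, cm);
	}
	set_cpus_allowed_ptr(task, cm);

The same resulting mask is applied to both the boost kthread and the
expedited parallel gp kworker of the node.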
In the long run, this interplay between kthread affinity and CPU
hotplug operations should probably be implemented at the generic
kthread level.
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
kernel/rcu/tree.c | 79 +++++++++++++++++++++++++++++++++++++---
kernel/rcu/tree_plugin.h | 42 ++-------------------
2 files changed, 78 insertions(+), 43 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 060d418c2b44..e75ddf42e9b1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -145,7 +145,7 @@ static int rcu_scheduler_fully_active __read_mostly;
static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
unsigned long gps, unsigned long flags);
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
+static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
@@ -4390,6 +4390,16 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
}
+static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
+{
+ struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
+
+ if (!kworker)
+ return NULL;
+
+ return kworker->task;
+}
+
static void __init rcu_start_exp_gp_kworker(void)
{
const char *name = "rcu_exp_gp_kthread_worker";
@@ -4414,6 +4424,11 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
{
}
+static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
+{
+ return NULL;
+}
+
static void __init rcu_start_exp_gp_kworker(void)
{
}
@@ -4492,13 +4507,67 @@ int rcutree_prepare_cpu(unsigned int cpu)
}
/*
- * Update RCU priority boot kthread affinity for CPU-hotplug changes.
+ * Update kthread affinity during CPU-hotplug changes.
+ *
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set, use -1 if there is
+ * no outgoing CPU. If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ *
+ * Any future concurrent calls are serialized via ->kthread_mutex.
*/
-static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
{
- struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ cpumask_var_t cm;
+ unsigned long mask;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+ struct task_struct *task_boost, *task_exp;
- rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+ if (!IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) && !IS_ENABLED(CONFIG_RCU_BOOST))
+ return;
+
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ rnp = rdp->mynode;
+
+ task_boost = rcu_boost_task(rnp);
+ task_exp = rcu_exp_par_gp_task(rnp);
+
+ /*
+ * If the CPU is the boot one, those tasks are created later from an
+ * early initcall, since kthreadd must be created first.
+ */
+ if (!task_boost && !task_exp)
+ return;
+
+ if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
+ return;
+
+ mutex_lock(&rnp->kthread_mutex);
+ mask = rcu_rnp_online_cpus(rnp);
+ for_each_leaf_node_possible_cpu(rnp, cpu)
+ if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
+ cpu != outgoingcpu)
+ cpumask_set_cpu(cpu, cm);
+ cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
+ if (cpumask_empty(cm)) {
+ cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
+ if (outgoingcpu >= 0)
+ cpumask_clear_cpu(outgoingcpu, cm);
+ }
+
+ if (task_exp)
+ set_cpus_allowed_ptr(task_exp, cm);
+
+ if (task_boost)
+ set_cpus_allowed_ptr(task_boost, cm);
+
+ mutex_unlock(&rnp->kthread_mutex);
+
+ free_cpumask_var(cm);
}
/*
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 09bdd36ca9ff..08246cca663f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1211,43 +1211,9 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
}
-/*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU. If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- *
- * Any future concurrent calls are serialized via ->kthread_mutex.
- */
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
{
- struct task_struct *t = rnp->boost_kthread_task;
- unsigned long mask;
- cpumask_var_t cm;
- int cpu;
-
- if (!t)
- return;
- if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
- return;
- mutex_lock(&rnp->kthread_mutex);
- mask = rcu_rnp_online_cpus(rnp);
- for_each_leaf_node_possible_cpu(rnp, cpu)
- if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
- cpu != outgoingcpu)
- cpumask_set_cpu(cpu, cm);
- cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
- if (cpumask_empty(cm)) {
- cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
- if (outgoingcpu >= 0)
- cpumask_clear_cpu(outgoingcpu, cm);
- }
- set_cpus_allowed_ptr(t, cm);
- mutex_unlock(&rnp->kthread_mutex);
- free_cpumask_var(cm);
+ return READ_ONCE(rnp->boost_kthread_task);
}
#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1266,10 +1232,10 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
}
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
{
+ return NULL;
}
-
#endif /* #else #ifdef CONFIG_RCU_BOOST */
/*
--
2.42.1