From: Frederic Weisbecker <frederic@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Frederic Weisbecker <frederic@kernel.org>,
	Marco Crivellari <marco.crivellari@suse.com>,
	Michal Hocko <mhocko@suse.com>,
	Peter Zijlstra <peterz@infradead.org>, Tejun Heo <tj@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Vlastimil Babka <vbabka@suse.cz>,
	Waiman Long <longman@redhat.com>
Subject: [PATCH 21/27] kthread: Refine naming of affinity related fields
Date: Fri, 20 Jun 2025 17:23:02 +0200
Message-ID: <20250620152308.27492-22-frederic@kernel.org>
In-Reply-To: <20250620152308.27492-1-frederic@kernel.org>

The kthreads' preferred affinity fields use "hotplug" as the base of
their naming because affinity management was initially expected to
handle only CPU hotplug.

The scope of this role is now broadening to also cover cpuset isolated
partition updates.

Switch the naming accordingly.
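
For reference, kthread_affine_preferred() is the entry point that
populates the renamed fields. A minimal, hypothetical usage sketch (the
thread function, thread name and CPU number are illustrative, not part
of this patch):

	/*
	 * Register a preferred affinity before the kthread first runs
	 * (safe because the task is still inactive). Internally this
	 * takes kthread_affinity_lock (formerly kthreads_hotplug_lock)
	 * and links the kthread into kthread_affinity_list (formerly
	 * kthreads_hotplug) so the mask can be reapplied on CPU hotplug
	 * and, soon, on cpuset isolated partition updates.
	 */
	struct task_struct *t;

	t = kthread_create(my_thread_fn, NULL, "my_kthread");
	if (!IS_ERR(t)) {
		if (kthread_affine_preferred(t, cpumask_of(2)))
			pr_warn("my_kthread: preferred affinity rejected\n");
		wake_up_process(t);
	}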

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 kernel/kthread.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 85fc068f0083..24008dd9f3dc 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -35,8 +35,8 @@ static DEFINE_SPINLOCK(kthread_create_lock);
 static LIST_HEAD(kthread_create_list);
 struct task_struct *kthreadd_task;
 
-static LIST_HEAD(kthreads_hotplug);
-static DEFINE_MUTEX(kthreads_hotplug_lock);
+static LIST_HEAD(kthread_affinity_list);
+static DEFINE_MUTEX(kthread_affinity_lock);
 
 struct kthread_create_info
 {
@@ -69,7 +69,7 @@ struct kthread {
 	/* To store the full name if task comm is truncated. */
 	char *full_name;
 	struct task_struct *task;
-	struct list_head hotplug_node;
+	struct list_head affinity_node;
 	struct cpumask *preferred_affinity;
 };
 
@@ -129,7 +129,7 @@ bool set_kthread_struct(struct task_struct *p)
 
 	init_completion(&kthread->exited);
 	init_completion(&kthread->parked);
-	INIT_LIST_HEAD(&kthread->hotplug_node);
+	INIT_LIST_HEAD(&kthread->affinity_node);
 	p->vfork_done = &kthread->exited;
 
 	kthread->task = p;
@@ -324,10 +324,10 @@ void __noreturn kthread_exit(long result)
 {
 	struct kthread *kthread = to_kthread(current);
 	kthread->result = result;
-	if (!list_empty(&kthread->hotplug_node)) {
-		mutex_lock(&kthreads_hotplug_lock);
-		list_del(&kthread->hotplug_node);
-		mutex_unlock(&kthreads_hotplug_lock);
+	if (!list_empty(&kthread->affinity_node)) {
+		mutex_lock(&kthread_affinity_lock);
+		list_del(&kthread->affinity_node);
+		mutex_unlock(&kthread_affinity_lock);
 
 		if (kthread->preferred_affinity) {
 			kfree(kthread->preferred_affinity);
@@ -391,9 +391,9 @@ static void kthread_affine_node(void)
 			return;
 		}
 
-		mutex_lock(&kthreads_hotplug_lock);
-		WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
-		list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+		mutex_lock(&kthread_affinity_lock);
+		WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+		list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
 		/*
 		 * The node cpumask is racy when read from kthread() but:
 		 * - a racing CPU going down will either fail on the subsequent
@@ -403,7 +403,7 @@ static void kthread_affine_node(void)
 		 */
 		kthread_fetch_affinity(kthread, affinity);
 		set_cpus_allowed_ptr(current, affinity);
-		mutex_unlock(&kthreads_hotplug_lock);
+		mutex_unlock(&kthread_affinity_lock);
 
 		free_cpumask_var(affinity);
 	}
@@ -877,10 +877,10 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
 		goto out;
 	}
 
-	mutex_lock(&kthreads_hotplug_lock);
+	mutex_lock(&kthread_affinity_lock);
 	cpumask_copy(kthread->preferred_affinity, mask);
-	WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
-	list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+	WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+	list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
 	kthread_fetch_affinity(kthread, affinity);
 
 	/* It's safe because the task is inactive. */
@@ -888,7 +888,7 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
 	do_set_cpus_allowed(p, affinity);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
-	mutex_unlock(&kthreads_hotplug_lock);
+	mutex_unlock(&kthread_affinity_lock);
 out:
 	free_cpumask_var(affinity);
 
@@ -908,9 +908,9 @@ static int kthreads_online_cpu(unsigned int cpu)
 	struct kthread *k;
 	int ret;
 
-	guard(mutex)(&kthreads_hotplug_lock);
+	guard(mutex)(&kthread_affinity_lock);
 
-	if (list_empty(&kthreads_hotplug))
+	if (list_empty(&kthread_affinity_list))
 		return 0;
 
 	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
@@ -918,7 +918,7 @@ static int kthreads_online_cpu(unsigned int cpu)
 
 	ret = 0;
 
-	list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
+	list_for_each_entry(k, &kthread_affinity_list, affinity_node) {
 		if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
 				 kthread_is_per_cpu(k->task))) {
 			ret = -EINVAL;
-- 
2.48.1

