From mboxrd@z Thu Jan 1 00:00:00 1970
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Michal Koutný, Andrew Morton, Bjorn Helgaas,
    Catalin Marinas, Danilo Krummrich, David S. Miller, Eric Dumazet,
    Gabriele Monaco, Greg Kroah-Hartman, Ingo Molnar, Jakub Kicinski,
    Jens Axboe, Johannes Weiner, Lai Jiangshan, Marco Crivellari,
    Michal Hocko, Muchun Song, Paolo Abeni, Peter Zijlstra, Phil Auld,
    Rafael J. Wysocki, Roman Gushchin, Shakeel Butt, Simon Horman,
    Tejun Heo, Thomas Gleixner, Vlastimil Babka, Waiman Long,
    Will Deacon, cgroups@vger.kernel.org,
    linux-arm-kernel@lists.infradead.org, linux-block@vger.kernel.org,
    linux-mm@kvack.org, linux-pci@vger.kernel.org, netdev@vger.kernel.org
Subject: [PATCH 21/31] kthread: Refine naming of affinity related fields
Date: Wed, 5 Nov 2025 22:03:37 +0100
Message-ID: <20251105210348.35256-22-frederic@kernel.org>
X-Mailer: git-send-email 2.51.0
In-Reply-To: <20251105210348.35256-1-frederic@kernel.org>
References: <20251105210348.35256-1-frederic@kernel.org>

The kthreads' preferred-affinity fields use "hotplug" as the base of
their naming because affinity management was initially only meant to
deal with CPU hotplug. The scope of this role is now going to broaden
and also cover cpuset isolated partition updates.
Switch the naming accordingly.

Signed-off-by: Frederic Weisbecker
---
 kernel/kthread.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 31b072e8d427..c4dd967e9e9c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -35,8 +35,8 @@ static DEFINE_SPINLOCK(kthread_create_lock);
 static LIST_HEAD(kthread_create_list);
 struct task_struct *kthreadd_task;
 
-static LIST_HEAD(kthreads_hotplug);
-static DEFINE_MUTEX(kthreads_hotplug_lock);
+static LIST_HEAD(kthread_affinity_list);
+static DEFINE_MUTEX(kthread_affinity_lock);
 
 struct kthread_create_info
 {
@@ -69,7 +69,7 @@ struct kthread {
 	/* To store the full name if task comm is truncated. */
 	char *full_name;
 	struct task_struct *task;
-	struct list_head hotplug_node;
+	struct list_head affinity_node;
 	struct cpumask *preferred_affinity;
 };
 
@@ -128,7 +128,7 @@ bool set_kthread_struct(struct task_struct *p)
 
 	init_completion(&kthread->exited);
 	init_completion(&kthread->parked);
-	INIT_LIST_HEAD(&kthread->hotplug_node);
+	INIT_LIST_HEAD(&kthread->affinity_node);
 	p->vfork_done = &kthread->exited;
 
 	kthread->task = p;
@@ -323,10 +323,10 @@ void __noreturn kthread_exit(long result)
 {
 	struct kthread *kthread = to_kthread(current);
 	kthread->result = result;
-	if (!list_empty(&kthread->hotplug_node)) {
-		mutex_lock(&kthreads_hotplug_lock);
-		list_del(&kthread->hotplug_node);
-		mutex_unlock(&kthreads_hotplug_lock);
+	if (!list_empty(&kthread->affinity_node)) {
+		mutex_lock(&kthread_affinity_lock);
+		list_del(&kthread->affinity_node);
+		mutex_unlock(&kthread_affinity_lock);
 
 		if (kthread->preferred_affinity) {
 			kfree(kthread->preferred_affinity);
@@ -390,9 +390,9 @@ static void kthread_affine_node(void)
 			return;
 		}
 
-		mutex_lock(&kthreads_hotplug_lock);
-		WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
-		list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+		mutex_lock(&kthread_affinity_lock);
+		WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+		list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
 		/*
 		 * The node cpumask is racy when read from kthread() but:
 		 * - a racing CPU going down will either fail on the subsequent
@@ -402,7 +402,7 @@ static void kthread_affine_node(void)
 		 */
 		kthread_fetch_affinity(kthread, affinity);
 		set_cpus_allowed_ptr(current, affinity);
-		mutex_unlock(&kthreads_hotplug_lock);
+		mutex_unlock(&kthread_affinity_lock);
 
 		free_cpumask_var(affinity);
 	}
@@ -876,10 +876,10 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
 		goto out;
 	}
 
-	mutex_lock(&kthreads_hotplug_lock);
+	mutex_lock(&kthread_affinity_lock);
 	cpumask_copy(kthread->preferred_affinity, mask);
-	WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
-	list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+	WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+	list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
 	kthread_fetch_affinity(kthread, affinity);
 
 	/* It's safe because the task is inactive. */
@@ -887,7 +887,7 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
 	do_set_cpus_allowed(p, affinity);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
-	mutex_unlock(&kthreads_hotplug_lock);
+	mutex_unlock(&kthread_affinity_lock);
 
 out:
 	free_cpumask_var(affinity);
@@ -908,9 +908,9 @@ static int kthreads_online_cpu(unsigned int cpu)
 	struct kthread *k;
 	int ret;
 
-	guard(mutex)(&kthreads_hotplug_lock);
+	guard(mutex)(&kthread_affinity_lock);
 
-	if (list_empty(&kthreads_hotplug))
+	if (list_empty(&kthread_affinity_list))
 		return 0;
 
 	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
@@ -918,7 +918,7 @@ static int kthreads_online_cpu(unsigned int cpu)
 
 	ret = 0;
 
-	list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
+	list_for_each_entry(k, &kthread_affinity_list, affinity_node) {
 		if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
 				 kthread_is_per_cpu(k->task))) {
 			ret = -EINVAL;
-- 
2.51.0
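
[Editorial note, not part of the patch] For readers following the
series: kthread_affine_preferred(), whose locking this patch renames,
is meant to be called on a kthread before it first runs, which is why
the hunk above can apply the mask with do_set_cpus_allowed() under the
"task is inactive" comment. Below is a minimal sketch of a caller under
that assumption; pinned_worker_fn and start_pinned_worker are
hypothetical names invented for illustration.

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical worker: sleeps until someone calls kthread_stop(). */
static int pinned_worker_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

/*
 * Create a kthread and set its preferred affinity before the first
 * wakeup, i.e. while the task is still inactive.
 */
static struct task_struct *start_pinned_worker(const struct cpumask *mask)
{
	struct task_struct *t;
	int ret;

	t = kthread_create(pinned_worker_fn, NULL, "pinned_worker");
	if (IS_ERR(t))
		return t;

	ret = kthread_affine_preferred(t, mask);
	if (ret) {
		/* Never woken; kthread_stop() reaps it safely. */
		kthread_stop(t);
		return ERR_PTR(ret);
	}

	wake_up_process(t);
	return t;
}

The ordering matters: kthread_affine_preferred() records the mask and
links the kthread onto the affinity list (kthread_affinity_list after
this patch) under kthread_affinity_lock, and from then on
kthreads_online_cpu() re-applies the preferred mask as CPUs come back
online, per the hunks above.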