* [PATCH] workqueue: drop apply_wqattrs_lock()/unlock() wrappers
@ 2026-05-11 12:14 Breno Leitao
2026-05-11 19:04 ` Tejun Heo
0 siblings, 1 reply; 2+ messages in thread
From: Breno Leitao @ 2026-05-11 12:14 UTC (permalink / raw)
To: Tejun Heo, Lai Jiangshan; +Cc: linux-kernel, kernel-team, Breno Leitao
The apply_wqattrs_lock()/unlock() helpers were introduced by
commit a0111cf6710b ("workqueue: separate out and refactor the locking
of applying attrs") to encapsulate the get_online_cpus() (later
cpus_read_lock()) + mutex_lock(&wq_pool_mutex) acquire pair that was
duplicated across the apply-attrs paths.
Since commit 19af45757383 ("workqueue: Remove cpus_read_lock() from
apply_wqattrs_lock()") removed the cpus_read_lock() (pwq creation and
installation now operate on wq_online_cpumask, so CPU hotplug no longer
needs to be excluded), the wrappers have been one-line forwarders to
mutex_lock(&wq_pool_mutex)/mutex_unlock(&wq_pool_mutex).
They no longer encode any non-trivial locking rule and obscure the fact
that callers just take the existing wq_pool_mutex. This aligns with the
"unnecessary" helpers that got discussed in [1].
Inline the eight call sites and remove the wrappers. No functional
change.
Link: https://lore.kernel.org/all/afs_44-6ToJJVZTn@gmail.com/ [1]
Signed-off-by: Breno Leitao <leitao@debian.org>
---
kernel/workqueue.c | 38 ++++++++++++++------------------------
1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 15d98f4b4179d..61a34be6797e0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5316,16 +5316,6 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
return pwq;
}
-static void apply_wqattrs_lock(void)
-{
- mutex_lock(&wq_pool_mutex);
-}
-
-static void apply_wqattrs_unlock(void)
-{
- mutex_unlock(&wq_pool_mutex);
-}
-
/**
* wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
* @attrs: the wq_attrs of the default pwq of the target workqueue
@@ -5881,7 +5871,7 @@ static struct workqueue_struct *__alloc_workqueue(const char *fmt,
* wq_pool_mutex protects the workqueues list, allocations of PWQs,
* and the global freeze state.
*/
- apply_wqattrs_lock();
+ mutex_lock(&wq_pool_mutex);
if (alloc_and_link_pwqs(wq) < 0)
goto err_unlock_free_node_nr_active;
@@ -5895,7 +5885,7 @@ static struct workqueue_struct *__alloc_workqueue(const char *fmt,
if (wq_online && init_rescuer(wq) < 0)
goto err_unlock_destroy;
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
goto err_destroy;
@@ -5903,7 +5893,7 @@ static struct workqueue_struct *__alloc_workqueue(const char *fmt,
return wq;
err_unlock_free_node_nr_active:
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
/*
* Failed alloc_and_link_pwqs() may leave pending pwq->release_work,
* flushing the pwq_release_worker ensures that the pwq_release_workfn()
@@ -5918,7 +5908,7 @@ static struct workqueue_struct *__alloc_workqueue(const char *fmt,
kfree(wq);
return NULL;
err_unlock_destroy:
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
err_destroy:
destroy_workqueue(wq);
return NULL;
@@ -7319,7 +7309,7 @@ static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
struct workqueue_attrs *attrs;
int ret = -ENOMEM;
- apply_wqattrs_lock();
+ mutex_lock(&wq_pool_mutex);
attrs = wq_sysfs_prep_attrs(wq);
if (!attrs)
@@ -7332,7 +7322,7 @@ static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
ret = -EINVAL;
out_unlock:
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
free_workqueue_attrs(attrs);
return ret ?: count;
}
@@ -7358,7 +7348,7 @@ static ssize_t wq_cpumask_store(struct device *dev,
struct workqueue_attrs *attrs;
int ret = -ENOMEM;
- apply_wqattrs_lock();
+ mutex_lock(&wq_pool_mutex);
attrs = wq_sysfs_prep_attrs(wq);
if (!attrs)
@@ -7369,7 +7359,7 @@ static ssize_t wq_cpumask_store(struct device *dev,
ret = apply_workqueue_attrs_locked(wq, attrs);
out_unlock:
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
free_workqueue_attrs(attrs);
return ret ?: count;
}
@@ -7405,13 +7395,13 @@ static ssize_t wq_affn_scope_store(struct device *dev,
if (affn < 0)
return affn;
- apply_wqattrs_lock();
+ mutex_lock(&wq_pool_mutex);
attrs = wq_sysfs_prep_attrs(wq);
if (attrs) {
attrs->affn_scope = affn;
ret = apply_workqueue_attrs_locked(wq, attrs);
}
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
free_workqueue_attrs(attrs);
return ret ?: count;
}
@@ -7436,13 +7426,13 @@ static ssize_t wq_affinity_strict_store(struct device *dev,
if (sscanf(buf, "%d", &v) != 1)
return -EINVAL;
- apply_wqattrs_lock();
+ mutex_lock(&wq_pool_mutex);
attrs = wq_sysfs_prep_attrs(wq);
if (attrs) {
attrs->affn_strict = (bool)v;
ret = apply_workqueue_attrs_locked(wq, attrs);
}
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
free_workqueue_attrs(attrs);
return ret ?: count;
}
@@ -7483,12 +7473,12 @@ static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
cpumask_and(cpumask, cpumask, cpu_possible_mask);
if (!cpumask_empty(cpumask)) {
ret = 0;
- apply_wqattrs_lock();
+ mutex_lock(&wq_pool_mutex);
if (!cpumask_equal(cpumask, wq_unbound_cpumask))
ret = workqueue_apply_unbound_cpumask(cpumask);
if (!ret)
cpumask_copy(wq_requested_unbound_cpumask, cpumask);
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
}
return ret;
---
base-commit: 84db6d7197f3e2922e26938f1c2adb3b0fe225fc
change-id: 20260511-workqueue_drop-6b4031d48152
Best regards,
--
Breno Leitao <leitao@debian.org>
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] workqueue: drop apply_wqattrs_lock()/unlock() wrappers
2026-05-11 12:14 [PATCH] workqueue: drop apply_wqattrs_lock()/unlock() wrappers Breno Leitao
@ 2026-05-11 19:04 ` Tejun Heo
0 siblings, 0 replies; 2+ messages in thread
From: Tejun Heo @ 2026-05-11 19:04 UTC (permalink / raw)
To: Breno Leitao; +Cc: Lai Jiangshan, linux-kernel, kernel-team
Hello,
Applied to wq/for-7.2.
Thanks.
--
tejun
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2026-05-11 19:04 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-05-11 12:14 [PATCH] workqueue: drop apply_wqattrs_lock()/unlock() wrappers Breno Leitao
2026-05-11 19:04 ` Tejun Heo
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox