From: Tejun Heo <tj@kernel.org>
To: linux-kernel@vger.kernel.org
Cc: torvalds@linux-foundation.org, peterz@infradead.org,
tglx@linutronix.de, linux-pm@vger.kernel.org,
Tejun Heo <tj@kernel.org>
Subject: [PATCH 9/9] workqueue: simplify CPU hotplug code
Date: Tue, 17 Jul 2012 10:12:29 -0700 [thread overview]
Message-ID: <1342545149-3515-10-git-send-email-tj@kernel.org> (raw)
In-Reply-To: <1342545149-3515-1-git-send-email-tj@kernel.org>
With trustee gone, CPU hotplug code can be simplified.
* gcwq_claim/release_management() now also grab and release the gcwq
lock, respectively, and have gained _and_lock and _and_unlock suffixes.
* All CPU hotplug logic used to be implemented in
workqueue_cpu_callback(), which was called by
workqueue_cpu_up/down_callback() at the correct priority. This was
because the up and down paths shared a lot of logic, which is no
longer true. Remove workqueue_cpu_callback() and move all hotplug
logic into the two actual callbacks.
This patch doesn't make any functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/workqueue.c | 79 ++++++++++++++++-----------------------------------
1 files changed, 25 insertions(+), 54 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d1545da..471996a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3358,19 +3358,21 @@ EXPORT_SYMBOL_GPL(work_busy);
*/
/* claim manager positions of all pools */
-static void gcwq_claim_management(struct global_cwq *gcwq)
+static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
{
struct worker_pool *pool;
for_each_worker_pool(pool, gcwq)
mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
+ spin_lock_irq(&gcwq->lock);
}
/* release manager positions */
-static void gcwq_release_management(struct global_cwq *gcwq)
+static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
{
struct worker_pool *pool;
+ spin_unlock_irq(&gcwq->lock);
for_each_worker_pool(pool, gcwq)
mutex_unlock(&pool->manager_mutex);
}
@@ -3385,8 +3387,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
BUG_ON(gcwq->cpu != smp_processor_id());
- gcwq_claim_management(gcwq);
- spin_lock_irq(&gcwq->lock);
+ gcwq_claim_management_and_lock(gcwq);
/*
* We've claimed all manager positions. Make all workers unbound
@@ -3403,8 +3404,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
gcwq->flags |= GCWQ_DISASSOCIATED;
- spin_unlock_irq(&gcwq->lock);
- gcwq_release_management(gcwq);
+ gcwq_release_management_and_unlock(gcwq);
/*
* Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3428,26 +3428,19 @@ static void gcwq_unbind_fn(struct work_struct *work)
atomic_set(get_pool_nr_running(pool), 0);
}
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered high priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker_pool *pool;
- struct work_struct unbind_work;
- unsigned long flags;
-
- action &= ~CPU_TASKS_FROZEN;
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- /* unbinding should happen on the local CPU */
- INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
- schedule_work_on(cpu, &unbind_work);
- flush_work(&unbind_work);
- break;
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
for_each_worker_pool(pool, gcwq) {
struct worker *worker;
@@ -3463,45 +3456,16 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
start_worker(worker);
spin_unlock_irq(&gcwq->lock);
}
- }
-
- /* some are called w/ irq disabled, don't disturb irq status */
- spin_lock_irqsave(&gcwq->lock, flags);
+ break;
- switch (action) {
case CPU_DOWN_FAILED:
case CPU_ONLINE:
- spin_unlock_irq(&gcwq->lock);
- gcwq_claim_management(gcwq);
- spin_lock_irq(&gcwq->lock);
-
+ gcwq_claim_management_and_lock(gcwq);
gcwq->flags &= ~GCWQ_DISASSOCIATED;
-
rebind_workers(gcwq);
-
- gcwq_release_management(gcwq);
+ gcwq_release_management_and_unlock(gcwq);
break;
}
-
- spin_unlock_irqrestore(&gcwq->lock, flags);
-
- return notifier_from_errno(0);
-}
-
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- return workqueue_cpu_callback(nfb, action, hcpu);
- }
return NOTIFY_OK;
}
@@ -3513,9 +3477,16 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct work_struct unbind_work;
+
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
- return workqueue_cpu_callback(nfb, action, hcpu);
+ /* unbinding should happen on the local CPU */
+ INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+ schedule_work_on(cpu, &unbind_work);
+ flush_work(&unbind_work);
+ break;
}
return NOTIFY_OK;
}
--
1.7.7.3
next prev parent reply other threads:[~2012-07-17 17:12 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-07-17 17:12 [PATCHSET] workqueue: reimplement CPU hotplug to keep idle workers Tejun Heo
2012-07-17 17:12 ` [PATCH 1/9] workqueue: perform cpu down operations from low priority cpu_notifier() Tejun Heo
2012-07-20 21:52 ` Paul E. McKenney
2012-07-20 21:58 ` Tejun Heo
2012-07-21 21:36 ` Paul E. McKenney
2012-07-22 16:43 ` [PATCH] workqueue: fix spurious CPU locality WARN from process_one_work() Tejun Heo
2012-07-22 21:23 ` Paul E. McKenney
2012-07-17 17:12 ` [PATCH 2/9] workqueue: drop CPU_DYING notifier operation Tejun Heo
2012-07-17 17:12 ` [PATCH 3/9] workqueue: ROGUE workers are UNBOUND workers Tejun Heo
2012-07-17 17:12 ` [PATCH 4/9] workqueue: use mutex for global_cwq manager exclusion Tejun Heo
2012-07-17 17:12 ` [PATCH 5/9] workqueue: drop @bind from create_worker() Tejun Heo
2012-07-17 17:12 ` [PATCH 6/9] workqueue: reimplement CPU online rebinding to handle idle workers Tejun Heo
2012-07-17 17:12 ` [PATCH 7/9] workqueue: don't butcher idle workers on an offline CPU Tejun Heo
2012-07-17 17:12 ` [PATCH 8/9] workqueue: remove CPU offline trustee Tejun Heo
2012-07-17 17:12 ` Tejun Heo [this message]
2012-07-17 18:43 ` [PATCHSET] workqueue: reimplement CPU hotplug to keep idle workers Rafael J. Wysocki
2012-07-17 19:40 ` Tejun Heo
2012-07-20 15:48 ` Peter Zijlstra
2012-07-20 17:02 ` Tejun Heo
2012-07-20 17:21 ` Peter Zijlstra
2012-07-20 17:50 ` Tejun Heo
2012-07-20 18:22 ` Peter Zijlstra
2012-07-20 18:34 ` Tejun Heo
2012-07-20 19:44 ` Rafael J. Wysocki
2012-07-20 19:41 ` Tejun Heo
2012-07-21 6:42 ` Shilimkar, Santosh
2012-07-23 8:38 ` Peter De Schrijver
2012-07-20 16:39 ` Peter Zijlstra
2012-07-20 16:52 ` Tejun Heo
2012-07-20 17:01 ` Peter Zijlstra
2012-07-20 17:08 ` Tejun Heo
2012-07-20 17:19 ` Peter Zijlstra
2012-07-20 17:43 ` Tejun Heo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1342545149-3515-10-git-send-email-tj@kernel.org \
--to=tj@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-pm@vger.kernel.org \
--cc=peterz@infradead.org \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox