From: Tejun Heo <tj@kernel.org>
To: torvalds@linux-foundation.org, mingo@elte.hu,
peterz@infradead.org, awalls@radix.net,
linux-kernel@vger.kernel.org, jeff@garzik.org,
akpm@linux-foundation.org, jens.axboe@oracle.com,
rusty@rustcorp.com.au, cl@linux-foundation.org,
dhowells@redhat.com, arjan@linux.intel.com, avi@redhat.com,
johannes@sipsolutions.net, andi@firstfloor.org, oleg@redhat.com
Cc: Tejun Heo <tj@kernel.org>
Subject: [PATCH 19/43] workqueue: introduce worker
Date: Fri, 26 Feb 2010 21:22:56 +0900 [thread overview]
Message-ID: <1267187000-18791-20-git-send-email-tj@kernel.org> (raw)
In-Reply-To: <1267187000-18791-1-git-send-email-tj@kernel.org>
Separate out worker thread related information to struct worker from
struct cpu_workqueue_struct and implement helper functions to deal
with the new struct worker. The only change visible from outside
is that workqueue workers are now all named "kworker/CPUID:WORKERID"
where WORKERID is allocated from per-cpu ida.
This is in preparation for concurrency-managed workqueues, where
multiple shared workers would be available per cpu.
Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/workqueue.c | 211 +++++++++++++++++++++++++++++++++++++---------------
1 files changed, 151 insertions(+), 60 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 63ec65e..7be13a1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,6 +33,7 @@
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
+#include <linux/idr.h>
/*
* Structure fields follow one of the following exclusion rules.
@@ -46,6 +47,15 @@
* W: workqueue_lock protected.
*/
+struct cpu_workqueue_struct;
+
+struct worker {
+ struct work_struct *current_work; /* L: work being processed */
+ struct task_struct *task; /* I: worker task */
+ struct cpu_workqueue_struct *cwq; /* I: the associated cwq */
+ int id; /* I: worker id */
+};
+
/*
* The per-CPU workqueue (if single thread, we always use the first
* possible cpu). The lower WORK_STRUCT_FLAG_BITS of
@@ -58,14 +68,14 @@ struct cpu_workqueue_struct {
struct list_head worklist;
wait_queue_head_t more_work;
- struct work_struct *current_work;
+ unsigned int cpu;
+ struct worker *worker;
struct workqueue_struct *wq; /* I: the owning workqueue */
int work_color; /* L: current color */
int flush_color; /* L: flushing color */
int nr_in_flight[WORK_NR_COLORS];
/* L: nr of in_flight works */
- struct task_struct *thread;
};
/*
@@ -213,6 +223,9 @@ static inline void debug_work_deactivate(struct work_struct *work) { }
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
+static DEFINE_PER_CPU(struct ida, worker_ida);
+
+static int worker_thread(void *__worker);
static int singlethread_cpu __read_mostly;
@@ -427,6 +440,105 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+static struct worker *alloc_worker(void)
+{
+ struct worker *worker;
+
+ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+ return worker;
+}
+
+/**
+ * create_worker - create a new workqueue worker
+ * @cwq: cwq the new worker will belong to
+ * @bind: whether to set affinity to @cpu or not
+ *
+ * Create a new worker which is bound to @cwq. The returned worker
+ * can be started by calling start_worker() or destroyed using
+ * destroy_worker().
+ *
+ * CONTEXT:
+ * Might sleep. Does GFP_KERNEL allocations.
+ *
+ * RETURNS:
+ * Pointer to the newly created worker.
+ */
+static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
+{
+ int id = -1;
+ struct worker *worker = NULL;
+
+ spin_lock(&workqueue_lock);
+ while (ida_get_new(&per_cpu(worker_ida, cwq->cpu), &id)) {
+ spin_unlock(&workqueue_lock);
+ if (!ida_pre_get(&per_cpu(worker_ida, cwq->cpu), GFP_KERNEL))
+ goto fail;
+ spin_lock(&workqueue_lock);
+ }
+ spin_unlock(&workqueue_lock);
+
+ worker = alloc_worker();
+ if (!worker)
+ goto fail;
+
+ worker->cwq = cwq;
+ worker->id = id;
+
+ worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
+ cwq->cpu, id);
+ if (IS_ERR(worker->task))
+ goto fail;
+
+ if (bind)
+ kthread_bind(worker->task, cwq->cpu);
+
+ return worker;
+fail:
+ if (id >= 0) {
+ spin_lock(&workqueue_lock);
+ ida_remove(&per_cpu(worker_ida, cwq->cpu), id);
+ spin_unlock(&workqueue_lock);
+ }
+ kfree(worker);
+ return NULL;
+}
+
+/**
+ * start_worker - start a newly created worker
+ * @worker: worker to start
+ *
+ * Start @worker.
+ *
+ * CONTEXT:
+ * spin_lock_irq(cwq->lock).
+ */
+static void start_worker(struct worker *worker)
+{
+ wake_up_process(worker->task);
+}
+
+/**
+ * destroy_worker - destroy a workqueue worker
+ * @worker: worker to be destroyed
+ *
+ * Destroy @worker.
+ */
+static void destroy_worker(struct worker *worker)
+{
+ int cpu = worker->cwq->cpu;
+ int id = worker->id;
+
+ /* sanity check frenzy */
+ BUG_ON(worker->current_work);
+
+ kthread_stop(worker->task);
+ kfree(worker);
+
+ spin_lock(&workqueue_lock);
+ ida_remove(&per_cpu(worker_ida, cpu), id);
+ spin_unlock(&workqueue_lock);
+}
+
/**
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
@@ -467,7 +579,7 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
/**
* process_one_work - process single work
- * @cwq: cwq to process work for
+ * @worker: self
* @work: work to process
*
* Process @work. This function contains all the logics necessary to
@@ -479,9 +591,9 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
* CONTEXT:
* spin_lock_irq(cwq->lock) which is released and regrabbed.
*/
-static void process_one_work(struct cpu_workqueue_struct *cwq,
- struct work_struct *work)
+static void process_one_work(struct worker *worker, struct work_struct *work)
{
+ struct cpu_workqueue_struct *cwq = worker->cwq;
work_func_t f = work->func;
int work_color;
#ifdef CONFIG_LOCKDEP
@@ -496,7 +608,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq,
#endif
/* claim and process */
debug_work_deactivate(work);
- cwq->current_work = work;
+ worker->current_work = work;
work_color = work_flags_to_color(*work_data_bits(work));
list_del_init(&work->entry);
@@ -523,30 +635,33 @@ static void process_one_work(struct cpu_workqueue_struct *cwq,
spin_lock_irq(&cwq->lock);
/* we're done with it, release */
- cwq->current_work = NULL;
+ worker->current_work = NULL;
cwq_dec_nr_in_flight(cwq, work_color);
}
-static void run_workqueue(struct cpu_workqueue_struct *cwq)
+static void run_workqueue(struct worker *worker)
{
+ struct cpu_workqueue_struct *cwq = worker->cwq;
+
spin_lock_irq(&cwq->lock);
while (!list_empty(&cwq->worklist)) {
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
- process_one_work(cwq, work);
+ process_one_work(worker, work);
}
spin_unlock_irq(&cwq->lock);
}
/**
* worker_thread - the worker thread function
- * @__cwq: cwq to serve
+ * @__worker: self
*
* The cwq worker thread function.
*/
-static int worker_thread(void *__cwq)
+static int worker_thread(void *__worker)
{
- struct cpu_workqueue_struct *cwq = __cwq;
+ struct worker *worker = __worker;
+ struct cpu_workqueue_struct *cwq = worker->cwq;
DEFINE_WAIT(wait);
if (cwq->wq->flags & WQ_FREEZEABLE)
@@ -565,7 +680,7 @@ static int worker_thread(void *__cwq)
if (kthread_should_stop())
break;
- run_workqueue(cwq);
+ run_workqueue(worker);
}
return 0;
@@ -863,7 +978,7 @@ int flush_work(struct work_struct *work)
goto already_gone;
prev = &work->entry;
} else {
- if (cwq->current_work != work)
+ if (!cwq->worker || cwq->worker->current_work != work)
goto already_gone;
prev = &cwq->worklist;
}
@@ -928,7 +1043,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
int running = 0;
spin_lock_irq(&cwq->lock);
- if (unlikely(cwq->current_work == work)) {
+ if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
insert_wq_barrier(cwq, &barr, cwq->worklist.next);
running = 1;
}
@@ -1191,7 +1306,7 @@ int current_is_keventd(void)
BUG_ON(!keventd_wq);
cwq = get_cwq(cpu, keventd_wq);
- if (current == cwq->thread)
+ if (current == cwq->worker->task)
ret = 1;
return ret;
@@ -1236,38 +1351,6 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs)
#endif
}
-static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
-{
- struct workqueue_struct *wq = cwq->wq;
- struct task_struct *p;
-
- p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
- /*
- * Nobody can add the work_struct to this cwq,
- * if (caller is __create_workqueue)
- * nobody should see this wq
- * else // caller is CPU_UP_PREPARE
- * cpu is not on cpu_online_map
- * so we can abort safely.
- */
- if (IS_ERR(p))
- return PTR_ERR(p);
- cwq->thread = p;
-
- return 0;
-}
-
-static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
-{
- struct task_struct *p = cwq->thread;
-
- if (p != NULL) {
- if (cpu >= 0)
- kthread_bind(p, cpu);
- wake_up_process(p);
- }
-}
-
struct workqueue_struct *__create_workqueue_key(const char *name,
unsigned int flags,
struct lock_class_key *key,
@@ -1275,7 +1358,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
{
bool singlethread = flags & WQ_SINGLE_THREAD;
struct workqueue_struct *wq;
- int err = 0, cpu;
+ bool failed = false;
+ unsigned int cpu;
wq = kzalloc(sizeof(*wq), GFP_KERNEL);
if (!wq)
@@ -1305,23 +1389,25 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
+ cwq->cpu = cpu;
cwq->wq = wq;
cwq->flush_color = -1;
spin_lock_init(&cwq->lock);
INIT_LIST_HEAD(&cwq->worklist);
init_waitqueue_head(&cwq->more_work);
- if (err)
+ if (failed)
continue;
- err = create_workqueue_thread(cwq, cpu);
- if (cpu_online(cpu) && !singlethread)
- start_workqueue_thread(cwq, cpu);
+ cwq->worker = create_worker(cwq,
+ cpu_online(cpu) && !singlethread);
+ if (cwq->worker)
+ start_worker(cwq->worker);
else
- start_workqueue_thread(cwq, -1);
+ failed = true;
}
cpu_maps_update_done();
- if (err) {
+ if (failed) {
destroy_workqueue(wq);
wq = NULL;
}
@@ -1360,9 +1446,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
int i;
- if (cwq->thread) {
- kthread_stop(cwq->thread);
- cwq->thread = NULL;
+ if (cwq->worker) {
+ destroy_worker(cwq->worker);
+ cwq->worker = NULL;
}
for (i = 0; i < WORK_NR_COLORS; i++)
@@ -1392,8 +1478,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
- __set_cpus_allowed(cwq->thread, get_cpu_mask(cpu),
- true);
+ __set_cpus_allowed(cwq->worker->task,
+ get_cpu_mask(cpu), true);
break;
case CPU_POST_DEAD:
@@ -1454,6 +1540,8 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
void __init init_workqueues(void)
{
+ unsigned int cpu;
+
/*
* cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
* Make sure that the alignment isn't lower than that of
@@ -1462,6 +1550,9 @@ void __init init_workqueues(void)
BUILD_BUG_ON(__alignof__(struct cpu_workqueue_struct) <
__alignof__(unsigned long long));
+ for_each_possible_cpu(cpu)
+ ida_init(&per_cpu(worker_ida, cpu));
+
singlethread_cpu = cpumask_first(cpu_possible_mask);
hotcpu_notifier(workqueue_cpu_callback, 0);
keventd_wq = create_workqueue("events");
--
1.6.4.2
next prev parent reply other threads:[~2010-02-26 12:23 UTC|newest]
Thread overview: 86+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-02-26 12:22 [PATCHSET] workqueue: concurrency managed workqueue, take#4 Tejun Heo
2010-02-26 12:22 ` [PATCH 01/43] sched: consult online mask instead of active in select_fallback_rq() Tejun Heo
2010-02-26 12:22 ` [PATCH 02/43] sched: rename preempt_notifiers to sched_notifiers and refactor implementation Tejun Heo
2010-02-26 12:22 ` [PATCH 03/43] sched: refactor try_to_wake_up() Tejun Heo
2010-02-26 12:22 ` [PATCH 04/43] sched: implement __set_cpus_allowed() Tejun Heo
2010-02-26 12:22 ` [PATCH 05/43] sched: make sched_notifiers unconditional Tejun Heo
2010-02-26 12:22 ` [PATCH 06/43] sched: add wakeup/sleep sched_notifiers and allow NULL notifier ops Tejun Heo
2010-02-26 12:22 ` [PATCH 07/43] sched: implement try_to_wake_up_local() Tejun Heo
2010-02-28 12:33 ` Oleg Nesterov
2010-03-01 14:22 ` Tejun Heo
2010-02-26 12:22 ` [PATCH 08/43] workqueue: change cancel_work_sync() to clear work->data Tejun Heo
2010-02-26 12:22 ` [PATCH 09/43] acpi: use queue_work_on() instead of binding workqueue worker to cpu0 Tejun Heo
2010-02-26 12:22 ` [PATCH 10/43] stop_machine: reimplement without using workqueue Tejun Heo
2010-02-28 14:11 ` Oleg Nesterov
2010-03-01 15:07 ` Tejun Heo
2010-03-01 15:37 ` Oleg Nesterov
2010-03-01 16:36 ` Tejun Heo
2010-03-01 16:50 ` Oleg Nesterov
2010-03-01 18:02 ` Tejun Heo
2010-02-28 14:34 ` Oleg Nesterov
2010-03-01 15:11 ` Tejun Heo
2010-03-01 15:41 ` Oleg Nesterov
2010-02-26 12:22 ` [PATCH 11/43] workqueue: misc/cosmetic updates Tejun Heo
2010-02-26 12:22 ` [PATCH 12/43] workqueue: merge feature parameters into flags Tejun Heo
2010-02-26 12:22 ` [PATCH 13/43] workqueue: define masks for work flags and conditionalize STATIC flags Tejun Heo
2010-02-26 12:22 ` [PATCH 14/43] workqueue: separate out process_one_work() Tejun Heo
2010-02-26 12:22 ` [PATCH 15/43] workqueue: temporarily disable workqueue tracing Tejun Heo
2010-02-26 12:22 ` [PATCH 16/43] workqueue: kill cpu_populated_map Tejun Heo
2010-02-28 16:00 ` Oleg Nesterov
2010-03-01 15:32 ` Tejun Heo
2010-03-01 15:50 ` Oleg Nesterov
2010-03-01 16:19 ` Tejun Heo
2010-02-26 12:22 ` [PATCH 17/43] workqueue: update cwq alignement Tejun Heo
2010-02-28 17:12 ` Oleg Nesterov
2010-03-01 16:40 ` Tejun Heo
2010-02-26 12:22 ` [PATCH 18/43] workqueue: reimplement workqueue flushing using color coded works Tejun Heo
2010-02-28 20:31 ` Oleg Nesterov
2010-03-01 17:33 ` Tejun Heo
2010-03-01 19:47 ` Oleg Nesterov
2010-02-26 12:22 ` Tejun Heo [this message]
2010-02-26 12:22 ` [PATCH 20/43] workqueue: reimplement work flushing using linked works Tejun Heo
2010-03-01 14:53 ` Oleg Nesterov
2010-03-01 18:00 ` Tejun Heo
2010-03-01 18:51 ` Oleg Nesterov
2010-02-26 12:22 ` [PATCH 21/43] workqueue: implement per-cwq active work limit Tejun Heo
2010-02-26 12:22 ` [PATCH 22/43] workqueue: reimplement workqueue freeze using max_active Tejun Heo
2010-02-26 12:23 ` [PATCH 23/43] workqueue: introduce global cwq and unify cwq locks Tejun Heo
2010-02-26 12:23 ` [PATCH 24/43] workqueue: implement worker states Tejun Heo
2010-02-26 12:23 ` [PATCH 25/43] workqueue: reimplement CPU hotplugging support using trustee Tejun Heo
2010-02-26 12:23 ` [PATCH 26/43] workqueue: make single thread workqueue shared worker pool friendly Tejun Heo
2010-02-26 12:23 ` [PATCH 27/43] workqueue: add find_worker_executing_work() and track current_cwq Tejun Heo
2010-02-26 12:23 ` [PATCH 28/43] workqueue: carry cpu number in work data once execution starts Tejun Heo
2010-02-28 7:08 ` [PATCH UPDATED " Tejun Heo
2010-02-26 12:23 ` [PATCH 29/43] workqueue: implement WQ_NON_REENTRANT Tejun Heo
2010-02-26 12:23 ` [PATCH 30/43] workqueue: use shared worklist and pool all workers per cpu Tejun Heo
2010-02-26 12:23 ` [PATCH 31/43] workqueue: implement concurrency managed dynamic worker pool Tejun Heo
2010-02-26 12:23 ` [PATCH 32/43] workqueue: increase max_active of keventd and kill current_is_keventd() Tejun Heo
2010-02-26 12:23 ` [PATCH 33/43] workqueue: add system_wq, system_long_wq and system_nrt_wq Tejun Heo
2010-02-26 12:23 ` [PATCH 34/43] workqueue: implement DEBUGFS/workqueue Tejun Heo
2010-02-28 7:13 ` [PATCH UPDATED " Tejun Heo
2010-02-26 12:23 ` [PATCH 35/43] workqueue: implement several utility APIs Tejun Heo
2010-02-28 7:15 ` [PATCH UPDATED " Tejun Heo
2010-02-26 12:23 ` [PATCH 36/43] libata: take advantage of cmwq and remove concurrency limitations Tejun Heo
2010-02-26 12:23 ` [PATCH 37/43] async: use workqueue for worker pool Tejun Heo
2010-02-26 12:23 ` [PATCH 38/43] fscache: convert object to use workqueue instead of slow-work Tejun Heo
2010-02-26 12:23 ` [PATCH 39/43] fscache: convert operation " Tejun Heo
2010-02-26 12:23 ` [PATCH 40/43] fscache: drop references to slow-work Tejun Heo
2010-02-26 12:23 ` [PATCH 41/43] cifs: use workqueue instead of slow-work Tejun Heo
2010-02-28 7:09 ` [PATCH UPDATED " Tejun Heo
2010-02-26 12:23 ` [PATCH 42/43] gfs2: " Tejun Heo
2010-02-28 7:10 ` [PATCH UPDATED " Tejun Heo
2010-02-26 12:23 ` [PATCH 43/43] slow-work: kill it Tejun Heo
2010-02-27 22:52 ` [PATCH] workqueue: Fix build on PowerPC Anton Blanchard
2010-02-28 6:08 ` Tejun Heo
2010-02-28 1:00 ` [PATCH] workqueue: Fix a compile warning in work_busy Anton Blanchard
2010-02-28 6:18 ` Tejun Heo
2010-02-28 1:11 ` [PATCHSET] workqueue: concurrency managed workqueue, take#4 Anton Blanchard
2010-02-28 6:32 ` Tejun Heo
2010-03-10 14:52 ` David Howells
2010-03-12 5:03 ` Tejun Heo
2010-03-12 11:23 ` David Howells
2010-03-12 22:55 ` Tejun Heo
2010-03-16 14:38 ` David Howells
2010-03-16 16:03 ` Tejun Heo
2010-03-16 17:18 ` David Howells
2010-04-25 8:09 ` [PATCHSET UPDATED] " Tejun Heo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1267187000-18791-20-git-send-email-tj@kernel.org \
--to=tj@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=andi@firstfloor.org \
--cc=arjan@linux.intel.com \
--cc=avi@redhat.com \
--cc=awalls@radix.net \
--cc=cl@linux-foundation.org \
--cc=dhowells@redhat.com \
--cc=jeff@garzik.org \
--cc=jens.axboe@oracle.com \
--cc=johannes@sipsolutions.net \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=oleg@redhat.com \
--cc=peterz@infradead.org \
--cc=rusty@rustcorp.com.au \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox