linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Tejun Heo <htejun@gmail.com>
To: David Howells <dhowells@redhat.com>,
	Arjan van de Ven <arjan@linux.intel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>,
	torvalds@linux-foundation.org, mingo@elte.hu,
	linux-kernel@vger.kernel.org, jeff@garzik.org,
	akpm@linux-foundation.org, rusty@rustcorp.com.au,
	cl@linux-foundation.org, oleg@redhat.com, axboe@kernel.dk,
	dwalker@codeaurora.org, stefanr@s5r6.in-berlin.de,
	florian@mickler.org, andi@firstfloor.org, mst@redhat.com,
	randy.dunlap@oracle.com, Arjan van de Ven <arjan@infradead.org>
Subject: [PATCH 3/4] workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
Date: Fri, 02 Jul 2010 11:24:07 +0200	[thread overview]
Message-ID: <4C2DB037.3010106@gmail.com> (raw)
In-Reply-To: <4C2DAEBB.7090607@kernel.org>

WQ_SINGLE_CPU combined with @max_active of 1 is used to achieve full
ordering among works queued to a workqueue.  The same can be achieved
using WQ_UNBOUND as unbound workqueues always use the gcwq for
WORK_CPU_UNBOUND.  As @max_active is always one and the benefits of cpu
locality aren't accessible anyway, serving them with unbound workqueues
should be fine.

Drop WQ_SINGLE_CPU support and use WQ_UNBOUND instead.  Note that most
single thread workqueue users will be converted to use multithreaded or
non-reentrant workqueues instead, and only the ones which require strict
ordering will keep using WQ_UNBOUND + @max_active of 1.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 include/linux/workqueue.h |    7 +--
 kernel/workqueue.c        |  100 ++++++++-------------------------------------
 2 files changed, 21 insertions(+), 86 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 67ce734..d74a529 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -233,12 +233,11 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }

 enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
-	WQ_SINGLE_CPU		= 1 << 1, /* only single cpu at a time */
+	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
 	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
 	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
-	WQ_UNBOUND		= 1 << 6, /* not bound to any cpu */

 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
@@ -300,9 +299,9 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 #define create_workqueue(name)					\
 	alloc_workqueue((name), WQ_RESCUER, 1)
 #define create_freezeable_workqueue(name)			\
-	alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
 #define create_singlethread_workqueue(name)			\
-	alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)

 extern void destroy_workqueue(struct workqueue_struct *wq);

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4608563..20d6237 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -206,8 +206,6 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */

-	unsigned long		single_cpu;	/* cpu for single cpu wq */
-
 	cpumask_var_t		mayday_mask;	/* cpus requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */

@@ -889,34 +887,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 		wake_up_worker(gcwq);
 }

-/**
- * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
- * @cwq: cwq to unbind
- *
- * Try to unbind @cwq from single cpu workqueue processing.  If
- * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
-{
-	struct workqueue_struct *wq = cwq->wq;
-	struct global_cwq *gcwq = cwq->gcwq;
-
-	BUG_ON(wq->single_cpu != gcwq->cpu);
-	/*
-	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
-	 * thaw_workqueues() will either restart processing on this
-	 * cpu or unbind if empty.  This keeps works queued while
-	 * frozen fully ordered and flushable.
-	 */
-	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
-		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
-		wq->single_cpu = WORK_CPU_NONE;
-	}
-}
-
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -924,20 +894,16 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	struct cpu_workqueue_struct *cwq;
 	struct list_head *worklist;
 	unsigned long flags;
-	bool arbitrate;

 	debug_work_activate(work);

-	if (unlikely(cpu == WORK_CPU_UNBOUND))
-		cpu = raw_smp_processor_id();
-
-	/*
-	 * Determine gcwq to use.  SINGLE_CPU is inherently
-	 * NON_REENTRANT, so test it first.
-	 */
-	if (!(wq->flags & (WQ_SINGLE_CPU | WQ_UNBOUND))) {
+	/* determine gcwq to use */
+	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *last_gcwq;

+		if (unlikely(cpu == WORK_CPU_UNBOUND))
+			cpu = raw_smp_processor_id();
+
 		/*
 		 * It's multi cpu.  If @wq is non-reentrant and @work
 		 * was previously on a different cpu, it might still
@@ -962,38 +928,6 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			}
 		} else
 			spin_lock_irqsave(&gcwq->lock, flags);
-	} else if (!(wq->flags & WQ_UNBOUND)) {
-		unsigned int req_cpu = cpu;
-
-		/*
-		 * It's a bit more complex for single cpu workqueues.
-		 * We first need to determine which cpu is going to be
-		 * used.  If no cpu is currently serving this
-		 * workqueue, arbitrate using atomic accesses to
-		 * wq->single_cpu; otherwise, use the current one.
-		 */
-	retry:
-		cpu = wq->single_cpu;
-		arbitrate = cpu == WORK_CPU_NONE;
-		if (arbitrate)
-			cpu = req_cpu;
-
-		gcwq = get_gcwq(cpu);
-		spin_lock_irqsave(&gcwq->lock, flags);
-
-		/*
-		 * The following cmpxchg() is a full barrier paired
-		 * with smp_wmb() in cwq_unbind_single_cpu() and
-		 * guarantees that all changes to wq->st_* fields are
-		 * visible on the new cpu after this point.
-		 */
-		if (arbitrate)
-			cmpxchg(&wq->single_cpu, WORK_CPU_NONE, cpu);
-
-		if (unlikely(wq->single_cpu != cpu)) {
-			spin_unlock_irqrestore(&gcwq->lock, flags);
-			goto retry;
-		}
 	} else {
 		gcwq = get_gcwq(WORK_CPU_UNBOUND);
 		spin_lock_irqsave(&gcwq->lock, flags);
@@ -1105,19 +1039,30 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct work_struct *work = &dwork->work;

 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-		struct global_cwq *gcwq = get_work_gcwq(work);
-		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
+		unsigned int lcpu;

 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));

 		timer_stats_timer_set_start_info(&dwork->timer);
+
 		/*
 		 * This stores cwq for the moment, for the timer_fn.
 		 * Note that the work's gcwq is preserved to allow
 		 * reentrance detection for delayed works.
 		 */
+		if (!(wq->flags & WQ_UNBOUND)) {
+			struct global_cwq *gcwq = get_work_gcwq(work);
+
+			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
+				lcpu = gcwq->cpu;
+			else
+				lcpu = raw_smp_processor_id();
+		} else
+			lcpu = WORK_CPU_UNBOUND;
+
 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
+
 		timer->expires = jiffies + delay;
 		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
@@ -1696,9 +1641,6 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 		/* one down, submit a delayed one */
 		if (cwq->nr_active < cwq->max_active)
 			cwq_activate_first_delayed(cwq);
-	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
-		/* this was the last work, unbind from single cpu */
-		cwq_unbind_single_cpu(cwq);
 	}

 	/* is flush in progress and are we at the flushing tip? */
@@ -2751,7 +2693,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	atomic_set(&wq->nr_cwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->flusher_queue);
 	INIT_LIST_HEAD(&wq->flusher_overflow);
-	wq->single_cpu = WORK_CPU_NONE;

 	wq->name = name;
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
@@ -3513,11 +3454,6 @@ void thaw_workqueues(void)
 			while (!list_empty(&cwq->delayed_works) &&
 			       cwq->nr_active < cwq->max_active)
 				cwq_activate_first_delayed(cwq);
-
-			/* perform delayed unbind from single cpu if empty */
-			if (wq->single_cpu == gcwq->cpu &&
-			    !cwq->nr_active && list_empty(&cwq->delayed_works))
-				cwq_unbind_single_cpu(cwq);
 		}

 		wake_up_worker(gcwq);
-- 
1.6.4.2


  parent reply	other threads:[~2010-07-02  9:24 UTC|newest]

Thread overview: 98+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-06-28 21:03 [PATCHSET] workqueue: concurrency managed workqueue, take#6 Tejun Heo
2010-06-28 21:03 ` [PATCH 01/35] kthread: implement kthread_worker Tejun Heo
2010-06-28 21:03 ` [PATCH 02/35] ivtv: use kthread_worker instead of workqueue Tejun Heo
2010-07-05 17:11   ` Andy Walls
2010-07-06  7:01     ` Tejun Heo
2010-07-09 13:15       ` Andy Walls
2010-06-28 21:03 ` [PATCH 03/35] kthread: implement kthread_data() Tejun Heo
2010-06-28 21:03 ` [PATCH 04/35] acpi: use queue_work_on() instead of binding workqueue worker to cpu0 Tejun Heo
2010-06-28 21:03 ` [PATCH 05/35] workqueue: kill RT workqueue Tejun Heo
2010-06-28 21:03 ` [PATCH 06/35] workqueue: misc/cosmetic updates Tejun Heo
2010-06-28 21:03 ` [PATCH 07/35] workqueue: merge feature parameters into flags Tejun Heo
2010-06-28 21:03 ` [PATCH 08/35] workqueue: define masks for work flags and conditionalize STATIC flags Tejun Heo
2010-06-28 21:03 ` [PATCH 09/35] workqueue: separate out process_one_work() Tejun Heo
2010-06-28 21:03 ` [PATCH 10/35] workqueue: temporarily remove workqueue tracing Tejun Heo
2010-06-28 21:03 ` [PATCH 11/35] workqueue: kill cpu_populated_map Tejun Heo
2010-06-28 21:04 ` [PATCH 12/35] workqueue: update cwq alignement Tejun Heo
2010-06-28 22:47   ` Frederic Weisbecker
2010-06-29  7:39     ` Tejun Heo
2010-06-29 12:36       ` Frederic Weisbecker
2010-06-29 15:42         ` Tejun Heo
2010-06-29 15:47           ` Frederic Weisbecker
2010-06-29 15:51             ` Tejun Heo
2010-06-29 16:01               ` Frederic Weisbecker
2010-06-29 16:09                 ` Tejun Heo
2010-06-29 16:17                   ` Frederic Weisbecker
2010-07-06 14:22                   ` Christoph Lameter
2010-07-06 14:26                     ` Tejun Heo
2010-06-29  8:12     ` [PATCH UPDATED " Tejun Heo
2010-06-29 13:39       ` Frederic Weisbecker
2010-06-28 21:04 ` [PATCH 13/35] workqueue: reimplement workqueue flushing using color coded works Tejun Heo
2010-06-28 21:04 ` [PATCH 14/35] workqueue: introduce worker Tejun Heo
2010-06-28 21:04 ` [PATCH 15/35] workqueue: reimplement work flushing using linked works Tejun Heo
2010-06-28 21:04 ` [PATCH 16/35] workqueue: implement per-cwq active work limit Tejun Heo
2010-06-28 21:04 ` [PATCH 17/35] workqueue: reimplement workqueue freeze using max_active Tejun Heo
2010-06-28 21:04 ` [PATCH 18/35] workqueue: introduce global cwq and unify cwq locks Tejun Heo
2010-06-28 21:04 ` [PATCH 19/35] workqueue: implement worker states Tejun Heo
2010-06-28 21:04 ` [PATCH 20/35] workqueue: reimplement CPU hotplugging support using trustee Tejun Heo
2010-06-28 21:04 ` [PATCH 21/35] workqueue: make single thread workqueue shared worker pool friendly Tejun Heo
2010-06-28 21:04 ` [PATCH 22/35] workqueue: add find_worker_executing_work() and track current_cwq Tejun Heo
2010-06-28 21:04 ` [PATCH 23/35] workqueue: carry cpu number in work data once execution starts Tejun Heo
2010-06-28 21:04 ` [PATCH 24/35] workqueue: implement WQ_NON_REENTRANT Tejun Heo
2010-06-28 21:04 ` [PATCH 25/35] workqueue: use shared worklist and pool all workers per cpu Tejun Heo
2010-06-28 21:04 ` [PATCH 26/35] workqueue: implement worker_{set|clr}_flags() Tejun Heo
2010-06-28 21:04 ` [PATCH 27/35] workqueue: implement concurrency managed dynamic worker pool Tejun Heo
2010-07-09  9:11   ` Yong Zhang
2010-07-12  8:53     ` [PATCH] workqueue: fix locking in retry path of maybe_create_worker() Tejun Heo
2010-07-12 13:23       ` Yong Zhang
2010-07-14  9:37         ` Tejun Heo
2010-06-28 21:04 ` [PATCH 28/35] workqueue: increase max_active of keventd and kill current_is_keventd() Tejun Heo
2010-06-28 21:04 ` [PATCH 29/35] workqueue: s/__create_workqueue()/alloc_workqueue()/, and add system workqueues Tejun Heo
2010-06-28 21:04 ` [PATCH 30/35] workqueue: implement several utility APIs Tejun Heo
2010-06-28 21:04 ` [PATCH 31/35] workqueue: implement high priority workqueue Tejun Heo
2010-06-28 21:04 ` [PATCH 32/35] workqueue: implement cpu intensive workqueue Tejun Heo
2010-06-28 21:04 ` [PATCH 33/35] libata: take advantage of cmwq and remove concurrency limitations Tejun Heo
2010-06-28 22:32   ` Jeff Garzik
2010-06-29  7:00     ` Tejun Heo
2010-06-28 21:04 ` [PATCH 34/35] async: use workqueue for worker pool Tejun Heo
2010-06-28 22:55   ` Frederic Weisbecker
2010-06-29  7:25     ` Tejun Heo
2010-06-29 12:18       ` Frederic Weisbecker
2010-06-29 15:46         ` Tejun Heo
2010-06-29 15:52           ` Frederic Weisbecker
2010-06-29 15:55             ` Tejun Heo
2010-06-29 16:40               ` Arjan van de Ven
2010-06-29 16:59                 ` Tejun Heo
2010-06-29 17:12                   ` Tejun Heo
2010-06-29 18:08                     ` Arjan van de Ven
2010-06-29 18:07                   ` Arjan van de Ven
2010-06-29 18:15                     ` Tejun Heo
2010-06-29 18:22                       ` Arjan van de Ven
2010-06-29 18:34                         ` Tejun Heo
2010-06-29 18:41                           ` Arjan van de Ven
2010-06-29 18:59                             ` Tejun Heo
2010-06-29 21:37                 ` David Howells
2010-07-02  9:17                   ` [PATCHSET] workqueue: implement and use WQ_UNBOUND Tejun Heo
2010-07-02  9:19                     ` [PATCH 1/4] workqueue: prepare for WQ_UNBOUND implementation Tejun Heo
2010-07-02  9:24                     ` Tejun Heo [this message]
2010-07-02  9:25                     ` [PATCH 4/4] async: use workqueue for worker pool Tejun Heo
2010-07-02 15:09                       ` Stefan Richter
2010-07-02 16:26                         ` Tejun Heo
2010-07-02 16:25                       ` [PATCH UPDATED " Tejun Heo
2010-07-02  9:28                     ` [PATCH 2/4] workqueue: implement unbound workqueue Tejun Heo
2010-07-02  9:32                     ` [PATCHSET] workqueue: implement and use WQ_UNBOUND Tejun Heo
2010-07-07  5:41                     ` Tejun Heo
2010-07-14  9:39                       ` Tejun Heo
2010-07-02  9:20                   ` [PATCH 2/4] workqueue: implement unbound workqueue Tejun Heo
2010-07-20 22:01                   ` [PATCHSET] workqueue: implement and use WQ_UNBOUND David Howells
2010-06-28 21:04 ` [PATCH 35/35] pcrypt: use HIGHPRI and CPU_INTENSIVE workqueues for padata Tejun Heo
2010-06-28 23:18 ` [PATCHSET] workqueue: concurrency managed workqueue, take#6 Frederic Weisbecker
2010-06-29  7:05   ` Tejun Heo
2010-07-02  8:32 ` [PATCHSET] workqueue: fixes on top of cmwq take#6 Tejun Heo
2010-07-02  8:33   ` [PATCH 1/4] workqueue: use worker_set/clr_flags() only from worker itself Tejun Heo
2010-07-02  8:34   ` [PATCH 2/4] workqueue: fix race condition in flush_workqueue() Tejun Heo
2010-07-02  8:35   ` [PATCH 3/4] workqueue: fix incorrect cpu number BUG_ON() in get_work_gcwq() Tejun Heo
2010-07-02  8:35   ` [PATCH 4/4] workqueue: fix worker management invocation without pending works Tejun Heo
2010-07-19 14:51 ` [PATCHSET] workqueue: concurrency managed workqueue, take#6 Tejun Heo
2010-07-21 13:23 ` David Howells
2010-07-21 14:52   ` Tejun Heo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=4C2DB037.3010106@gmail.com \
    --to=htejun@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=andi@firstfloor.org \
    --cc=arjan@infradead.org \
    --cc=arjan@linux.intel.com \
    --cc=axboe@kernel.dk \
    --cc=cl@linux-foundation.org \
    --cc=dhowells@redhat.com \
    --cc=dwalker@codeaurora.org \
    --cc=florian@mickler.org \
    --cc=fweisbec@gmail.com \
    --cc=jeff@garzik.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@elte.hu \
    --cc=mst@redhat.com \
    --cc=oleg@redhat.com \
    --cc=randy.dunlap@oracle.com \
    --cc=rusty@rustcorp.com.au \
    --cc=stefanr@s5r6.in-berlin.de \
    --cc=torvalds@linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).