From: Tejun Heo <tj@kernel.org>
To: mingo@elte.hu, awalls@radix.net, linux-kernel@vger.kernel.org,
	jeff@garzik.org, akpm@linux-foundation.org,
	rusty@rustcorp.com.au, cl@linux-foundation.org,
	dhowells@redhat.com, arjan@linux.intel.com,
	johannes@sipsolutions.net, oleg@redhat.com, axboe@kernel.dk
Cc: Tejun Heo <tj@kernel.org>
Subject: [PATCH 15/30] workqueue: reimplement workqueue freeze using max_active
Date: Mon, 14 Jun 2010 23:37:32 +0200
Message-ID: <1276551467-21246-16-git-send-email-tj@kernel.org>
In-Reply-To: <1276551467-21246-1-git-send-email-tj@kernel.org>

Currently, workqueue freezing is implemented by marking the worker
freezeable and calling try_to_freeze() from the dispatch loop.
Reimplement it using cwq->max_active so that the workqueue itself is
frozen instead of the worker.

* workqueue_struct->saved_max_active is added which stores the
  specified max_active on initialization.

* On freeze, all cwq->max_active's are forced to zero.  Freezing is
  complete once nr_active has reached zero on all cwqs.

* On thaw, all cwq->max_active's are restored to wq->saved_max_active
  and the worklist is repopulated.

This new implementation allows having a single shared pool of workers
per cpu.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 include/linux/workqueue.h |    7 ++
 kernel/power/process.c    |   21 +++++-
 kernel/workqueue.c        |  163 ++++++++++++++++++++++++++++++++++++++++++---
 3 files changed, 179 insertions(+), 12 deletions(-)
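
Note (not part of the commit message): below is a minimal,
self-contained userspace sketch of the max_active based freeze/thaw
scheme described above.  It is a toy model only, not kernel code; the
names in it (toy_cwq, toy_queue_work, ...) are invented for
illustration, and the real locking, per-cpu iteration and worker
wakeups are in the workqueue.c hunks that follow.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for struct cpu_workqueue_struct; counters only. */
struct toy_cwq {
	int saved_max_active;	/* limit given at workqueue creation */
	int max_active;		/* forced to 0 while frozen */
	int nr_active;		/* works currently "executing" */
	int nr_delayed;		/* works parked on the delayed list */
};

/* Queue one work: activate it if under the limit, otherwise park it. */
static void toy_queue_work(struct toy_cwq *cwq)
{
	if (cwq->nr_active < cwq->max_active)
		cwq->nr_active++;
	else
		cwq->nr_delayed++;
}

/* A worker finished one active work item. */
static void toy_work_done(struct toy_cwq *cwq)
{
	if (cwq->nr_active)
		cwq->nr_active--;
}

/* Freeze: quench max_active so nothing new becomes active. */
static void toy_freeze_begin(struct toy_cwq *cwq)
{
	cwq->max_active = 0;
}

/* Busy check: freezing is complete once nr_active drains to zero. */
static bool toy_freeze_busy(struct toy_cwq *cwq)
{
	return cwq->nr_active != 0;
}

/* Thaw: restore the saved limit and re-activate parked works. */
static void toy_thaw(struct toy_cwq *cwq)
{
	cwq->max_active = cwq->saved_max_active;
	while (cwq->nr_delayed && cwq->nr_active < cwq->max_active) {
		cwq->nr_delayed--;
		cwq->nr_active++;
	}
}

int main(void)
{
	struct toy_cwq cwq = { .saved_max_active = 2, .max_active = 2 };

	toy_queue_work(&cwq);			/* becomes active */
	toy_freeze_begin(&cwq);
	toy_queue_work(&cwq);			/* parked: max_active == 0 */
	printf("busy right after freeze: %d\n", toy_freeze_busy(&cwq));
	toy_work_done(&cwq);			/* in-flight work drains */
	printf("busy once drained:      %d\n", toy_freeze_busy(&cwq));
	toy_thaw(&cwq);
	printf("active after thaw:      %d\n", cwq.nr_active);
	return 0;
}

Built with any C compiler, the sketch prints busy=1 while a work is
still in flight, busy=0 once it has drained, and shows the parked work
becoming active again after thaw.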

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index eb753b7..ab0b7fb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -340,4 +340,11 @@ static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 #else
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
 #endif /* CONFIG_SMP */
+
+#ifdef CONFIG_FREEZER
+extern void freeze_workqueues_begin(void);
+extern bool freeze_workqueues_busy(void);
+extern void thaw_workqueues(void);
+#endif /* CONFIG_FREEZER */
+
 #endif
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 71ae290..028a995 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -15,6 +15,7 @@
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
 #include <linux/delay.h>
+#include <linux/workqueue.h>
 
 /* 
  * Timeout for stopping processes
@@ -35,6 +36,7 @@ static int try_to_freeze_tasks(bool sig_only)
 	struct task_struct *g, *p;
 	unsigned long end_time;
 	unsigned int todo;
+	bool wq_busy = false;
 	struct timeval start, end;
 	u64 elapsed_csecs64;
 	unsigned int elapsed_csecs;
@@ -42,6 +44,10 @@ static int try_to_freeze_tasks(bool sig_only)
 	do_gettimeofday(&start);
 
 	end_time = jiffies + TIMEOUT;
+
+	if (!sig_only)
+		freeze_workqueues_begin();
+
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
@@ -63,6 +69,12 @@ static int try_to_freeze_tasks(bool sig_only)
 				todo++;
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
+
+		if (!sig_only) {
+			wq_busy = freeze_workqueues_busy();
+			todo += wq_busy;
+		}
+
 		if (!todo || time_after(jiffies, end_time))
 			break;
 
@@ -86,8 +98,12 @@ static int try_to_freeze_tasks(bool sig_only)
 		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
-				"(%d tasks refusing to freeze):\n",
-				elapsed_csecs / 100, elapsed_csecs % 100, todo);
+		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
+		       elapsed_csecs / 100, elapsed_csecs % 100,
+		       todo - wq_busy, wq_busy);
+
+		thaw_workqueues();
+
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			task_lock(p);
@@ -157,6 +173,7 @@ void thaw_processes(void)
 	oom_killer_enable();
 
 	printk("Restarting tasks ... ");
+	thaw_workqueues();
 	thaw_tasks(true);
 	thaw_tasks(false);
 	schedule();
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 101b92e..44c0fb2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -78,7 +78,7 @@ struct cpu_workqueue_struct {
 	int			nr_in_flight[WORK_NR_COLORS];
 						/* L: nr of in_flight works */
 	int			nr_active;	/* L: nr of active works */
-	int			max_active;	/* I: max active works */
+	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
 };
 
@@ -108,6 +108,7 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */
 
+	int			saved_max_active; /* I: saved cwq max_active */
 	const char		*name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
@@ -228,6 +229,7 @@ static inline void debug_work_deactivate(struct work_struct *work) { }
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 static DEFINE_PER_CPU(struct ida, worker_ida);
+static bool workqueue_freezing;		/* W: have wqs started freezing? */
 
 static int worker_thread(void *__worker);
 
@@ -745,19 +747,13 @@ static int worker_thread(void *__worker)
 	struct cpu_workqueue_struct *cwq = worker->cwq;
 	DEFINE_WAIT(wait);
 
-	if (cwq->wq->flags & WQ_FREEZEABLE)
-		set_freezable();
-
 	for (;;) {
 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-		if (!freezing(current) &&
-		    !kthread_should_stop() &&
+		if (!kthread_should_stop() &&
 		    list_empty(&cwq->worklist))
 			schedule();
 		finish_wait(&cwq->more_work, &wait);
 
-		try_to_freeze();
-
 		if (kthread_should_stop())
 			break;
 
@@ -1547,6 +1543,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		goto err;
 
 	wq->flags = flags;
+	wq->saved_max_active = max_active;
 	mutex_init(&wq->flush_mutex);
 	atomic_set(&wq->nr_cwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->flusher_queue);
@@ -1585,8 +1582,19 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			failed = true;
 	}
 
+	/*
+	 * workqueue_lock protects global freeze state and workqueues
+	 * list.  Grab it, set max_active accordingly and add the new
+	 * workqueue to workqueues list.
+	 */
 	spin_lock(&workqueue_lock);
+
+	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+		for_each_possible_cpu(cpu)
+			get_cwq(cpu, wq)->max_active = 0;
+
 	list_add(&wq->list, &workqueues);
+
 	spin_unlock(&workqueue_lock);
 
 	cpu_maps_update_done();
@@ -1615,14 +1623,18 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	int cpu;
 
+	flush_workqueue(wq);
+
+	/*
+	 * wq list is used to freeze wq, remove from list after
+	 * flushing is complete in case freeze races us.
+	 */
 	cpu_maps_update_begin();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 	cpu_maps_update_done();
 
-	flush_workqueue(wq);
-
 	for_each_possible_cpu(cpu) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 		int i;
@@ -1716,6 +1728,137 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 EXPORT_SYMBOL_GPL(work_on_cpu);
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_FREEZER
+
+/**
+ * freeze_workqueues_begin - begin freezing workqueues
+ *
+ * Start freezing workqueues.  After this function returns, new works
+ * queued on freezeable workqueues are placed on their cwq
+ * delayed_works lists instead of the cwq worklists.
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock and cwq->lock's.
+ */
+void freeze_workqueues_begin(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+
+	spin_lock(&workqueue_lock);
+
+	BUG_ON(workqueue_freezing);
+	workqueue_freezing = true;
+
+	for_each_possible_cpu(cpu) {
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			spin_lock_irq(&cwq->lock);
+
+			if (wq->flags & WQ_FREEZEABLE)
+				cwq->max_active = 0;
+
+			spin_unlock_irq(&cwq->lock);
+		}
+	}
+
+	spin_unlock(&workqueue_lock);
+}
+
+/**
+ * freeze_workqueues_busy - are freezeable workqueues still busy?
+ *
+ * Check whether freezing is complete.  This function must be called
+ * between freeze_workqueues_begin() and thaw_workqueues().
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock.
+ *
+ * RETURNS:
+ * %true if some freezeable workqueues are still busy.  %false if
+ * freezing is complete.
+ */
+bool freeze_workqueues_busy(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+	bool busy = false;
+
+	spin_lock(&workqueue_lock);
+
+	BUG_ON(!workqueue_freezing);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * nr_active is monotonically decreasing.  It's safe
+		 * to peek without lock.
+		 */
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			if (!(wq->flags & WQ_FREEZEABLE))
+				continue;
+
+			BUG_ON(cwq->nr_active < 0);
+			if (cwq->nr_active) {
+				busy = true;
+				goto out_unlock;
+			}
+		}
+	}
+out_unlock:
+	spin_unlock(&workqueue_lock);
+	return busy;
+}
+
+/**
+ * thaw_workqueues - thaw workqueues
+ *
+ * Thaw workqueues.  Normal queueing is restored and all collected
+ * frozen works are transferred to their respective cwq worklists.
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock and cwq->lock's.
+ */
+void thaw_workqueues(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+
+	spin_lock(&workqueue_lock);
+
+	if (!workqueue_freezing)
+		goto out_unlock;
+
+	for_each_possible_cpu(cpu) {
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			if (!(wq->flags & WQ_FREEZEABLE))
+				continue;
+
+			spin_lock_irq(&cwq->lock);
+
+			/* restore max_active and repopulate worklist */
+			cwq->max_active = wq->saved_max_active;
+
+			while (!list_empty(&cwq->delayed_works) &&
+			       cwq->nr_active < cwq->max_active)
+				cwq_activate_first_delayed(cwq);
+
+			wake_up(&cwq->more_work);
+
+			spin_unlock_irq(&cwq->lock);
+		}
+	}
+
+	workqueue_freezing = false;
+out_unlock:
+	spin_unlock(&workqueue_lock);
+}
+#endif /* CONFIG_FREEZER */
+
 void __init init_workqueues(void)
 {
 	unsigned int cpu;
-- 
1.6.4.2

