From: Tejun Heo <tj@kernel.org>
To: mingo@elte.hu, awalls@radix.net, linux-kernel@vger.kernel.org,
jeff@garzik.org, akpm@linux-foundation.org,
rusty@rustcorp.com.au, cl@linux-foundation.org,
dhowells@redhat.com, arjan@linux.intel.com,
johannes@sipsolutions.net, oleg@redhat.com, axboe@kernel.dk
Cc: Tejun Heo <tj@kernel.org>
Subject: [PATCH 17/30] workqueue: implement worker states
Date: Mon, 14 Jun 2010 23:37:34 +0200
Message-ID: <1276551467-21246-18-git-send-email-tj@kernel.org>
In-Reply-To: <1276551467-21246-1-git-send-email-tj@kernel.org>
Implement worker states. Once created, a worker is STARTED. While a
worker isn't processing a work, it's IDLE and chained on
gcwq->idle_list. While processing a work, a worker is BUSY and
chained on gcwq->busy_hash. Also, gcwq now keeps count of the total
number of workers and of the currently idle ones.
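Condensed from the diff below, the state bookkeeping boils down to
the following abridged sketch (not the complete implementation):

	/* worker flags */
	enum {
		WORKER_STARTED	= 1 << 0,	/* start_worker() has run */
		WORKER_DIE	= 1 << 1,	/* worker is to exit */
		WORKER_IDLE	= 1 << 2,	/* on gcwq->idle_list */
	};

	/* under gcwq->lock: a started worker begins life idle */
	static void start_worker(struct worker *worker)
	{
		worker->flags |= WORKER_STARTED;
		worker->gcwq->nr_workers++;
		worker_enter_idle(worker);	/* WORKER_IDLE + idle_list */
		wake_up_process(worker->task);
	}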
worker_thread() is restructured to reflect the state transitions.
cwq->more_work is removed; waking a worker up now makes it check for
events itself. A worker is killed by setting the DIE flag while it's
IDLE and waking it up.
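The kill handshake, condensed from destroy_worker() and
worker_thread() in the diff below:

	/* destroy_worker(), under gcwq->lock; only an idle worker dies */
	worker->flags |= WORKER_DIE;
	spin_unlock_irq(&gcwq->lock);
	kthread_stop(worker->task);		/* wakes the worker up */

	/* worker_thread(), first thing after each wake-up */
	spin_lock_irq(&gcwq->lock);
	if (worker->flags & WORKER_DIE) {	/* set only while idle */
		spin_unlock_irq(&gcwq->lock);
		return 0;
	}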
This gives the gcwq better visibility into what's going on and allows
it to quickly find out whether a given work is already executing,
which is necessary for having multiple workers process the same cwq.
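As an illustration only: with busy_hash in place, finding the worker
executing a given work becomes a cheap hash probe. The actual lookup
helper, find_worker_executing_work(), is added by a later patch in
this series; a sketch of it would look roughly like:

	static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
							 struct work_struct *work)
	{
		struct worker *worker;
		struct hlist_node *pos;

		hlist_for_each_entry(worker, pos, busy_worker_head(gcwq, work),
				     hentry)
			if (worker->current_work == work)
				return worker;
		return NULL;
	}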
Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/workqueue.c | 214 ++++++++++++++++++++++++++++++++++++++++++----------
1 files changed, 173 insertions(+), 41 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d0ca750..62d7cfd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -35,6 +35,17 @@
#include <linux/lockdep.h>
#include <linux/idr.h>
+enum {
+ /* worker flags */
+ WORKER_STARTED = 1 << 0, /* started */
+ WORKER_DIE = 1 << 1, /* die die die */
+ WORKER_IDLE = 1 << 2, /* is idle */
+
+ BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
+ BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
+ BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
+};
+
/*
* Structure fields follow one of the following exclusion rules.
*
@@ -51,11 +62,18 @@ struct global_cwq;
struct cpu_workqueue_struct;
struct worker {
+ /* on idle list while idle, on busy hash table while busy */
+ union {
+ struct list_head entry; /* L: while idle */
+ struct hlist_node hentry; /* L: while busy */
+ };
+
struct work_struct *current_work; /* L: work being processed */
struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */
struct global_cwq *gcwq; /* I: the associated gcwq */
struct cpu_workqueue_struct *cwq; /* I: the associated cwq */
+ unsigned int flags; /* L: flags */
int id; /* I: worker id */
};
@@ -65,6 +83,15 @@ struct worker {
struct global_cwq {
spinlock_t lock; /* the gcwq lock */
unsigned int cpu; /* I: the associated cpu */
+
+ int nr_workers; /* L: total number of workers */
+ int nr_idle; /* L: currently idle ones */
+
+ /* workers are chained either in the idle_list or busy_hash */
+ struct list_head idle_list; /* L: list of idle workers */
+ struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
+ /* L: hash of busy workers */
+
struct ida worker_ida; /* L: for worker IDs */
} ____cacheline_aligned_in_smp;
@@ -77,7 +104,6 @@ struct global_cwq {
struct cpu_workqueue_struct {
struct global_cwq *gcwq; /* I: the associated gcwq */
struct list_head worklist;
- wait_queue_head_t more_work;
struct worker *worker;
struct workqueue_struct *wq; /* I: the owning workqueue */
int work_color; /* L: current color */
@@ -307,6 +333,33 @@ static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
}
/**
+ * busy_worker_head - return the busy hash head for a work
+ * @gcwq: gcwq of interest
+ * @work: work to be hashed
+ *
+ * Return hash head of @gcwq for @work.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ *
+ * RETURNS:
+ * Pointer to the hash head.
+ */
+static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
+ struct work_struct *work)
+{
+ const int base_shift = ilog2(sizeof(struct work_struct));
+ unsigned long v = (unsigned long)work;
+
+ /* simple shift and fold hash, do we need something better? */
+ v >>= base_shift;
+ v += v >> BUSY_WORKER_HASH_ORDER;
+ v &= BUSY_WORKER_HASH_MASK;
+
+ return &gcwq->busy_hash[v];
+}
+
+/**
* insert_work - insert a work into cwq
* @cwq: cwq @work belongs to
* @work: work to insert
@@ -332,7 +385,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
smp_wmb();
list_add_tail(&work->entry, head);
- wake_up(&cwq->more_work);
+ wake_up_process(cwq->worker->task);
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
@@ -470,13 +523,59 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+/**
+ * worker_enter_idle - enter idle state
+ * @worker: worker which is entering idle state
+ *
+ * @worker is entering idle state. Update stats and idle timer if
+ * necessary.
+ *
+ * LOCKING:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void worker_enter_idle(struct worker *worker)
+{
+ struct global_cwq *gcwq = worker->gcwq;
+
+ BUG_ON(worker->flags & WORKER_IDLE);
+ BUG_ON(!list_empty(&worker->entry) &&
+ (worker->hentry.next || worker->hentry.pprev));
+
+ worker->flags |= WORKER_IDLE;
+ gcwq->nr_idle++;
+
+ /* idle_list is LIFO */
+ list_add(&worker->entry, &gcwq->idle_list);
+}
+
+/**
+ * worker_leave_idle - leave idle state
+ * @worker: worker which is leaving idle state
+ *
+ * @worker is leaving idle state. Update stats.
+ *
+ * LOCKING:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void worker_leave_idle(struct worker *worker)
+{
+ struct global_cwq *gcwq = worker->gcwq;
+
+ BUG_ON(!(worker->flags & WORKER_IDLE));
+ worker->flags &= ~WORKER_IDLE;
+ gcwq->nr_idle--;
+ list_del_init(&worker->entry);
+}
+
static struct worker *alloc_worker(void)
{
struct worker *worker;
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
- if (worker)
+ if (worker) {
+ INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
+ }
return worker;
}
@@ -541,13 +640,16 @@ fail:
* start_worker - start a newly created worker
* @worker: worker to start
*
- * Start @worker.
+ * Make the gcwq aware of @worker and start it.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void start_worker(struct worker *worker)
{
+ worker->flags |= WORKER_STARTED;
+ worker->gcwq->nr_workers++;
+ worker_enter_idle(worker);
wake_up_process(worker->task);
}
@@ -555,7 +657,10 @@ static void start_worker(struct worker *worker)
* destroy_worker - destroy a workqueue worker
* @worker: worker to be destroyed
*
- * Destroy @worker.
+ * Destroy @worker and adjust @gcwq stats accordingly.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock) which is released and regrabbed.
*/
static void destroy_worker(struct worker *worker)
{
@@ -566,12 +671,21 @@ static void destroy_worker(struct worker *worker)
BUG_ON(worker->current_work);
BUG_ON(!list_empty(&worker->scheduled));
+ if (worker->flags & WORKER_STARTED)
+ gcwq->nr_workers--;
+ if (worker->flags & WORKER_IDLE)
+ gcwq->nr_idle--;
+
+ list_del_init(&worker->entry);
+ worker->flags |= WORKER_DIE;
+
+ spin_unlock_irq(&gcwq->lock);
+
kthread_stop(worker->task);
kfree(worker);
spin_lock_irq(&gcwq->lock);
ida_remove(&gcwq->worker_ida, id);
- spin_unlock_irq(&gcwq->lock);
}
/**
@@ -686,6 +800,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
{
struct cpu_workqueue_struct *cwq = worker->cwq;
struct global_cwq *gcwq = cwq->gcwq;
+ struct hlist_head *bwh = busy_worker_head(gcwq, work);
work_func_t f = work->func;
int work_color;
#ifdef CONFIG_LOCKDEP
@@ -700,6 +815,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
#endif
/* claim and process */
debug_work_deactivate(work);
+ hlist_add_head(&worker->hentry, bwh);
worker->current_work = work;
work_color = get_work_color(work);
list_del_init(&work->entry);
@@ -727,6 +843,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
spin_lock_irq(&gcwq->lock);
/* we're done with it, release */
+ hlist_del_init(&worker->hentry);
worker->current_work = NULL;
cwq_dec_nr_in_flight(cwq, work_color);
}
@@ -763,47 +880,56 @@ static int worker_thread(void *__worker)
struct worker *worker = __worker;
struct global_cwq *gcwq = worker->gcwq;
struct cpu_workqueue_struct *cwq = worker->cwq;
- DEFINE_WAIT(wait);
- for (;;) {
- prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
- if (!kthread_should_stop() &&
- list_empty(&cwq->worklist))
- schedule();
- finish_wait(&cwq->more_work, &wait);
+woke_up:
+ if (unlikely(!cpumask_equal(&worker->task->cpus_allowed,
+ get_cpu_mask(gcwq->cpu))))
+ set_cpus_allowed_ptr(worker->task, get_cpu_mask(gcwq->cpu));
- if (kthread_should_stop())
- break;
+ spin_lock_irq(&gcwq->lock);
- if (unlikely(!cpumask_equal(&worker->task->cpus_allowed,
- get_cpu_mask(gcwq->cpu))))
- set_cpus_allowed_ptr(worker->task,
- get_cpu_mask(gcwq->cpu));
+ /* DIE can be set only while we're idle, checking here is enough */
+ if (worker->flags & WORKER_DIE) {
+ spin_unlock_irq(&gcwq->lock);
+ return 0;
+ }
- spin_lock_irq(&gcwq->lock);
+ worker_leave_idle(worker);
- while (!list_empty(&cwq->worklist)) {
- struct work_struct *work =
- list_first_entry(&cwq->worklist,
- struct work_struct, entry);
-
- if (likely(!(*work_data_bits(work) &
- WORK_STRUCT_LINKED))) {
- /* optimization path, not strictly necessary */
- process_one_work(worker, work);
- if (unlikely(!list_empty(&worker->scheduled)))
- process_scheduled_works(worker);
- } else {
- move_linked_works(work, &worker->scheduled,
- NULL);
+ /*
+ * ->scheduled list can only be filled while a worker is
+ * preparing to process a work or actually processing it.
+ * Make sure nobody diddled with it while I was sleeping.
+ */
+ BUG_ON(!list_empty(&worker->scheduled));
+
+ while (!list_empty(&cwq->worklist)) {
+ struct work_struct *work =
+ list_first_entry(&cwq->worklist,
+ struct work_struct, entry);
+
+ if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
+ /* optimization path, not strictly necessary */
+ process_one_work(worker, work);
+ if (unlikely(!list_empty(&worker->scheduled)))
process_scheduled_works(worker);
- }
+ } else {
+ move_linked_works(work, &worker->scheduled, NULL);
+ process_scheduled_works(worker);
}
-
- spin_unlock_irq(&gcwq->lock);
}
- return 0;
+ /*
+ * gcwq->lock is held and there's no work to process, sleep.
+ * Workers are woken up only while holding gcwq->lock, so
+ * setting the current state before releasing gcwq->lock is
+ * enough to prevent losing any event.
+ */
+ worker_enter_idle(worker);
+ __set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&gcwq->lock);
+ schedule();
+ goto woke_up;
}
struct wq_barrier {
@@ -1594,7 +1720,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
cwq->max_active = max_active;
INIT_LIST_HEAD(&cwq->worklist);
INIT_LIST_HEAD(&cwq->delayed_works);
- init_waitqueue_head(&cwq->more_work);
if (failed)
continue;
@@ -1645,7 +1770,7 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key);
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
- int cpu;
+ unsigned int cpu;
flush_workqueue(wq);
@@ -1664,8 +1789,10 @@ void destroy_workqueue(struct workqueue_struct *wq)
int i;
if (cwq->worker) {
+ spin_lock_irq(&cwq->gcwq->lock);
destroy_worker(cwq->worker);
cwq->worker = NULL;
+ spin_unlock_irq(&cwq->gcwq->lock);
}
for (i = 0; i < WORK_NR_COLORS; i++)
@@ -1875,7 +2002,7 @@ void thaw_workqueues(void)
cwq->nr_active < cwq->max_active)
cwq_activate_first_delayed(cwq);
- wake_up(&cwq->more_work);
+ wake_up_process(cwq->worker->task);
}
spin_unlock_irq(&gcwq->lock);
@@ -1890,6 +2017,7 @@ out_unlock:
void __init init_workqueues(void)
{
unsigned int cpu;
+ int i;
/*
* cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
@@ -1909,6 +2037,10 @@ void __init init_workqueues(void)
spin_lock_init(&gcwq->lock);
gcwq->cpu = cpu;
+ INIT_LIST_HEAD(&gcwq->idle_list);
+ for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
+
ida_init(&gcwq->worker_ida);
}
--
1.6.4.2