* [PATCH 1/3] workqueue: promote workqueue_lock to hard-irq safe
2011-12-02 23:56 [PATCH 0/3] drain_workqueue vs scsi_flush_work Dan Williams
@ 2011-12-02 23:56 ` Dan Williams
2011-12-02 23:56 ` [PATCH 2/3] workqueue: defer work to a draining queue Dan Williams
2011-12-02 23:57 ` [PATCH 3/3] scsi: use drain_workqueue Dan Williams
2 siblings, 0 replies; 6+ messages in thread
From: Dan Williams @ 2011-12-02 23:56 UTC (permalink / raw)
To: tj, JBottomley; +Cc: linux-kernel, linux-scsi
Promote workqueue_lock to be hard-irq safe in preparation for a deferred
work implementation that queues unchained work at the conclusion of a
drain_workqueue() event.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
kernel/workqueue.c | 44 ++++++++++++++++++++++----------------------
1 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1783aab..f476895 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2403,10 +2403,10 @@ void drain_workqueue(struct workqueue_struct *wq)
* hotter than drain_workqueue() and already looks at @wq->flags.
* Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!wq->nr_drainers++)
wq->flags |= WQ_DRAINING;
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
reflush:
flush_workqueue(wq);
@@ -2428,10 +2428,10 @@ reflush:
goto reflush;
}
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!--wq->nr_drainers)
wq->flags &= ~WQ_DRAINING;
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
@@ -3033,7 +3033,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
* list. Grab it, set max_active accordingly and add the new
* workqueue to workqueues list.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
for_each_cwq_cpu(cpu, wq)
@@ -3041,7 +3041,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
list_add(&wq->list, &workqueues);
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
return wq;
err:
@@ -3072,9 +3072,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
list_del(&wq->list);
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
/* sanity check */
for_each_cwq_cpu(cpu, wq) {
@@ -3114,23 +3114,23 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
wq->saved_max_active = max_active;
for_each_cwq_cpu(cpu, wq) {
struct global_cwq *gcwq = get_gcwq(cpu);
- spin_lock_irq(&gcwq->lock);
+ spin_lock(&gcwq->lock);
if (!(wq->flags & WQ_FREEZABLE) ||
!(gcwq->flags & GCWQ_FREEZING))
get_cwq(gcwq->cpu, wq)->max_active = max_active;
- spin_unlock_irq(&gcwq->lock);
+ spin_unlock(&gcwq->lock);
}
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
@@ -3642,7 +3642,7 @@ void freeze_workqueues_begin(void)
{
unsigned int cpu;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
BUG_ON(workqueue_freezing);
workqueue_freezing = true;
@@ -3651,7 +3651,7 @@ void freeze_workqueues_begin(void)
struct global_cwq *gcwq = get_gcwq(cpu);
struct workqueue_struct *wq;
- spin_lock_irq(&gcwq->lock);
+ spin_lock(&gcwq->lock);
BUG_ON(gcwq->flags & GCWQ_FREEZING);
gcwq->flags |= GCWQ_FREEZING;
@@ -3663,10 +3663,10 @@ void freeze_workqueues_begin(void)
cwq->max_active = 0;
}
- spin_unlock_irq(&gcwq->lock);
+ spin_unlock(&gcwq->lock);
}
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
/**
@@ -3687,7 +3687,7 @@ bool freeze_workqueues_busy(void)
unsigned int cpu;
bool busy = false;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
BUG_ON(!workqueue_freezing);
@@ -3711,7 +3711,7 @@ bool freeze_workqueues_busy(void)
}
}
out_unlock:
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
return busy;
}
@@ -3728,7 +3728,7 @@ void thaw_workqueues(void)
{
unsigned int cpu;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!workqueue_freezing)
goto out_unlock;
@@ -3737,7 +3737,7 @@ void thaw_workqueues(void)
struct global_cwq *gcwq = get_gcwq(cpu);
struct workqueue_struct *wq;
- spin_lock_irq(&gcwq->lock);
+ spin_lock(&gcwq->lock);
BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
gcwq->flags &= ~GCWQ_FREEZING;
@@ -3758,12 +3758,12 @@ void thaw_workqueues(void)
wake_up_worker(gcwq);
- spin_unlock_irq(&gcwq->lock);
+ spin_unlock(&gcwq->lock);
}
workqueue_freezing = false;
out_unlock:
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH 2/3] workqueue: defer work to a draining queue
2011-12-02 23:56 [PATCH 0/3] drain_workqueue vs scsi_flush_work Dan Williams
2011-12-02 23:56 ` [PATCH 1/3] workqueue: promote workqueue_lock to hard-irq safe Dan Williams
@ 2011-12-02 23:56 ` Dan Williams
2011-12-03 1:53 ` Williams, Dan J
2011-12-04 7:12 ` Dan Williams
2011-12-02 23:57 ` [PATCH 3/3] scsi: use drain_workqueue Dan Williams
2 siblings, 2 replies; 6+ messages in thread
From: Dan Williams @ 2011-12-02 23:56 UTC (permalink / raw)
To: tj, JBottomley; +Cc: linux-kernel, linux-scsi
commit 9c5a2ba7 "workqueue: separate out drain_workqueue() from
destroy_workqueue()" provided drain_workqueue() for users like libsas to
use for flushing events.
When libsas drains it wants currently queued and chained events to be
flushed, but it fully expects to continue issuing unchained events with
the expectation that they are serviced sometime after the drain.
For external users of drain_workqueue(), let unchained work commence
after the drain completes; if the caller cares whether unchained work
was queued as a result of the drain, it can check for a non-zero return
value.
Unfortunately, this requires promoting workqueue_lock to be hard-irq
safe, and the deferral path cannot support queue_work_on() users.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
include/linux/workqueue.h | 3 +-
kernel/workqueue.c | 70 +++++++++++++++++++++++++++++++++++++++------
2 files changed, 62 insertions(+), 11 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0d556de..37de207 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -257,6 +257,7 @@ enum {
WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
+ WQ_NO_DEFER = 1 << 8, /* internal: workqueue destructing */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -355,7 +356,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern void flush_workqueue(struct workqueue_struct *wq);
-extern void drain_workqueue(struct workqueue_struct *wq);
+extern int drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern int schedule_work(struct work_struct *work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f476895..363a4ef 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -236,6 +236,7 @@ struct workqueue_struct {
struct wq_flusher *first_flusher; /* F: first flusher */
struct list_head flusher_queue; /* F: flush waiters */
struct list_head flusher_overflow; /* F: flush overflow list */
+ struct list_head drain_defer; /* W: unchained work to defer */
mayday_mask_t mayday_mask; /* cpus requesting rescue */
struct worker *rescuer; /* I: rescue worker */
@@ -979,6 +980,19 @@ static bool is_chained_work(struct workqueue_struct *wq)
return false;
}
+static bool defer_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+ if (is_chained_work(wq))
+ return false;
+
+ if (WARN_ON_ONCE(wq->flags & WQ_NO_DEFER))
+ return true;
+
+ list_add_tail(&work->entry, &wq->drain_defer);
+
+ return true;
+}
+
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
@@ -991,9 +1005,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
/* if dying, only works from the same workqueue are allowed */
- if (unlikely(wq->flags & WQ_DRAINING) &&
- WARN_ON_ONCE(!is_chained_work(wq)))
- return;
+ if (unlikely(wq->flags & WQ_DRAINING)) {
+ unsigned long flags;
+ bool defer = false;
+
+ spin_lock_irqsave(&workqueue_lock, flags);
+ if (wq->flags & WQ_DRAINING)
+ defer = defer_work(wq, work);
+ spin_unlock_irqrestore(&workqueue_lock, flags);
+ if (defer)
+ return;
+ }
/* determine gcwq to use */
if (!(wq->flags & WQ_UNBOUND)) {
@@ -2383,8 +2405,9 @@ out_unlock:
EXPORT_SYMBOL_GPL(flush_workqueue);
/**
- * drain_workqueue - drain a workqueue
+ * __drain_workqueue - drain a workqueue
* @wq: workqueue to drain
+ * @flags: WQ_NO_DEFER - reject unchained work
*
* Wait until the workqueue becomes empty. While draining is in progress,
* only chain queueing is allowed. IOW, only currently pending or running
@@ -2392,11 +2415,17 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
* repeatedly until it becomes empty. The number of flushing is detemined
* by the depth of chaining and should be relatively short. Whine if it
* takes too long.
+ *
+ * Indicate to the caller if any deferred (unchained) work was queued
+ * during the drain.
*/
-void drain_workqueue(struct workqueue_struct *wq)
+static int __drain_workqueue(struct workqueue_struct *wq, int flags)
{
+ struct work_struct *work, *w;
unsigned int flush_cnt = 0;
+ LIST_HEAD(drain_defer);
unsigned int cpu;
+ int ret = 0;
/*
* __queue_work() needs to test whether there are drainers, is much
@@ -2405,7 +2434,7 @@ void drain_workqueue(struct workqueue_struct *wq)
*/
spin_lock_irq(&workqueue_lock);
if (!wq->nr_drainers++)
- wq->flags |= WQ_DRAINING;
+ wq->flags |= WQ_DRAINING | flags;
spin_unlock_irq(&workqueue_lock);
reflush:
flush_workqueue(wq);
@@ -2429,9 +2458,25 @@ reflush:
}
spin_lock_irq(&workqueue_lock);
- if (!--wq->nr_drainers)
- wq->flags &= ~WQ_DRAINING;
+ if (!--wq->nr_drainers) {
+ wq->flags &= ~(WQ_DRAINING | WQ_NO_DEFER);
+ list_splice_init(&wq->drain_defer, &drain_defer);
+ ret = !list_empty(&drain_defer);
+ }
spin_unlock_irq(&workqueue_lock);
+
+ /* requeue work on this queue provided it was not being destroyed */
+ list_for_each_entry_safe(work, w, &drain_defer, entry) {
+ list_del_init(&work->entry);
+ queue_work(wq, work);
+ }
+
+ return ret;
+}
+
+int drain_workqueue(struct workqueue_struct *wq)
+{
+ return __drain_workqueue(wq, 0);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
@@ -2990,6 +3035,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
atomic_set(&wq->nr_cwqs_to_flush, 0);
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
+ INIT_LIST_HEAD(&wq->drain_defer);
wq->name = name;
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
@@ -3065,8 +3111,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
{
unsigned int cpu;
- /* drain it before proceeding with destruction */
- drain_workqueue(wq);
+ /*
+ * drain it before proceeding with destruction and disable drain
+ * deferrement. !is_chained_work() that arrives after this
+ * point will be dropped on the floor
+ */
+ __drain_workqueue(wq, WQ_NO_DEFER);
/*
* wq list is used to freeze wq, remove from list after
^ permalink raw reply related [flat|nested] 6+ messages in thread* Re: [PATCH 2/3] workqueue: defer work to a draining queue
2011-12-02 23:56 ` [PATCH 2/3] workqueue: defer work to a draining queue Dan Williams
@ 2011-12-03 1:53 ` Williams, Dan J
2011-12-04 7:12 ` Dan Williams
1 sibling, 0 replies; 6+ messages in thread
From: Williams, Dan J @ 2011-12-03 1:53 UTC (permalink / raw)
To: tj, JBottomley; +Cc: linux-kernel, linux-scsi
On Fri, Dec 2, 2011 at 3:56 PM, Dan Williams <dan.j.williams@intel.com> wrote:
[..]
> @@ -2405,7 +2434,7 @@ void drain_workqueue(struct workqueue_struct *wq)
> */
> spin_lock_irq(&workqueue_lock);
> if (!wq->nr_drainers++)
> - wq->flags |= WQ_DRAINING;
> + wq->flags |= WQ_DRAINING | flags;
> spin_unlock_irq(&workqueue_lock);
> reflush:
> flush_workqueue(wq);
> @@ -2429,9 +2458,25 @@ reflush:
> }
>
> spin_lock_irq(&workqueue_lock);
> - if (!--wq->nr_drainers)
> - wq->flags &= ~WQ_DRAINING;
> + if (!--wq->nr_drainers) {
> + wq->flags &= ~(WQ_DRAINING | WQ_NO_DEFER);
> + list_splice_init(&wq->drain_defer, &drain_defer);
> + ret = !list_empty(&drain_defer);
> + }
> spin_unlock_irq(&workqueue_lock);
> +
> + /* requeue work on this queue provided it was not being destroyed */
> + list_for_each_entry_safe(work, w, &drain_defer, entry) {
> + list_del_init(&work->entry);
> + queue_work(wq, work);
> + }
Actually, this won't work. The queuing of deferred work would need to
be under the lock and then flushed again to guarantee workqueue
semantics which just devolves into a "wait until all work stops being
submitted to the queue for one drain_workqueue() duration". Which is
more than what we need for libsas.
--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 6+ messages in thread* Re: [PATCH 2/3] workqueue: defer work to a draining queue
2011-12-02 23:56 ` [PATCH 2/3] workqueue: defer work to a draining queue Dan Williams
2011-12-03 1:53 ` Williams, Dan J
@ 2011-12-04 7:12 ` Dan Williams
1 sibling, 0 replies; 6+ messages in thread
From: Dan Williams @ 2011-12-04 7:12 UTC (permalink / raw)
To: tj; +Cc: JBottomley, linux-kernel, linux-scsi
On Fri, 2011-12-02 at 15:56 -0800, Dan Williams wrote:
> commit 9c5a2ba7 "workqueue: separate out drain_workqueue() from
> destroy_workqueue()" provided drain_workqueue() for users like libsas to
> use for flushing events.
Any reason to allow drain requests to stack? If draining is under a
mutex then we don't break workqueue semantics (at least with respect to
draining), all chained work is flushed and all unchained work is
registered in the queue. But it still leaves deferred work invisible to
flush_workqueue(). drain_workqueue() users would need to be careful not
to mix 'flush' and 'drain'.
Untested incremental changes to implement above:
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 363a4ef..24563d6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -236,12 +236,12 @@ struct workqueue_struct {
struct wq_flusher *first_flusher; /* F: first flusher */
struct list_head flusher_queue; /* F: flush waiters */
struct list_head flusher_overflow; /* F: flush overflow list */
+ struct mutex drain_mutex; /* 1 drainer at a time */
struct list_head drain_defer; /* W: unchained work to defer */
mayday_mask_t mayday_mask; /* cpus requesting rescue */
struct worker *rescuer; /* I: rescue worker */
- int nr_drainers; /* W: drain in progress */
int saved_max_active; /* W: saved cwq max_active */
const char *name; /* I: workqueue name */
#ifdef CONFIG_LOCKDEP
@@ -2427,14 +2427,10 @@ static int __drain_workqueue(struct workqueue_struct *wq, int flags)
unsigned int cpu;
int ret = 0;
- /*
- * __queue_work() needs to test whether there are drainers, is much
- * hotter than drain_workqueue() and already looks at @wq->flags.
- * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
- */
+ mutex_lock(&wq->drain_mutex);
+
spin_lock_irq(&workqueue_lock);
- if (!wq->nr_drainers++)
- wq->flags |= WQ_DRAINING | flags;
+ wq->flags |= WQ_DRAINING | flags;
spin_unlock_irq(&workqueue_lock);
reflush:
flush_workqueue(wq);
@@ -2458,24 +2454,26 @@ reflush:
}
spin_lock_irq(&workqueue_lock);
- if (!--wq->nr_drainers) {
- wq->flags &= ~(WQ_DRAINING | WQ_NO_DEFER);
- list_splice_init(&wq->drain_defer, &drain_defer);
- ret = !list_empty(&drain_defer);
- }
+ wq->flags &= ~(WQ_DRAINING | WQ_NO_DEFER);
+ list_splice_init(&wq->drain_defer, &drain_defer);
+ ret = !list_empty(&drain_defer);
spin_unlock_irq(&workqueue_lock);
- /* requeue work on this queue provided it was not being destroyed */
+ /* submit deferred work provided wq was not being destroyed */
list_for_each_entry_safe(work, w, &drain_defer, entry) {
list_del_init(&work->entry);
queue_work(wq, work);
}
+ mutex_unlock(&wq->drain_mutex);
+
return ret;
}
int drain_workqueue(struct workqueue_struct *wq)
{
+ if (WARN_ON_ONCE(wq->flags & WQ_NO_DEFER))
+ return 0; /* lost drain vs destroy race */
return __drain_workqueue(wq, 0);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
@@ -3032,6 +3030,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
wq->flags = flags;
wq->saved_max_active = max_active;
mutex_init(&wq->flush_mutex);
+ mutex_init(&wq->drain_mutex);
atomic_set(&wq->nr_cwqs_to_flush, 0);
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH 3/3] scsi: use drain_workqueue
2011-12-02 23:56 [PATCH 0/3] drain_workqueue vs scsi_flush_work Dan Williams
2011-12-02 23:56 ` [PATCH 1/3] workqueue: promote workqueue_lock to hard-irq safe Dan Williams
2011-12-02 23:56 ` [PATCH 2/3] workqueue: defer work to a draining queue Dan Williams
@ 2011-12-02 23:57 ` Dan Williams
2 siblings, 0 replies; 6+ messages in thread
From: Dan Williams @ 2011-12-02 23:57 UTC (permalink / raw)
To: tj, JBottomley; +Cc: Mike Christie, Robert Love, linux-kernel, linux-scsi
Use 'drain' versus 'flush' as the former additionally flushes chained
operations. libsas uses chained operations when it posts discovery work
in response to a port event.
As a result the hardcoded double-flush can be removed from the isci
driver.
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Robert Love <robert.w.love@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
drivers/scsi/hosts.c | 8 ++++----
drivers/scsi/isci/host.c | 3 ---
include/scsi/scsi_host.h | 2 +-
3 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 351dc0b..37155d1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -564,19 +564,19 @@ int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
EXPORT_SYMBOL_GPL(scsi_queue_work);
/**
- * scsi_flush_work - Flush a Scsi_Host's workqueue.
+ * scsi_flush_work - Drain a Scsi_Host's workqueue.
* @shost: Pointer to Scsi_Host.
**/
-void scsi_flush_work(struct Scsi_Host *shost)
+int scsi_flush_work(struct Scsi_Host *shost)
{
if (!shost->work_q) {
printk(KERN_ERR
"ERROR: Scsi host '%s' attempted to flush scsi-work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
- return;
+ return 0;
}
- flush_workqueue(shost->work_q);
+ return drain_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index e7fe9c4..240779a 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -655,9 +655,6 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (test_bit(IHOST_START_PENDING, &ihost->flags))
return 0;
- /* todo: use sas_flush_discovery once it is upstream */
- scsi_flush_work(shost);
-
scsi_flush_work(shost);
dev_dbg(&ihost->pdev->dev,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 50266c9..505bc34 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -770,7 +770,7 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
}
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
-extern void scsi_flush_work(struct Scsi_Host *);
+extern int scsi_flush_work(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
^ permalink raw reply related [flat|nested] 6+ messages in thread