* [PATCH 0/3 v3] blk_cleanup_queue() versus request_fn order fix
From: Bart Van Assche @ 2012-09-27 16:34 UTC (permalink / raw)
To: linux-scsi, James Bottomley, Mike Christie, Jens Axboe, Tejun Heo,
Chanho Min
At device removal time, request processing functions such as
scsi_request_fn() that internally unlock the queue lock may cause
blk_cleanup_queue() to finish while request_fn is still in progress.
This three-patch series makes sure that blk_cleanup_queue() cannot
finish before all active request_fn calls have returned, and also
that request_fn is no longer invoked once queue draining has finished.
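The problematic pattern, as a simplified sketch (the real code is
scsi_request_fn() in drivers/scsi/scsi_lib.c; dispatch_to_hardware()
is a made-up placeholder, not an existing function):

static void example_request_fn(struct request_queue *q)
{
	struct request *req;

	/* invoked with q->queue_lock held */
	while ((req = blk_fetch_request(q)) != NULL) {
		spin_unlock_irq(q->queue_lock);
		/*
		 * The lock is dropped here, so blk_cleanup_queue() on
		 * another CPU may conclude that the queue is idle and
		 * free resources this function is still using.
		 */
		dispatch_to_hardware(req);
		spin_lock_irq(q->queue_lock);
	}
}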
Changes compared to v2:
- Split second patch into two patches.
- Refined patch descriptions.
Changes compared to v1:
- Included a patch to rename QUEUE_FLAG_DEAD.
- Refined the descriptions of the __blk_run_queue_uncond() and
blk_cleanup_queue() functions.
* [PATCH 1/3] block: Rename queue dead flag
From: Bart Van Assche @ 2012-09-27 16:35 UTC (permalink / raw)
To: Bart Van Assche
Cc: linux-scsi, James Bottomley, Mike Christie, Jens Axboe, Tejun Heo,
Chanho Min
This flag is used to indicate that queuing new requests must stop.
After this flag has been set, queue draining starts. However, during
the queue draining phase it is still safe to invoke the queue's
request_fn, so "dying" is a better name for it than "dead".
This patch is the result of running the command below and manually
fixing up indentation:
git grep -lE 'blk_queue_dead|QUEUE_FLAG_DEAD' |
xargs sed -i .tmp -e 's/blk_queue_dead/blk_queue_dying/g' \
-e 's/QUEUE_FLAG_DEAD/QUEUE_FLAG_DYING/g'
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Chanho Min <chanho.min@lge.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
block/blk-cgroup.c | 2 +-
block/blk-core.c | 16 ++++++++--------
block/blk-exec.c | 2 +-
block/blk-sysfs.c | 4 ++--
block/blk-throttle.c | 2 +-
block/blk.h | 2 +-
drivers/block/ub.c | 2 +-
drivers/scsi/scsi_lib.c | 2 +-
include/linux/blkdev.h | 4 ++--
9 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f3b44a6..5cbdad5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
diff --git a/block/blk-core.c b/block/blk-core.c
index ee3cb3a..b37ac03 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -484,7 +484,7 @@ void blk_cleanup_queue(struct request_queue *q)
/* mark @q DEAD, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
spin_lock_irq(lock);
/*
@@ -501,7 +501,7 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
@@ -723,7 +723,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
bool blk_get_queue(struct request_queue *q)
{
- if (likely(!blk_queue_dead(q))) {
+ if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
@@ -877,7 +877,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue;
- if (unlikely(blk_queue_dead(q)))
+ if (unlikely(blk_queue_dying(q)))
return NULL;
may_queue = elv_may_queue(q, rw_flags);
@@ -1057,7 +1057,7 @@ retry:
if (rq)
return rq;
- if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+ if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
return NULL;
}
@@ -1909,7 +1909,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
return -EIO;
spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return -ENODEV;
}
@@ -2891,7 +2891,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
/*
* Don't mess with dead queue.
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock(q->queue_lock);
return;
}
@@ -3000,7 +3000,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* Short-circuit if @q is dead
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, -ENODEV);
continue;
}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 8b6dc5b..4aec98d 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -60,7 +60,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
spin_lock_irq(q->queue_lock);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
rq->errors = -ENXIO;
if (rq->end_io)
rq->end_io(rq, rq->errors);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9628b29..6898f17 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -432,7 +432,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
@@ -454,7 +454,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
q = container_of(kobj, struct request_queue, kobj);
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e287c19..1da8497 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,7 +303,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
/* if %NULL and @q is alive, fall back to root_tg */
if (!IS_ERR(blkg))
tg = blkg_to_tg(blkg);
- else if (!blk_queue_dead(q))
+ else if (!blk_queue_dying(q))
tg = td_root_tg(td);
}
diff --git a/block/blk.h b/block/blk.h
index 2a0ea32..a066ceb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dead(q)) ||
+ if (unlikely(blk_queue_dying(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index fcec022..f32e4d6 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2394,7 +2394,7 @@ static void ub_disconnect(struct usb_interface *intf)
del_gendisk(lun->disk);
/*
* I wish I could do:
- * queue_flag_set(QUEUE_FLAG_DEAD, q);
+ * queue_flag_set(QUEUE_FLAG_DYING, q);
* As it is, we rely on our internal poisoning and let
* the upper levels to spin furiously failing all the I/O.
*/
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index faa790f..593fc71 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1406,7 +1406,7 @@ static int scsi_lld_busy(struct request_queue *q)
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- if (blk_queue_dead(q))
+ if (blk_queue_dying(q))
return 0;
shost = sdev->host;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4a2ab7c..c6ab0db 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -436,7 +436,7 @@ struct request_queue {
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
-#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
+#define QUEUE_FLAG_DYING 5 /* queue being torn down */
#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
@@ -520,7 +520,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
--
1.7.10.4
* [PATCH 2/3] block: Avoid that request_fn is invoked on a dead queue
From: Bart Van Assche @ 2012-09-27 16:38 UTC (permalink / raw)
To: Bart Van Assche
Cc: linux-scsi, James Bottomley, Mike Christie, Jens Axboe, Tejun Heo,
Chanho Min
Avoid that request_fn gets invoked after queue draining has finished.
Callers of blk_cleanup_queue() expect that request handling has
stopped once this function has returned, so request_fn must not be
invoked anymore after blk_cleanup_queue() has finished.
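The key piece of the patch, shown here annotated with the locking
assumptions (the comment is added for illustration and is not part of
the patch):

void __blk_run_queue_uncond(struct request_queue *q)
{
	/*
	 * The caller holds q->queue_lock and blk_cleanup_queue() sets
	 * QUEUE_FLAG_DEAD under the same lock, so once the flag has
	 * been set no new request_fn invocation can start. Invocations
	 * already in progress are dealt with in patch 3/3.
	 */
	if (unlikely(blk_queue_dead(q)))
		return;

	q->request_fn(q);
}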
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Chanho Min <chanho.min@lge.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
block/blk-core.c | 25 ++++++++++++++++++++++++-
block/blk-exec.c | 2 +-
block/blk.h | 2 ++
include/linux/blkdev.h | 2 ++
4 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b37ac03..b5436b6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -293,6 +293,25 @@ void blk_sync_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_sync_queue);
/**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on a queue if there are any pending requests.
+ * May be used to restart request handling after a request has completed.
+ * This variant runs the queue whether or not the queue has been
+ * stopped. Must be called with the queue lock held and interrupts
+ * disabled. See also @blk_run_queue.
+ */
+void __blk_run_queue_uncond(struct request_queue *q)
+{
+ if (unlikely(blk_queue_dead(q)))
+ return;
+
+ q->request_fn(q);
+}
+
+/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
*
@@ -305,7 +324,7 @@ void __blk_run_queue(struct request_queue *q)
if (unlikely(blk_queue_stopped(q)))
return;
- q->request_fn(q);
+ __blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -508,6 +527,10 @@ void blk_cleanup_queue(struct request_queue *q)
/* drain all requests queued before DEAD marking */
blk_drain_queue(q, true);
+ spin_lock_irq(lock);
+ queue_flag_set(QUEUE_FLAG_DEAD, q);
+ spin_unlock_irq(lock);
+
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
blk_sync_queue(q);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 4aec98d..1320e74 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -72,7 +72,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
if (rq->cmd_type == REQ_TYPE_PM_RESUME)
- q->request_fn(q);
+ __blk_run_queue_uncond(q);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
diff --git a/block/blk.h b/block/blk.h
index a066ceb..3e94c14 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -145,6 +145,8 @@ int blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q);
+void __blk_run_queue_uncond(struct request_queue *q);
+
int blk_dev_init(void);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c6ab0db..9b9855f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -451,6 +451,7 @@ struct request_queue {
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -521,6 +522,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
--
1.7.10.4
* [PATCH 3/3] Make blk_cleanup_queue() wait until request_fn finished
From: Bart Van Assche @ 2012-09-27 16:39 UTC (permalink / raw)
To: linux-scsi
Cc: James Bottomley, Mike Christie, Jens Axboe, Tejun Heo, Chanho Min
Some request_fn implementations, e.g. scsi_request_fn(), internally unlock
the queue lock. Make sure that blk_cleanup_queue() waits until all
active request_fn invocations have finished. This fixes a potential
use-after-free at the end of scsi_request_fn().
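The wait works because q->request_fn_active is only modified with the
queue lock held and blk_drain_queue() re-reads it under that same
lock. Roughly (a simplified sketch of the drain loop after this
patch; the real loop checks several more counters):

while (true) {
	bool drain = false;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	drain |= q->nr_rqs_elvpriv;
	drain |= q->request_fn_active;	/* added by this patch */
	spin_unlock_irq(q->queue_lock);

	if (!drain)
		break;
	msleep(10);
}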
Reported-by: Chanho Min <chanho.min@lge.com>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
block/blk-core.c | 7 +++++--
drivers/scsi/scsi_lib.c | 10 +---------
include/linux/blkdev.h | 5 +++++
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b5436b6..e41b291 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -308,7 +308,9 @@ void __blk_run_queue_uncond(struct request_queue *q)
if (unlikely(blk_queue_dead(q)))
return;
+ q->request_fn_active++;
q->request_fn(q);
+ q->request_fn_active--;
}
/**
@@ -407,6 +409,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
__blk_run_queue(q);
drain |= q->nr_rqs_elvpriv;
+ drain |= q->request_fn_active;
/*
* Unfortunately, requests are queued at and tracked from
@@ -494,8 +497,8 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q as dying, drain all pending requests, mark @q as dead, destroy and
+ * put it. All future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 593fc71..03571a3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1517,10 +1517,6 @@ static void scsi_request_fn(struct request_queue *q)
struct scsi_cmnd *cmd;
struct request *req;
- if(!get_device(&sdev->sdev_gendev))
- /* We must be tearing the block queue down already */
- return;
-
/*
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
@@ -1629,11 +1625,7 @@ out_delay:
if (sdev->device_busy == 0)
blk_delay_queue(q, SCSI_QUEUE_DELAY);
out:
- /* must be careful here...if we trigger the ->remove() function
- * we cannot be holding the q lock */
- spin_unlock_irq(q->queue_lock);
- put_device(&sdev->sdev_gendev);
- spin_lock_irq(q->queue_lock);
+ ;
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9b9855f..ef5b80a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -377,6 +377,11 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
+ /*
+ * Number of active request_fn() calls for those request_fn()
+ * implementations that unlock the queue_lock, e.g. scsi_request_fn().
+ */
+ unsigned int request_fn_active;
unsigned int rq_timeout;
struct timer_list timeout;
--
1.7.10.4
* Re: [PATCH 3/3] Make blk_cleanup_queue() wait until request_fn finished
From: Dan Williams @ 2012-10-01 17:41 UTC (permalink / raw)
To: Bart Van Assche
Cc: linux-scsi, James Bottomley, Mike Christie, Jens Axboe, Tejun Heo,
Chanho Min
On Thu, Sep 27, 2012 at 9:39 AM, Bart Van Assche <bvanassche@acm.org> wrote:
> Some request_fn implementations, e.g. scsi_request_fn(), unlock
> the queue lock. Make sure that blk_cleanup_queue() waits until all
> active request_fn invocations have finished. This fixes a potential
> use-after-free at the end of scsi_request_fn().
>
> Reported-by: Chanho Min <chanho.min@lge.com>
> Cc: James Bottomley <JBottomley@Parallels.com>
> Cc: Mike Christie <michaelc@cs.wisc.edu>
> Cc: Jens Axboe <axboe@kernel.dk>
> Cc: Tejun Heo <tj@kernel.org>
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
> ---
> block/blk-core.c | 7 +++++--
> drivers/scsi/scsi_lib.c | 10 +---------
> include/linux/blkdev.h | 5 +++++
> 3 files changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/block/blk-core.c b/block/blk-core.c
> index b5436b6..e41b291 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -308,7 +308,9 @@ void __blk_run_queue_uncond(struct request_queue *q)
> if (unlikely(blk_queue_dead(q)))
> return;
>
> + q->request_fn_active++;
> q->request_fn(q);
> + q->request_fn_active--;
> }
>
> /**
> @@ -407,6 +409,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
> __blk_run_queue(q);
>
> drain |= q->nr_rqs_elvpriv;
> + drain |= q->request_fn_active;
>
> /*
> * Unfortunately, requests are queued at and tracked from
> @@ -494,8 +497,8 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
> * blk_cleanup_queue - shutdown a request queue
> * @q: request queue to shutdown
> *
> - * Mark @q DEAD, drain all pending requests, destroy and put it. All
> - * future requests will be failed immediately with -ENODEV.
> + * Mark @q as dying, drain all pending requests, mark @q as dead, destroy and
> + * put it. All future requests will be failed immediately with -ENODEV.
> */
> void blk_cleanup_queue(struct request_queue *q)
> {
> diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
> index 593fc71..03571a3 100644
> --- a/drivers/scsi/scsi_lib.c
> +++ b/drivers/scsi/scsi_lib.c
> @@ -1517,10 +1517,6 @@ static void scsi_request_fn(struct request_queue *q)
> struct scsi_cmnd *cmd;
> struct request *req;
>
> - if(!get_device(&sdev->sdev_gendev))
> - /* We must be tearing the block queue down already */
> - return;
> -
> /*
> * To start with, we keep looping until the queue is empty, or until
> * the host is no longer able to accept any more requests.
> @@ -1629,11 +1625,7 @@ out_delay:
> if (sdev->device_busy == 0)
> blk_delay_queue(q, SCSI_QUEUE_DELAY);
> out:
> - /* must be careful here...if we trigger the ->remove() function
> - * we cannot be holding the q lock */
> - spin_unlock_irq(q->queue_lock);
> - put_device(&sdev->sdev_gendev);
> - spin_lock_irq(q->queue_lock);
> + ;
Any reason to keep this "out:" label now that it has no effect?
--
Dan
* Re: [PATCH 3/3] Make blk_cleanup_queue() wait until request_fn finished
From: Bart Van Assche @ 2012-10-02 6:37 UTC (permalink / raw)
To: Dan Williams
Cc: linux-scsi, James Bottomley, Mike Christie, Jens Axboe, Tejun Heo,
Chanho Min
On 10/01/12 19:41, Dan Williams wrote:
> On Thu, Sep 27, 2012 at 9:39 AM, Bart Van Assche <bvanassche@acm.org> wrote:
>> [ ... ]
>> diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
>> index 593fc71..03571a3 100644
>> --- a/drivers/scsi/scsi_lib.c
>> +++ b/drivers/scsi/scsi_lib.c
>> @@ -1517,10 +1517,6 @@ static void scsi_request_fn(struct request_queue *q)
>> struct scsi_cmnd *cmd;
>> struct request *req;
>>
>> - if(!get_device(&sdev->sdev_gendev))
>> - /* We must be tearing the block queue down already */
>> - return;
>> -
>> /*
>> * To start with, we keep looping until the queue is empty, or until
>> * the host is no longer able to accept any more requests.
>> @@ -1629,11 +1625,7 @@ out_delay:
>> if (sdev->device_busy == 0)
>> blk_delay_queue(q, SCSI_QUEUE_DELAY);
>> out:
>> - /* must be careful here...if we trigger the ->remove() function
>> - * we cannot be holding the q lock */
>> - spin_unlock_irq(q->queue_lock);
>> - put_device(&sdev->sdev_gendev);
>> - spin_lock_irq(q->queue_lock);
>> + ;
>
> Any reason to keep this "out:" label now that it has no effect?
Some people prefer a single-exit style for kernel code since that style
makes it easy to add cleanup code for resources allocated inside the
function itself. I don't have a strong opinion about this though.
Note: after I posted this patch series I noticed that patch 2/3 leaves a
(small) race window: QUEUE_FLAG_DEAD only gets set after blk_drain_queue()
has returned and dropped the queue lock, so a new request_fn invocation can
still start in between. I'm currently testing this follow-up patch, which
marks the queue dead inside the locked region in which it is observed to
have been drained:
diff --git a/block/blk-core.c b/block/blk-core.c
index f0efe32..3991f8e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -425,6 +425,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
}
}
+ if (!drain && blk_queue_dying(q))
+ queue_flag_set(QUEUE_FLAG_DEAD, q);
+
spin_unlock_irq(q->queue_lock);
if (!drain)
@@ -525,10 +528,6 @@ void blk_cleanup_queue(struct request_queue *q)
/* drain all requests queued before DEAD marking */
blk_drain_queue(q, true);
- spin_lock_irq(lock);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
- spin_unlock_irq(lock);
-
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
blk_sync_queue(q);