* [PATCH 1/3] scsi: Inline scsi_kick_queue()
2023-07-17 20:52 [PATCH 0/3] Improve performance for BLK_MQ_F_BLOCKING drivers Bart Van Assche
@ 2023-07-17 20:52 ` Bart Van Assche
2023-07-17 20:52 ` [PATCH 2/3] scsi: Remove a blk_mq_run_hw_queues() call Bart Van Assche
2023-07-17 20:52 ` [PATCH 3/3] block: Improve performance for BLK_MQ_F_BLOCKING drivers Bart Van Assche
From: Bart Van Assche @ 2023-07-17 20:52 UTC
To: Jens Axboe
Cc: linux-block, Christoph Hellwig, Martin K. Petersen,
Bart Van Assche, James E.J. Bottomley
scsi_kick_queue() is a one-line wrapper around blk_mq_run_hw_queues() that is
too short to justify keeping it as a separate function. Hence inline it.
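Concretely, each call site changes like this (illustration only, with q
standing for the respective request queue; the diff below is authoritative):

	-	scsi_kick_queue(q);
	+	blk_mq_run_hw_queues(q, false);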
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
drivers/scsi/scsi_lib.c | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ad9afae49544..414d29eef968 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -300,11 +300,6 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
cmd->budget_token = -1;
}
-static void scsi_kick_queue(struct request_queue *q)
-{
- blk_mq_run_hw_queues(q, false);
-}
-
/*
* Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with
* interrupts disabled.
@@ -340,7 +335,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
* but in most cases, we will be first. Ideally, each LU on the
* target would get some limited time or requests on the target.
*/
- scsi_kick_queue(current_sdev->request_queue);
+ blk_mq_run_hw_queues(current_sdev->request_queue, false);
spin_lock_irqsave(shost->host_lock, flags);
if (!starget->starget_sdev_user)
@@ -427,7 +422,7 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_kick_queue(slq);
+ blk_mq_run_hw_queues(slq, false);
blk_put_queue(slq);
spin_lock_irqsave(shost->host_lock, flags);
* [PATCH 3/3] block: Improve performance for BLK_MQ_F_BLOCKING drivers
2023-07-17 20:52 [PATCH 0/3] Improve performance for BLK_MQ_F_BLOCKING drivers Bart Van Assche
2023-07-17 20:52 ` [PATCH 1/3] scsi: Inline scsi_kick_queue() Bart Van Assche
2023-07-17 20:52 ` [PATCH 2/3] scsi: Remove a blk_mq_run_hw_queues() call Bart Van Assche
@ 2023-07-17 20:52 ` Bart Van Assche
2023-07-18 4:54 ` Christoph Hellwig
From: Bart Van Assche @ 2023-07-17 20:52 UTC
To: Jens Axboe
Cc: linux-block, Christoph Hellwig, Martin K. Petersen,
Bart Van Assche, Ming Lei, James E.J. Bottomley
blk_mq_run_hw_queue() runs the queue asynchronously if BLK_MQ_F_BLOCKING
has been set. This is suboptimal since running the queue asynchronously
is slower than running it synchronously. This patch modifies
blk_mq_run_hw_queue() as follows if BLK_MQ_F_BLOCKING has been set (the
combined result is sketched below the list):
- Run the queue synchronously if it is allowed to sleep.
- Run the queue asynchronously if it is not allowed to sleep.
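Condensed, the resulting logic looks as follows (illustrative sketch only,
with the unchanged checks elided; the hunks below are authoritative):

	void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
	{
		/*
		 * Catch a synchronous run of a BLK_MQ_F_BLOCKING queue from
		 * atomic context (reported if CONFIG_DEBUG_ATOMIC_SLEEP=y).
		 */
		might_sleep_if(!async && (hctx->flags & BLK_MQ_F_BLOCKING));

		/* ... quiesced and need_run checks unchanged ... */

		/*
		 * BLK_MQ_F_BLOCKING no longer forces the asynchronous path;
		 * only an explicit async run or running on a CPU outside
		 * hctx->cpumask punts to a workqueue.
		 */
		if (async || !cpumask_test_cpu(raw_smp_processor_id(),
					       hctx->cpumask)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			return;
		}

		/* Run synchronously; this may sleep for blocking queues. */
		blk_mq_run_dispatch_ops(hctx->queue,
					blk_mq_sched_dispatch_requests(hctx));
	}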
Additionally, blk_mq_run_hw_queue(hctx, false) calls are modified into
blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING) if the caller
may be invoked from atomic context.
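The conversion pattern at such a call site is (see e.g. the
blk_mq_start_hw_queue() hunk below):

	-	blk_mq_run_hw_queue(hctx, false);
	+	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);

i.e. a BLK_MQ_F_BLOCKING queue is run asynchronously from a possibly atomic
context, while non-blocking queues keep the synchronous fast path.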
The following caller chains have been reviewed:
blk_mq_run_hw_queue(hctx, false)
  blk_mq_get_tag()              /* may sleep, hence the functions it calls may also sleep */
  blk_execute_rq_nowait()
    nvme_*()                    /* the NVMe driver does not set BLK_MQ_F_BLOCKING */
    scsi_eh_lock_door()         /* may sleep */
    sg_common_write()           /* implements an ioctl and hence may sleep */
    st_scsi_execute()           /* may sleep */
    pscsi_execute_cmd()         /* may sleep */
    ufshpb_execute_umap_req()   /* A request to remove HPB has been submitted. */
    ufshpb_execute_map_req()    /* A request to remove HPB has been submitted. */
  blk_execute_rq()              /* may sleep */
  blk_mq_run_hw_queues(q, async=false)
    blk_freeze_queue_start()    /* may sleep */
    blk_mq_requeue_work()       /* may sleep */
    scsi_kick_queue()
      scsi_requeue_run_queue()  /* may sleep */
      scsi_run_host_queues()
        scsi_ioctl_reset()      /* may sleep */
  blk_mq_insert_requests(hctx, ctx, list, run_queue_async=false)
    blk_mq_dispatch_plug_list(plug, from_sched=false)
      blk_mq_flush_plug_list(plug, from_schedule=false)
        __blk_flush_plug(plug, from_schedule=false)
        blk_add_rq_to_plug()
          blk_execute_rq_nowait()   /* see above */
          blk_mq_submit_bio()       /* may sleep if REQ_NOWAIT has not been set */
blk_mq_plug_issue_direct()
  blk_mq_flush_plug_list()      /* see above */
blk_mq_dispatch_plug_list(plug, from_sched=false)
  blk_mq_flush_plug_list()      /* see above */
blk_mq_try_issue_directly()
  blk_mq_submit_bio()           /* may sleep if REQ_NOWAIT has not been set */
blk_mq_try_issue_list_directly(hctx, list)
  blk_mq_insert_requests()      /* see above */
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
block/blk-mq.c | 17 +++++++++++------
drivers/scsi/scsi_lib.c | 3 +++
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5504719b970d..d5ab0bd8b472 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1289,7 +1289,8 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
*
* Description:
* Insert a fully prepared request at the back of the I/O scheduler queue
- * for execution. Don't wait for completion.
+ * for execution. Don't wait for completion. May sleep if BLK_MQ_F_BLOCKING
+ * has been set.
*
* Note:
* This function will invoke @done directly if the queue is dead.
@@ -2213,6 +2214,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
*/
WARN_ON_ONCE(!async && in_interrupt());
+ might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
+
/*
* When queue is quiesced, we may be switching io scheduler, or
* updating nr_hw_queues, or other things, and we can't run queue
@@ -2228,8 +2231,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
if (!need_run)
return;
- if (async || (hctx->flags & BLK_MQ_F_BLOCKING) ||
- !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+ if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
blk_mq_delay_run_hw_queue(hctx, 0);
return;
}
@@ -2364,7 +2366,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- blk_mq_run_hw_queue(hctx, false);
+ blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -2394,7 +2396,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
unsigned long i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_start_stopped_hw_queue(hctx, async);
+ blk_mq_start_stopped_hw_queue(hctx, async ||
+ (hctx->flags & BLK_MQ_F_BLOCKING));
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
@@ -2452,6 +2455,8 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
list_for_each_entry(rq, list, queuelist) {
BUG_ON(rq->mq_ctx != ctx);
trace_block_rq_insert(rq);
+ if (rq->cmd_flags & REQ_NOWAIT)
+ run_queue_async = true;
}
spin_lock(&ctx->lock);
@@ -2612,7 +2617,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
blk_mq_insert_request(rq, 0);
- blk_mq_run_hw_queue(hctx, false);
+ blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
return;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7043ca0f4da9..197942db8016 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -329,6 +329,9 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
starget->starget_sdev_user = NULL;
spin_unlock_irqrestore(shost->host_lock, flags);
+ /* Combining BLIST_SINGLELUN with BLK_MQ_F_BLOCKING is not supported. */
+ WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
+
/*
* Call blk_run_queue for all LUNs on the target, starting with
* current_sdev. We race with others (to set starget_sdev_user),