From: Jens Axboe <axboe@kernel.dk>
To: Song Liu <song@kernel.org>
Cc: linux-block@vger.kernel.org,
linux-raid <linux-raid@vger.kernel.org>,
stable@vger.kernel.org, Larkin Lowrey <llowrey@nuclearwinter.com>,
Wilson Jonathan <i400sjon@gmail.com>,
Roger Heflin <rogerheflin@gmail.com>
Subject: Re: [PATCH] block: check more requests for multiple_queues in blk_attempt_plug_merge
Date: Fri, 11 Mar 2022 07:16:26 -0700 [thread overview]
Message-ID: <84310ba2-a413-22f4-1349-59a09f4851a1@kernel.dk> (raw)
In-Reply-To: <2b437948-ba2a-c59c-1059-e937ea8636bd@kernel.dk>
On 3/10/22 5:07 PM, Jens Axboe wrote:
> In any case, just doing larger reads would likely help quite a bit, but
> would still be nice to get to the bottom of why we're not seeing the
> level of merging we expect.
Song, can you try this one? It'll do the dispatch in a somewhat saner
fashion, bundling identical queues. And we'll keep iterating the plug
list for a merge if we have multiple disks, until we've found a queue
match and attempted the merge.
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0e871d4e7cb8..68b623d00db5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -1073,12 +1073,20 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
if (!plug || rq_list_empty(plug->mq_list))
return false;
- /* check the previously added entry for a quick merge attempt */
- rq = rq_list_peek(&plug->mq_list);
- if (rq->q == q) {
- if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
- BIO_MERGE_OK)
- return true;
+ rq_list_for_each(&plug->mq_list, rq) {
+ if (rq->q == q) {
+ if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
+ BIO_MERGE_OK)
+ return true;
+ break;
+ }
+
+ /*
+ * Only keep iterating plug list for merges if we have multiple
+ * queues
+ */
+ if (!plug->multiple_queues)
+ break;
}
return false;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bb263abbb40f..9c784262fd6b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2576,13 +2576,36 @@ static void __blk_mq_flush_plug_list(struct request_queue *q,
q->mq_ops->queue_rqs(&plug->mq_list);
}
+static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+{
+ struct blk_mq_hw_ctx *this_hctx = NULL;
+ struct blk_mq_ctx *this_ctx = NULL;
+ struct request *requeue_list = NULL;
+ unsigned int depth = 0;
+ LIST_HEAD(list);
+
+ do {
+ struct request *rq = rq_list_pop(&plug->mq_list);
+
+ if (!this_hctx) {
+ this_hctx = rq->mq_hctx;
+ this_ctx = rq->mq_ctx;
+ } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+ rq_list_add(&requeue_list, rq);
+ continue;
+ }
+ list_add_tail(&rq->queuelist, &list);
+ depth++;
+ } while (!rq_list_empty(plug->mq_list));
+
+ plug->mq_list = requeue_list;
+ trace_block_unplug(this_hctx->queue, depth, !from_sched);
+ blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+}
+
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
- struct blk_mq_hw_ctx *this_hctx;
- struct blk_mq_ctx *this_ctx;
struct request *rq;
- unsigned int depth;
- LIST_HEAD(list);
if (rq_list_empty(plug->mq_list))
return;
@@ -2618,35 +2641,9 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
return;
}
- this_hctx = NULL;
- this_ctx = NULL;
- depth = 0;
do {
- rq = rq_list_pop(&plug->mq_list);
-
- if (!this_hctx) {
- this_hctx = rq->mq_hctx;
- this_ctx = rq->mq_ctx;
- } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
- trace_block_unplug(this_hctx->queue, depth,
- !from_schedule);
- blk_mq_sched_insert_requests(this_hctx, this_ctx,
- &list, from_schedule);
- depth = 0;
- this_hctx = rq->mq_hctx;
- this_ctx = rq->mq_ctx;
-
- }
-
- list_add(&rq->queuelist, &list);
- depth++;
+ blk_mq_dispatch_plug_list(plug, from_schedule);
} while (!rq_list_empty(plug->mq_list));
-
- if (!list_empty(&list)) {
- trace_block_unplug(this_hctx->queue, depth, !from_schedule);
- blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
- from_schedule);
- }
}
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
--
Jens Axboe
next prev parent reply other threads:[~2022-03-11 14:16 UTC|newest]
Thread overview: 31+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-03-09 6:42 [PATCH] block: check more requests for multiple_queues in blk_attempt_plug_merge Song Liu
2022-03-10 6:48 ` Christoph Hellwig
2022-03-10 7:23 ` Song Liu
2022-03-10 22:10 ` Song Liu
2022-03-10 22:12 ` Song Liu
2022-03-10 22:15 ` Jens Axboe
2022-03-10 22:37 ` Song Liu
2022-03-10 23:02 ` Jens Axboe
2022-03-10 23:33 ` Song Liu
2022-03-11 0:07 ` Jens Axboe
2022-03-11 0:31 ` Song Liu
2022-03-11 0:36 ` Jens Axboe
2022-03-11 0:38 ` Jens Axboe
2022-03-11 1:14 ` Ming Lei
2022-03-11 1:21 ` Jens Axboe
2022-03-11 1:32 ` Ming Lei
2022-03-11 1:35 ` Jens Axboe
2022-03-11 8:09 ` Wols Lists
2022-03-11 14:16 ` Jens Axboe [this message]
2022-03-11 16:59 ` Song Liu
2022-03-11 21:41 ` Paul Menzel
2022-03-11 22:40 ` Song Liu
2022-03-30 14:39 ` Larkin Lowrey
2022-03-30 14:49 ` Jens Axboe
2022-03-30 15:17 ` Greg Kroah-Hartman
2022-03-30 15:39 ` Jens Axboe
[not found] ` <CAAMCDefQQqwsLNmBjArTipLDnKzW2nQBW4MTHajrjKS4oi=JFg@mail.gmail.com>
2022-03-30 18:50 ` Song Liu
2022-03-30 19:01 ` Jens Axboe
2022-03-31 6:14 ` Greg Kroah-Hartman
2022-03-11 11:30 ` Wilson Jonathan
2022-03-11 15:58 ` Wols Lists
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=84310ba2-a413-22f4-1349-59a09f4851a1@kernel.dk \
--to=axboe@kernel.dk \
--cc=i400sjon@gmail.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-raid@vger.kernel.org \
--cc=llowrey@nuclearwinter.com \
--cc=rogerheflin@gmail.com \
--cc=song@kernel.org \
--cc=stable@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).