* cleanup blk_mq_submit_bio
From: Christoph Hellwig @ 2021-11-23 16:04 UTC
To: axboe; +Cc: linux-block
Hi Jens,
this series refactors and cleans up the blk_mq_submit_bio path.
* [PATCH 1/3] blk-mq: simplify the plug handling in blk_mq_submit_bio
From: Christoph Hellwig @ 2021-11-23 16:04 UTC
To: axboe; +Cc: linux-block
blk_mq_submit_bio has two different plug cases: one that uses full
plugging and one that uses limited plugging.
The limited plugging case is only used for a corner case that does
not matter in real life:
- no ->commit_rqs (so not NVMe)
- no shared tags (so not SCSI)
- not rotational (so no old disk or floppy driver)
- must have multiple queues (so no eMMC)
Remove the limited plugging case and all the related junk to simplify
blk_mq_submit_bio and the functions called from it.
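Concretely, full plugging was previously selected by the predicate
below, so the limited path was reached only when all four conditions
above held at once (a condensed paraphrase of the blk-mq.c hunk that
follows; the boolean name is only illustrative):

    /* Sketch of the old gate, paraphrased from the hunk below. */
    bool use_full_plugging =
            q->nr_hw_queues == 1 ||                      /* single hw queue */
            blk_mq_is_shared_tags(rq->mq_hctx->flags) || /* shared tags, e.g. SCSI */
            q->mq_ops->commit_rqs ||                     /* ->commit_rqs hook, e.g. NVMe */
            !blk_queue_nonrot(q);                        /* rotational device */

With this change any request that sees a plug takes the full plugging
path unconditionally.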
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-merge.c | 9 +------
block/blk-mq.c | 68 +++++++++--------------------------------------
block/blk.h | 2 +-
3 files changed, 15 insertions(+), 64 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 893c1a60b701f..ba761c3f482ba 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -1067,7 +1067,6 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
* @nr_segs: number of segments in @bio
- * @same_queue_rq: output value, will be true if there's an existing request
- * from the passed in @q already in the plug list
*
* Determine whether @bio being queued on @q can be merged with the previous
@@ -1084,7 +1083,7 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
* Caller must ensure !blk_queue_nomerges(q) beforehand.
*/
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int nr_segs, bool *same_queue_rq)
+ unsigned int nr_segs)
{
struct blk_plug *plug;
struct request *rq;
@@ -1096,12 +1095,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
/* check the previously added entry for a quick merge attempt */
rq = rq_list_peek(&plug->mq_list);
if (rq->q == q) {
- /*
- * Only blk-mq multiple hardware queues case checks the rq in
- * the same queue, there should be only one such rq in a queue
- */
- *same_queue_rq = true;
-
if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
BIO_MERGE_OK)
return true;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1feb9ab65f28a..f05c458d983b4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2689,11 +2689,10 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
}
static bool blk_mq_attempt_bio_merge(struct request_queue *q,
- struct bio *bio, unsigned int nr_segs,
- bool *same_queue_rq)
+ struct bio *bio, unsigned int nr_segs)
{
if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
- if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
+ if (blk_attempt_plug_merge(q, bio, nr_segs))
return true;
if (blk_mq_sched_bio_merge(q, bio, nr_segs))
return true;
@@ -2704,8 +2703,7 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
static struct request *blk_mq_get_new_requests(struct request_queue *q,
struct blk_plug *plug,
struct bio *bio,
- unsigned int nsegs,
- bool *same_queue_rq)
+ unsigned int nsegs)
{
struct blk_mq_alloc_data data = {
.q = q,
@@ -2714,7 +2712,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
};
struct request *rq;
- if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs))
return NULL;
rq_qos_throttle(q, bio);
@@ -2751,8 +2749,7 @@ static inline bool blk_mq_can_use_cached_rq(struct request *rq,
static inline struct request *blk_mq_get_request(struct request_queue *q,
struct blk_plug *plug,
struct bio *bio,
- unsigned int nsegs,
- bool *same_queue_rq)
+ unsigned int nsegs)
{
struct request *rq;
bool checked = false;
@@ -2763,8 +2760,7 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
if (rq && rq->q == q) {
if (unlikely(!submit_bio_checks(bio)))
return NULL;
- if (blk_mq_attempt_bio_merge(q, bio, nsegs,
- same_queue_rq))
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs))
return NULL;
checked = true;
if (!blk_mq_can_use_cached_rq(rq, bio))
@@ -2782,7 +2778,7 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
return NULL;
if (!checked && !submit_bio_checks(bio))
return NULL;
- rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+ rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
if (!rq)
blk_queue_exit(q);
return rq;
@@ -2807,7 +2803,6 @@ void blk_mq_submit_bio(struct bio *bio)
const int is_sync = op_is_sync(bio->bi_opf);
struct request *rq;
struct blk_plug *plug;
- bool same_queue_rq = false;
unsigned int nr_segs = 1;
blk_status_t ret;
@@ -2822,7 +2817,7 @@ void blk_mq_submit_bio(struct bio *bio)
return;
plug = blk_mq_plug(q, bio);
- rq = blk_mq_get_request(q, plug, bio, nr_segs, &same_queue_rq);
+ rq = blk_mq_get_request(q, plug, bio, nr_segs);
if (unlikely(!rq))
return;
@@ -2843,16 +2838,7 @@ void blk_mq_submit_bio(struct bio *bio)
if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
return;
- if (plug && (q->nr_hw_queues == 1 ||
- blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
- q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
- /*
- * Use plugging if we have a ->commit_rqs() hook as well, as
- * we know the driver uses bd->last in a smart fashion.
- *
- * Use normal plugging if this disk is slow HDD, as sequential
- * IO may benefit a lot from plug merging.
- */
+ if (plug) {
unsigned int request_count = plug->rq_count;
struct request *last = NULL;
@@ -2870,40 +2856,12 @@ void blk_mq_submit_bio(struct bio *bio)
}
blk_add_rq_to_plug(plug, rq);
- } else if (rq->rq_flags & RQF_ELV) {
- /* Insert the request at the IO scheduler queue */
+ } else if ((rq->rq_flags & RQF_ELV) ||
+ (rq->mq_hctx->dispatch_busy &&
+ (q->nr_hw_queues == 1 || !is_sync))) {
blk_mq_sched_insert_request(rq, false, true, true);
- } else if (plug && !blk_queue_nomerges(q)) {
- struct request *next_rq = NULL;
-
- /*
- * We do limited plugging. If the bio can be merged, do that.
- * Otherwise the existing request in the plug list will be
- * issued. So the plug list will have one request at most
- * The plug list might get flushed before this. If that happens,
- * the plug list is empty, and same_queue_rq is invalid.
- */
- if (same_queue_rq) {
- next_rq = rq_list_pop(&plug->mq_list);
- plug->rq_count--;
- }
- blk_add_rq_to_plug(plug, rq);
- trace_block_plug(q);
-
- if (next_rq) {
- trace_block_unplug(q, 1, true);
- blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
- }
- } else if ((q->nr_hw_queues > 1 && is_sync) ||
- !rq->mq_hctx->dispatch_busy) {
- /*
- * There is no scheduler and we can try to send directly
- * to the hardware.
- */
- blk_mq_try_issue_directly(rq->mq_hctx, rq);
} else {
- /* Default case. */
- blk_mq_sched_insert_request(rq, false, true, true);
+ blk_mq_try_issue_directly(rq->mq_hctx, rq);
}
}
diff --git a/block/blk.h b/block/blk.h
index 296e3010f8d65..cfac3bdeb77d9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -253,7 +253,7 @@ void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int nr_segs, bool *same_queue_rq);
+ unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio, unsigned int nr_segs);
--
2.30.2
* [PATCH 2/3] blk-mq: move more plug handling from blk_mq_submit_bio into blk_add_rq_to_plug
From: Christoph Hellwig @ 2021-11-23 16:04 UTC
To: axboe; +Cc: linux-block
Keep all the functionality for adding a request to a plug in a single place.
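In outline, the consolidated helper now handles the plug trace points,
the flush threshold, and the final list insertion in one place (a
condensed sketch of the function added in the hunk below, with the
multiple_queues/has_elevator flag updates elided):

    static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
    {
            struct request *last = rq_list_peek(&plug->mq_list);

            /* The first request opens the plug trace window. */
            if (!plug->rq_count)
                    trace_block_plug(rq->q);
            /* A full plug, or a large enough last request, forces a flush. */
            else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
                     (!blk_queue_nomerges(rq->q) &&
                      blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
                    blk_mq_flush_plug_list(plug, false);
                    trace_block_plug(rq->q);
            }

            /* ... flag updates elided ... */
            rq->rq_next = NULL;
            rq_list_add(&plug->mq_list, rq);
            plug->rq_count++;
    }

The plug branch in blk_mq_submit_bio then shrinks to a plain
blk_add_rq_to_plug(plug, rq) call, as the hunk below shows.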
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-mq.c | 64 +++++++++++++++++++++-----------------------------
1 file changed, 27 insertions(+), 37 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f05c458d983b4..9e91587997763 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2661,21 +2661,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
hctx->queue->mq_ops->commit_rqs(hctx);
}
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
- if (!plug->multiple_queues) {
- struct request *nxt = rq_list_peek(&plug->mq_list);
-
- if (nxt && nxt->q != rq->q)
- plug->multiple_queues = true;
- }
- if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
- plug->has_elevator = true;
- rq->rq_next = NULL;
- rq_list_add(&plug->mq_list, rq);
- plug->rq_count++;
-}
-
/*
* Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
* queues. This is important for md arrays to benefit from merging
@@ -2688,6 +2673,28 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
return BLK_MAX_REQUEST_COUNT;
}
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+ struct request *last = rq_list_peek(&plug->mq_list);
+
+ if (!plug->rq_count) {
+ trace_block_plug(rq->q);
+ } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
+ (!blk_queue_nomerges(rq->q) &&
+ blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+ blk_mq_flush_plug_list(plug, false);
+ trace_block_plug(rq->q);
+ }
+
+ if (!plug->multiple_queues && last && last->q != rq->q)
+ plug->multiple_queues = true;
+ if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
+ plug->has_elevator = true;
+ rq->rq_next = NULL;
+ rq_list_add(&plug->mq_list, rq);
+ plug->rq_count++;
+}
+
static bool blk_mq_attempt_bio_merge(struct request_queue *q,
struct bio *bio, unsigned int nr_segs)
{
@@ -2838,31 +2845,14 @@ void blk_mq_submit_bio(struct bio *bio)
if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
return;
- if (plug) {
- unsigned int request_count = plug->rq_count;
- struct request *last = NULL;
-
- if (!request_count) {
- trace_block_plug(q);
- } else if (!blk_queue_nomerges(q)) {
- last = rq_list_peek(&plug->mq_list);
- if (blk_rq_bytes(last) < BLK_PLUG_FLUSH_SIZE)
- last = NULL;
- }
-
- if (request_count >= blk_plug_max_rq_count(plug) || last) {
- blk_mq_flush_plug_list(plug, false);
- trace_block_plug(q);
- }
-
+ if (plug)
blk_add_rq_to_plug(plug, rq);
- } else if ((rq->rq_flags & RQF_ELV) ||
- (rq->mq_hctx->dispatch_busy &&
- (q->nr_hw_queues == 1 || !is_sync))) {
+ else if ((rq->rq_flags & RQF_ELV) ||
+ (rq->mq_hctx->dispatch_busy &&
+ (q->nr_hw_queues == 1 || !is_sync)))
blk_mq_sched_insert_request(rq, false, true, true);
- } else {
+ else
blk_mq_try_issue_directly(rq->mq_hctx, rq);
- }
}
/**
--
2.30.2
* [PATCH 3/3] blk-mq: cleanup request allocation
From: Christoph Hellwig @ 2021-11-23 16:04 UTC
To: axboe; +Cc: linux-block
Refactor the request allocation so that blk_mq_get_cached_request tries
to find a cached request first, and the entirely separate and now
self-contained blk_mq_get_new_requests allocates one or more requests
if that is not possible.
There is a small change in behavior: submit_bio_checks is now called
twice if a cached request is present but can't be used. That is a small
price to pay for unwinding this code.
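With the split, the submission path first tries the plug's cached
request and only falls back to a fresh allocation (condensed from the
blk-mq.c hunks below):

    rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
    if (!rq) {
            rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
            if (unlikely(!rq))
                    return; /* merged, failed checks, or allocation failed */
    }

The duplicated submit_bio_checks mentioned above follows from each
helper doing its own checks: a cached request that passes them but then
fails the flush-flag comparison is abandoned, and
blk_mq_get_new_requests runs the checks a second time.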
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-mq.c | 86 +++++++++++++++++++++-----------------------------
1 file changed, 36 insertions(+), 50 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9e91587997763..ff719a17e5f08 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2719,8 +2719,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
};
struct request *rq;
- if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+ if (unlikely(bio_queue_enter(bio)))
return NULL;
+ if (unlikely(!submit_bio_checks(bio)))
+ goto queue_exit;
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+ goto queue_exit;
rq_qos_throttle(q, bio);
@@ -2731,63 +2735,43 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
}
rq = __blk_mq_alloc_requests(&data);
- if (rq)
- return rq;
+ if (!rq)
+ goto fail;
+ return rq;
+fail:
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
-
+queue_exit:
+ blk_queue_exit(q);
return NULL;
}
-static inline bool blk_mq_can_use_cached_rq(struct request *rq,
- struct bio *bio)
-{
- if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
- return false;
-
- if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
- return false;
-
- return true;
-}
-
-static inline struct request *blk_mq_get_request(struct request_queue *q,
- struct blk_plug *plug,
- struct bio *bio,
- unsigned int nsegs)
+static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
{
struct request *rq;
- bool checked = false;
-
- if (plug) {
- rq = rq_list_peek(&plug->cached_rq);
- if (rq && rq->q == q) {
- if (unlikely(!submit_bio_checks(bio)))
- return NULL;
- if (blk_mq_attempt_bio_merge(q, bio, nsegs))
- return NULL;
- checked = true;
- if (!blk_mq_can_use_cached_rq(rq, bio))
- goto fallback;
- rq->cmd_flags = bio->bi_opf;
- plug->cached_rq = rq_list_next(rq);
- INIT_LIST_HEAD(&rq->queuelist);
- rq_qos_throttle(q, bio);
- return rq;
- }
- }
+ if (!plug)
+ return NULL;
+ rq = rq_list_peek(&plug->cached_rq);
+ if (!rq || rq->q != q)
+ return NULL;
-fallback:
- if (unlikely(bio_queue_enter(bio)))
+ if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
return NULL;
- if (!checked && !submit_bio_checks(bio))
+ if (unlikely(!submit_bio_checks(bio)))
return NULL;
- rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
- if (!rq)
- blk_queue_exit(q);
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+ return NULL;
+ if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+ return NULL;
+
+ rq->cmd_flags = bio->bi_opf;
+ plug->cached_rq = rq_list_next(rq);
+ INIT_LIST_HEAD(&rq->queuelist);
+ rq_qos_throttle(q, bio);
return rq;
}
@@ -2807,9 +2791,9 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
void blk_mq_submit_bio(struct bio *bio)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ struct blk_plug *plug = blk_mq_plug(q, bio);
const int is_sync = op_is_sync(bio->bi_opf);
struct request *rq;
- struct blk_plug *plug;
unsigned int nr_segs = 1;
blk_status_t ret;
@@ -2823,10 +2807,12 @@ void blk_mq_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
return;
- plug = blk_mq_plug(q, bio);
- rq = blk_mq_get_request(q, plug, bio, nr_segs);
- if (unlikely(!rq))
- return;
+ rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
+ if (!rq) {
+ rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+ if (unlikely(!rq))
+ return;
+ }
trace_block_getrq(bio);
--
2.30.2
* Re: [PATCH 3/3] blk-mq: cleanup request allocation
From: Jens Axboe @ 2021-11-23 16:09 UTC
To: Christoph Hellwig; +Cc: linux-block
On 11/23/21 9:04 AM, Christoph Hellwig wrote:
> Refactor the request allocation so that blk_mq_get_cached_request tries
> to find a cached request first, and the entirely separate and now
> self-contained blk_mq_get_new_requests allocates one or more requests
> if that is not possible.
>
> There is a small change in behavior: submit_bio_checks is now called
> twice if a cached request is present but can't be used. That is a small
> price to pay for unwinding this code.
I don't think that's an issue; the only side effect that matters here
is the remap, which is tracked in the bio anyway.
--
Jens Axboe
* Re: [PATCH 3/3] blk-mq: cleanup request allocation
From: Jens Axboe @ 2021-11-23 16:14 UTC
To: Christoph Hellwig; +Cc: linux-block
On 11/23/21 9:04 AM, Christoph Hellwig wrote:
> Refactor the request allocation so that blk_mq_get_cached_request tries
> to find a cached request first, and the entirely separate and now
> self-contained blk_mq_get_new_requests allocates one or more requests
> if that is not possible.
>
> There is a small change in behavior: submit_bio_checks is now called
> twice if a cached request is present but can't be used. That is a small
> price to pay for unwinding this code.
I've done 1-2 from this series; can you resend this one against the
current tree? With the fixes in 5.16-rc it needs some adjustments, and
I don't want to hand-apply it (I did with #2).
--
Jens Axboe
* Re: [PATCH 1/3] blk-mq: simplify the plug handling in blk_mq_submit_bio
From: Jens Axboe @ 2021-11-23 19:23 UTC
To: Christoph Hellwig; +Cc: linux-block
On Tue, 23 Nov 2021 17:04:41 +0100, Christoph Hellwig wrote:
> blk_mq_submit_bio has two different plug cases: one that uses full
> plugging and one that uses limited plugging.
>
> The limited plugging case is only used for a corner case that does
> not matter in real life:
>
> - no ->commit_rqs (so not NVMe)
> - no shared tags (so not SCSI)
> - not rotational (so no old disk or floppy driver)
> - must have multiple queues (so no eMMC)
>
> [...]
Applied, thanks!
[1/3] blk-mq: simplify the plug handling in blk_mq_submit_bio
commit: bb5b684ffe6deb797ed36b2b323f747a5f7d1a2c
[2/3] blk-mq: move more plug handling from blk_mq_submit_bio into blk_add_rq_to_plug
commit: da7bdd66a69b14d13ff8f9064efc524081e64335
[3/3] blk-mq: cleanup request allocation
(no commit info)
Best regards,
--
Jens Axboe