From: Ming Lei <ming.lei@redhat.com>
To: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org, Ming Lei <ming.lei@redhat.com>,
Yi Zhang <yi.zhang@redhat.com>
Subject: [PATCH] blk-mq: make sure active queue usage is held for bio_integrity_prep()
Date: Wed, 8 Nov 2023 16:05:04 +0800 [thread overview]
Message-ID: <20231108080504.2144952-1-ming.lei@redhat.com> (raw)
blk_integrity_unregister() can be called while a bio with integrity data
prepared is still in flight if the queue usage counter isn't held, so the
request may be completed by calling profile->complete_fn after the profile
is gone, causing a kernel panic.
Another constraint is that bio_integrity_prep() needs to be called
before bio merge.
Fix the issue by:
- call bio_integrity_prep() with one queue usage counter grabbed reliably
- call bio_integrity_prep() before bio merge
Fixes: 900e080752025f00 ("block: move queue enter logic into blk_mq_submit_bio()")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
block/blk-mq.c | 71 ++++++++++++++++++++++++++++++++++----------------
1 file changed, 48 insertions(+), 23 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e2d11183f62e..80f36096f16f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2858,11 +2858,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
};
struct request *rq;
- if (unlikely(bio_queue_enter(bio)))
- return NULL;
-
if (blk_mq_attempt_bio_merge(q, bio, nsegs))
- goto queue_exit;
+ return NULL;
rq_qos_throttle(q, bio);
@@ -2878,35 +2875,43 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
-queue_exit:
- blk_queue_exit(q);
return NULL;
}
-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+/* a cached request already holds the active queue usage counter */
+static inline struct request *blk_mq_cached_req(const struct request_queue *q,
+ const struct blk_plug *plug)
+{
+ if (plug) {
+ struct request *rq = rq_list_peek(&plug->cached_rq);
+
+ if (rq && rq->q == q)
+ return rq;
+ }
+ return NULL;
+}
+
+/* return true if this bio needs to be handled by allocating a new request */
+static inline bool blk_mq_try_cached_rq(struct request *rq,
struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
{
- struct request *rq;
+ struct request_queue *q = rq->q;
enum hctx_type type, hctx_type;
- if (!plug)
- return NULL;
- rq = rq_list_peek(&plug->cached_rq);
- if (!rq || rq->q != q)
- return NULL;
+ WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
*bio = NULL;
- return NULL;
+ return false;
}
type = blk_mq_get_hctx_type((*bio)->bi_opf);
hctx_type = rq->mq_hctx->type;
if (type != hctx_type &&
!(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
- return NULL;
+ return true;
if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
- return NULL;
+ return true;
/*
* If any qos ->throttle() end up blocking, we will have flushed the
@@ -2919,7 +2924,8 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
blk_mq_rq_time_init(rq, 0);
rq->cmd_flags = (*bio)->bi_opf;
INIT_LIST_HEAD(&rq->queuelist);
- return rq;
+
+ return false;
}
static void bio_set_ioprio(struct bio *bio)
@@ -2951,6 +2957,7 @@ void blk_mq_submit_bio(struct bio *bio)
struct blk_mq_hw_ctx *hctx;
struct request *rq;
unsigned int nr_segs = 1;
+ bool need_alloc = true;
blk_status_t ret;
bio = blk_queue_bounce(bio, q);
@@ -2960,18 +2967,36 @@ void blk_mq_submit_bio(struct bio *bio)
return;
}
- if (!bio_integrity_prep(bio))
- return;
-
bio_set_ioprio(bio);
- rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
- if (!rq) {
+ rq = blk_mq_cached_req(q, plug);
+ if (rq) {
+		/* the cached request already holds the queue usage counter */
+ if (!bio_integrity_prep(bio))
+ return;
+
+ need_alloc = blk_mq_try_cached_rq(rq, plug, &bio, nr_segs);
if (!bio)
return;
+ }
+
+ if (need_alloc) {
+ if (!rq) {
+ if (unlikely(bio_queue_enter(bio)))
+ return;
+
+ if (!bio_integrity_prep(bio))
+ return;
+ } else {
+			/* the cached request already holds the queue usage counter */
+ percpu_ref_get(&q->q_usage_counter);
+ }
+
rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ blk_queue_exit(q);
return;
+ }
}
trace_block_getrq(bio);
--
2.41.0
next reply other threads:[~2023-11-08 8:05 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-11-08 8:05 Ming Lei [this message]
2023-11-09 7:30 ` [PATCH] blk-mq: make sure active queue usage is held for bio_integrity_prep() Christoph Hellwig
2023-11-09 8:11 ` Ming Lei
2023-11-09 14:34 ` Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231108080504.2144952-1-ming.lei@redhat.com \
--to=ming.lei@redhat.com \
--cc=axboe@kernel.dk \
--cc=linux-block@vger.kernel.org \
--cc=yi.zhang@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox