From: Ming Lei <ming.lei@redhat.com>
To: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org, Ming Lei <ming.lei@redhat.com>,
John Garry <john.garry@huawei.com>,
Bart Van Assche <bvanassche@acm.org>,
Hannes Reinecke <hare@suse.com>, Christoph Hellwig <hch@lst.de>,
Thomas Gleixner <tglx@linutronix.de>
Subject: [PATCH V6 6/8] blk-mq: re-submit IO in case that hctx is inactive
Date: Tue, 7 Apr 2020 17:28:59 +0800 [thread overview]
Message-ID: <20200407092901.314228-7-ming.lei@redhat.com> (raw)
In-Reply-To: <20200407092901.314228-1-ming.lei@redhat.com>
When all CPUs in one hctx are offline and this hctx becomes inactive,
we shouldn't run this hw queue to complete requests any more.
So steal the bios from the request, resubmit them, and finally free
the request in blk_mq_hctx_notify_dead().
Cc: John Garry <john.garry@huawei.com>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
block/blk-mq.c | 131 +++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 121 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index aac86cd99f02..6749f39fdd11 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2319,10 +2319,98 @@ static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
return 0;
}
+static void blk_mq_resubmit_end_io(struct request *rq, blk_status_t error)
+{
+ struct request *orig_rq = rq->end_io_data;
+
+ blk_mq_cleanup_rq(orig_rq);
+ blk_mq_end_request(orig_rq, error);
+
+ blk_put_request(rq);
+}
+
+static void blk_mq_resubmit_passthrough_io(struct request *rq)
+{
+ struct request *nrq;
+ unsigned int flags = 0, cmd_flags = 0;
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ struct blk_mq_tags *tags = rq->q->elevator ? hctx->sched_tags :
+ hctx->tags;
+ bool reserved = blk_mq_tag_is_reserved(tags, rq->internal_tag);
+
+ if (rq->rq_flags & RQF_PREEMPT)
+ flags |= BLK_MQ_REQ_PREEMPT;
+ if (reserved)
+ flags |= BLK_MQ_REQ_RESERVED;
+
+ /* avoid allocation failure & IO merge */
+ cmd_flags = (rq->cmd_flags & ~REQ_NOWAIT) | REQ_NOMERGE;
+
+ nrq = blk_get_request(rq->q, cmd_flags, flags);
+ if (!nrq)
+ return;
+
+ nrq->__sector = blk_rq_pos(rq);
+ nrq->__data_len = blk_rq_bytes(rq);
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ nrq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+ nrq->special_vec = rq->special_vec;
+ }
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ nrq->nr_integrity_segments = rq->nr_integrity_segments;
+#endif
+ nrq->nr_phys_segments = rq->nr_phys_segments;
+ nrq->ioprio = rq->ioprio;
+ nrq->extra_len = rq->extra_len;
+ nrq->rq_disk = rq->rq_disk;
+ nrq->part = rq->part;
+ nrq->write_hint = rq->write_hint;
+ nrq->timeout = rq->timeout;
+
+ memcpy(blk_mq_rq_to_pdu(nrq), blk_mq_rq_to_pdu(rq),
+ rq->q->tag_set->cmd_size);
+
+ nrq->end_io = blk_mq_resubmit_end_io;
+ nrq->end_io_data = rq;
+ nrq->bio = rq->bio;
+ nrq->biotail = rq->biotail;
+
+ blk_account_io_start(nrq, true);
+ blk_mq_sched_insert_request(nrq, true, true, true);
+}
+
+static void blk_mq_resubmit_fs_io(struct request *rq)
+{
+ struct bio_list list;
+ struct bio *bio;
+
+ bio_list_init(&list);
+ blk_steal_bios(&list, rq);
+
+ while (true) {
+ bio = bio_list_pop(&list);
+ if (!bio)
+ break;
+
+ generic_make_request(bio);
+ }
+
+ blk_mq_cleanup_rq(rq);
+ blk_mq_end_request(rq, 0);
+}
+
+static void blk_mq_resubmit_io(struct request *rq)
+{
+ if (rq->end_io || blk_rq_is_passthrough(rq))
+ blk_mq_resubmit_passthrough_io(rq);
+ else
+ blk_mq_resubmit_fs_io(rq);
+}
+
/*
- * 'cpu' is going away. splice any existing rq_list entries from this
- * software queue to the hw queue dispatch list, and ensure that it
- * gets run.
+ * 'cpu' has gone away. If this hctx is inactive, we can't dispatch request
+ * to the hctx any more, so steal bios from requests of this hctx, and
+ * re-submit them to the request queue, and free these requests finally.
*/
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
@@ -2342,16 +2430,39 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
}
spin_unlock(&ctx->lock);
- clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
+ if (!test_bit(BLK_MQ_S_INACTIVE, &hctx->state)) {
+ if (!list_empty(&tmp)) {
+ spin_lock(&hctx->lock);
+ list_splice_tail_init(&tmp, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
+ blk_mq_run_hw_queue(hctx, true);
+ }
+ } else {
+ LIST_HEAD(flush_in);
+ LIST_HEAD(flush_out);
+ struct request *rq, *nxt;
- if (list_empty(&tmp))
- return 0;
+ /* requests in dispatch list have to be re-submitted too */
+ spin_lock(&hctx->lock);
+ list_splice_tail_init(&hctx->dispatch, &tmp);
+ spin_unlock(&hctx->lock);
- spin_lock(&hctx->lock);
- list_splice_tail_init(&tmp, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ /* blk_end_flush_machinery will cover flush request */
+ list_for_each_entry_safe(rq, nxt, &tmp, queuelist) {
+ if (rq->rq_flags & RQF_FLUSH_SEQ)
+ list_move(&rq->queuelist, &flush_in);
+ }
+ blk_end_flush_machinery(hctx, &flush_in, &flush_out);
+ list_splice_tail(&flush_out, &tmp);
+
+ while (!list_empty(&tmp)) {
+ rq = list_first_entry(&tmp, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ blk_mq_resubmit_io(rq);
+ }
+ clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
+ }
- blk_mq_run_hw_queue(hctx, true);
return 0;
}
--
2.25.2
next prev parent reply other threads:[~2020-04-07 9:29 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-04-07 9:28 [PATCH V6 0/8] blk-mq: improvement CPU hotplug Ming Lei
2020-04-07 9:28 ` [PATCH V6 1/8] blk-mq: assign rq->tag in blk_mq_get_driver_tag Ming Lei
2020-04-07 17:14 ` Christoph Hellwig
2020-04-08 1:38 ` Ming Lei
2020-04-07 9:28 ` [PATCH V6 2/8] blk-mq: add new state of BLK_MQ_S_INACTIVE Ming Lei
2020-04-07 17:14 ` Christoph Hellwig
2020-04-07 9:28 ` [PATCH V6 3/8] blk-mq: prepare for draining IO when hctx's all CPUs are offline Ming Lei
2020-04-07 9:28 ` [PATCH V6 4/8] blk-mq: stop to handle IO and drain IO before hctx becomes inactive Ming Lei
2020-04-07 9:28 ` [PATCH V6 5/8] block: add blk_end_flush_machinery Ming Lei
2020-04-07 9:28 ` Ming Lei [this message]
2020-04-07 9:29 ` [PATCH V6 7/8] blk-mq: handle requests dispatched from IO scheduler in case of inactive hctx Ming Lei
2020-04-07 9:29 ` [PATCH V6 8/8] block: deactivate hctx when the hctx is actually inactive Ming Lei
2020-04-08 12:40 ` [PATCH V6 0/8] blk-mq: improvement CPU hotplug Daniel Wagner
2020-04-08 13:01 ` John Garry
2020-04-08 13:10 ` Daniel Wagner
2020-04-08 13:29 ` John Garry
2020-04-08 15:14 ` Daniel Wagner
2020-04-08 16:56 ` John Garry
2020-04-08 13:25 ` Ming Lei
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200407092901.314228-7-ming.lei@redhat.com \
--to=ming.lei@redhat.com \
--cc=axboe@kernel.dk \
--cc=bvanassche@acm.org \
--cc=hare@suse.com \
--cc=hch@lst.de \
--cc=john.garry@huawei.com \
--cc=linux-block@vger.kernel.org \
--cc=tglx@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).