From: Ming Lei <ming.lei@redhat.com>
To: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org, Ming Lei <ming.lei@redhat.com>,
John Garry <john.garry@huawei.com>,
Bart Van Assche <bvanassche@acm.org>,
Hannes Reinecke <hare@suse.com>, Christoph Hellwig <hch@lst.de>,
Thomas Gleixner <tglx@linutronix.de>
Subject: [PATCH V11 07/12] blk-mq: stop to handle IO and drain IO before hctx becomes inactive
Date: Wed, 13 May 2020 11:47:58 +0800
Message-ID: <20200513034803.1844579-8-ming.lei@redhat.com>
In-Reply-To: <20200513034803.1844579-1-ming.lei@redhat.com>

Before a CPU goes offline, check whether it is the last online CPU of
the hctx. If so, mark this hctx as inactive and wait for all in-flight
IOs originated from this hctx to complete. Meanwhile, check in
blk_mq_get_driver_tag() whether this hctx has become inactive; if so,
release the allocated tag.

This guarantees that there is no in-flight IO when the managed IRQ line
is shut down after all CPUs mapped to that line have gone offline.
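
To make the pairing explicit: the scheme is a store-buffering protocol
between the CPU-offline path and the tag-allocation path. Below is a
minimal, self-contained C11 model of the two sides (an illustrative
sketch only, not blk-mq code; the kernel implementation uses
set_bit()/test_bit() with smp_mb()/smp_mb__after_atomic() rather than
C11 atomics, and the names here are stand-ins):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool inactive;   /* models BLK_MQ_S_INACTIVE in hctx->state */
    static atomic_int tag = -1;    /* models rq->tag; -1 means unassigned */

    /* CPU-offline side, cf. blk_mq_hctx_notify_offline(): */
    static void offline_side(void)
    {
            atomic_store(&inactive, true);
            atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
            while (atomic_load(&tag) >= 0)
                    ;   /* drain; the real code sleeps 5ms between checks */
    }

    /* Tag-allocation side, cf. blk_mq_get_driver_tag() after migration: */
    static bool tag_side(int t)
    {
            atomic_store(&tag, t);
            atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */
            if (atomic_load(&inactive)) {
                    atomic_store(&tag, -1);   /* blk_mq_put_driver_tag() */
                    return false;
            }
            return true;
    }

With both fences in place, at least one side must observe the other's
store: either the drain loop sees the assigned tag and keeps waiting,
or the allocation side sees the INACTIVE flag and puts the tag back.
No execution lets both sides miss each other's store.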
Cc: John Garry <john.garry@huawei.com>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
block/blk-mq-debugfs.c |   1 +
block/blk-mq.c         | 117 +++++++++++++++++++++++++++++++++++++----
include/linux/blk-mq.h |   3 ++
3 files changed, 110 insertions(+), 11 deletions(-)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index ddec58743e88..dc66cb689d2f 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -213,6 +213,7 @@ static const char *const hctx_state_name[] = {
HCTX_STATE_NAME(STOPPED),
HCTX_STATE_NAME(TAG_ACTIVE),
HCTX_STATE_NAME(SCHED_RESTART),
+ HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 25d2cbe9c716..171bbf2fbc56 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1038,11 +1038,36 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
return true;
}
-static bool blk_mq_get_driver_tag(struct request *rq)
+static bool blk_mq_get_driver_tag(struct request *rq, bool direct_issue)
{
if (rq->tag != -1)
return true;
- return __blk_mq_get_driver_tag(rq);
+
+ if (!__blk_mq_get_driver_tag(rq))
+ return false;
+ /*
+ * In case the direct-issue process has been migrated to a CPU that
+ * may not belong to this hctx, add a full memory barrier to order
+ * the driver tag assignment against the check of BLK_MQ_S_INACTIVE.
+ * Otherwise barrier() is enough, because setting BLK_MQ_S_INACTIVE
+ * and assigning the driver tag run on the same CPU, given that
+ * BLK_MQ_S_INACTIVE is only set while the last CPU of this hctx is
+ * going offline.
+ *
+ * Process migration might happen after the check on the current
+ * processor id; migration itself implies smp_mb(), so that case is
+ * already ordered.
+ */
+ if (unlikely(direct_issue && rq->mq_ctx->cpu != raw_smp_processor_id()))
+ smp_mb();
+ else
+ barrier();
+
+ if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &rq->mq_hctx->state))) {
+ blk_mq_put_driver_tag(rq);
+ return false;
+ }
+ return true;
}
static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
@@ -1091,7 +1116,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
* Don't clear RESTART here, someone else could have set it.
* At most this will cost an extra queue run.
*/
- return blk_mq_get_driver_tag(rq);
+ return blk_mq_get_driver_tag(rq, false);
}
wait = &hctx->dispatch_wait;
@@ -1117,7 +1142,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
* allocation failure and adding the hardware queue to the wait
* queue.
*/
- ret = blk_mq_get_driver_tag(rq);
+ ret = blk_mq_get_driver_tag(rq, false);
if (!ret) {
spin_unlock(&hctx->dispatch_wait_lock);
spin_unlock_irq(&wq->lock);
@@ -1232,7 +1257,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
break;
}
- if (!blk_mq_get_driver_tag(rq)) {
+ if (!blk_mq_get_driver_tag(rq, false)) {
/*
* The initial allocation attempt failed, so we need to
* rerun the hardware queue when a tag is freed. The
@@ -1264,7 +1289,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
bd.last = true;
else {
nxt = list_first_entry(list, struct request, queuelist);
- bd.last = !blk_mq_get_driver_tag(nxt);
+ bd.last = !blk_mq_get_driver_tag(nxt, false);
}
ret = q->mq_ops->queue_rq(hctx, &bd);
@@ -1891,7 +1916,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if (!blk_mq_get_dispatch_budget(hctx))
goto insert;
- if (!blk_mq_get_driver_tag(rq)) {
+ if (!blk_mq_get_driver_tag(rq, true)) {
blk_mq_put_dispatch_budget(hctx);
goto insert;
}
@@ -2300,13 +2325,80 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
return -ENOMEM;
}
-static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
+struct count_inflight_data {
+ unsigned count;
+ struct blk_mq_hw_ctx *hctx;
+};
+
+static bool blk_mq_count_inflight_rq(struct request *rq, void *data,
+ bool reserved)
{
- return 0;
+ struct count_inflight_data *count_data = data;
+
+ /*
+ * We can't rely on rq's state: it only becomes MQ_RQ_IN_FLIGHT in
+ * blk_mq_start_request(), and by then this rq can no longer be
+ * prevented from being issued.
+ *
+ * So check whether a driver tag is assigned; if so, count this rq
+ * as in-flight.
+ */
+ if (rq->tag >= 0 && rq->mq_hctx == count_data->hctx)
+ count_data->count++;
+
+ return true;
+}
+
+static unsigned blk_mq_tags_inflight_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct count_inflight_data count_data = {
+ .hctx = hctx,
+ };
+
+ blk_mq_all_tag_iter(hctx->tags, blk_mq_count_inflight_rq, &count_data);
+ return count_data.count;
+}
+
+static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
+ struct blk_mq_hw_ctx *hctx)
+{
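+ /*
+ * True iff @cpu is the only online CPU in hctx->cpumask: the first
+ * online CPU of the mask is @cpu, and no online CPU follows it.
+ */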
+ if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
+ return false;
+ if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
+ return false;
+ return true;
}
static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
+ struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
+ struct blk_mq_hw_ctx, cpuhp_online);
+
+ if (!cpumask_test_cpu(cpu, hctx->cpumask))
+ return 0;
+
+ if (!blk_mq_last_cpu_in_hctx(cpu, hctx))
+ return 0;
+
+ /*
+ * Order setting BLK_MQ_S_INACTIVE against checking rq->tag and
+ * rqs[tag] in blk_mq_tags_inflight_rqs(). It pairs with the smp_mb()
+ * in blk_mq_get_driver_tag().
+ */
+ set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
+ smp_mb__after_atomic();
+ while (blk_mq_tags_inflight_rqs(hctx))
+ msleep(5);
+ return 0;
+}
+
+static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
+ struct blk_mq_hw_ctx, cpuhp_online);
+
+ if (cpumask_test_cpu(cpu, hctx->cpumask))
+ clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
return 0;
}
@@ -2317,12 +2409,15 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
*/
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
+ struct blk_mq_hw_ctx, cpuhp_dead);
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
enum hctx_type type;
- hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
+ if (!cpumask_test_cpu(cpu, hctx->cpumask))
+ return 0;
+
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
type = hctx->type;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ddd2cb6ed21c..c2ea0a6e5b56 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -404,6 +404,9 @@ enum {
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
+ /* hw queue is inactive after all its CPUs become offline */
+ BLK_MQ_S_INACTIVE = 3,
+
BLK_MQ_MAX_DEPTH = 10240,
BLK_MQ_CPU_WORK_BATCH = 8,
--
2.25.2
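
For context on when the two notifier callbacks above are invoked: they
hang off the per-hctx cpuhp_online hlist node through the multi-instance
CPU hotplug API; the registration itself is added earlier in this series
(patch 06/12), not here. A sketch of that wiring, assuming the standard
cpuhp interface:

    #include <linux/cpuhotplug.h>

    /*
     * Once at init: register the pair. The teardown (offline) callback
     * runs before a CPU goes down; the startup (online) callback runs
     * once it is back up.
     */
    ret = cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
                                  blk_mq_hctx_notify_online,
                                  blk_mq_hctx_notify_offline);

    /*
     * Per hardware queue: attach this hctx's hlist node so the callbacks
     * receive it, without invoking them for CPUs that are already online.
     */
    ret = cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
                                           &hctx->cpuhp_online);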
Thread overview: 32+ messages
2020-05-13 3:47 [PATCH V11 00/12] blk-mq: improvement CPU hotplug Ming Lei
2020-05-13 3:47 ` [PATCH V11 01/12] block: clone nr_integrity_segments and write_hint in blk_rq_prep_clone Ming Lei
2020-05-13 3:47 ` [PATCH V11 02/12] block: add helper for copying request Ming Lei
2020-05-13 3:47 ` [PATCH V11 03/12] blk-mq: mark blk_mq_get_driver_tag as static Ming Lei
2020-05-13 3:47 ` [PATCH V11 04/12] blk-mq: assign rq->tag in blk_mq_get_driver_tag Ming Lei
2020-05-13 3:47 ` [PATCH V11 05/12] blk-mq: add blk_mq_all_tag_iter Ming Lei
2020-05-13 11:56 ` Christoph Hellwig
2020-05-13 3:47 ` [PATCH V11 06/12] blk-mq: prepare for draining IO when hctx's all CPUs are offline Ming Lei
2020-05-13 6:35 ` Hannes Reinecke
2020-05-13 11:58 ` Christoph Hellwig
2020-05-14 0:33 ` Ming Lei
2020-05-13 3:47 ` Ming Lei [this message]
2020-05-13 11:59 ` [PATCH V11 07/12] blk-mq: stop to handle IO and drain IO before hctx becomes inactive Christoph Hellwig
2020-05-14 0:36 ` Ming Lei
2020-05-14 1:12 ` Bart Van Assche
2020-05-14 3:10 ` Ming Lei
2020-05-13 3:47 ` [PATCH V11 08/12] block: add blk_end_flush_machinery Ming Lei
2020-05-13 12:00 ` Christoph Hellwig
2020-05-13 3:48 ` [PATCH V11 09/12] blk-mq: add blk_mq_hctx_handle_dead_cpu for handling cpu dead Ming Lei
2020-05-13 12:06 ` Christoph Hellwig
2020-05-13 3:48 ` [PATCH V11 10/12] block: add request allocation flag of BLK_MQ_REQ_FORCE Ming Lei
2020-05-13 10:34 ` [PATCH V12 " Ming Lei
2020-05-13 3:48 ` [PATCH V11 11/12] blk-mq: re-submit IO in case that hctx is inactive Ming Lei
2020-05-13 9:21 ` John Garry
2020-05-13 12:21 ` Christoph Hellwig
2020-05-13 15:03 ` Bart Van Assche
2020-05-14 0:45 ` Ming Lei
2020-05-14 0:40 ` Ming Lei
2020-05-13 3:48 ` [PATCH V11 12/12] block: deactivate hctx when the hctx is actually inactive Ming Lei
2020-05-13 7:34 ` [PATCH V11 00/12] blk-mq: improvement CPU hotplug John Garry
2020-05-13 10:37 ` Ming Lei
2020-05-13 11:33 ` John Garry