From: Chao Leng <lengchao@huawei.com>
To: <linux-nvme@lists.infradead.org>, <linux-block@vger.kernel.org>
Cc: <hch@lst.de>, <sagi@grimberg.me>, <kbusch@kernel.org>,
<lengchao@huawei.com>, <axboe@kernel.dk>, <ming.lei@redhat.com>,
<paulmck@kernel.org>
Subject: [PATCH v3 2/2] nvme: use blk_mq_[un]quiesce_tagset
Date: Thu, 20 Oct 2022 11:53:48 +0800 [thread overview]
Message-ID: <20221020035348.10163-3-lengchao@huawei.com> (raw)
In-Reply-To: <20221020035348.10163-1-lengchao@huawei.com>
All controller namespaces share the same tagset, so we can use this
interface, which performs the optimal operation for parallel quiesce
based on the tagset type (e.g. blocking tagsets and non-blocking
tagsets).
The nvme connect_q should not be quiesced when quiescing the tagset, so
set the QUEUE_FLAG_SKIP_TAGSET_QUIESCE flag on it when initializing
connect_q.
Currently we use NVME_NS_STOPPED to ensure pairing quiescing and
unquiescing. If we use blk_mq_[un]quiesce_tagset, NVME_NS_STOPPED will
be invalidated, so introduce NVME_CTRL_STOPPED to replace
NVME_NS_STOPPED.
In addition, we never really quiesce a single namespace. It is a better
choice to move the flag from ns to ctrl.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Chao Leng <lengchao@huawei.com>
---
drivers/nvme/host/core.c | 57 +++++++++++++++++++-----------------------------
drivers/nvme/host/nvme.h | 3 ++-
2 files changed, 25 insertions(+), 35 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 059737c1a2c1..c7727d1f228e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4890,6 +4890,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
ret = PTR_ERR(ctrl->connect_q);
goto out_free_tag_set;
}
+ blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, ctrl->connect_q);
}
ctrl->tagset = set;
@@ -5013,6 +5014,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
+ mutex_init(&ctrl->queue_state_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
xa_init(&ctrl->cels);
init_rwsem(&ctrl->namespaces_rwsem);
@@ -5089,36 +5091,21 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
-static void nvme_start_ns_queue(struct nvme_ns *ns)
-{
- if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
- blk_mq_unquiesce_queue(ns->queue);
-}
-
-static void nvme_stop_ns_queue(struct nvme_ns *ns)
-{
- if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
- blk_mq_quiesce_queue(ns->queue);
- else
- blk_mq_wait_quiesce_done(ns->queue);
-}
-
/*
* Prepare a queue for teardown.
*
- * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
- * the capacity to 0 after that to avoid blocking dispatchers that may be
- * holding bd_butex. This will end buffered writers dirtying pages that can't
- * be synced.
+ * The caller should unquiesce the queue to avoid blocking dispatch.
*/
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
+ /*
+ * Mark the disk dead to prevent new opens, and set the capacity to 0
+ * to end buffered writers dirtying pages that can't be synced.
+ */
blk_mark_disk_dead(ns->disk);
- nvme_start_ns_queue(ns);
-
set_capacity_and_notify(ns->disk, 0);
}
@@ -5134,15 +5121,17 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
struct nvme_ns *ns;
down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ nvme_set_queue_dying(ns);
+
+ up_read(&ctrl->namespaces_rwsem);
/* Forcibly unquiesce queues to avoid blocking dispatch */
if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
nvme_start_admin_queue(ctrl);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- nvme_set_queue_dying(ns);
-
- up_read(&ctrl->namespaces_rwsem);
+ if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
+ nvme_start_queues(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
@@ -5196,23 +5185,23 @@ EXPORT_SYMBOL_GPL(nvme_start_freeze);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns;
+ mutex_lock(&ctrl->queue_state_lock);
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- nvme_stop_ns_queue(ns);
- up_read(&ctrl->namespaces_rwsem);
+ if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
+ blk_mq_quiesce_tagset(ctrl->tagset);
+
+ mutex_unlock(&ctrl->queue_state_lock);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns;
+ mutex_lock(&ctrl->queue_state_lock);
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- nvme_start_ns_queue(ns);
- up_read(&ctrl->namespaces_rwsem);
+ if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
+ blk_mq_unquiesce_tagset(ctrl->tagset);
+
+ mutex_unlock(&ctrl->queue_state_lock);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a29877217ee6..23be055fb425 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -237,6 +237,7 @@ enum nvme_ctrl_flags {
NVME_CTRL_FAILFAST_EXPIRED = 0,
NVME_CTRL_ADMIN_Q_STOPPED = 1,
NVME_CTRL_STARTED_ONCE = 2,
+ NVME_CTRL_STOPPED = 3,
};
struct nvme_ctrl {
@@ -245,6 +246,7 @@ struct nvme_ctrl {
bool identified;
spinlock_t lock;
struct mutex scan_lock;
+ struct mutex queue_state_lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
struct request_queue *connect_q;
@@ -487,7 +489,6 @@ struct nvme_ns {
#define NVME_NS_ANA_PENDING 2
#define NVME_NS_FORCE_RO 3
#define NVME_NS_READY 4
-#define NVME_NS_STOPPED 5
struct cdev cdev;
struct device cdev_device;
--
2.16.4
next prev parent reply other threads:[~2022-10-20 3:54 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-10-20 3:53 [PATCH v2 0/2] improve nvme quiesce time for large amount of namespaces Chao Leng
2022-10-20 3:53 ` [PATCH v3 1/2] blk-mq: add tagset quiesce interface Chao Leng
2022-10-20 16:11 ` Paul E. McKenney
2022-10-20 3:53 ` Chao Leng [this message]
2022-10-20 6:11 ` [PATCH v3 2/2] nvme: use blk_mq_[un]quiesce_tagset Sagi Grimberg
2022-10-20 9:47 ` Chao Leng
2022-10-20 6:34 ` [PATCH v2 0/2] improve nvme quiesce time for large amount of namespaces Christoph Hellwig
2022-10-20 9:36 ` Chao Leng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221020035348.10163-3-lengchao@huawei.com \
--to=lengchao@huawei.com \
--cc=axboe@kernel.dk \
--cc=hch@lst.de \
--cc=kbusch@kernel.org \
--cc=linux-block@vger.kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=ming.lei@redhat.com \
--cc=paulmck@kernel.org \
--cc=sagi@grimberg.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox