* [PATCH 3/3] nvme: Add async shutdown support
@ 2022-03-24 21:42 Tanjore Suresh
2022-03-25 5:30 ` Christoph Hellwig
0 siblings, 1 reply; 6+ messages in thread
From: Tanjore Suresh @ 2022-03-24 21:42 UTC (permalink / raw)
To: Christoph Hellwig, Sagi Grimberg; +Cc: linux-nvme, linux-kernel, Tanjore Suresh
This works with the asynchronous shutdown mechanism set up for PCI
drivers and provides both pre and post shutdown
routines at the pci_driver structure level.
The shutdown_pre routine starts the shutdown and does not wait for the
shutdown to complete. The shutdown_post routine waits for the shutdown
to complete on the individual controllers that this driver instance
controls. This mechanism speeds up shutdown on a
system which hosts many controllers.
Signed-off-by: Tanjore Suresh <tansuresh@google.com>
---
drivers/nvme/host/core.c | 28 ++++++++++----
drivers/nvme/host/nvme.h | 8 ++++
drivers/nvme/host/pci.c | 80 +++++++++++++++++++++++++---------------
3 files changed, 80 insertions(+), 36 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cd6eac8e3dd6..3ada8f5163eb 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2210,16 +2210,30 @@ EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
- unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
- u32 csts;
int ret;
+ ret = nvme_shutdown_ctrl_start(ctrl);
+ if (ret)
+ return ret;
+ return nvme_wait_for_shutdown_cmpl(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
+
+int nvme_shutdown_ctrl_start(struct nvme_ctrl *ctrl)
+{
+
ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
- ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
- if (ret)
- return ret;
+ return ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+}
+EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl_start);
+
+int nvme_wait_for_shutdown_cmpl(struct nvme_ctrl *ctrl)
+{
+ unsigned long deadline = jiffies + (ctrl->shutdown_timeout * HZ);
+ u32 csts;
+ int ret;
while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
@@ -2228,7 +2242,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
msleep(100);
if (fatal_signal_pending(current))
return -EINTR;
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, deadline)) {
dev_err(ctrl->device,
"Device shutdown incomplete; abort shutdown\n");
return -ENODEV;
@@ -2237,7 +2251,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
+EXPORT_SYMBOL_GPL(nvme_wait_for_shutdown_cmpl);
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1ea908d43e17..9491bda2e38a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -170,6 +170,12 @@ enum {
NVME_REQ_USERCMD = (1 << 1),
};
+enum shutdown_type {
+ DO_NOT_SHUTDOWN = 0,
+ SHUTDOWN_TYPE_SYNC = 1,
+ SHUTDOWN_TYPE_ASYNC = 2,
+};
+
static inline struct nvme_request *nvme_req(struct request *req)
{
return blk_mq_rq_to_pdu(req);
@@ -671,6 +677,8 @@ bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
+int nvme_shutdown_ctrl_start(struct nvme_ctrl *ctrl);
+int nvme_wait_for_shutdown_cmpl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2e98ac3f3ad6..dc72fe7d8994 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -107,7 +107,7 @@ MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
struct nvme_dev;
struct nvme_queue;
-static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static void nvme_dev_disable(struct nvme_dev *dev, int shutdown_type);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
/*
@@ -1357,7 +1357,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
*/
if (nvme_should_reset(dev, csts)) {
nvme_warn_reset(dev, csts);
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
nvme_reset_ctrl(&dev->ctrl);
return BLK_EH_DONE;
}
@@ -1392,7 +1392,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
return BLK_EH_DONE;
case NVME_CTRL_RESETTING:
return BLK_EH_RESET_TIMER;
@@ -1410,7 +1410,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
nvme_reset_ctrl(&dev->ctrl);
return BLK_EH_DONE;
@@ -1503,11 +1503,13 @@ static void nvme_suspend_io_queues(struct nvme_dev *dev)
nvme_suspend_queue(&dev->queues[i]);
}
-static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
+static void nvme_disable_admin_queue(struct nvme_dev *dev, int shutdown_type)
{
struct nvme_queue *nvmeq = &dev->queues[0];
- if (shutdown)
+ if (shutdown_type == SHUTDOWN_TYPE_ASYNC)
+ nvme_shutdown_ctrl_start(&dev->ctrl);
+ else if (shutdown_type == SHUTDOWN_TYPE_SYNC)
nvme_shutdown_ctrl(&dev->ctrl);
else
nvme_disable_ctrl(&dev->ctrl);
@@ -2669,7 +2671,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
}
}
-static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+static void nvme_dev_disable(struct nvme_dev *dev, int shutdown_type)
{
bool dead = true, freeze = false;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -2691,14 +2693,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* Give the controller a chance to complete all entered requests if
* doing a safe shutdown.
*/
- if (!dead && shutdown && freeze)
+ if (!dead && (shutdown_type != DO_NOT_SHUTDOWN) && freeze)
nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
nvme_stop_queues(&dev->ctrl);
if (!dead && dev->ctrl.queue_count > 0) {
nvme_disable_io_queues(dev);
- nvme_disable_admin_queue(dev, shutdown);
+ nvme_disable_admin_queue(dev, shutdown_type);
}
nvme_suspend_io_queues(dev);
nvme_suspend_queue(&dev->queues[0]);
@@ -2710,12 +2712,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
blk_mq_tagset_wait_completed_request(&dev->tagset);
blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
- /*
- * The driver will not be starting up queues again if shutting down so
- * must flush all entered requests to their failed completion to avoid
- * deadlocking blk-mq hot-cpu notifier.
- */
- if (shutdown) {
+ if (shutdown_type == SHUTDOWN_TYPE_SYNC) {
+ /*
+ * The driver will not be starting up queues again if shutting down so
+ * must flush all entered requests to their failed completion to avoid
+ * deadlocking blk-mq hot-cpu notifier.
+ */
nvme_start_queues(&dev->ctrl);
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
nvme_start_admin_queue(&dev->ctrl);
@@ -2723,11 +2725,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
mutex_unlock(&dev->shutdown_lock);
}
-static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, int type)
{
if (!nvme_wait_reset(&dev->ctrl))
return -EBUSY;
- nvme_dev_disable(dev, shutdown);
+ nvme_dev_disable(dev, type);
return 0;
}
@@ -2785,7 +2787,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
*/
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
nvme_get_ctrl(&dev->ctrl);
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
nvme_kill_queues(&dev->ctrl);
if (!queue_work(nvme_wq, &dev->remove_work))
nvme_put_ctrl(&dev->ctrl);
@@ -2810,7 +2812,7 @@ static void nvme_reset_work(struct work_struct *work)
* moving on.
*/
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
nvme_sync_queues(&dev->ctrl);
mutex_lock(&dev->shutdown_lock);
@@ -3151,7 +3153,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
* state as pci_dev device lock is held, making it impossible to race
* with ->remove().
*/
- nvme_disable_prepare_reset(dev, false);
+ nvme_disable_prepare_reset(dev, DO_NOT_SHUTDOWN);
nvme_sync_queues(&dev->ctrl);
}
@@ -3163,13 +3165,32 @@ static void nvme_reset_done(struct pci_dev *pdev)
flush_work(&dev->ctrl.reset_work);
}
-static void nvme_shutdown(struct pci_dev *pdev)
+static void nvme_shutdown_pre(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_disable_prepare_reset(dev, true);
+ nvme_disable_prepare_reset(dev, SHUTDOWN_TYPE_ASYNC);
}
+static void nvme_shutdown_post(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ mutex_lock(&dev->shutdown_lock);
+ nvme_wait_for_shutdown_cmpl(&dev->ctrl);
+
+ /*
+ * The driver will not be starting up queues again if shutting down so
+ * must flush all entered requests to their failed completion to avoid
+ * deadlocking blk-mq hot-cpu notifier.
+ */
+ nvme_start_queues(&dev->ctrl);
+ if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
+ nvme_start_admin_queue(&dev->ctrl);
+
+ mutex_unlock(&dev->shutdown_lock);
+
+}
static void nvme_remove_attrs(struct nvme_dev *dev)
{
if (dev->attrs_added)
@@ -3191,13 +3212,13 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
}
flush_work(&dev->ctrl.reset_work);
nvme_stop_ctrl(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
nvme_remove_attrs(dev);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
@@ -3259,7 +3280,7 @@ static int nvme_suspend(struct device *dev)
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
(ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
- return nvme_disable_prepare_reset(ndev, true);
+ return nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
@@ -3302,7 +3323,7 @@ static int nvme_suspend(struct device *dev)
* Clearing npss forces a controller reset on resume. The
* correct value will be rediscovered then.
*/
- ret = nvme_disable_prepare_reset(ndev, true);
+ ret = nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
ctrl->npss = 0;
}
unfreeze:
@@ -3314,7 +3335,7 @@ static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
- return nvme_disable_prepare_reset(ndev, true);
+ return nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
}
static int nvme_simple_resume(struct device *dev)
@@ -3351,7 +3372,7 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
case pci_channel_io_frozen:
dev_warn(dev->ctrl.device,
"frozen state error detected, reset controller\n");
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
dev_warn(dev->ctrl.device,
@@ -3478,7 +3499,8 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
- .shutdown = nvme_shutdown,
+ .shutdown_pre = nvme_shutdown_pre,
+ .shutdown_post = nvme_shutdown_post,
#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &nvme_dev_pm_ops,
--
2.35.1.1021.g381101b075-goog
^ permalink raw reply related [flat|nested] 6+ messages in thread

* Make NVME shutdown async
@ 2023-12-12 18:09 Jeremy Allison
2023-12-12 18:09 ` [PATCH 3/3] nvme: Add async shutdown support Jeremy Allison
0 siblings, 1 reply; 6+ messages in thread
From: Jeremy Allison @ 2023-12-12 18:09 UTC (permalink / raw)
To: jallison, jra, tansuresh, hch; +Cc: linux-nvme
This is a rebased update and resend of a patchset originally
written by Tanjore Suresh <tansuresh@google.com> to make
shutdown of nvme devices asynchronous. Minor changes
made to use an enum shutdown_type instead of an
integer flag.
Currently the Linux nvme driver shutdown code steps
through each connected drive, sets the NVME_CC_SHN_NORMAL
(normal shutdown) flag and then polls the given drive
waiting for the response NVME_CSTS_SHST_CMPLT flag
(shutdown complete).
Each drive takes around 13 seconds to respond to this.
The customer has 20+ drives on the box, so this time adds
up when the nvme driver is being shut down.
This patchset changes shutdown to proceed in parallel,
so the NVME_CC_SHN_NORMAL (normal shutdown) flag is
sent to all drives first, and then it polls waiting
for the NVME_CSTS_SHST_CMPLT flag (shutdown complete)
for all drives.
In the specific customer case it reduces the NVME
shutdown time from over 300 seconds to around 15
seconds.
Thanks for your consideration,
Jeremy Allison.
CIQ / Samba Team.
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH 3/3] nvme: Add async shutdown support
2023-12-12 18:09 Make NVME shutdown async Jeremy Allison
@ 2023-12-12 18:09 ` Jeremy Allison
0 siblings, 0 replies; 6+ messages in thread
From: Jeremy Allison @ 2023-12-12 18:09 UTC (permalink / raw)
To: jallison, jra, tansuresh, hch; +Cc: linux-nvme
From: Tanjore Suresh <tansuresh@google.com>
This works with the asynchronous shutdown mechanism set up for PCI
drivers and provides both pre and post shutdown
routines at the pci_driver structure level.
The shutdown_pre routine starts the shutdown and does not wait for the
shutdown to complete. The shutdown_post routine waits for the shutdown
to complete on the individual controllers that this driver instance
controls. This mechanism speeds up shutdown on a
system which hosts many controllers.
Signed-off-by: Tanjore Suresh <tansuresh@google.com>
Signed-off-by: Jeremy Allison <jallison@ciq.com>
---
drivers/nvme/host/core.c | 31 +++++++++++++++--
drivers/nvme/host/nvme.h | 9 ++++-
drivers/nvme/host/pci.c | 68 ++++++++++++++++++++++++--------------
drivers/nvme/host/rdma.c | 3 +-
drivers/nvme/host/tcp.c | 2 +-
drivers/nvme/target/loop.c | 2 +-
6 files changed, 84 insertions(+), 31 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 590cd4f097c2..45645af41586 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2201,12 +2201,12 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
return ret;
}
-int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, enum shutdown_type shutdown_type)
{
int ret;
ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
- if (shutdown)
+ if (shutdown_type != DO_NOT_SHUTDOWN)
ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
else
ctrl->ctrl_config &= ~NVME_CC_ENABLE;
@@ -2215,10 +2215,24 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
if (ret)
return ret;
- if (shutdown) {
+ switch (shutdown_type) {
+ case SHUTDOWN_TYPE_ASYNC:
+ /*
+ * nvme_wait_for_shutdown_cmpl() will read the reply for this.
+ */
+ return ret;
+ case SHUTDOWN_TYPE_SYNC:
+ /*
+ * Spin on the read of the control register.
+ */
return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
NVME_CSTS_SHST_CMPLT,
ctrl->shutdown_timeout, "shutdown");
+ case DO_NOT_SHUTDOWN:
+ /*
+ * Doing a reset here. Handle below.
+ */
+ break;
}
if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
msleep(NVME_QUIRK_DELAY_AMOUNT);
@@ -2227,6 +2241,17 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
+int nvme_wait_for_shutdown_cmpl(struct nvme_ctrl *ctrl)
+{
+ ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
+ ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
+
+ return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
+ NVME_CSTS_SHST_CMPLT,
+ ctrl->shutdown_timeout, "shutdown");
+}
+EXPORT_SYMBOL_GPL(nvme_wait_for_shutdown_cmpl);
+
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
unsigned dev_page_min;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 39a90b7cb125..1ce034185000 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -186,6 +186,12 @@ enum {
NVME_MPATH_IO_STATS = (1 << 2),
};
+enum shutdown_type {
+ DO_NOT_SHUTDOWN = 0,
+ SHUTDOWN_TYPE_SYNC = 1,
+ SHUTDOWN_TYPE_ASYNC = 2,
+};
+
static inline struct nvme_request *nvme_req(struct request *req)
{
return blk_mq_rq_to_pdu(req);
@@ -746,7 +752,8 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, enum shutdown_type shutdown_type);
+int nvme_wait_for_shutdown_cmpl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 507bc149046d..5379ce6dd21a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
struct nvme_dev;
struct nvme_queue;
-static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static void nvme_dev_disable(struct nvme_dev *dev, enum shutdown_type shutdown_type);
static void nvme_delete_io_queues(struct nvme_dev *dev);
static void nvme_update_attrs(struct nvme_dev *dev);
@@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
return BLK_EH_DONE;
case NVME_CTRL_RESETTING:
return BLK_EH_RESET_TIMER;
@@ -1390,7 +1390,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
return BLK_EH_DONE;
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
if (nvme_try_sched_reset(&dev->ctrl))
nvme_unquiesce_io_queues(&dev->ctrl);
return BLK_EH_DONE;
@@ -1736,7 +1736,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
* commands to the admin queue ... and we don't know what memory that
* might be pointing at!
*/
- result = nvme_disable_ctrl(&dev->ctrl, false);
+ result = nvme_disable_ctrl(&dev->ctrl, DO_NOT_SHUTDOWN);
if (result < 0)
return result;
@@ -2571,7 +2571,7 @@ static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY);
}
-static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+static void nvme_dev_disable(struct nvme_dev *dev, enum shutdown_type shutdown_type)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
bool dead;
@@ -2586,7 +2586,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* Give the controller a chance to complete all entered requests
* if doing a safe shutdown.
*/
- if (!dead && shutdown)
+ if (!dead && (shutdown_type != DO_NOT_SHUTDOWN))
nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
}
@@ -2594,7 +2594,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (!dead && dev->ctrl.queue_count > 0) {
nvme_delete_io_queues(dev);
- nvme_disable_ctrl(&dev->ctrl, shutdown);
+ nvme_disable_ctrl(&dev->ctrl, shutdown_type);
nvme_poll_irqdisable(&dev->queues[0]);
}
nvme_suspend_io_queues(dev);
@@ -2612,7 +2612,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* must flush all entered requests to their failed completion to avoid
* deadlocking blk-mq hot-cpu notifier.
*/
- if (shutdown) {
+ if (shutdown_type == SHUTDOWN_TYPE_SYNC) {
nvme_unquiesce_io_queues(&dev->ctrl);
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
nvme_unquiesce_admin_queue(&dev->ctrl);
@@ -2620,11 +2620,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
mutex_unlock(&dev->shutdown_lock);
}
-static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, enum shutdown_type shutdown_type)
{
if (!nvme_wait_reset(&dev->ctrl))
return -EBUSY;
- nvme_dev_disable(dev, shutdown);
+ nvme_dev_disable(dev, shutdown_type);
return 0;
}
@@ -2702,7 +2702,7 @@ static void nvme_reset_work(struct work_struct *work)
* moving on.
*/
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
nvme_sync_queues(&dev->ctrl);
mutex_lock(&dev->shutdown_lock);
@@ -2780,7 +2780,7 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
result);
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
nvme_sync_queues(&dev->ctrl);
nvme_mark_namespaces_dead(&dev->ctrl);
nvme_unquiesce_io_queues(&dev->ctrl);
@@ -3058,7 +3058,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_disable:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_dbbuf_dma_free(dev);
@@ -3084,7 +3084,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
* state as pci_dev device lock is held, making it impossible to race
* with ->remove().
*/
- nvme_disable_prepare_reset(dev, false);
+ nvme_disable_prepare_reset(dev, DO_NOT_SHUTDOWN);
nvme_sync_queues(&dev->ctrl);
}
@@ -3096,11 +3096,30 @@ static void nvme_reset_done(struct pci_dev *pdev)
flush_work(&dev->ctrl.reset_work);
}
-static void nvme_shutdown(struct pci_dev *pdev)
+static void nvme_shutdown_pre(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_disable_prepare_reset(dev, true);
+ nvme_disable_prepare_reset(dev, SHUTDOWN_TYPE_ASYNC);
+}
+
+static void nvme_shutdown_post(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ mutex_lock(&dev->shutdown_lock);
+ nvme_wait_for_shutdown_cmpl(&dev->ctrl);
+
+ /*
+ * The driver will not be starting up queues again if shutting down so
+ * must flush all entered requests to their failed completion to avoid
+ * deadlocking blk-mq hot-cpu notifier.
+ */
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
+ nvme_unquiesce_admin_queue(&dev->ctrl);
+
+ mutex_unlock(&dev->shutdown_lock);
}
/*
@@ -3117,13 +3136,13 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
}
flush_work(&dev->ctrl.reset_work);
nvme_stop_ctrl(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_dbbuf_dma_free(dev);
@@ -3186,7 +3205,7 @@ static int nvme_suspend(struct device *dev)
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
(ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
- return nvme_disable_prepare_reset(ndev, true);
+ return nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
@@ -3229,7 +3248,7 @@ static int nvme_suspend(struct device *dev)
* Clearing npss forces a controller reset on resume. The
* correct value will be rediscovered then.
*/
- ret = nvme_disable_prepare_reset(ndev, true);
+ ret = nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
ctrl->npss = 0;
}
unfreeze:
@@ -3241,7 +3260,7 @@ static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
- return nvme_disable_prepare_reset(ndev, true);
+ return nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
}
static int nvme_simple_resume(struct device *dev)
@@ -3279,10 +3298,10 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
dev_warn(dev->ctrl.device,
"frozen state error detected, reset controller\n");
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
return PCI_ERS_RESULT_DISCONNECT;
}
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
dev_warn(dev->ctrl.device,
@@ -3491,7 +3510,8 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
- .shutdown = nvme_shutdown,
+ .shutdown_pre = nvme_shutdown_pre,
+ .shutdown_post = nvme_shutdown_post,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 6d178d555920..5a5bef40ed2a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2136,7 +2136,8 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
nvme_rdma_teardown_io_queues(ctrl, shutdown);
nvme_quiesce_admin_queue(&ctrl->ctrl);
- nvme_disable_ctrl(&ctrl->ctrl, shutdown);
+ nvme_disable_ctrl(&ctrl->ctrl,
+ shutdown ? SHUTDOWN_TYPE_SYNC : DO_NOT_SHUTDOWN);
nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d79811cfa0ce..a1e33ee14f5b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2292,7 +2292,7 @@ static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
nvme_tcp_teardown_io_queues(ctrl, shutdown);
nvme_quiesce_admin_queue(ctrl);
- nvme_disable_ctrl(ctrl, shutdown);
+ nvme_disable_ctrl(ctrl, shutdown ? SHUTDOWN_TYPE_SYNC : DO_NOT_SHUTDOWN);
nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9cb434c58075..9d1221c77061 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -401,7 +401,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
nvme_quiesce_admin_queue(&ctrl->ctrl);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
- nvme_disable_ctrl(&ctrl->ctrl, true);
+ nvme_disable_ctrl(&ctrl->ctrl, SHUTDOWN_TYPE_SYNC);
nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
--
2.39.3
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Make NVME shutdown async - version 2
@ 2023-12-15 0:03 Jeremy Allison
2023-12-15 0:03 ` [PATCH 3/3] nvme: Add async shutdown support Jeremy Allison
0 siblings, 1 reply; 6+ messages in thread
From: Jeremy Allison @ 2023-12-15 0:03 UTC (permalink / raw)
To: jallison, jra, tansuresh, hch; +Cc: linux-nvme, gregkh, rafael, bhelgaas
This is version 2 of a rebased update and resend of a patchset
originally written by Tanjore Suresh <tansuresh@google.com> to
make shutdown of nvme devices asynchronous. Minor changes
made to use an enum shutdown_type instead of an
integer flag.
Changes from version 1:
As requested by Sagi Grimberg <sagi@grimberg.me>, ensure
that shutdown_pre is only called if shutdown_post is
also defined.
-------------------------------------------------------------
Currently the Linux nvme driver shutdown code steps
through each connected drive, sets the NVME_CC_SHN_NORMAL
(normal shutdown) flag and then polls the given drive
waiting for the response NVME_CSTS_SHST_CMPLT flag
(shutdown complete).
Each drive takes around 13 seconds to respond to this.
The customer has 20+ drives on the box, so this time adds
up when the nvme driver is being shut down.
This patchset changes shutdown to proceed in parallel,
so the NVME_CC_SHN_NORMAL (normal shutdown) flag is
sent to all drives first, and then it polls waiting
for the NVME_CSTS_SHST_CMPLT flag (shutdown complete)
for all drives.
In the specific customer case it reduces the NVME
shutdown time from over 300 seconds to around 15
seconds.
-------------------------------------------------------------
Thanks for your consideration,
Jeremy Allison.
CIQ / Samba Team.
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH 3/3] nvme: Add async shutdown support
2023-12-15 0:03 Make NVME shutdown async - version 2 Jeremy Allison
@ 2023-12-15 0:03 ` Jeremy Allison
2023-12-19 5:43 ` Christoph Hellwig
0 siblings, 1 reply; 6+ messages in thread
From: Jeremy Allison @ 2023-12-15 0:03 UTC (permalink / raw)
To: jallison, jra, tansuresh, hch; +Cc: linux-nvme, gregkh, rafael, bhelgaas
From: Tanjore Suresh <tansuresh@google.com>
This works with the asynchronous shutdown mechanism set up for PCI
drivers and provides both pre and post shutdown
routines at the pci_driver structure level.
The shutdown_pre routine starts the shutdown and does not wait for the
shutdown to complete. The shutdown_post routine waits for the shutdown
to complete on the individual controllers that this driver instance
controls. This mechanism speeds up shutdown on a
system which hosts many controllers.
Signed-off-by: Tanjore Suresh <tansuresh@google.com>
Signed-off-by: Jeremy Allison <jallison@ciq.com>
---
drivers/nvme/host/core.c | 31 +++++++++++++++--
drivers/nvme/host/nvme.h | 9 ++++-
drivers/nvme/host/pci.c | 68 ++++++++++++++++++++++++--------------
drivers/nvme/host/rdma.c | 3 +-
drivers/nvme/host/tcp.c | 2 +-
drivers/nvme/target/loop.c | 2 +-
6 files changed, 84 insertions(+), 31 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 590cd4f097c2..45645af41586 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2201,12 +2201,12 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
return ret;
}
-int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, enum shutdown_type shutdown_type)
{
int ret;
ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
- if (shutdown)
+ if (shutdown_type != DO_NOT_SHUTDOWN)
ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
else
ctrl->ctrl_config &= ~NVME_CC_ENABLE;
@@ -2215,10 +2215,24 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
if (ret)
return ret;
- if (shutdown) {
+ switch (shutdown_type) {
+ case SHUTDOWN_TYPE_ASYNC:
+ /*
+ * nvme_wait_for_shutdown_cmpl() will read the reply for this.
+ */
+ return ret;
+ case SHUTDOWN_TYPE_SYNC:
+ /*
+ * Spin on the read of the control register.
+ */
return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
NVME_CSTS_SHST_CMPLT,
ctrl->shutdown_timeout, "shutdown");
+ case DO_NOT_SHUTDOWN:
+ /*
+ * Doing a reset here. Handle below.
+ */
+ break;
}
if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
msleep(NVME_QUIRK_DELAY_AMOUNT);
@@ -2227,6 +2241,17 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
+int nvme_wait_for_shutdown_cmpl(struct nvme_ctrl *ctrl)
+{
+ ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
+ ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
+
+ return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
+ NVME_CSTS_SHST_CMPLT,
+ ctrl->shutdown_timeout, "shutdown");
+}
+EXPORT_SYMBOL_GPL(nvme_wait_for_shutdown_cmpl);
+
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
unsigned dev_page_min;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 39a90b7cb125..1ce034185000 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -186,6 +186,12 @@ enum {
NVME_MPATH_IO_STATS = (1 << 2),
};
+enum shutdown_type {
+ DO_NOT_SHUTDOWN = 0,
+ SHUTDOWN_TYPE_SYNC = 1,
+ SHUTDOWN_TYPE_ASYNC = 2,
+};
+
static inline struct nvme_request *nvme_req(struct request *req)
{
return blk_mq_rq_to_pdu(req);
@@ -746,7 +752,8 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, enum shutdown_type shutdown_type);
+int nvme_wait_for_shutdown_cmpl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 507bc149046d..5379ce6dd21a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
struct nvme_dev;
struct nvme_queue;
-static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static void nvme_dev_disable(struct nvme_dev *dev, enum shutdown_type shutdown_type);
static void nvme_delete_io_queues(struct nvme_dev *dev);
static void nvme_update_attrs(struct nvme_dev *dev);
@@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
return BLK_EH_DONE;
case NVME_CTRL_RESETTING:
return BLK_EH_RESET_TIMER;
@@ -1390,7 +1390,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
return BLK_EH_DONE;
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
if (nvme_try_sched_reset(&dev->ctrl))
nvme_unquiesce_io_queues(&dev->ctrl);
return BLK_EH_DONE;
@@ -1736,7 +1736,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
* commands to the admin queue ... and we don't know what memory that
* might be pointing at!
*/
- result = nvme_disable_ctrl(&dev->ctrl, false);
+ result = nvme_disable_ctrl(&dev->ctrl, DO_NOT_SHUTDOWN);
if (result < 0)
return result;
@@ -2571,7 +2571,7 @@ static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY);
}
-static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+static void nvme_dev_disable(struct nvme_dev *dev, enum shutdown_type shutdown_type)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
bool dead;
@@ -2586,7 +2586,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* Give the controller a chance to complete all entered requests
* if doing a safe shutdown.
*/
- if (!dead && shutdown)
+ if (!dead && (shutdown_type != DO_NOT_SHUTDOWN))
nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
}
@@ -2594,7 +2594,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (!dead && dev->ctrl.queue_count > 0) {
nvme_delete_io_queues(dev);
- nvme_disable_ctrl(&dev->ctrl, shutdown);
+ nvme_disable_ctrl(&dev->ctrl, shutdown_type);
nvme_poll_irqdisable(&dev->queues[0]);
}
nvme_suspend_io_queues(dev);
@@ -2612,7 +2612,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* must flush all entered requests to their failed completion to avoid
* deadlocking blk-mq hot-cpu notifier.
*/
- if (shutdown) {
+ if (shutdown_type == SHUTDOWN_TYPE_SYNC) {
nvme_unquiesce_io_queues(&dev->ctrl);
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
nvme_unquiesce_admin_queue(&dev->ctrl);
@@ -2620,11 +2620,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
mutex_unlock(&dev->shutdown_lock);
}
-static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, enum shutdown_type shutdown_type)
{
if (!nvme_wait_reset(&dev->ctrl))
return -EBUSY;
- nvme_dev_disable(dev, shutdown);
+ nvme_dev_disable(dev, shutdown_type);
return 0;
}
@@ -2702,7 +2702,7 @@ static void nvme_reset_work(struct work_struct *work)
* moving on.
*/
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
nvme_sync_queues(&dev->ctrl);
mutex_lock(&dev->shutdown_lock);
@@ -2780,7 +2780,7 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
result);
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
nvme_sync_queues(&dev->ctrl);
nvme_mark_namespaces_dead(&dev->ctrl);
nvme_unquiesce_io_queues(&dev->ctrl);
@@ -3058,7 +3058,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_disable:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_dbbuf_dma_free(dev);
@@ -3084,7 +3084,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
* state as pci_dev device lock is held, making it impossible to race
* with ->remove().
*/
- nvme_disable_prepare_reset(dev, false);
+ nvme_disable_prepare_reset(dev, DO_NOT_SHUTDOWN);
nvme_sync_queues(&dev->ctrl);
}
@@ -3096,11 +3096,30 @@ static void nvme_reset_done(struct pci_dev *pdev)
flush_work(&dev->ctrl.reset_work);
}
-static void nvme_shutdown(struct pci_dev *pdev)
+static void nvme_shutdown_pre(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_disable_prepare_reset(dev, true);
+ nvme_disable_prepare_reset(dev, SHUTDOWN_TYPE_ASYNC);
+}
+
+static void nvme_shutdown_post(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ mutex_lock(&dev->shutdown_lock);
+ nvme_wait_for_shutdown_cmpl(&dev->ctrl);
+
+ /*
+ * The driver will not be starting up queues again if shutting down so
+ * must flush all entered requests to their failed completion to avoid
+ * deadlocking blk-mq hot-cpu notifier.
+ */
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
+ nvme_unquiesce_admin_queue(&dev->ctrl);
+
+ mutex_unlock(&dev->shutdown_lock);
}
/*
@@ -3117,13 +3136,13 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
}
flush_work(&dev->ctrl.reset_work);
nvme_stop_ctrl(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_dbbuf_dma_free(dev);
@@ -3186,7 +3205,7 @@ static int nvme_suspend(struct device *dev)
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
(ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
- return nvme_disable_prepare_reset(ndev, true);
+ return nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
@@ -3229,7 +3248,7 @@ static int nvme_suspend(struct device *dev)
* Clearing npss forces a controller reset on resume. The
* correct value will be rediscovered then.
*/
- ret = nvme_disable_prepare_reset(ndev, true);
+ ret = nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
ctrl->npss = 0;
}
unfreeze:
@@ -3241,7 +3260,7 @@ static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
- return nvme_disable_prepare_reset(ndev, true);
+ return nvme_disable_prepare_reset(ndev, SHUTDOWN_TYPE_SYNC);
}
static int nvme_simple_resume(struct device *dev)
@@ -3279,10 +3298,10 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
dev_warn(dev->ctrl.device,
"frozen state error detected, reset controller\n");
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, SHUTDOWN_TYPE_SYNC);
return PCI_ERS_RESULT_DISCONNECT;
}
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, DO_NOT_SHUTDOWN);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
dev_warn(dev->ctrl.device,
@@ -3491,7 +3510,8 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
- .shutdown = nvme_shutdown,
+ .shutdown_pre = nvme_shutdown_pre,
+ .shutdown_post = nvme_shutdown_post,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 6d178d555920..5a5bef40ed2a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2136,7 +2136,8 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
nvme_rdma_teardown_io_queues(ctrl, shutdown);
nvme_quiesce_admin_queue(&ctrl->ctrl);
- nvme_disable_ctrl(&ctrl->ctrl, shutdown);
+ nvme_disable_ctrl(&ctrl->ctrl,
+ shutdown ? SHUTDOWN_TYPE_SYNC : DO_NOT_SHUTDOWN);
nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d79811cfa0ce..a1e33ee14f5b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2292,7 +2292,7 @@ static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
nvme_tcp_teardown_io_queues(ctrl, shutdown);
nvme_quiesce_admin_queue(ctrl);
- nvme_disable_ctrl(ctrl, shutdown);
+ nvme_disable_ctrl(ctrl, shutdown ? SHUTDOWN_TYPE_SYNC : DO_NOT_SHUTDOWN);
nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9cb434c58075..9d1221c77061 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -401,7 +401,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
nvme_quiesce_admin_queue(&ctrl->ctrl);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
- nvme_disable_ctrl(&ctrl->ctrl, true);
+ nvme_disable_ctrl(&ctrl->ctrl, SHUTDOWN_TYPE_SYNC);
nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
--
2.39.3
^ permalink raw reply related [flat|nested] 6+ messages in thread* Re: [PATCH 3/3] nvme: Add async shutdown support
2023-12-15 0:03 ` [PATCH 3/3] nvme: Add async shutdown support Jeremy Allison
@ 2023-12-19 5:43 ` Christoph Hellwig
2023-12-19 6:35 ` Jeremy Allison
0 siblings, 1 reply; 6+ messages in thread
From: Christoph Hellwig @ 2023-12-19 5:43 UTC (permalink / raw)
To: Jeremy Allison; +Cc: jra, tansuresh, hch, linux-nvme, gregkh, rafael, bhelgaas
On Thu, Dec 14, 2023 at 04:03:58PM -0800, Jeremy Allison wrote:
> From: Tanjore Suresh <tansuresh@google.com>
>
> This works with the asynchronous shutdown mechanism setup for the PCI
> drivers and participates to provide both pre and post shutdown
> routines at pci_driver structure level.
>
> The shutdown_pre routine starts the shutdown and does not wait for the
> shutdown to complete. The shutdown_post routine waits for the shutdown
> to complete on individual controllers that this driver instance
controls. This mechanism optimizes to speed up the shutdown in a
system which hosts many controllers.
I had a really hard time trying to understand this patch.
Please split switching from the bool shutdown to an enum (with initially
just two values) into a separate patch. And the names really confuse
me. I would have expect something like:
NVME_DISABLE_RESET,
NVME_DISABLE_SHUTDOWN_SYNC,
NVME_DISABLE_SHUTDOWN_ASYNC,
then again mixing two rather different concept (reset vs shutdown)
into a single enum is also not very helpful (but neither would be
two bool arguments). Not really sure what the right thing is, but
as-is it feels pretty obfuscated.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 3/3] nvme: Add async shutdown support
2023-12-19 5:43 ` Christoph Hellwig
@ 2023-12-19 6:35 ` Jeremy Allison
0 siblings, 0 replies; 6+ messages in thread
From: Jeremy Allison @ 2023-12-19 6:35 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Jeremy Allison, tansuresh, linux-nvme, gregkh, rafael, bhelgaas,
jra
On Tue, Dec 19, 2023 at 06:43:49AM +0100, Christoph Hellwig wrote:
>On Thu, Dec 14, 2023 at 04:03:58PM -0800, Jeremy Allison wrote:
>> From: Tanjore Suresh <tansuresh@google.com>
>>
>> This works with the asynchronous shutdown mechanism setup for the PCI
>> drivers and participates to provide both pre and post shutdown
>> routines at pci_driver structure level.
>>
>> The shutdown_pre routine starts the shutdown and does not wait for the
>> shutdown to complete. The shutdown_post routine waits for the shutdown
>> to complete on individual controllers that this driver instance
>> controls. This mechanism optimizes to speed up the shutdown in a
>> system which hosts many controllers.
>
>I had a really hard time trying to understand this patch.
Sorry. Me too when I first read it :-).
>Please split switching from the bool shutdown to an enum (with initially
>just two values) into a separate patch. And the names really confuse
>me. I would have expect something like:
>
> NVME_DISABLE_RESET,
> NVME_DISABLE_SHUTDOWN_SYNC,
> NVME_DISABLE_SHUTDOWN_ASYNC,
Makes sense. Start with
'enum shutdown_type { NVME_DISABLE_RESET, NVME_DISABLE_SHUTDOWN_SYNC}'
and then add NVME_DISABLE_SHUTDOWN_ASYNC when the
shutdown_wait() is added in the subsequent async patch.
>then again mixing two rather different concept (reset vs shutdown)
>into a single enum is also not very helpful (but neither would be
>two bool arguments). Not really sure what the right thing is, but
>as-is it feels pretty obfuscated.
Yeah. I really didn't want to add another bool here as having two
bools side by side as parameters to a function is a recipe for
mistakes. The original code just uses one bool parameter 'shutdown',
to the nvme_disable_ctrl() function which when set means do the
shutdown request, and when clear means reset. So 'shutdown and
reset' are already conflated in the code.
I think that using the names you suggested and splitting the initial
addition of the enum into a separate preparatory patch might make
things a little clearer for people to follow. I'm happy to take your
guidance on this though.
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2023-12-19 6:35 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-03-24 21:42 [PATCH 3/3] nvme: Add async shutdown support Tanjore Suresh
2022-03-25 5:30 ` Christoph Hellwig
-- strict thread matches above, loose matches on Subject: below --
2023-12-12 18:09 Make NVME shutdown async Jeremy Allison
2023-12-12 18:09 ` [PATCH 3/3] nvme: Add async shutdown support Jeremy Allison
2023-12-15 0:03 Make NVME shutdown async - version 2 Jeremy Allison
2023-12-15 0:03 ` [PATCH 3/3] nvme: Add async shutdown support Jeremy Allison
2023-12-19 5:43 ` Christoph Hellwig
2023-12-19 6:35 ` Jeremy Allison
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox