From: Jeremy Allison <jallison@ciq.com>
To: jallison@ciq.com, jra@samba.org, tansuresh@google.com,
hch@lst.de, gregkh@linuxfoundation.org, rafael@kernel.org,
bhelgaas@google.com, sagi@grimberg.me
Cc: linux-nvme@lists.infradead.org
Subject: [PATCH 3/5] nvme: Change 'bool shutdown' into an enum shutdown_type.
Date: Wed, 3 Jan 2024 13:04:03 -0800
Message-ID: <20240103210405.3593499-4-jallison@ciq.com>
In-Reply-To: <20240103210405.3593499-1-jallison@ciq.com>
Convert nvme_disable_ctrl() and nvme_dev_disable() inside
drivers/nvme/host/pci.c to use the new enum shutdown_type, with the
following mapping:

    bool shutdown == false  ->  NVME_DISABLE_RESET
    bool shutdown == true   ->  NVME_DISABLE_SHUTDOWN_SYNC

This will make it easier to add a third value later:

    NVME_DISABLE_SHUTDOWN_ASYNC

As nvme_disable_ctrl() is used outside of drivers/nvme/host/pci.c,
convert its callers in the other drivers to the new convention too.
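
For reference, a minimal sketch of the enum and of how a converted call
site reads (the authoritative definition is in the drivers/nvme/host/nvme.h
hunk below; the call site shown here is illustrative only):

    enum shutdown_type {
        NVME_DISABLE_RESET = 0,         /* was: bool shutdown == false */
        NVME_DISABLE_SHUTDOWN_SYNC = 1  /* was: bool shutdown == true  */
    };

    /* A caller that previously passed 'true' for a clean shutdown now does: */
    nvme_disable_ctrl(&dev->ctrl, NVME_DISABLE_SHUTDOWN_SYNC);
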
Signed-off-by: Jeremy Allison <jallison@ciq.com>
---
 drivers/nvme/host/apple.c  |  4 ++--
 drivers/nvme/host/core.c   |  6 +++---
 drivers/nvme/host/nvme.h   |  7 +++++-
 drivers/nvme/host/pci.c    | 44 +++++++++++++++++++-------------------
 drivers/nvme/host/rdma.c   |  3 ++-
 drivers/nvme/host/tcp.c    |  3 ++-
 drivers/nvme/target/loop.c |  2 +-
 7 files changed, 38 insertions(+), 31 deletions(-)
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 596bb11eeba5..764639ede41d 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -844,8 +844,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
	 * NVMe disabled state, after a clean shutdown).
	 */
	if (shutdown)
-		nvme_disable_ctrl(&anv->ctrl, shutdown);
-	nvme_disable_ctrl(&anv->ctrl, false);
+		nvme_disable_ctrl(&anv->ctrl, NVME_DISABLE_SHUTDOWN_SYNC);
+	nvme_disable_ctrl(&anv->ctrl, NVME_DISABLE_RESET);
 }

 WRITE_ONCE(anv->ioq.enabled, false);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 50818dbcfa1a..e1b2facb7d6a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2219,12 +2219,12 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
return ret;
}
-int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, enum shutdown_type shutdown_type)
{
int ret;
ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
- if (shutdown)
+ if (shutdown_type == NVME_DISABLE_SHUTDOWN_SYNC)
ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
else
ctrl->ctrl_config &= ~NVME_CC_ENABLE;
@@ -2233,7 +2233,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
if (ret)
return ret;
- if (shutdown) {
+ if (shutdown_type == NVME_DISABLE_SHUTDOWN_SYNC) {
return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
NVME_CSTS_SHST_CMPLT,
ctrl->shutdown_timeout, "shutdown");
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6092cc361837..1a748640f2fb 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -187,6 +187,11 @@ enum {
NVME_MPATH_IO_STATS = (1 << 2),
};
+enum shutdown_type {
+ NVME_DISABLE_RESET = 0,
+ NVME_DISABLE_SHUTDOWN_SYNC = 1
+};
+
static inline struct nvme_request *nvme_req(struct request *req)
{
return blk_mq_rq_to_pdu(req);
@@ -749,7 +754,7 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, enum shutdown_type shutdown_type);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f27202680741..367e322dc818 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
struct nvme_dev;
struct nvme_queue;
-static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static void nvme_dev_disable(struct nvme_dev *dev, enum shutdown_type shutdown_type);
static void nvme_delete_io_queues(struct nvme_dev *dev);
static void nvme_update_attrs(struct nvme_dev *dev);
@@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, NVME_DISABLE_SHUTDOWN_SYNC);
return BLK_EH_DONE;
case NVME_CTRL_RESETTING:
return BLK_EH_RESET_TIMER;
@@ -1390,7 +1390,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
return BLK_EH_DONE;
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, NVME_DISABLE_RESET);
if (nvme_try_sched_reset(&dev->ctrl))
nvme_unquiesce_io_queues(&dev->ctrl);
return BLK_EH_DONE;
@@ -1736,7 +1736,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
* commands to the admin queue ... and we don't know what memory that
* might be pointing at!
*/
- result = nvme_disable_ctrl(&dev->ctrl, false);
+ result = nvme_disable_ctrl(&dev->ctrl, NVME_DISABLE_RESET);
if (result < 0)
return result;
@@ -2571,7 +2571,7 @@ static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY);
}
-static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+static void nvme_dev_disable(struct nvme_dev *dev, enum shutdown_type shutdown_type)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
bool dead;
@@ -2586,7 +2586,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* Give the controller a chance to complete all entered requests
* if doing a safe shutdown.
*/
- if (!dead && shutdown)
+ if (!dead && (shutdown_type == NVME_DISABLE_SHUTDOWN_SYNC))
nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
}
@@ -2594,7 +2594,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (!dead && dev->ctrl.queue_count > 0) {
nvme_delete_io_queues(dev);
- nvme_disable_ctrl(&dev->ctrl, shutdown);
+ nvme_disable_ctrl(&dev->ctrl, shutdown_type);
nvme_poll_irqdisable(&dev->queues[0]);
}
nvme_suspend_io_queues(dev);
@@ -2612,7 +2612,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* must flush all entered requests to their failed completion to avoid
* deadlocking blk-mq hot-cpu notifier.
*/
- if (shutdown) {
+ if (shutdown_type == NVME_DISABLE_SHUTDOWN_SYNC) {
nvme_unquiesce_io_queues(&dev->ctrl);
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
nvme_unquiesce_admin_queue(&dev->ctrl);
@@ -2620,11 +2620,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
mutex_unlock(&dev->shutdown_lock);
}
-static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, enum shutdown_type shutdown_type)
{
if (!nvme_wait_reset(&dev->ctrl))
return -EBUSY;
- nvme_dev_disable(dev, shutdown);
+ nvme_dev_disable(dev, shutdown_type);
return 0;
}
@@ -2702,7 +2702,7 @@ static void nvme_reset_work(struct work_struct *work)
* moving on.
*/
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, NVME_DISABLE_RESET);
nvme_sync_queues(&dev->ctrl);
mutex_lock(&dev->shutdown_lock);
@@ -2780,7 +2780,7 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
result);
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, NVME_DISABLE_SHUTDOWN_SYNC);
nvme_sync_queues(&dev->ctrl);
nvme_mark_namespaces_dead(&dev->ctrl);
nvme_unquiesce_io_queues(&dev->ctrl);
@@ -3058,7 +3058,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_disable:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, NVME_DISABLE_SHUTDOWN_SYNC);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_dbbuf_dma_free(dev);
@@ -3084,7 +3084,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
* state as pci_dev device lock is held, making it impossible to race
* with ->remove().
*/
- nvme_disable_prepare_reset(dev, false);
+ nvme_disable_prepare_reset(dev, NVME_DISABLE_RESET);
nvme_sync_queues(&dev->ctrl);
}
@@ -3100,7 +3100,7 @@ static void nvme_shutdown(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_disable_prepare_reset(dev, true);
+ nvme_disable_prepare_reset(dev, NVME_DISABLE_SHUTDOWN_SYNC);
}
/*
@@ -3117,13 +3117,13 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, NVME_DISABLE_SHUTDOWN_SYNC);
}
flush_work(&dev->ctrl.reset_work);
nvme_stop_ctrl(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, NVME_DISABLE_SHUTDOWN_SYNC);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_dbbuf_dma_free(dev);
@@ -3186,7 +3186,7 @@ static int nvme_suspend(struct device *dev)
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
(ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
- return nvme_disable_prepare_reset(ndev, true);
+ return nvme_disable_prepare_reset(ndev, NVME_DISABLE_SHUTDOWN_SYNC);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
@@ -3229,7 +3229,7 @@ static int nvme_suspend(struct device *dev)
* Clearing npss forces a controller reset on resume. The
* correct value will be rediscovered then.
*/
- ret = nvme_disable_prepare_reset(ndev, true);
+ ret = nvme_disable_prepare_reset(ndev, NVME_DISABLE_SHUTDOWN_SYNC);
ctrl->npss = 0;
}
unfreeze:
@@ -3241,7 +3241,7 @@ static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
- return nvme_disable_prepare_reset(ndev, true);
+ return nvme_disable_prepare_reset(ndev, NVME_DISABLE_SHUTDOWN_SYNC);
}
static int nvme_simple_resume(struct device *dev)
@@ -3279,10 +3279,10 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
dev_warn(dev->ctrl.device,
"frozen state error detected, reset controller\n");
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
- nvme_dev_disable(dev, true);
+ nvme_dev_disable(dev, NVME_DISABLE_SHUTDOWN_SYNC);
return PCI_ERS_RESULT_DISCONNECT;
}
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, NVME_DISABLE_RESET);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
dev_warn(dev->ctrl.device,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index bc90ec3c51b0..b969ab23a55b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2136,7 +2136,8 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
nvme_rdma_teardown_io_queues(ctrl, shutdown);
nvme_quiesce_admin_queue(&ctrl->ctrl);
- nvme_disable_ctrl(&ctrl->ctrl, shutdown);
+ nvme_disable_ctrl(&ctrl->ctrl, shutdown ?
+ NVME_DISABLE_SHUTDOWN_SYNC : NVME_DISABLE_RESET);
nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 5056bcae2f39..de5937f786b8 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2291,7 +2291,8 @@ static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
nvme_tcp_teardown_io_queues(ctrl, shutdown);
nvme_quiesce_admin_queue(ctrl);
- nvme_disable_ctrl(ctrl, shutdown);
+ nvme_disable_ctrl(ctrl, shutdown ?
+ NVME_DISABLE_SHUTDOWN_SYNC : NVME_DISABLE_RESET);
nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9cb434c58075..6cb6e7c6bdd1 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -401,7 +401,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
nvme_quiesce_admin_queue(&ctrl->ctrl);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
- nvme_disable_ctrl(&ctrl->ctrl, true);
+ nvme_disable_ctrl(&ctrl->ctrl, NVME_DISABLE_SHUTDOWN_SYNC);
nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
--
2.39.3