From: Max Gurtovoy <mgurtovoy@nvidia.com>
To: <linux-nvme@lists.infradead.org>, <hch@lst.de>,
<kbusch@kernel.org>, <sagi@grimberg.me>
Cc: <chaitanyak@nvidia.com>, <israelr@nvidia.com>, <oren@nvidia.com>,
<jsmart2021@gmail.com>, Max Gurtovoy <mgurtovoy@nvidia.com>
Subject: [PATCH 7/7] nvme-fabrics: add nvmf_init_ctrl/nvmf_teardown_ctrl API
Date: Mon, 18 Oct 2021 16:40:20 +0300 [thread overview]
Message-ID: <20211018134020.33838-8-mgurtovoy@nvidia.com> (raw)
In-Reply-To: <20211018134020.33838-1-mgurtovoy@nvidia.com>
Centralize the initialization and teardown of fabrics specific settings.
For now, only used by RDMA and TCP fabric transports.
Also, convert the reconnect_work and error_recovery_work to be static
functions since they are not used outside the fabrics driver anymore.
Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
drivers/nvme/host/fabrics.c | 20 ++++++++++++++++----
drivers/nvme/host/fabrics.h | 4 ++--
drivers/nvme/host/rdma.c | 7 ++-----
drivers/nvme/host/tcp.c | 7 ++-----
4 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 7f76b27ce1f2..4a16e5f85d24 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -493,7 +493,7 @@ void nvmf_reconnect_or_remove(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvmf_reconnect_or_remove);
-void nvmf_error_recovery_work(struct work_struct *work)
+static void nvmf_error_recovery_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work,
struct nvme_ctrl, err_work);
@@ -514,7 +514,6 @@ void nvmf_error_recovery_work(struct work_struct *work)
nvmf_reconnect_or_remove(ctrl);
}
-EXPORT_SYMBOL_GPL(nvmf_error_recovery_work);
void nvmf_error_recovery(struct nvme_ctrl *ctrl)
{
@@ -526,7 +525,7 @@ void nvmf_error_recovery(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvmf_error_recovery);
-void nvmf_reconnect_ctrl_work(struct work_struct *work)
+static void nvmf_reconnect_ctrl_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvme_ctrl, connect_work);
@@ -548,7 +547,20 @@ void nvmf_reconnect_ctrl_work(struct work_struct *work)
ctrl->nr_reconnects);
nvmf_reconnect_or_remove(ctrl);
}
-EXPORT_SYMBOL_GPL(nvmf_reconnect_ctrl_work);
+
+void nvmf_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ INIT_DELAYED_WORK(&ctrl->connect_work, nvmf_reconnect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvmf_error_recovery_work);
+}
+EXPORT_SYMBOL_GPL(nvmf_init_ctrl);
+
+void nvmf_teardown_ctrl(struct nvme_ctrl *ctrl)
+{
+ cancel_work_sync(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->connect_work);
+}
+EXPORT_SYMBOL_GPL(nvmf_teardown_ctrl);
/**
* nvmf_register_transport() - NVMe Fabrics Library registration function.
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 49c98b69647f..08b290c2e01a 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -190,8 +190,8 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
void nvmf_reconnect_or_remove(struct nvme_ctrl *ctrl);
void nvmf_error_recovery(struct nvme_ctrl *ctrl);
-void nvmf_error_recovery_work(struct work_struct *work);
-void nvmf_reconnect_ctrl_work(struct work_struct *work);
+void nvmf_init_ctrl(struct nvme_ctrl *ctrl);
+void nvmf_teardown_ctrl(struct nvme_ctrl *ctrl);
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 7fb2f434fe0d..aa3e142047eb 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2169,9 +2169,7 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
- cancel_work_sync(&ctrl->ctrl.err_work);
- cancel_delayed_work_sync(&ctrl->ctrl.connect_work);
-
+ nvmf_teardown_ctrl(&ctrl->ctrl);
nvme_rdma_teardown_io_queues(ctrl, shutdown);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
if (shutdown)
@@ -2302,8 +2300,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_free_ctrl;
}
- INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
- INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
+ nvmf_init_ctrl(&ctrl->ctrl);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c0e5bb3949b3..26c2b181edb9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2049,9 +2049,7 @@ static int _nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl)
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
- cancel_work_sync(&ctrl->err_work);
- cancel_delayed_work_sync(&ctrl->connect_work);
-
+ nvmf_teardown_ctrl(ctrl);
nvme_tcp_teardown_io_queues(ctrl, shutdown);
blk_mq_quiesce_queue(ctrl->admin_q);
if (shutdown)
@@ -2453,8 +2451,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
- INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
- INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
+ nvmf_init_ctrl(&ctrl->ctrl);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
--
2.18.1
next prev parent reply other threads:[~2021-10-18 13:43 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-10-18 13:40 [PATCH v1 0/7] Centrelize common fabrics code to core drivers Max Gurtovoy
2021-10-18 13:40 ` [PATCH 1/7] nvme: add connect_work attribute to nvme ctrl Max Gurtovoy
2021-10-19 12:32 ` Sagi Grimberg
2021-10-19 13:20 ` Hannes Reinecke
2021-10-20 13:34 ` Himanshu Madhani
2021-10-18 13:40 ` [PATCH 2/7] nvme-fabrics: introduce nvmf_reconnect_or_remove API Max Gurtovoy
2021-10-19 6:26 ` Chaitanya Kulkarni
2021-10-19 12:36 ` Sagi Grimberg
2021-10-19 12:58 ` Max Gurtovoy
2021-10-19 13:21 ` Hannes Reinecke
2021-10-20 13:34 ` Himanshu Madhani
2021-10-18 13:40 ` [PATCH 3/7] nvme: add err_work attribute to nvme ctrl Max Gurtovoy
2021-10-19 12:36 ` Sagi Grimberg
2021-10-20 13:34 ` Himanshu Madhani
2021-10-18 13:40 ` [PATCH 4/7] nvme-fabrics: introduce nvmf_error_recovery API Max Gurtovoy
2021-10-19 13:27 ` Hannes Reinecke
2021-10-20 13:34 ` Himanshu Madhani
2021-10-18 13:40 ` [PATCH 5/7] nvme/nvme-fabrics: introduce nvmf_error_recovery_work API Max Gurtovoy
2021-10-19 6:29 ` Chaitanya Kulkarni
2021-10-19 12:43 ` Sagi Grimberg
2021-10-19 13:17 ` Max Gurtovoy
2021-10-19 13:34 ` Hannes Reinecke
2021-10-18 13:40 ` [PATCH 6/7] nvme/nvme-fabrics: introduce nvmf_reconnect_ctrl_work API Max Gurtovoy
2021-10-19 6:29 ` Chaitanya Kulkarni
2021-10-19 12:44 ` Sagi Grimberg
2021-10-19 13:18 ` Max Gurtovoy
2021-10-19 13:41 ` Hannes Reinecke
2021-10-18 13:40 ` Max Gurtovoy [this message]
2021-10-19 12:46 ` [PATCH 7/7] nvme-fabrics: add nvmf_init_ctrl/nvmf_teardown_ctrl API Sagi Grimberg
2021-10-19 13:20 ` Max Gurtovoy
2021-10-18 14:08 ` [PATCH v1 0/7] Centrelize common fabrics code to core drivers James Smart
2021-10-19 5:36 ` Christoph Hellwig
2021-10-19 6:24 ` Chaitanya Kulkarni
2021-10-19 12:32 ` Sagi Grimberg
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20211018134020.33838-8-mgurtovoy@nvidia.com \
--to=mgurtovoy@nvidia.com \
--cc=chaitanyak@nvidia.com \
--cc=hch@lst.de \
--cc=israelr@nvidia.com \
--cc=jsmart2021@gmail.com \
--cc=kbusch@kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=oren@nvidia.com \
--cc=sagi@grimberg.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox