From: Hannes Reinecke <hare@suse.de>
To: Max Gurtovoy <mgurtovoy@nvidia.com>,
linux-nvme@lists.infradead.org, hch@lst.de, kbusch@kernel.org,
sagi@grimberg.me
Cc: chaitanyak@nvidia.com, israelr@nvidia.com, oren@nvidia.com,
jsmart2021@gmail.com
Subject: Re: [PATCH 6/7] nvme/nvme-fabrics: introduce nvmf_reconnect_ctrl_work API
Date: Tue, 19 Oct 2021 15:41:32 +0200 [thread overview]
Message-ID: <bd5b02ac-befb-b75d-0e03-371cd2664bef@suse.de> (raw)
In-Reply-To: <20211018134020.33838-7-mgurtovoy@nvidia.com>
On 10/18/21 3:40 PM, Max Gurtovoy wrote:
> Reconnect work is duplicated in RDMA and TCP transports. Move this logic
> to common code. For that, introduce a new ctrl op to setup a ctrl.
>
> Also update the RDMA/TCP transport drivers to use this API and remove
> the duplicated code.
>
> Reviewed-by: Israel Rukshin <israelr@nvidia.com>
> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
> ---
> drivers/nvme/host/fabrics.c | 24 ++++++++++++++++++++++++
> drivers/nvme/host/fabrics.h | 1 +
> drivers/nvme/host/nvme.h | 1 +
> drivers/nvme/host/rdma.c | 26 ++++----------------------
> drivers/nvme/host/tcp.c | 27 ++++-----------------------
> 5 files changed, 34 insertions(+), 45 deletions(-)
>
> diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
> index 544195369c97..7f76b27ce1f2 100644
> --- a/drivers/nvme/host/fabrics.c
> +++ b/drivers/nvme/host/fabrics.c
> @@ -526,6 +526,30 @@ void nvmf_error_recovery(struct nvme_ctrl *ctrl)
> }
> EXPORT_SYMBOL_GPL(nvmf_error_recovery);
>
> +void nvmf_reconnect_ctrl_work(struct work_struct *work)
> +{
> + struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
> + struct nvme_ctrl, connect_work);
> +
> + ++ctrl->nr_reconnects;
> +
> + if (ctrl->ops->setup_ctrl(ctrl))
> + goto requeue;
> +
> + dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
> + ctrl->nr_reconnects);
> +
> + ctrl->nr_reconnects = 0;
> +
> + return;
> +
> +requeue:
> + dev_info(ctrl->device, "Failed reconnect attempt %d\n",
> + ctrl->nr_reconnects);
> + nvmf_reconnect_or_remove(ctrl);
> +}
> +EXPORT_SYMBOL_GPL(nvmf_reconnect_ctrl_work);
> +
> /**
> * nvmf_register_transport() - NVMe Fabrics Library registration function.
> * @ops: Transport ops instance to be registered to the
> diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
> index 8655eff74ed0..49c98b69647f 100644
> --- a/drivers/nvme/host/fabrics.h
> +++ b/drivers/nvme/host/fabrics.h
> @@ -191,6 +191,7 @@ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
> void nvmf_reconnect_or_remove(struct nvme_ctrl *ctrl);
> void nvmf_error_recovery(struct nvme_ctrl *ctrl);
> void nvmf_error_recovery_work(struct work_struct *work);
> +void nvmf_reconnect_ctrl_work(struct work_struct *work);
> bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
> struct nvmf_ctrl_options *opts);
>
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index 1573edf6e97f..9ae9594998c3 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -497,6 +497,7 @@ struct nvme_ctrl_ops {
> /* Fabrics only */
> void (*teardown_ctrl_io_queues)(struct nvme_ctrl *ctrl);
> void (*teardown_ctrl_admin_queue)(struct nvme_ctrl *ctrl);
> + int (*setup_ctrl)(struct nvme_ctrl *ctrl);
> };
>
> /*
> diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
> index f4e4ebf673d2..7fb2f434fe0d 100644
> --- a/drivers/nvme/host/rdma.c
> +++ b/drivers/nvme/host/rdma.c
> @@ -1151,27 +1151,9 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
> return ret;
> }
>
> -static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
> +static int _nvme_rdma_setup_ctrl(struct nvme_ctrl *ctrl)
> {
> - struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
> - struct nvme_rdma_ctrl, ctrl.connect_work);
> -
> - ++ctrl->ctrl.nr_reconnects;
> -
> - if (nvme_rdma_setup_ctrl(ctrl, false))
> - goto requeue;
> -
> - dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
> - ctrl->ctrl.nr_reconnects);
> -
> - ctrl->ctrl.nr_reconnects = 0;
> -
> - return;
> -
> -requeue:
> - dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
> - ctrl->ctrl.nr_reconnects);
> - nvmf_reconnect_or_remove(&ctrl->ctrl);
> + return nvme_rdma_setup_ctrl(to_rdma_ctrl(ctrl), false);
> }
>
> static void nvme_rdma_end_request(struct nvme_rdma_request *req)
Really? Can't we separate nvme_rdma_setup_ctrl() such that we have two
distinct functions (one for new == true, and one for new == false)?
Or, alternatively, set up the tagset in nvme_rdma_init_ctrl() and
kill the 'new' argument altogether?
> @@ -2242,6 +2224,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
> .get_address = nvmf_get_address,
> .teardown_ctrl_io_queues = _nvme_rdma_teardown_io_queues,
> .teardown_ctrl_admin_queue = _nvme_rdma_teardown_admin_queue,
> + .setup_ctrl = _nvme_rdma_setup_ctrl,
> };
>
> /*
> @@ -2319,8 +2302,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
> goto out_free_ctrl;
> }
>
> - INIT_DELAYED_WORK(&ctrl->ctrl.connect_work,
> - nvme_rdma_reconnect_ctrl_work);
> + INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
> INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
> INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
>
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 14bd16b8d99f..c0e5bb3949b3 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -2042,28 +2042,9 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
> return ret;
> }
>
> -static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
> +static int _nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl)
> {
> - struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
> - struct nvme_tcp_ctrl, ctrl.connect_work);
> - struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
> -
> - ++ctrl->nr_reconnects;
> -
> - if (nvme_tcp_setup_ctrl(ctrl, false))
> - goto requeue;
> -
> - dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
> - ctrl->nr_reconnects);
> -
> - ctrl->nr_reconnects = 0;
> -
> - return;
> -
> -requeue:
> - dev_info(ctrl->device, "Failed reconnect attempt %d\n",
> - ctrl->nr_reconnects);
> - nvmf_reconnect_or_remove(ctrl);
> + return nvme_tcp_setup_ctrl(ctrl, false);
> }
> Same argument here; I'd rather modify nvme_tcp_setup_ctrl() to drop the
'new' argument and allow it to be used as the callback directly.
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Felix Imendörffer
next prev parent reply other threads:[~2021-10-19 13:41 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-10-18 13:40 [PATCH v1 0/7] Centrelize common fabrics code to core drivers Max Gurtovoy
2021-10-18 13:40 ` [PATCH 1/7] nvme: add connect_work attribute to nvme ctrl Max Gurtovoy
2021-10-19 12:32 ` Sagi Grimberg
2021-10-19 13:20 ` Hannes Reinecke
2021-10-20 13:34 ` Himanshu Madhani
2021-10-18 13:40 ` [PATCH 2/7] nvme-fabrics: introduce nvmf_reconnect_or_remove API Max Gurtovoy
2021-10-19 6:26 ` Chaitanya Kulkarni
2021-10-19 12:36 ` Sagi Grimberg
2021-10-19 12:58 ` Max Gurtovoy
2021-10-19 13:21 ` Hannes Reinecke
2021-10-20 13:34 ` Himanshu Madhani
2021-10-18 13:40 ` [PATCH 3/7] nvme: add err_work attribute to nvme ctrl Max Gurtovoy
2021-10-19 12:36 ` Sagi Grimberg
2021-10-20 13:34 ` Himanshu Madhani
2021-10-18 13:40 ` [PATCH 4/7] nvme-fabrics: introduce nvmf_error_recovery API Max Gurtovoy
2021-10-19 13:27 ` Hannes Reinecke
2021-10-20 13:34 ` Himanshu Madhani
2021-10-18 13:40 ` [PATCH 5/7] nvme/nvme-fabrics: introduce nvmf_error_recovery_work API Max Gurtovoy
2021-10-19 6:29 ` Chaitanya Kulkarni
2021-10-19 12:43 ` Sagi Grimberg
2021-10-19 13:17 ` Max Gurtovoy
2021-10-19 13:34 ` Hannes Reinecke
2021-10-18 13:40 ` [PATCH 6/7] nvme/nvme-fabrics: introduce nvmf_reconnect_ctrl_work API Max Gurtovoy
2021-10-19 6:29 ` Chaitanya Kulkarni
2021-10-19 12:44 ` Sagi Grimberg
2021-10-19 13:18 ` Max Gurtovoy
2021-10-19 13:41 ` Hannes Reinecke [this message]
2021-10-18 13:40 ` [PATCH 7/7] nvme-fabrics: add nvmf_init_ctrl/nvmf_teardown_ctrl API Max Gurtovoy
2021-10-19 12:46 ` Sagi Grimberg
2021-10-19 13:20 ` Max Gurtovoy
2021-10-18 14:08 ` [PATCH v1 0/7] Centrelize common fabrics code to core drivers James Smart
2021-10-19 5:36 ` Christoph Hellwig
2021-10-19 6:24 ` Chaitanya Kulkarni
2021-10-19 12:32 ` Sagi Grimberg
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=bd5b02ac-befb-b75d-0e03-371cd2664bef@suse.de \
--to=hare@suse.de \
--cc=chaitanyak@nvidia.com \
--cc=hch@lst.de \
--cc=israelr@nvidia.com \
--cc=jsmart2021@gmail.com \
--cc=kbusch@kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=mgurtovoy@nvidia.com \
--cc=oren@nvidia.com \
--cc=sagi@grimberg.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox