From: Max Gurtovoy <mgurtovoy@nvidia.com>
To: <hch@lst.de>, <sagi@grimberg.me>, <kbusch@kernel.org>,
<linux-nvme@lists.infradead.org>
Cc: <hare@suse.de>, <axboe@kernel.dk>, <oren@nvidia.com>,
<ngottlieb@nvidia.com>, <israelr@nvidia.com>,
Max Gurtovoy <mgurtovoy@nvidia.com>
Subject: [PATCH 1/3] nvme-fabrics: unify common code in admin and io queue connect
Date: Wed, 10 May 2023 21:02:29 +0300
Message-ID: <20230510180231.18865-2-mgurtovoy@nvidia.com>
In-Reply-To: <20230510180231.18865-1-mgurtovoy@nvidia.com>

The admin queue and I/O queue connect paths duplicate the code that
builds the Connect command and allocates and fills the connect data
blob. Factor the common code into two helpers,
nvmf_connect_cmd_prepare() and nvmf_connect_data_prepare(), to simplify
code maintenance and avoid the duplication.
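
With the helpers in place, both nvmf_connect_admin_queue() and
nvmf_connect_io_queue() boil down to the same sequence (sketch of the
resulting flow, see the diff below):

	nvmf_connect_cmd_prepare(ctrl, qid, &cmd);

	data = nvmf_connect_data_prepare(ctrl, cntlid);
	if (!data)
		return -ENOMEM;

where the admin queue passes qid = 0 and cntlid = 0xffff, and the I/O
queue passes its own qid and ctrl->cntlid.
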
Tested-by: Noam Gottlieb <ngottlieb@nvidia.com>
Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
drivers/nvme/host/fabrics.c | 75 ++++++++++++++++++++++---------------
1 file changed, 44 insertions(+), 31 deletions(-)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 0069ebff85df..d11db06d293b 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -349,6 +349,46 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
}
}
+static struct nvmf_connect_data *nvmf_connect_data_prepare(struct nvme_ctrl *ctrl,
+ u16 cntlid)
+{
+ struct nvmf_connect_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ uuid_copy(&data->hostid, &ctrl->opts->host->id);
+ data->cntlid = cpu_to_le16(cntlid);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ return data;
+}
+
+static void nvmf_connect_cmd_prepare(struct nvme_ctrl *ctrl, u16 qid,
+ struct nvme_command *cmd)
+{
+ cmd->connect.opcode = nvme_fabrics_command;
+ cmd->connect.fctype = nvme_fabrics_type_connect;
+ cmd->connect.qid = cpu_to_le16(qid);
+
+ if (qid) {
+ cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
+ } else {
+ cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
+
+ /*
+ * ctrl->kato is in seconds while the Connect command's KATO
+ * field is in milliseconds, hence the * 1000 (admin queue only)
+ */
+ cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
+ }
+
+ if (ctrl->opts->disable_sqflow)
+ cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+}
+
/**
* nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
* API function.
@@ -377,28 +417,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
int ret;
u32 result;
- cmd.connect.opcode = nvme_fabrics_command;
- cmd.connect.fctype = nvme_fabrics_type_connect;
- cmd.connect.qid = 0;
- cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
-
- /*
- * Set keep-alive timeout in seconds granularity (ms * 1000)
- */
- cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
-
- if (ctrl->opts->disable_sqflow)
- cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+ nvmf_connect_cmd_prepare(ctrl, 0, &cmd);
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = nvmf_connect_data_prepare(ctrl, 0xffff);
if (!data)
return -ENOMEM;
- uuid_copy(&data->hostid, &ctrl->opts->host->id);
- data->cntlid = cpu_to_le16(0xffff);
- strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
- strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
data, sizeof(*data), NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
@@ -468,23 +492,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
int ret;
u32 result;
- cmd.connect.opcode = nvme_fabrics_command;
- cmd.connect.fctype = nvme_fabrics_type_connect;
- cmd.connect.qid = cpu_to_le16(qid);
- cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+ nvmf_connect_cmd_prepare(ctrl, qid, &cmd);
- if (ctrl->opts->disable_sqflow)
- cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = nvmf_connect_data_prepare(ctrl, ctrl->cntlid);
if (!data)
return -ENOMEM;
- uuid_copy(&data->hostid, &ctrl->opts->host->id);
- data->cntlid = cpu_to_le16(ctrl->cntlid);
- strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
- strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data), qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
--
2.18.1