From: Chaitanya Kulkarni <kch@nvidia.com>
To: <linux-nvme@lists.infradead.org>
Cc: <kbusch@kernel.org>, <hch@lst.de>, <sagi@grimberg.me>,
<james.smart@broadcom.com>, Chaitanya Kulkarni <kch@nvidia.com>
Subject: [PATCH 4/6] nvme-core: remove flags parameter
Date: Mon, 6 Jun 2022 18:16:45 -0700
Message-ID: <20220607011647.24105-5-kch@nvidia.com>
In-Reply-To: <20220607011647.24105-1-kch@nvidia.com>
The function __nvme_submit_sync_cmd() has the following list of callers
that set the blk_mq_req_flags_t flags value:-
Callers                    | blk_mq_req_flags_t
----------------------------------------------------------------------
nvme_submit_sync_cmd()     | 0
nvme_features()            | 0
nvme_sec_submit()          | 0
nvmf_reg_read32()          | 0
nvmf_reg_read64()          | 0
nvmf_reg_write32()         | 0
nvmf_connect_admin_queue() | BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT
nvmf_connect_io_queue()    | BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT
Remove the flags function parameter from __nvme_submit_sync_cmd(), derive
the flags from the nvme_command when the caller is nvmf_connect_admin_queue()
or nvmf_connect_io_queue(), and adjust the rest of the code accordingly.
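As an illustration (a sketch only, using the is_fabrics_admin_connect_cmd()
and is_fabrics_io_connect_cmd() helpers from this patch; the real hunk is in
core.c below), the derivation inside __nvme_submit_sync_cmd() amounts to:

	blk_mq_req_flags_t flags = 0;

	/* only the fabrics connect commands need a reserved, non-blocking tag */
	if (is_fabrics_admin_connect_cmd(cmd) || is_fabrics_io_connect_cmd(cmd))
		flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT;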
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/nvme/host/core.c | 19 +++++++++++++------
drivers/nvme/host/fabrics.c | 15 +++++----------
drivers/nvme/host/nvme.h | 2 +-
3 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 95800aa9c83f..b8daf3ab9a22 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -984,6 +984,11 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
return blk_status_to_errno(status);
}
+static inline bool is_fabrics_admin_connect_cmd(struct nvme_command *cmd)
+{
+ return cmd->connect.opcode == nvme_fabrics_command &&
+ cmd->connect.fctype == nvme_fabrics_type_connect;
+}
static inline bool is_fabrics_io_connect_cmd(struct nvme_command *cmd)
{
return cmd->connect.opcode == nvme_fabrics_command &&
@@ -997,12 +1002,16 @@ static inline bool is_fabrics_io_connect_cmd(struct nvme_command *cmd)
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int at_head, blk_mq_req_flags_t flags)
+ int at_head)
{
+ blk_mq_req_flags_t flags = 0;
int qid = NVME_QID_ANY;
struct request *req;
int ret;
+ if (is_fabrics_admin_connect_cmd(cmd) || is_fabrics_io_connect_cmd(cmd))
+ flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT;
+
if (is_fabrics_io_connect_cmd(cmd)) {
/* nvmf io connect command has qid in nvme_command set */
qid = le16_to_cpu(cmd->connect.qid);
@@ -1037,8 +1046,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
- 0, 0);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1473,8 +1481,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -2112,7 +2119,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- 1, 0);
+ 1);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 7ad8c4438318..3ca1a10cfb1c 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,8 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -198,8 +197,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -243,8 +241,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -389,8 +386,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), 1);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -450,8 +446,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), 1);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 3beb3ebb220e..3e1c5fbf5603 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -752,7 +752,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int at_head, blk_mq_req_flags_t flags);
+ int at_head);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
--
2.29.0
Thread overview: 12+ messages
2022-06-07 1:16 [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 1/6] nvme-core: remove unused timeout parameter Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 2/6] nvme-core: fix qid param blk_mq_alloc_request_hctx Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 3/6] nvme-core: remove qid parameter Chaitanya Kulkarni
2022-06-07 4:39 ` Christoph Hellwig
2022-06-07 5:57 ` Chaitanya Kulkarni
2022-06-07 5:59 ` Christoph Hellwig
2022-06-07 6:04 ` Chaitanya Kulkarni
2022-06-07 1:16 ` Chaitanya Kulkarni [this message]
2022-06-07 1:16 ` [PATCH 5/6] nvme-core: remove at_head parameter Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 6/6] nvme-core: remove __nvme_submit_sync_cmd() wrapper Chaitanya Kulkarni
2022-06-13 18:15 ` [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Christoph Hellwig