* [PATCH 1/6] nvme-core: remove unused timeout parameter
2022-06-07 1:16 [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Chaitanya Kulkarni
@ 2022-06-07 1:16 ` Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 2/6] nvme-core: fix qid param blk_mq_alloc_request_hctx Chaitanya Kulkarni
` (5 subsequent siblings)
6 siblings, 0 replies; 12+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-07 1:16 UTC
To: linux-nvme; +Cc: kbusch, hch, sagi, james.smart, Chaitanya Kulkarni
The function __nvme_submit_sync_cmd() has the following list of callers,
all of which set the timeout value to 0 :-
Callers | Timeout value
------------------------------------------------
nvme_submit_sync_cmd() | 0
nvme_features() | 0
nvme_sec_submit() | 0
nvmf_reg_read32() | 0
nvmf_reg_read64() | 0
nvmf_reg_write32() | 0
nvmf_connect_admin_queue() | 0
nvmf_connect_io_queue() | 0
Remove the timeout function parameter from __nvme_submit_sync_cmd() and
adjust the rest of the code accordingly.
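With the parameter gone, req->timeout simply keeps the queue's default.
A hypothetical caller that did need a per-command timeout would have to
open-code the submission instead; a minimal sketch, assuming the core.c
internals visible in the hunks below (my_timeout is illustrative):

	struct request *req;
	int ret;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);
	req->timeout = my_timeout;	/* explicit per-request timeout */
	ret = nvme_execute_rq(req, false);
	blk_mq_free_request(req);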
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/nvme/host/core.c | 12 ++++--------
drivers/nvme/host/fabrics.c | 10 +++++-----
drivers/nvme/host/nvme.h | 2 +-
3 files changed, 10 insertions(+), 14 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 98e343cb4a12..f46b0d37f45d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -990,8 +990,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags)
+ int qid, int at_head, blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -1006,9 +1005,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
nvme_init_request(req, cmd);
- if (timeout)
- req->timeout = timeout;
-
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
if (ret)
@@ -1028,7 +1024,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1465,7 +1461,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -2102,7 +2098,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..0a0512300f1b 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,7 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -198,7 +198,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -243,7 +243,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
NVME_QID_ANY, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
@@ -389,7 +389,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 0, NVME_QID_ANY, 1,
+ data, sizeof(*data), NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
@@ -450,7 +450,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 0, qid, 1,
+ data, sizeof(*data), qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 81c4f5379c0c..88bd147b8048 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -752,7 +752,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
+ int qid, int at_head,
blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
--
2.29.0
* [PATCH 2/6] nvme-core: fix qid param blk_mq_alloc_request_hctx
2022-06-07 1:16 [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 1/6] nvme-core: remove unused timeout parameter Chaitanya Kulkarni
@ 2022-06-07 1:16 ` Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 3/6] nvme-core: remove qid parameter Chaitanya Kulkarni
` (4 subsequent siblings)
6 siblings, 0 replies; 12+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-07 1:16 UTC
To: linux-nvme; +Cc: kbusch, hch, sagi, james.smart, Chaitanya Kulkarni
The only caller of __nvme_submit_sync_cmd() with a qid value not equal
to NVME_QID_ANY is nvmf_connect_io_queue(), where the qid value is
always set to > 0.
[1] Call chains that reach __nvme_submit_sync_cmd() with a qid
parameter :-
Caller | qid parameter
------------------------------------------------------
* nvme_fc_connect_io_queues() |
nvmf_connect_io_queue() | qid > 0
* nvme_rdma_start_io_queues() |
nvme_rdma_start_queue() |
nvmf_connect_io_queue() | qid > 0
* nvme_tcp_start_io_queues() |
nvme_tcp_start_queue() |
nvmf_connect_io_queue() | qid > 0
* nvme_loop_connect_io_queues() |
nvmf_connect_io_queue() | qid > 0
When the qid parameter of __nvme_submit_sync_cmd() is > 0, as it is for
all of the callers above, we use blk_mq_alloc_request_hctx(), whose last
argument is computed with a conditional operator that falls back to 0
when qid is 0; see line 1002 :-
991 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
992 union nvme_result *result, void *buffer, unsigned bufflen,
993 int qid, int at_head, blk_mq_req_flags_t flags)
994 {
995 struct request *req;
996 int ret;
997
998 if (qid == NVME_QID_ANY)
999 req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
1000 else
1001 req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
1002 qid ? qid - 1 : 0);
1003
But the qid parameter of __nvme_submit_sync_cmd() can never be 0 for
the callers listed in [1], and all the other callers of
__nvme_submit_sync_cmd() use NVME_QID_ANY as the qid value :-
1. nvme_submit_sync_cmd()
2. nvme_features()
3. nvme_sec_submit()
4. nvmf_reg_read32()
5. nvmf_reg_read64()
6. nvmf_reg_write32()
7. nvmf_connect_admin_queue()
Remove the conditional operator and pass qid - 1 directly in the call
to blk_mq_alloc_request_hctx().
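The admin queue (qid 0) always takes the NVME_QID_ANY path, so
blk_mq_alloc_request_hctx() only ever sees I/O queue qids, which are
1-based while hctx indices are 0-based; a sketch of the mapping this
relies on:

	/*
	 * Sketch of the qid-to-hctx mapping assumed here:
	 *   admin queue: qid == 0, submitted with NVME_QID_ANY, never
	 *                reaches blk_mq_alloc_request_hctx()
	 *   I/O queues:  qid 1..N map to hctx index qid - 1 (0..N - 1)
	 */
	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags, qid - 1);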
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/nvme/host/core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f46b0d37f45d..36943e245acf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -999,7 +999,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
- qid ? qid - 1 : 0);
+ qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
--
2.29.0
* [PATCH 3/6] nvme-core: remove qid parameter
2022-06-07 1:16 [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 1/6] nvme-core: remove unused timeout parameter Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 2/6] nvme-core: fix qid param blk_mq_alloc_request_hctx Chaitanya Kulkarni
@ 2022-06-07 1:16 ` Chaitanya Kulkarni
2022-06-07 4:39 ` Christoph Hellwig
2022-06-07 1:16 ` [PATCH 4/6] nvme-core: remove flags parameter Chaitanya Kulkarni
` (3 subsequent siblings)
6 siblings, 1 reply; 12+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-07 1:16 UTC
To: linux-nvme; +Cc: kbusch, hch, sagi, james.smart, Chaitanya Kulkarni
The only caller of __nvme_submit_sync_cmd() that sets a qid value
different from NVME_QID_ANY is nvmf_connect_io_queue() :-
Callers | qid value
--------------------------------------------------
nvme_submit_sync_cmd() | NVME_QID_ANY
nvme_features() | NVME_QID_ANY
nvme_sec_submit() | NVME_QID_ANY
nvmf_reg_read32() | NVME_QID_ANY
nvmf_reg_read64() | NVME_QID_ANY
nvmf_reg_write32() | NVME_QID_ANY
nvmf_connect_admin_queue() | NVME_QID_ANY
nvmf_connect_io_queue() | qid > 0
We can easily derive the qid value from the nvme_command parameter of
the function __nvme_submit_sync_cmd() when its caller is
nvmf_connect_io_queue().
Remove the qid function parameter from __nvme_submit_sync_cmd(), derive
the value from the nvme_command when the caller is
nvmf_connect_io_queue(), and adjust the rest of the code accordingly.
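This works because nvmf_connect_io_queue() already embeds the target
queue id in the command it builds; a shortened sketch of the producer
side (as in drivers/nvme/host/fabrics.c):

	/* The submit path can read the queue id back from
	 * cmd->connect.qid, which is 0 only for the admin connect. */
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = cpu_to_le16(qid);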
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/nvme/host/core.c | 21 +++++++++++++++++----
drivers/nvme/host/fabrics.c | 10 +++++-----
drivers/nvme/host/nvme.h | 3 +--
3 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 36943e245acf..95800aa9c83f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -984,17 +984,30 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
return blk_status_to_errno(status);
}
+static inline bool is_fabrics_io_connect_cmd(struct nvme_command *cmd)
+{
+ return cmd->connect.opcode == nvme_fabrics_command &&
+ cmd->connect.fctype == nvme_fabrics_type_connect &&
+ le16_to_cpu(cmd->connect.qid) > 0;
+}
+
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head, blk_mq_req_flags_t flags)
+ int at_head, blk_mq_req_flags_t flags)
{
+ int qid = NVME_QID_ANY;
struct request *req;
int ret;
+ if (is_fabrics_io_connect_cmd(cmd)) {
+ /* the nvmf I/O connect command carries its qid in nvme_command */
+ qid = le16_to_cpu(cmd->connect.qid);
+ }
+
if (qid == NVME_QID_ANY)
req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
@@ -1025,7 +1038,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
- NVME_QID_ANY, 0, 0);
+ 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1461,7 +1474,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, NVME_QID_ANY, 0, 0);
+ buffer, buflen, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -2099,7 +2112,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- NVME_QID_ANY, 1, 0);
+ 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 0a0512300f1b..7ad8c4438318 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -153,7 +153,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -199,7 +199,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -244,7 +244,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -389,7 +389,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), NVME_QID_ANY, 1,
+ data, sizeof(*data), 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
@@ -450,7 +450,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), qid, 1,
+ data, sizeof(*data), 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 88bd147b8048..3beb3ebb220e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -752,8 +752,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head,
- blk_mq_req_flags_t flags);
+ int at_head, blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
--
2.29.0
* Re: [PATCH 3/6] nvme-core: remove qid parameter
2022-06-07 1:16 ` [PATCH 3/6] nvme-core: remove qid parameter Chaitanya Kulkarni
@ 2022-06-07 4:39 ` Christoph Hellwig
2022-06-07 5:57 ` Chaitanya Kulkarni
0 siblings, 1 reply; 12+ messages in thread
From: Christoph Hellwig @ 2022-06-07 4:39 UTC
To: Chaitanya Kulkarni; +Cc: linux-nvme, kbusch, hch, sagi, james.smart
On Mon, Jun 06, 2022 at 06:16:44PM -0700, Chaitanya Kulkarni wrote:
> We can easily derive the qid value from the nvme_command parameter of
> the function __nvme_submit_sync_cmd() when its caller is
> nvmf_connect_io_queue().
This is a pretty horrible layering violation. The low-level submit
helpers should not know about the contents of the payload.
* Re: [PATCH 3/6] nvme-core: remove qid parameter
2022-06-07 4:39 ` Christoph Hellwig
@ 2022-06-07 5:57 ` Chaitanya Kulkarni
2022-06-07 5:59 ` Christoph Hellwig
0 siblings, 1 reply; 12+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-07 5:57 UTC
To: Christoph Hellwig
Cc: linux-nvme@lists.infradead.org, kbusch@kernel.org,
Chaitanya Kulkarni, sagi@grimberg.me, james.smart@broadcom.com
On 6/6/22 21:39, Christoph Hellwig wrote:
> On Mon, Jun 06, 2022 at 06:16:44PM -0700, Chaitanya Kulkarni wrote:
>> We can easily derive the qid value from the nvme_command parameter of
>> the function __nvme_submit_sync_cmd() when its caller is
>> nvmf_connect_io_queue().
>
> This is a pretty horrible layering violation. The low-level submit
> helpers should not know about the contents of the payload.
Okay, we can ignore this series then.
-ck
* Re: [PATCH 3/6] nvme-core: remove qid parameter
2022-06-07 5:57 ` Chaitanya Kulkarni
@ 2022-06-07 5:59 ` Christoph Hellwig
2022-06-07 6:04 ` Chaitanya Kulkarni
0 siblings, 1 reply; 12+ messages in thread
From: Christoph Hellwig @ 2022-06-07 5:59 UTC
To: Chaitanya Kulkarni
Cc: Christoph Hellwig, linux-nvme@lists.infradead.org,
kbusch@kernel.org, sagi@grimberg.me, james.smart@broadcom.com
On Tue, Jun 07, 2022 at 05:57:02AM +0000, Chaitanya Kulkarni wrote:
> On 6/6/22 21:39, Christoph Hellwig wrote:
> > On Mon, Jun 06, 2022 at 06:16:44PM -0700, Chaitanya Kulkarni wrote:
> >> We can easily derive the qid value from the nvme_command parameter of
> >> the function __nvme_submit_sync_cmd() when its caller is
> >> nvmf_connect_io_queue().
> >
> > This is a pretty horrible layering violation. The low-level submit
> > helpers should not know about the contents of the payload.
>
> Okay, we can ignore this series then.
The first patches still look good to me, though.
* Re: [PATCH 3/6] nvme-core: remove qid parameter
2022-06-07 5:59 ` Christoph Hellwig
@ 2022-06-07 6:04 ` Chaitanya Kulkarni
0 siblings, 0 replies; 12+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-07 6:04 UTC
To: Christoph Hellwig
Cc: linux-nvme@lists.infradead.org, kbusch@kernel.org,
sagi@grimberg.me, james.smart@broadcom.com
On 6/6/22 22:59, Christoph Hellwig wrote:
> On Tue, Jun 07, 2022 at 05:57:02AM +0000, Chaitanya Kulkarni wrote:
>> On 6/6/22 21:39, Christoph Hellwig wrote:
>>> On Mon, Jun 06, 2022 at 06:16:44PM -0700, Chaitanya Kulkarni wrote:
>>>> We can easily derive the qid value from the nvme_command parameter of
>>>> the function __nvme_submit_sync_cmd() when its caller is
>>>> nvmf_connect_io_queue().
>>>
>>> This is a pretty horrible layering violation. The low-level submit
>>> helpers should not know about the contents of the payload.
>>
>> Okay, we can ignore this series then.
>
> The first patches still look good to me, though.
Well, the second patch also fixes the conditional operator without
looking at the contents of the payload, so perhaps we should consider
that one as well if it looks good.
-ck
* [PATCH 4/6] nvme-core: remove flags parameter
2022-06-07 1:16 [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Chaitanya Kulkarni
` (2 preceding siblings ...)
2022-06-07 1:16 ` [PATCH 3/6] nvme-core: remove qid parameter Chaitanya Kulkarni
@ 2022-06-07 1:16 ` Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 5/6] nvme-core: remove at_head parameter Chaitanya Kulkarni
` (2 subsequent siblings)
6 siblings, 0 replies; 12+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-07 1:16 UTC
To: linux-nvme; +Cc: kbusch, hch, sagi, james.smart, Chaitanya Kulkarni
The function __nvme_submit_sync_cmd() has the following list of callers,
which set the blk_mq_req_flags_t flags value as shown :-
Callers | blk_mq_req_flags_t
----------------------------------------------------------------------
nvme_submit_sync_cmd() | 0
nvme_features() | 0
nvme_sec_submit() | 0
nvmf_reg_read32() | 0
nvmf_reg_read64() | 0
nvmf_reg_write32() | 0
nvmf_connect_admin_queue() | BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT
nvmf_connect_io_queue() | BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT
Remove the flags function parameter from __nvme_submit_sync_cmd(),
derive the value from the nvme_command when the caller is
nvmf_connect_admin_queue() or nvmf_connect_io_queue(), and adjust the
rest of the code accordingly.
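For context, connect commands are issued before the queue is fully
live, so they take a tag from the reserved pool set aside at tag-set
creation and must not sleep waiting for one; a sketch of the tag-set
side (NVMF_RESERVED_TAGS is defined in drivers/nvme/host/fabrics.h):

	/* Sketch: fabrics transports reserve tags when creating the tag
	 * set, so a connect submitted with BLK_MQ_REQ_RESERVED |
	 * BLK_MQ_REQ_NOWAIT can always get a tag without blocking. */
	set->reserved_tags = NVMF_RESERVED_TAGS;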
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/nvme/host/core.c | 19 +++++++++++++------
drivers/nvme/host/fabrics.c | 15 +++++----------
drivers/nvme/host/nvme.h | 2 +-
3 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 95800aa9c83f..b8daf3ab9a22 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -984,6 +984,11 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
return blk_status_to_errno(status);
}
+static inline bool is_fabrics_admin_connect_cmd(struct nvme_command *cmd)
+{
+ return cmd->connect.opcode == nvme_fabrics_command &&
+ cmd->connect.fctype == nvme_fabrics_type_connect;
+}
static inline bool is_fabrics_io_connect_cmd(struct nvme_command *cmd)
{
return cmd->connect.opcode == nvme_fabrics_command &&
@@ -997,12 +1002,16 @@ static inline bool is_fabrics_io_connect_cmd(struct nvme_command *cmd)
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int at_head, blk_mq_req_flags_t flags)
+ int at_head)
{
+ blk_mq_req_flags_t flags = 0;
int qid = NVME_QID_ANY;
struct request *req;
int ret;
+ if (is_fabrics_admin_connect_cmd(cmd) || is_fabrics_io_connect_cmd(cmd))
+ flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT;
+
if (is_fabrics_io_connect_cmd(cmd)) {
/* nvmf io connect command has qid in nvme_command set */
qid = le16_to_cpu(cmd->connect.qid);
@@ -1037,8 +1046,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
- 0, 0);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1473,8 +1481,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -2112,7 +2119,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- 1, 0);
+ 1);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 7ad8c4438318..3ca1a10cfb1c 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,8 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -198,8 +197,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -243,8 +241,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -389,8 +386,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), 1);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -450,8 +446,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), 1);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 3beb3ebb220e..3e1c5fbf5603 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -752,7 +752,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int at_head, blk_mq_req_flags_t flags);
+ int at_head);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
--
2.29.0
* [PATCH 5/6] nvme-core: remove at_head parameter
2022-06-07 1:16 [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Chaitanya Kulkarni
` (3 preceding siblings ...)
2022-06-07 1:16 ` [PATCH 4/6] nvme-core: remove flags parameter Chaitanya Kulkarni
@ 2022-06-07 1:16 ` Chaitanya Kulkarni
2022-06-07 1:16 ` [PATCH 6/6] nvme-core: remove __nvme_submit_sync_cmd() wrapper Chaitanya Kulkarni
2022-06-13 18:15 ` [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Christoph Hellwig
6 siblings, 0 replies; 12+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-07 1:16 UTC
To: linux-nvme; +Cc: kbusch, hch, sagi, james.smart, Chaitanya Kulkarni
The function __nvme_submit_sync_cmd() has the following list of callers,
which set the at_head value as shown :-
Callers | at_head value
----------------------------------------------
nvme_submit_sync_cmd() | 0
nvme_features() | 0
nvme_sec_submit() | 1
nvmf_reg_read32() | 0
nvmf_reg_read64() | 0
nvmf_reg_write32() | 0
nvmf_connect_admin_queue() | 1
nvmf_connect_io_queue() | 1
Remove the at_head function parameter from __nvme_submit_sync_cmd(),
derive the value from the nvme_command when the caller is
nvmf_connect_admin_queue(), nvmf_connect_io_queue() or nvme_sec_submit(),
and adjust the rest of the code accordingly.
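The at_head distinction matters because these commands can be issued
while the queue already holds regular I/O; a one-line sketch of the
semantics relied on (matching the hunk below):

	/* at_head == true makes nvme_execute_rq() insert the request at
	 * the head of the dispatch list, so connect and security
	 * commands run ahead of any already-queued requests. */
	ret = nvme_execute_rq(req, is_at_head(cmd));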
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/nvme/host/core.c | 20 +++++++++++++-------
drivers/nvme/host/fabrics.c | 10 +++++-----
drivers/nvme/host/nvme.h | 3 +--
3 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b8daf3ab9a22..41c0045ceb5e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -996,13 +996,20 @@ static inline bool is_fabrics_io_connect_cmd(struct nvme_command *cmd)
le16_to_cpu(cmd->connect.qid) > 0;
}
+static inline bool is_at_head(struct nvme_command *cmd)
+{
+ return is_fabrics_admin_connect_cmd(cmd) ||
+ is_fabrics_io_connect_cmd(cmd) ||
+ cmd->common.opcode == nvme_admin_security_send ||
+ cmd->common.opcode == nvme_admin_security_recv;
+}
+
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- union nvme_result *result, void *buffer, unsigned bufflen,
- int at_head)
+ union nvme_result *result, void *buffer, unsigned bufflen)
{
blk_mq_req_flags_t flags = 0;
int qid = NVME_QID_ANY;
@@ -1034,7 +1041,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
}
req->rq_flags |= RQF_QUIET;
- ret = nvme_execute_rq(req, at_head);
+ ret = nvme_execute_rq(req, is_at_head(cmd));
if (result && ret >= 0)
*result = nvme_req(req)->result;
out:
@@ -1046,7 +1053,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1481,7 +1488,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -2118,8 +2125,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- 1);
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 3ca1a10cfb1c..0d620e5285cf 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,7 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -197,7 +197,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -241,7 +241,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -386,7 +386,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 1);
+ data, sizeof(*data));
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -446,7 +446,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 1);
+ data, sizeof(*data));
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 3e1c5fbf5603..3dbbc4b1a1c2 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -751,8 +751,7 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- union nvme_result *result, void *buffer, unsigned bufflen,
- int at_head);
+ union nvme_result *result, void *buffer, unsigned bufflen);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
--
2.29.0
* [PATCH 6/6] nvme-core: remove __nvme_submit_sync_cmd() wrapper
2022-06-07 1:16 [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Chaitanya Kulkarni
` (4 preceding siblings ...)
2022-06-07 1:16 ` [PATCH 5/6] nvme-core: remove at_head parameter Chaitanya Kulkarni
@ 2022-06-07 1:16 ` Chaitanya Kulkarni
2022-06-13 18:15 ` [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Christoph Hellwig
6 siblings, 0 replies; 12+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-07 1:16 UTC
To: linux-nvme; +Cc: kbusch, hch, sagi, james.smart, Chaitanya Kulkarni
Now that __nvme_submit_sync_cmd() has a small number of function
parameters, remove the nvme_submit_sync_cmd() wrapper, rename
__nvme_submit_sync_cmd() to nvme_submit_sync_cmd(), and adjust the
callsites.
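After this patch every user goes through one five-parameter helper; a
minimal hypothetical usage sketch (the opcode and feature id are
illustrative):

	union nvme_result res;
	struct nvme_command c = { };
	int ret;

	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, &res, NULL, 0);
	if (ret < 0)
		return ret;	/* Linux error code */
	if (ret > 0)
		return -EIO;	/* NVM Express status code */
	/* success: 32-bit result in res.u32 */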
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/nvme/host/core.c | 35 +++++++++++++++--------------------
drivers/nvme/host/fabrics.c | 10 +++++-----
drivers/nvme/host/nvme.h | 2 --
drivers/nvme/host/pci.c | 10 +++++-----
drivers/nvme/host/zns.c | 7 ++++---
5 files changed, 29 insertions(+), 35 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 41c0045ceb5e..b5a040296aa5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1008,7 +1008,7 @@ static inline bool is_at_head(struct nvme_command *cmd)
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen)
{
blk_mq_req_flags_t flags = 0;
@@ -1048,13 +1048,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
blk_mq_free_request(req);
return ret;
}
-EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
-
-int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- void *buffer, unsigned bufflen)
-{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen);
-}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
static u32 nvme_known_admin_effects(u8 opcode)
@@ -1293,7 +1286,7 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
if (!*id)
return -ENOMEM;
- error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+ error = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, *id,
sizeof(struct nvme_id_ctrl));
if (error)
kfree(*id);
@@ -1373,7 +1366,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (!data)
return -ENOMEM;
- status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
+ status = nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, data,
NVME_IDENTIFY_DATA_SIZE);
if (status) {
dev_warn(ctrl->device,
@@ -1421,7 +1414,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!*id)
return -ENOMEM;
- error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
+ error = nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, *id,
+ sizeof(**id));
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
goto out_free_id;
@@ -1465,7 +1459,7 @@ static int nvme_identify_ns_cs_indep(struct nvme_ctrl *ctrl, unsigned nsid,
if (!*id)
return -ENOMEM;
- ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, *id, sizeof(**id));
if (ret) {
dev_warn(ctrl->device,
"Identify namespace (CS independent) failed (%d)\n",
@@ -1488,7 +1482,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen);
+ ret = nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1729,7 +1723,8 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
c.identify.cns = NVME_ID_CNS_CS_NS;
c.identify.csi = NVME_CSI_NVM;
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
+ ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, nvm,
+ sizeof(*nvm));
if (ret)
goto free_data;
@@ -2022,7 +2017,7 @@ static int nvme_send_ns_head_pr_command(struct block_device *bdev,
if (ns) {
c->common.nsid = cpu_to_le32(ns->head->ns_id);
- ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
+ ret = nvme_submit_sync_cmd(ns->queue, c, NULL, data, 16);
}
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
@@ -2032,7 +2027,7 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
u8 data[16])
{
c->common.nsid = cpu_to_le32(ns->head->ns_id);
- return nvme_submit_sync_cmd(ns->queue, c, data, 16);
+ return nvme_submit_sync_cmd(ns->queue, c, NULL, data, 16);
}
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
@@ -2125,7 +2120,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len);
+ return nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
@@ -2893,7 +2888,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
c.get_log_page.csi = csi;
- return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
+ return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, log, size);
}
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
@@ -2968,7 +2963,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
c.identify.cns = NVME_ID_CNS_CS_CTRL;
c.identify.csi = NVME_CSI_NVM;
- ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, id, sizeof(*id));
if (ret)
goto free_data;
@@ -4257,7 +4252,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
.identify.nsid = cpu_to_le32(prev),
};
- ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, ns_list,
NVME_IDENTIFY_DATA_SIZE);
if (ret) {
dev_warn(ctrl->device,
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 0d620e5285cf..8036038eafd3 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,7 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0);
+ ret = nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -197,7 +197,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0);
+ ret = nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -241,7 +241,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0);
+ ret = nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -385,7 +385,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
+ ret = nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
data, sizeof(*data));
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
@@ -445,7 +445,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
- ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
+ ret = nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data));
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 3dbbc4b1a1c2..75e5420c1e6e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -749,8 +749,6 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- void *buf, unsigned bufflen);
-int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cb4adc0c2284..b3eeff7905c9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -325,7 +325,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
- if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
+ if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, NULL, 0)) {
dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
@@ -1217,7 +1217,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
c.delete_queue.opcode = opcode;
c.delete_queue.qid = cpu_to_le16(id);
- return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+ return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, NULL, 0);
}
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
@@ -1240,7 +1240,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
c.create_cq.cq_flags = cpu_to_le16(flags);
c.create_cq.irq_vector = cpu_to_le16(vector);
- return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+ return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, NULL, 0);
}
static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
@@ -1269,7 +1269,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
c.create_sq.sq_flags = cpu_to_le16(flags);
c.create_sq.cqid = cpu_to_le16(qid);
- return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+ return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, NULL, 0);
}
static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
@@ -1989,7 +1989,7 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
- ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+ ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, NULL, 0);
if (ret) {
dev_warn(dev->ctrl.device,
"failed to set host mem (err %d, flags %#x).\n",
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 9f81beb4df4e..6a3b6eee6d14 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -32,7 +32,7 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
c.identify.cns = NVME_ID_CNS_CS_CTRL;
c.identify.csi = NVME_CSI_ZNS;
- status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ status = nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, id, sizeof(*id));
if (status) {
kfree(id);
return status;
@@ -84,7 +84,8 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
c.identify.cns = NVME_ID_CNS_CS_NS;
c.identify.csi = NVME_CSI_ZNS;
- status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id));
+ status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, id,
+ sizeof(*id));
if (status)
goto free_data;
@@ -202,7 +203,7 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
memset(report, 0, buflen);
c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
- ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
+ ret = nvme_submit_sync_cmd(ns->queue, &c, NULL, report, buflen);
if (ret) {
if (ret > 0)
ret = -EIO;
--
2.29.0
* Re: [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup
2022-06-07 1:16 [PATCH 0/6] nvme: __nvme_submit_sync_command() cleanup Chaitanya Kulkarni
` (5 preceding siblings ...)
2022-06-07 1:16 ` [PATCH 6/6] nvme-core: remove __nvme_submit_sync_cmd() wrapper Chaitanya Kulkarni
@ 2022-06-13 18:15 ` Christoph Hellwig
6 siblings, 0 replies; 12+ messages in thread
From: Christoph Hellwig @ 2022-06-13 18:15 UTC
To: Chaitanya Kulkarni; +Cc: linux-nvme, kbusch, hch, sagi, james.smart
Thanks,
I've applied patches 1 and 2 to the nvme-5.20 branch.