linux-nvme.lists.infradead.org archive mirror
* [PATCH 0/2] nvme/ioctl: improve user request allocation
@ 2025-06-06 20:16 Tokunori Ikegami
  2025-06-06 20:16 ` [PATCH 1/2] nvme/ioctl: split user request allocation from user command submission Tokunori Ikegami
  2025-06-06 20:16 ` [PATCH 2/2] nvme/ioctl: use unsigned int instead of bare unsigned Tokunori Ikegami
  0 siblings, 2 replies; 3+ messages in thread
From: Tokunori Ikegami @ 2025-06-06 20:16 UTC (permalink / raw)
  To: linux-nvme; +Cc: Tokunori Ikegami

Previously, each user passthrough handler built an nvme_command on the
stack and nvme_init_request() copied it into the command buffer that is
already allocated as part of the request. Split the request allocation
out of command submission so the handlers can fill in nvme_req(req)->cmd
directly and skip the copy. Also fix the checkpatch.pl warnings about
bare 'unsigned' types.
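
At a glance, the change looks as follows (simplified pseudo-C, not the
exact driver code; the patches below are authoritative):

	/* before: temporary command, copied at request init time */
	struct nvme_command c = { };
	/* ... fill c from the user ioctl struct ... */
	nvme_submit_user_cmd(q, &c, ...);

	/* after: allocate first, then fill the preallocated command */
	req = nvme_alloc_user_request(q, opcode, 0, 0);
	/* ... fill nvme_req(req)->cmd in place ... */
	nvme_submit_user_cmd(q, req, ...);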

Tokunori Ikegami (2):
  nvme/ioctl: split user request allocation from user command submission
  nvme/ioctl: use unsigned int instead of bare unsigned

 drivers/nvme/host/core.c  |  10 +-
 drivers/nvme/host/ioctl.c | 230 +++++++++++++++++++++-----------------
 2 files changed, 134 insertions(+), 106 deletions(-)

-- 
2.48.1




* [PATCH 1/2] nvme/ioctl: split user request allocation from user command submission
  2025-06-06 20:16 [PATCH 0/2] nvme/ioctl: improve user request allocation Tokunori Ikegami
@ 2025-06-06 20:16 ` Tokunori Ikegami
  2025-06-06 20:16 ` [PATCH 2/2] nvme/ioctl: use unsigned int instead of bare unsigned Tokunori Ikegami
  1 sibling, 0 replies; 3+ messages in thread
From: Tokunori Ikegami @ 2025-06-06 20:16 UTC (permalink / raw)
  To: linux-nvme; +Cc: Tokunori Ikegami

The user passthrough handlers built each command in a temporary struct
nvme_command that nvme_init_request() then copied into the command
buffer already allocated as part of the request. Allocate the request
first and let the handlers fill in nvme_req(req)->cmd in place, deleting
the unnecessary copy.
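
The request data direction is now derived directly from the opcode
rather than from nvme_req_op() on a fully built command; in the NVMe
opcode encoding the low bit indicates a host-to-controller data
transfer, which is also what the kernel's nvme_is_write() tests:

	/* as used by nvme_alloc_user_request() below */
	enum req_op op = (opcode & 1) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;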

Signed-off-by: Tokunori Ikegami <ikegami.t@gmail.com>
---
 drivers/nvme/host/core.c  |  10 +-
 drivers/nvme/host/ioctl.c | 213 +++++++++++++++++++++-----------------
 2 files changed, 125 insertions(+), 98 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f69a232a000a..8736a9a8d74d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -734,14 +734,17 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
 	if (!logging_enabled)
 		req->rq_flags |= RQF_QUIET;
 
-	/* passthru commands should let the driver set the SGL flags */
-	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
-
 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
 		req->cmd_flags |= REQ_POLLED;
 	nvme_clear_nvme_request(req);
-	memcpy(nr->cmd, cmd, sizeof(*cmd));
+
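+	/* a NULL cmd means the caller fills nvme_req(req)->cmd itself */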
+	if (likely(cmd)) {
+		/* passthru commands should let the driver set the SGL flags */
+		cmd->common.flags &= ~NVME_CMD_SGL_ALL;
+		memcpy(nr->cmd, cmd, sizeof(*cmd));
+	}
 }
 EXPORT_SYMBOL_GPL(nvme_init_request);
 
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index ca86d3bf7ea4..02edb0900413 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -14,7 +14,7 @@ enum {
 	NVME_IOCTL_PARTITION	= (1 << 1),
 };
 
-static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
+static bool nvme_cmd_allowed(struct nvme_ns *ns, __u8 opcode, __u32 cdw10,
 		unsigned int flags, bool open_for_write)
 {
 	u32 effects;
@@ -30,8 +30,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
 	 * Do not allow unprivileged processes to send vendor specific or fabrics
 	 * commands as we can't be sure about their effects.
 	 */
-	if (c->common.opcode >= nvme_cmd_vendor_start ||
-	    c->common.opcode == nvme_fabrics_command)
+	if (opcode >= nvme_cmd_vendor_start || opcode == nvme_fabrics_command)
 		goto admin;
 
 	/*
@@ -41,8 +40,8 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
 	 * potentially sensitive information.
 	 */
 	if (!ns) {
-		if (c->common.opcode == nvme_admin_identify) {
-			switch (c->identify.cns) {
+		if (opcode == nvme_admin_identify) {
+			switch (cdw10 & 0xff) {
 			case NVME_ID_CNS_NS:
 			case NVME_ID_CNS_CS_NS:
 			case NVME_ID_CNS_NS_CS_INDEP:
@@ -59,7 +58,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
 	 * and marks this command as supported.  If not reject unprivileged
 	 * passthrough.
 	 */
-	effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
+	effects = nvme_command_effects(ns->ctrl, ns, opcode);
 	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
 		goto admin;
 
@@ -77,7 +76,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
 	 * change the logical block contents if the file descriptor is open for
 	 * writing.
 	 */
-	if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
+	if (((opcode & 1) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
 	    !open_for_write)
 		goto admin;
 
@@ -99,15 +98,15 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
 }
 
 static struct request *nvme_alloc_user_request(struct request_queue *q,
-		struct nvme_command *cmd, blk_opf_t rq_flags,
-		blk_mq_req_flags_t blk_flags)
+		__u8 opcode, blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
 {
+	enum req_op op = (opcode & 1) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
 	struct request *req;
 
-	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
+	req = blk_mq_alloc_request(q, op | rq_flags, blk_flags);
 	if (IS_ERR(req))
 		return req;
-	nvme_init_request(req, cmd);
+	nvme_init_request(req, NULL);
 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
 	return req;
 }
@@ -165,21 +164,17 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
+		struct request *req, u64 ubuffer, unsigned bufflen,
 		void __user *meta_buffer, unsigned meta_len,
 		u64 *result, unsigned timeout, unsigned int flags)
 {
+	struct nvme_command *cmd = nvme_req(req)->cmd;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_ctrl *ctrl;
-	struct request *req;
 	struct bio *bio;
 	u32 effects;
 	int ret;
 
-	req = nvme_alloc_user_request(q, cmd, 0, 0);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
-
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
@@ -211,9 +206,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
 	struct nvme_user_io io;
-	struct nvme_command c;
+	struct nvme_command *c;
 	unsigned length, meta_len;
 	void __user *metadata;
+	struct request *req;
 
 	if (copy_from_user(&io, uio, sizeof(io)))
 		return -EFAULT;
@@ -254,19 +250,25 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 			return -EINVAL;
 	}
 
-	memset(&c, 0, sizeof(c));
-	c.rw.opcode = io.opcode;
-	c.rw.flags = io.flags;
-	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
-	c.rw.slba = cpu_to_le64(io.slba);
-	c.rw.length = cpu_to_le16(io.nblocks);
-	c.rw.control = cpu_to_le16(io.control);
-	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
-	c.rw.reftag = cpu_to_le32(io.reftag);
-	c.rw.lbat = cpu_to_le16(io.apptag);
-	c.rw.lbatm = cpu_to_le16(io.appmask);
-
-	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
+	req = nvme_alloc_user_request(ns->queue, io.opcode, 0, 0);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	c = nvme_req(req)->cmd;
+	memset(c, 0, sizeof(*c));
+	c->rw.opcode = io.opcode;
+	/* passthru commands should let the driver set the SGL flags */
+	c->rw.flags = io.flags & ~NVME_CMD_SGL_ALL;
+	c->rw.nsid = cpu_to_le32(ns->head->ns_id);
+	c->rw.slba = cpu_to_le64(io.slba);
+	c->rw.length = cpu_to_le16(io.nblocks);
+	c->rw.control = cpu_to_le16(io.control);
+	c->rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+	c->rw.reftag = cpu_to_le32(io.reftag);
+	c->rw.lbat = cpu_to_le16(io.apptag);
+	c->rw.lbatm = cpu_to_le16(io.appmask);
+
+	return nvme_submit_user_cmd(ns->queue, req, io.addr, length, metadata,
 			meta_len, NULL, 0, 0);
 }
 
@@ -287,9 +289,11 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
 		bool open_for_write)
 {
+	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
 	struct nvme_passthru_cmd cmd;
-	struct nvme_command c;
+	struct nvme_command *c;
 	unsigned timeout = 0;
+	struct request *req;
 	u64 result;
 	int status;
 
@@ -300,28 +304,35 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
 		return -EINVAL;
 
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = cmd.opcode;
-	c.common.flags = cmd.flags;
-	c.common.nsid = cpu_to_le32(cmd.nsid);
-	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
-	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
-	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
-	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
-	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
-	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
-	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
-	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
-
-	if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
+	if (!nvme_cmd_allowed(ns, cmd.opcode, cmd.cdw10, 0, open_for_write))
 		return -EACCES;
 
+	req = nvme_alloc_user_request(q, cmd.opcode, 0, 0);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	c = nvme_req(req)->cmd;
+	memset(c, 0, sizeof(*c));
+	c->common.opcode = cmd.opcode;
+
+	/* passthru commands should let the driver set the SGL flags */
+	c->common.flags = cmd.flags & ~NVME_CMD_SGL_ALL;
+	c->common.nsid = cpu_to_le32(cmd.nsid);
+	c->common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+	c->common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+	c->common.cdw10 = cpu_to_le32(cmd.cdw10);
+	c->common.cdw11 = cpu_to_le32(cmd.cdw11);
+	c->common.cdw12 = cpu_to_le32(cmd.cdw12);
+	c->common.cdw13 = cpu_to_le32(cmd.cdw13);
+	c->common.cdw14 = cpu_to_le32(cmd.cdw14);
+	c->common.cdw15 = cpu_to_le32(cmd.cdw15);
+
 	if (cmd.timeout_ms)
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
-	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
-			cmd.metadata_len, &result, timeout, 0);
+	status = nvme_submit_user_cmd(q, req, cmd.addr, cmd.data_len,
+			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
+			&result, timeout, 0);
 
 	if (status >= 0) {
 		if (put_user(result, &ucmd->result))
@@ -335,9 +346,11 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
 		bool open_for_write)
 {
+	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
 	struct nvme_passthru_cmd64 cmd;
-	struct nvme_command c;
+	struct nvme_command *c;
 	unsigned timeout = 0;
+	struct request *req;
 	int status;
 
 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
@@ -347,26 +360,32 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
 		return -EINVAL;
 
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = cmd.opcode;
-	c.common.flags = cmd.flags;
-	c.common.nsid = cpu_to_le32(cmd.nsid);
-	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
-	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
-	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
-	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
-	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
-	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
-	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
-	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
-
-	if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
+	if (!nvme_cmd_allowed(ns, cmd.opcode, cmd.cdw10, flags, open_for_write))
 		return -EACCES;
 
+	req = nvme_alloc_user_request(q, cmd.opcode, 0, 0);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	c = nvme_req(req)->cmd;
+	memset(c, 0, sizeof(*c));
+	c->common.opcode = cmd.opcode;
+	/* passthru commands should let the driver set the SGL flags */
+	c->common.flags = cmd.flags & ~NVME_CMD_SGL_ALL;
+	c->common.nsid = cpu_to_le32(cmd.nsid);
+	c->common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+	c->common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+	c->common.cdw10 = cpu_to_le32(cmd.cdw10);
+	c->common.cdw11 = cpu_to_le32(cmd.cdw11);
+	c->common.cdw12 = cpu_to_le32(cmd.cdw12);
+	c->common.cdw13 = cpu_to_le32(cmd.cdw13);
+	c->common.cdw14 = cpu_to_le32(cmd.cdw14);
+	c->common.cdw15 = cpu_to_le32(cmd.cdw15);
+
 	if (cmd.timeout_ms)
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
-	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+	status = nvme_submit_user_cmd(q, req,
 			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
 			cmd.metadata_len, &cmd.result, timeout, flags);
 
@@ -454,7 +473,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
 	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
 	struct nvme_uring_data d;
-	struct nvme_command c;
+	struct nvme_command *c;
 	struct iov_iter iter;
 	struct iov_iter *map_iter = NULL;
 	struct request *req;
@@ -462,43 +481,29 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	blk_mq_req_flags_t blk_flags = 0;
 	int ret;
 
-	c.common.opcode = READ_ONCE(cmd->opcode);
-	c.common.flags = READ_ONCE(cmd->flags);
-	if (c.common.flags)
+	if (READ_ONCE(cmd->flags))
 		return -EINVAL;
 
-	c.common.command_id = 0;
-	c.common.nsid = cpu_to_le32(cmd->nsid);
-	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
+	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd->nsid))
 		return -EINVAL;
 
-	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
-	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
-	c.common.metadata = 0;
-	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
-	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
-	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
-	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
-	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
-	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
-	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
-
-	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
+	if (!nvme_cmd_allowed(ns, READ_ONCE(cmd->opcode), READ_ONCE(cmd->cdw10),
+			      0, ioucmd->file->f_mode & FMODE_WRITE))
 		return -EACCES;
 
 	d.metadata = READ_ONCE(cmd->metadata);
 	d.addr = READ_ONCE(cmd->addr);
 	d.data_len = READ_ONCE(cmd->data_len);
 	d.metadata_len = READ_ONCE(cmd->metadata_len);
 	d.timeout_ms = READ_ONCE(cmd->timeout_ms);
 
 	if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
 		/* fixedbufs is only for non-vectored io */
 		if (vec)
 			return -EINVAL;
 
 		ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
-			nvme_is_write(&c) ? WRITE : READ, &iter, ioucmd,
+			(cmd->opcode & 1) ? WRITE : READ, &iter, ioucmd,
 			issue_flags);
 		if (ret < 0)
 			return ret;
@@ -506,16 +511,34 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		map_iter = &iter;
 	}
 
 	if (issue_flags & IO_URING_F_NONBLOCK) {
 		rq_flags |= REQ_NOWAIT;
 		blk_flags = BLK_MQ_REQ_NOWAIT;
 	}
 	if (issue_flags & IO_URING_F_IOPOLL)
 		rq_flags |= REQ_POLLED;
 
-	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
+	req = nvme_alloc_user_request(q, READ_ONCE(cmd->opcode), rq_flags,
+				      blk_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+
+	c = nvme_req(req)->cmd;
+	c->common.opcode = READ_ONCE(cmd->opcode);
+	/* flags was validated as zero above, so the SGL bits are clear */
+	c->common.flags = 0;
+	c->common.command_id = 0;
+	c->common.nsid = cpu_to_le32(cmd->nsid);
+	c->common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
+	c->common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
+	c->common.metadata = 0;
+	c->common.dptr.prp1 = c->common.dptr.prp2 = 0;
+	c->common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
+	c->common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
+	c->common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
+	c->common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
+	c->common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
+	c->common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
 	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
 
 	if (d.data_len) {
-- 
2.48.1




* [PATCH 2/2] nvme/ioctl: use unsigned int instead of bare unsigned
  2025-06-06 20:16 [PATCH 0/2] nvme/ioctl: improve user request allocation Tokunori Ikegami
  2025-06-06 20:16 ` [PATCH 1/2] nvme/ioctl: split user request allocation from user command submission Tokunori Ikegami
@ 2025-06-06 20:16 ` Tokunori Ikegami
  1 sibling, 0 replies; 3+ messages in thread
From: Tokunori Ikegami @ 2025-06-06 20:16 UTC (permalink / raw)
  To: linux-nvme; +Cc: Tokunori Ikegami

Spell bare 'unsigned' as 'unsigned int' to address the checkpatch.pl
warning:

  WARNING: Prefer 'unsigned int' to bare use of 'unsigned'

No functional change.
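
The warnings can be reproduced in checkpatch's file mode from a kernel
tree, for example:

	./scripts/checkpatch.pl -f drivers/nvme/host/ioctl.c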

Signed-off-by: Tokunori Ikegami <ikegami.t@gmail.com>
---
 drivers/nvme/host/ioctl.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 02edb0900413..880a40cb78d7 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -112,8 +112,9 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 }
 
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
-		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		struct iov_iter *iter, unsigned int flags)
+		unsigned int bufflen, void __user *meta_buffer,
+		unsigned int meta_len, struct iov_iter *iter,
+		unsigned int flags)
 {
 	struct request_queue *q = req->q;
 	struct nvme_ns *ns = q->queuedata;
@@ -164,9 +165,9 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-		struct request *req, u64 ubuffer, unsigned bufflen,
-		void __user *meta_buffer, unsigned meta_len,
-		u64 *result, unsigned timeout, unsigned int flags)
+		struct request *req, u64 ubuffer, unsigned int bufflen,
+		void __user *meta_buffer, unsigned int meta_len,
+		u64 *result, unsigned int timeout, unsigned int flags)
 {
 	struct nvme_command *cmd = nvme_req(req)->cmd;
 	struct nvme_ns *ns = q->queuedata;
@@ -207,7 +208,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
 	struct nvme_user_io io;
 	struct nvme_command *c;
-	unsigned length, meta_len;
+	unsigned int length, meta_len;
 	void __user *metadata;
 	struct request *req;
 
@@ -292,7 +293,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
 	struct nvme_passthru_cmd cmd;
 	struct nvme_command *c;
-	unsigned timeout = 0;
+	unsigned int timeout = 0;
 	struct request *req;
 	u64 result;
 	int status;
@@ -349,7 +350,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
 	struct nvme_passthru_cmd64 cmd;
 	struct nvme_command *c;
-	unsigned timeout = 0;
+	unsigned int timeout = 0;
 	struct request *req;
 	int status;
 
@@ -423,7 +424,7 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
 }
 
 static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
-			       unsigned issue_flags)
+			       unsigned int issue_flags)
 {
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 
-- 
2.48.1



