* [PATCH 1/3] nvme/ioctl: don't warn on vectorized uring_cmd with fixed buffer
2025-03-21 20:36 [PATCH 0/3] nvme_map_user_request() cleanup Caleb Sander Mateos
@ 2025-03-21 20:36 ` Caleb Sander Mateos
2025-03-21 20:36 ` [PATCH 2/3] nvme/ioctl: don't call blk_mq_free_request() in nvme_map_user_request() Caleb Sander Mateos
2025-03-21 20:36 ` [PATCH 3/3] nvme/ioctl: move fixed buffer lookup to nvme_uring_cmd_io() Caleb Sander Mateos
2 siblings, 0 replies; 4+ messages in thread
From: Caleb Sander Mateos @ 2025-03-21 20:36 UTC (permalink / raw)
To: Keith Busch, Jens Axboe, Christoph Hellwig, Sagi Grimberg
Cc: linux-nvme, linux-kernel, Caleb Sander Mateos
The vectorized io_uring NVMe passthru opcodes don't yet support fixed
buffers. But since userspace can trigger this condition based on the
io_uring SQE parameters, it shouldn't cause a kernel warning.
Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Fixes: 23fd22e55b76 ("nvme: wire up fixed buffer support for nvme passthrough")
---
drivers/nvme/host/ioctl.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 41907b4e1869..6c27d72e8cee 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -139,11 +139,11 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
struct iov_iter iter;
/* fixedbufs is only for non-vectored io */
- if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
+ if (flags & NVME_IOCTL_VEC) {
ret = -EINVAL;
goto out;
}
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
rq_data_dir(req), &iter, ioucmd);
--
2.45.2
^ permalink raw reply related [flat|nested] 4+ messages in thread

* [PATCH 2/3] nvme/ioctl: don't call blk_mq_free_request() in nvme_map_user_request()
2025-03-21 20:36 [PATCH 0/3] nvme_map_user_request() cleanup Caleb Sander Mateos
2025-03-21 20:36 ` [PATCH 1/3] nvme/ioctl: don't warn on vectorized uring_cmd with fixed buffer Caleb Sander Mateos
@ 2025-03-21 20:36 ` Caleb Sander Mateos
2025-03-21 20:36 ` [PATCH 3/3] nvme/ioctl: move fixed buffer lookup to nvme_uring_cmd_io() Caleb Sander Mateos
2 siblings, 0 replies; 4+ messages in thread
From: Caleb Sander Mateos @ 2025-03-21 20:36 UTC (permalink / raw)
To: Keith Busch, Jens Axboe, Christoph Hellwig, Sagi Grimberg
Cc: linux-nvme, linux-kernel, Caleb Sander Mateos
The callers of nvme_map_user_request() (nvme_submit_user_cmd() and
nvme_uring_cmd_io()) allocate the request, so have them free it if
nvme_map_user_request() fails.
Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
---
drivers/nvme/host/ioctl.c | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 6c27d72e8cee..843371e6e1e2 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -126,40 +126,38 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
int ret;
if (!nvme_ctrl_sgl_supported(ctrl))
dev_warn_once(ctrl->device, "using unchecked data buffer\n");
if (has_metadata) {
- if (!supports_metadata) {
- ret = -EINVAL;
- goto out;
- }
+ if (!supports_metadata)
+ return -EINVAL;
+
if (!nvme_ctrl_meta_sgl_supported(ctrl))
dev_warn_once(ctrl->device,
"using unchecked metadata buffer\n");
}
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
struct iov_iter iter;
/* fixedbufs is only for non-vectored io */
- if (flags & NVME_IOCTL_VEC) {
- ret = -EINVAL;
- goto out;
- }
+ if (flags & NVME_IOCTL_VEC)
+ return -EINVAL;
+
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
rq_data_dir(req), &iter, ioucmd);
if (ret < 0)
- goto out;
+ return ret;
ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
} else {
ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
0, rq_data_dir(req));
}
if (ret)
- goto out;
+ return ret;
bio = req->bio;
if (bdev)
bio_set_dev(bio, bdev);
@@ -172,12 +170,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
return ret;
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
-out:
- blk_mq_free_request(req);
return ret;
}
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
@@ -198,11 +194,11 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
meta_len, NULL, flags);
if (ret)
- return ret;
+ goto out_free_req;
}
bio = req->bio;
ctrl = nvme_req(req)->ctrl;
@@ -210,15 +206,16 @@ static int nvme_submit_user_cmd(struct request_queue *q,
ret = nvme_execute_rq(req, false);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (bio)
blk_rq_unmap_user(bio);
- blk_mq_free_request(req);
if (effects)
nvme_passthru_end(ctrl, ns, effects, cmd, ret);
+out_free_req:
+ blk_mq_free_request(req);
return ret;
}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
@@ -518,20 +515,24 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (d.addr && d.data_len) {
ret = nvme_map_user_request(req, d.addr,
d.data_len, nvme_to_user_ptr(d.metadata),
d.metadata_len, ioucmd, vec);
if (ret)
- return ret;
+ goto out_free_req;
}
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
pdu->req = req;
req->end_io_data = ioucmd;
req->end_io = nvme_uring_cmd_end_io;
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
+
+out_free_req:
+ blk_mq_free_request(req);
+ return ret;
}
static bool is_ctrl_ioctl(unsigned int cmd)
{
if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
--
2.45.2
^ permalink raw reply related [flat|nested] 4+ messages in thread

* [PATCH 3/3] nvme/ioctl: move fixed buffer lookup to nvme_uring_cmd_io()
2025-03-21 20:36 [PATCH 0/3] nvme_map_user_request() cleanup Caleb Sander Mateos
2025-03-21 20:36 ` [PATCH 1/3] nvme/ioctl: don't warn on vectorized uring_cmd with fixed buffer Caleb Sander Mateos
2025-03-21 20:36 ` [PATCH 2/3] nvme/ioctl: don't call blk_mq_free_request() in nvme_map_user_request() Caleb Sander Mateos
@ 2025-03-21 20:36 ` Caleb Sander Mateos
2 siblings, 0 replies; 4+ messages in thread
From: Caleb Sander Mateos @ 2025-03-21 20:36 UTC (permalink / raw)
To: Keith Busch, Jens Axboe, Christoph Hellwig, Sagi Grimberg
Cc: linux-nvme, linux-kernel, Caleb Sander Mateos
nvme_map_user_request() is called from both nvme_submit_user_cmd() and
nvme_uring_cmd_io(). But the ioucmd branch is only applicable to
nvme_uring_cmd_io(). Move it to nvme_uring_cmd_io() and just pass the
resulting iov_iter to nvme_map_user_request().
Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
---
drivers/nvme/host/ioctl.c | 40 +++++++++++++++++++++------------------
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 843371e6e1e2..feac2c2b33e1 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -112,11 +112,11 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
return req;
}
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- struct io_uring_cmd *ioucmd, unsigned int flags)
+ struct iov_iter *iter, unsigned int flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
@@ -134,27 +134,16 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (!nvme_ctrl_meta_sgl_supported(ctrl))
dev_warn_once(ctrl->device,
"using unchecked metadata buffer\n");
}
- if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
- struct iov_iter iter;
-
- /* fixedbufs is only for non-vectored io */
- if (flags & NVME_IOCTL_VEC)
- return -EINVAL;
-
- ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
- rq_data_dir(req), &iter, ioucmd);
- if (ret < 0)
- return ret;
- ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
- } else {
+ if (iter)
+ ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
+ else
ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
0, rq_data_dir(req));
- }
if (ret)
return ret;
bio = req->bio;
@@ -511,13 +500,28 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (IS_ERR(req))
return PTR_ERR(req);
req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
if (d.addr && d.data_len) {
- ret = nvme_map_user_request(req, d.addr,
- d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, ioucmd, vec);
+ struct iov_iter iter;
+ struct iov_iter *map_iter = NULL;
+
+ if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+ /* fixedbufs is only for non-vectored io */
+ if (vec) {
+ ret = -EINVAL;
+ goto out_free_req;
+ }
+
+ ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
+ rq_data_dir(req), &iter, ioucmd);
+ if (ret < 0)
+ goto out_free_req;
+
+ map_iter = &iter;
+ }
+ ret = nvme_map_user_request(req, d.addr, d.data_len,
+ nvme_to_user_ptr(d.metadata), d.metadata_len,
+ map_iter, vec);
if (ret)
goto out_free_req;
}
/* to free bio on completion, as req->bio will be null at that time */
--
2.45.2
^ permalink raw reply related [flat|nested] 4+ messages in thread