* [PATCH v3 for-4.18 2/2] nvmet: check fileio lba range access boundaries
@ 2018-07-11 13:13 Sagi Grimberg
2018-07-12 3:05 ` Chaitanya Kulkarni
2018-07-12 7:25 ` Christoph Hellwig
From: Sagi Grimberg @ 2018-07-11 13:13 UTC
Fail out-of-bounds accesses with a proper status code.
Fixes: d5eff33ee6f8 ("nvmet: add simple file backed ns support")
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
Changes from v2:
- fix rebase accident: use offset + len after assigning them
Changes from v1:
- move the boundary check before resource allocation so we don't need
to worry about freeing resources on failure (nvmet_file_execute_rw).
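
For reference, a minimal standalone sketch of the check this patch adds in
all three paths (read/write, discard, write-zeroes): the starting LBA is
shifted by the namespace blksize_shift to get a byte offset, and the access
is rejected with NVME_SC_LBA_RANGE | NVME_SC_DNR when it would extend past
the backing file size. Only the field names and the status codes mirror the
diff below; the toy_ns struct, the check_lba_range() helper and main() are
hypothetical scaffolding for illustration, not nvmet code.

	#include <stdint.h>
	#include <stdio.h>

	/* stub status codes; numeric values follow the kernel's NVMe definitions */
	#define NVME_SC_LBA_RANGE 0x0080
	#define NVME_SC_DNR       0x4000

	struct toy_ns {
		uint8_t  blksize_shift;	/* e.g. 9 for 512-byte blocks */
		uint64_t size;		/* backing file size in bytes */
	};

	/*
	 * Return 0 if [slba, slba + nr_bytes) lies inside the namespace,
	 * otherwise "LBA Out of Range" with Do Not Retry set.
	 */
	static uint16_t check_lba_range(const struct toy_ns *ns,
					uint64_t slba, uint64_t nr_bytes)
	{
		uint64_t pos = slba << ns->blksize_shift;

		if (pos + nr_bytes > ns->size)
			return NVME_SC_LBA_RANGE | NVME_SC_DNR;
		return 0;
	}

	int main(void)
	{
		struct toy_ns ns = { .blksize_shift = 9, .size = 1 << 20 }; /* 1 MiB */

		/* in range: 2048 blocks of 512 bytes exactly fill the namespace */
		printf("status=0x%x\n", check_lba_range(&ns, 0, 2048 * 512));
		/* out of range: starts at the last block but spans two blocks */
		printf("status=0x%x\n", check_lba_range(&ns, 2047, 2 * 512));
		return 0;
	}

Compiled standalone, the first call reports status 0x0 and the second
0x4080, i.e. the LBA-range error with the DNR bit set.
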
drivers/nvme/target/io-cmd-file.c | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index dad8d44bf90e..c2d0d08b59c8 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -145,6 +145,12 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
return;
}
+ pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
+ if (unlikely(pos + req->data_len > req->ns->size)) {
+ nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ return;
+ }
+
if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
GFP_KERNEL);
@@ -160,8 +166,6 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
is_sync = true;
}
- pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
-
memset(&req->f.iocb, 0, sizeof(struct kiocb));
for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
@@ -236,8 +240,14 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
sizeof(range));
if (ret)
break;
+
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
+ if (offset + len > req->ns->size) {
+ ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ break;
+ }
+
if (vfs_fallocate(req->ns->file, mode, offset, len)) {
ret = NVME_SC_INTERNAL | NVME_SC_DNR;
break;
@@ -283,6 +293,11 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
req->ns->blksize_shift);
+ if (unlikely(offset + len > req->ns->size)) {
+ nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ return;
+ }
+
ret = vfs_fallocate(req->ns->file, mode, offset, len);
nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}
--
2.14.1
* [PATCH v3 for-4.18 2/2] nvmet: check fileio lba range access boundaries
2018-07-11 13:13 [PATCH v3 for-4.18 2/2] nvmet: check fileio lba range access boundaries Sagi Grimberg
@ 2018-07-12 3:05 ` Chaitanya Kulkarni
2018-07-12 7:25 ` Christoph Hellwig
From: Chaitanya Kulkarni @ 2018-07-12 3:05 UTC
Looks good to me.
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
From: Sagi Grimberg <sagi@grimberg.me>
Sent: Wednesday, July 11, 2018 6:13 AM
To: linux-nvme@lists.infradead.org
Cc: Christoph Hellwig; Chaitanya Kulkarni
Subject: [PATCH v3 for-4.18 2/2] nvmet: check fileio lba range access boundaries
* [PATCH v3 for-4.18 2/2] nvmet: check fileio lba range access boundaries
2018-07-11 13:13 [PATCH v3 for-4.18 2/2] nvmet: check fileio lba range access boundaries Sagi Grimberg
2018-07-12 3:05 ` Chaitanya Kulkarni
@ 2018-07-12 7:25 ` Christoph Hellwig
From: Christoph Hellwig @ 2018-07-12 7:25 UTC
Thanks, applied.