From: Keith Busch <kbusch@meta.com>
To: <linux-block@vger.kernel.org>, <linux-nvme@lists.infradead.org>
Cc: Keith Busch <kbusch@kernel.org>
Subject: [PATCH 5/5] nvmet: implement copy support for bdev backed target
Date: Wed, 21 May 2025 15:31:07 -0700 [thread overview]
Message-ID: <20250521223107.709131-6-kbusch@meta.com> (raw)
In-Reply-To: <20250521223107.709131-1-kbusch@meta.com>
From: Keith Busch <kbusch@kernel.org>
The NVMe block device target type does not impose any particular limits on
copy commands, so the copy settings (MSSRL, MCL, MSRC) are all reported as the protocol's maximum values.
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
drivers/nvme/target/io-cmd-bdev.c | 52 +++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 83be0657e6df4..d90dedcd2352f 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -46,6 +46,11 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+ /* Copy offload support */
+ id->mssrl = cpu_to_le16(U16_MAX);
+ id->mcl = cpu_to_le32(U32_MAX);
+ id->msrc = U8_MAX;
}
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -412,6 +417,50 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
}
}
+/*
+ * Execute an NVMe Copy command against the block device backing this
+ * namespace: read each source range descriptor out of the request SGL,
+ * translate the (SLBA, NLB) pairs into device sectors, and submit the
+ * whole set through the block layer copy-offload API.
+ *
+ * NOTE(review): other handlers validate the data length before use
+ * (e.g. nvmet_check_transfer_len()); confirm whether
+ * nr * sizeof(struct nvme_copy_range) should be checked here too.
+ */
+static void nvmet_bdev_execute_copy(struct nvmet_req *req)
+{
+	struct bio_vec *bv, fast_bv[UIO_FASTIOV];
+	struct nvme_copy_range range;
+	u64 dst_sector, slba;
+	u16 status, nlb, nr;
+	int ret, i;
+
+	/* nr_range is 0's based: the command carries nr_range + 1 descriptors */
+	nr = req->cmd->copy.nr_range + 1;
+	if (nr <= UIO_FASTIOV) {
+		bv = fast_bv;
+	} else {
+		bv = kmalloc_array(nr, sizeof(*bv), GFP_KERNEL);
+		if (!bv) {
+			status = NVME_SC_INTERNAL;
+			goto done;
+		}
+	}
+
+	for (i = 0; i < nr; i++) {
+		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+					     sizeof(range));
+		if (status)
+			goto done;
+
+		/* NLB is also 0's based */
+		slba = le64_to_cpu(range.slba);
+		nlb = le16_to_cpu(range.nlb) + 1;
+		bv[i].bv_sector = nvmet_lba_to_sect(req->ns, slba);
+		bv[i].bv_sectors = nvmet_lba_to_sect(req->ns, nlb);
+	}
+
+	/*
+	 * sdlba is a wire-format (little endian) field: convert it before
+	 * translating to a sector offset, as done for range.slba above.
+	 */
+	dst_sector = nvmet_lba_to_sect(req->ns,
+				       le64_to_cpu(req->cmd->copy.sdlba));
+	ret = blkdev_copy_range(req->ns->bdev, dst_sector, bv, nr, GFP_KERNEL);
+	if (ret) {
+		/*
+		 * error_slba holds the failing LBA in CPU byte order; the
+		 * core converts it when building the error log entry.
+		 */
+		req->error_slba = le64_to_cpu(req->cmd->copy.sdlba);
+		status = errno_to_nvme_status(req, ret);
+	} else {
+		status = NVME_SC_SUCCESS;
+	}
+done:
+	nvmet_req_complete(req, status);
+	/* kfree(NULL) is a no-op, so the kmalloc_array-failure path is safe */
+	if (bv != fast_bv)
+		kfree(bv);
+}
+
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
@@ -474,6 +523,9 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_write_zeroes:
req->execute = nvmet_bdev_execute_write_zeroes;
return 0;
+ case nvme_cmd_copy:
+ req->execute = nvmet_bdev_execute_copy;
+ return 0;
default:
return nvmet_report_invalid_opcode(req);
}
--
2.47.1
next prev parent reply other threads:[~2025-05-21 22:31 UTC|newest]
Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-21 22:31 [PATCH 0/5] block: another block copy offload Keith Busch
2025-05-21 22:31 ` [PATCH 1/5] block: new sector copy api Keith Busch
2025-05-22 10:02 ` Hannes Reinecke
2025-05-22 16:43 ` Keith Busch
2025-05-22 19:22 ` Bart Van Assche
2025-05-22 20:04 ` Keith Busch
2025-05-23 12:45 ` Christoph Hellwig
2025-05-23 17:02 ` Keith Busch
2025-05-26 5:18 ` Christoph Hellwig
2025-05-27 17:45 ` Keith Busch
2025-05-28 7:46 ` Christoph Hellwig
2025-05-28 22:41 ` Keith Busch
2025-06-02 4:58 ` Christoph Hellwig
2025-05-21 22:31 ` [PATCH 2/5] block: add support for copy offload Keith Busch
2025-05-22 13:49 ` Hannes Reinecke
2025-05-23 12:46 ` Christoph Hellwig
2025-05-23 13:26 ` Keith Busch
2025-05-23 13:37 ` Christoph Hellwig
2025-05-23 13:48 ` Keith Busch
2025-05-26 5:22 ` Christoph Hellwig
2025-05-27 21:33 ` Keith Busch
2025-05-28 7:47 ` Christoph Hellwig
2025-05-21 22:31 ` [PATCH 3/5] nvme: " Keith Busch
2025-05-22 0:47 ` Caleb Sander Mateos
2025-05-22 0:51 ` Caleb Sander Mateos
2025-05-22 3:23 ` Keith Busch
2025-05-22 3:41 ` Caleb Sander Mateos
2025-05-22 4:29 ` Keith Busch
2025-05-22 14:16 ` Caleb Sander Mateos
2025-05-23 12:49 ` Christoph Hellwig
2025-05-23 12:48 ` Christoph Hellwig
2025-05-22 13:54 ` Hannes Reinecke
2025-05-23 12:50 ` Christoph Hellwig
2025-05-23 14:22 ` Caleb Sander Mateos
2025-06-09 9:29 ` Niklas Cassel
2025-05-21 22:31 ` [PATCH 4/5] block: add support for vectored copies Keith Busch
2025-05-22 13:58 ` Hannes Reinecke
2025-05-22 16:36 ` Keith Busch
2025-05-21 22:31 ` Keith Busch [this message]
2025-05-22 13:59 ` [PATCH 5/5] nvmet: implement copy support for bdev backed target Hannes Reinecke
2025-05-23 13:18 ` Christoph Hellwig
2025-05-23 14:00 ` Keith Busch
2025-05-23 14:02 ` Christoph Hellwig
2025-05-22 15:52 ` [PATCH 0/5] block: another block copy offload Bart Van Assche
2025-05-23 12:53 ` Christoph Hellwig
2025-07-03 14:47 ` Niklas Cassel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250521223107.709131-6-kbusch@meta.com \
--to=kbusch@meta.com \
--cc=kbusch@kernel.org \
--cc=linux-block@vger.kernel.org \
--cc=linux-nvme@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox