From: Pavel Begunkov <asml.silence@gmail.com>
To: linux-block@vger.kernel.org, io-uring@vger.kernel.org
Cc: "Vishal Verma" <vishal1.verma@intel.com>,
tushar.gohad@intel.com, "Keith Busch" <kbusch@kernel.org>,
"Jens Axboe" <axboe@kernel.dk>, "Christoph Hellwig" <hch@lst.de>,
"Sagi Grimberg" <sagi@grimberg.me>,
"Alexander Viro" <viro@zeniv.linux.org.uk>,
"Christian Brauner" <brauner@kernel.org>,
"Andrew Morton" <akpm@linux-foundation.org>,
"Sumit Semwal" <sumit.semwal@linaro.org>,
"Christian König" <christian.koenig@amd.com>,
"Pavel Begunkov" <asml.silence@gmail.com>,
linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org,
linux-fsdevel@vger.kernel.org, linux-media@vger.kernel.org,
dri-devel@lists.freedesktop.org, linaro-mm-sig@lists.linaro.org
Subject: [RFC v2 07/11] nvme-pci: implement dma_token backed requests
Date: Sun, 23 Nov 2025 22:51:27 +0000
Message-ID: <a86bbe2d8d105ed2c342749cd46ece2d1c537821.1763725388.git.asml.silence@gmail.com>
In-Reply-To: <cover.1763725387.git.asml.silence@gmail.com>
Enable BIO_DMA_TOKEN backed requests. They require special handling to
set up the nvme request from the mapping prepared in advance, to tear
it down and to sync the buffers.
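
For reference, the core of the premapped path is translating the bio's
byte offset into an index into the pre-built list of per
NVME_CTRL_PAGE_SIZE DMA addresses. A minimal sketch of that indexing
(helper name is illustrative only, assuming dma_list is the dma_addr_t
array stored in the token's map->private):

	/*
	 * Sketch only: find the first DMA-list entry covered by the
	 * request and the in-page offset used for PRP1. Mirrors the
	 * indexing done in nvme_sync_dma()/nvme_dma_premapped() below.
	 */
	static dma_addr_t premap_first_prp(const dma_addr_t *dma_list,
					   unsigned int bvec_done,
					   int *map_idx)
	{
		unsigned int in_page = bvec_done & (NVME_CTRL_PAGE_SIZE - 1);

		*map_idx = bvec_done / NVME_CTRL_PAGE_SIZE;
		return dma_list[(*map_idx)++] + in_page;
	}

Entries past the first are controller-page aligned, so every later PRP
entry is simply the next element of the list.
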
Suggested-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
drivers/nvme/host/pci.c | 126 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 124 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 63e03c3dc044..ac377416b088 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -797,6 +797,123 @@ static void nvme_free_descriptors(struct request *req)
}
}
+static void nvme_sync_dma(struct nvme_dev *nvme_dev, struct request *req,
+ enum dma_data_direction dir)
+{
+ struct blk_mq_dma_map *map = req->dma_map;
+ int length = blk_rq_payload_bytes(req);
+ bool for_cpu = dir == DMA_FROM_DEVICE;
+ struct device *dev = nvme_dev->dev;
+ dma_addr_t *dma_list = map->private;
+ struct bio *bio = req->bio;
+ int offset, map_idx;
+
+ offset = bio->bi_iter.bi_bvec_done;
+ map_idx = offset / NVME_CTRL_PAGE_SIZE;
+ length += offset & (NVME_CTRL_PAGE_SIZE - 1);
+
+ while (length > 0) {
+ u64 dma_addr = dma_list[map_idx++];
+
+ if (for_cpu)
+ __dma_sync_single_for_cpu(dev, dma_addr,
+ NVME_CTRL_PAGE_SIZE, dir);
+ else
+ __dma_sync_single_for_device(dev, dma_addr,
+ NVME_CTRL_PAGE_SIZE, dir);
+ length -= NVME_CTRL_PAGE_SIZE;
+ }
+}
+
+static void nvme_unmap_premapped_data(struct nvme_dev *dev,
+ struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if (rq_data_dir(req) == READ)
+ nvme_sync_dma(dev, req, DMA_FROM_DEVICE);
+ if (!(iod->flags & IOD_SINGLE_SEGMENT))
+ nvme_free_descriptors(req);
+}
+
+static blk_status_t nvme_dma_premapped(struct request *req,
+ struct nvme_queue *nvmeq)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ int length = blk_rq_payload_bytes(req);
+ struct blk_mq_dma_map *map = req->dma_map;
+ u64 dma_addr, prp1_dma, prp2_dma;
+ struct bio *bio = req->bio;
+ dma_addr_t *dma_list;
+ dma_addr_t prp_dma;
+ __le64 *prp_list;
+ int i, map_idx;
+ int offset;
+
+ dma_list = map->private;
+
+ if (rq_data_dir(req) == WRITE)
+ nvme_sync_dma(nvmeq->dev, req, DMA_TO_DEVICE);
+
+ offset = bio->bi_iter.bi_bvec_done;
+ map_idx = offset / NVME_CTRL_PAGE_SIZE;
+ offset &= (NVME_CTRL_PAGE_SIZE - 1);
+
+ prp1_dma = dma_list[map_idx++] + offset;
+
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (length <= 0) {
+ prp2_dma = 0;
+ goto done;
+ }
+
+ if (length <= NVME_CTRL_PAGE_SIZE) {
+ prp2_dma = dma_list[map_idx];
+ goto done;
+ }
+
+ if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <=
+ NVME_SMALL_POOL_SIZE / sizeof(__le64))
+ iod->flags |= IOD_SMALL_DESCRIPTOR;
+
+ prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
+ &prp_dma);
+ if (!prp_list)
+ return BLK_STS_RESOURCE;
+
+ iod->descriptors[iod->nr_descriptors++] = prp_list;
+ prp2_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+ __le64 *old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large,
+ GFP_ATOMIC, &prp_dma);
+ if (!prp_list)
+ goto free_prps;
+ iod->descriptors[iod->nr_descriptors++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+
+ dma_addr = dma_list[map_idx++];
+ prp_list[i++] = cpu_to_le64(dma_addr);
+
+ length -= NVME_CTRL_PAGE_SIZE;
+ if (length <= 0)
+ break;
+ }
+done:
+ iod->cmd.common.dptr.prp1 = cpu_to_le64(prp1_dma);
+ iod->cmd.common.dptr.prp2 = cpu_to_le64(prp2_dma);
+ return BLK_STS_OK;
+free_prps:
+ nvme_free_descriptors(req);
+ return BLK_STS_RESOURCE;
+}
+
static void nvme_free_prps(struct request *req, unsigned int attrs)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -875,6 +992,11 @@ static void nvme_unmap_data(struct request *req)
struct device *dma_dev = nvmeq->dev->dev;
unsigned int attrs = 0;
+ if (req->bio && bio_flagged(req->bio, BIO_DMA_TOKEN)) {
+ nvme_unmap_premapped_data(nvmeq->dev, req);
+ return;
+ }
+
if (iod->flags & IOD_SINGLE_SEGMENT) {
static_assert(offsetof(union nvme_data_ptr, prp1) ==
offsetof(union nvme_data_ptr, sgl.addr));
@@ -1154,8 +1276,8 @@ static blk_status_t nvme_map_data(struct request *req)
struct blk_dma_iter iter;
blk_status_t ret;
- if (req->bio && bio_flagged(req->bio, BIO_DMA_TOKEN))
- return BLK_STS_RESOURCE;
+ if (req->dma_map)
+ return nvme_dma_premapped(req, nvmeq);
/*
* Try to skip the DMA iterator for single segment requests, as that
--
2.52.0