From: Keith Busch <kbusch@meta.com>
To: <linux-block@vger.kernel.org>, <linux-nvme@lists.infradead.org>
Cc: <hch@lst.de>, <axboe@kernel.dk>, <joshi.k@samsung.com>,
	Keith Busch <kbusch@kernel.org>
Subject: [PATCHv5 2/8] blk-mq-dma: provide the bio_vec list being iterated
Date: Fri, 8 Aug 2025 08:58:20 -0700
Message-ID: <20250808155826.1864803-3-kbusch@meta.com>
In-Reply-To: <20250808155826.1864803-1-kbusch@meta.com>

From: Keith Busch <kbusch@kernel.org>

This will make it easier to add different sources for the bvec table,
such as the upcoming integrity support, rather than assuming the bio's
bi_io_vec is always used. It also brings iterating "special" payloads
more in line with iterating normal payloads.
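
As a rough illustration (not part of this patch, and assuming the
bio_integrity_payload layout available under CONFIG_BLK_DEV_INTEGRITY),
a later user could seed the iterator from a bvec table other than the
data payload, for example the integrity payload this series goes on to
map:

	/* sketch only: seed a blk_map_iter from the integrity payload */
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_map_iter iter = {
		.bio  = bio,
		.bvec = bip->bip_vec,
		.iter = bip->bip_iter,
	};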

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 block/blk-mq-dma.c         | 56 ++++++++++++++++++++++----------------
 include/linux/blk-mq-dma.h |  1 +
 2 files changed, 33 insertions(+), 24 deletions(-)

diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index ff4c9a7e19d83..4a013703bcba5 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -10,23 +10,14 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter)
 	unsigned int max_size;
 	struct bio_vec bv;
 
-	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-		if (!iter->bio)
-			return false;
-		iter->paddr = bvec_phys(&req->special_vec);
-		iter->len = req->special_vec.bv_len;
-		iter->bio = NULL;
-		return true;
-	}
-
 	if (!iter->iter.bi_size)
 		return false;
 
-	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+	bv = mp_bvec_iter_bvec(iter->bvec, iter->iter);
 	iter->paddr = bvec_phys(&bv);
 	max_size = get_max_segment_size(&req->q->limits, iter->paddr, UINT_MAX);
 	bv.bv_len = min(bv.bv_len, max_size);
-	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
+	bvec_iter_advance_single(iter->bvec, &iter->iter, bv.bv_len);
 
 	/*
 	 * If we are entirely done with this bi_io_vec entry, check if the next
@@ -37,19 +28,20 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter)
 		struct bio_vec next;
 
 		if (!iter->iter.bi_size) {
-			if (!iter->bio->bi_next)
+			if (!iter->bio || !iter->bio->bi_next)
 				break;
 			iter->bio = iter->bio->bi_next;
 			iter->iter = iter->bio->bi_iter;
+			iter->bvec = iter->bio->bi_io_vec;
 		}
 
-		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+		next = mp_bvec_iter_bvec(iter->bvec, iter->iter);
 		if (bv.bv_len + next.bv_len > max_size ||
 		    !biovec_phys_mergeable(req->q, &bv, &next))
 			break;
 
 		bv.bv_len += next.bv_len;
-		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
+		bvec_iter_advance_single(iter->bvec, &iter->iter, next.bv_len);
 	}
 
 	iter->len = bv.bv_len;
@@ -119,6 +111,30 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
 	return true;
 }
 
+static struct blk_map_iter blk_rq_map_iter(struct request *rq)
+{
+	struct bio *bio = rq->bio;
+
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		return (struct blk_map_iter) {
+			.bvec = &rq->special_vec,
+			.iter = {
+				.bi_size = rq->special_vec.bv_len,
+			}
+		};
+	}
+
+	/* the internal flush request may not have bio attached */
+	if (!bio)
+		return (struct blk_map_iter) {};
+
+	return (struct blk_map_iter) {
+		.bio = bio,
+		.bvec = bio->bi_io_vec,
+		.iter = bio->bi_iter,
+	};
+}
+
 /**
  * blk_rq_dma_map_iter_start - map the first DMA segment for a request
  * @req:	request to map
@@ -146,10 +162,9 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
 {
 	unsigned int total_len = blk_rq_payload_bytes(req);
 
-	iter->iter.bio = req->bio;
-	iter->iter.iter = req->bio->bi_iter;
 	memset(&iter->p2pdma, 0, sizeof(iter->p2pdma));
 	iter->status = BLK_STS_OK;
+	iter->iter = blk_rq_map_iter(req);
 
 	/*
 	 * Grab the first segment ASAP because we'll need it to check for P2P
@@ -237,16 +252,9 @@ blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
 int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
 		    struct scatterlist **last_sg)
 {
-	struct bio *bio = rq->bio;
-	struct blk_map_iter iter = {
-		.bio	= bio,
-	};
+	struct blk_map_iter iter = blk_rq_map_iter(rq);
 	int nsegs = 0;
 
-	/* the internal flush request may not have bio attached */
-	if (bio)
-		iter.iter = bio->bi_iter;
-
 	while (blk_map_iter_next(rq, &iter)) {
 		*last_sg = blk_next_sg(last_sg, sglist);
 		sg_set_page(*last_sg, phys_to_page(iter.paddr), iter.len,
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index 1e5988afdb978..c82f880dee914 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -8,6 +8,7 @@
 struct blk_map_iter {
 	phys_addr_t			paddr;
 	u32				len;
+	struct bio_vec			*bvec;
 	struct bvec_iter		iter;
 	struct bio			*bio;
 };
-- 
2.47.3
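
For context beyond the patch itself: a minimal sketch of the driver-side
mapping loop, assuming the blk_rq_dma_map_iter_start()/_next() interface
declared in include/linux/blk-mq-dma.h (roughly what nvme-pci does in the
later patches of this series). Names and error handling here are
illustrative only:

	/* each successful step yields one DMA segment in iter.addr/iter.len */
	struct dma_iova_state state = { };
	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dma_dev, &state, &iter))
		return iter.status;
	do {
		/* program one segment from iter.addr / iter.len */
	} while (blk_rq_dma_map_iter_next(req, dma_dev, &state, &iter));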


