* [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup
@ 2025-09-02 20:01 Keith Busch
2025-09-02 20:01 ` [PATCHv2 1/2] blk-integrity: enable p2p source and destination Keith Busch
` (2 more replies)
0 siblings, 3 replies; 7+ messages in thread
From: Keith Busch @ 2025-09-02 20:01 UTC (permalink / raw)
To: linux-block, linux-nvme
Cc: hch, axboe, martin.petersen, jgg, leon, Keith Busch
From: Keith Busch <kbusch@kernel.org>
This series moves the p2p dma tracking from the caller to the block
layer, and makes it possible to actually use p2p for metadata payloads.
v1:
https://lore.kernel.org/linux-nvme/20250829142307.3769873-1-kbusch@meta.com/
Changes:
Folded in a fix to patch 1 that was inadvertently included in patch 2.
Added review.
Keith Busch (2):
blk-integrity: enable p2p source and destination
blk-mq-dma: bring back p2p request flags
block/bio-integrity.c | 21 +++++++++++++++++----
block/blk-mq-dma.c | 4 ++++
drivers/nvme/host/pci.c | 21 ++++-----------------
include/linux/bio-integrity.h | 1 +
include/linux/blk-integrity.h | 14 ++++++++++++++
include/linux/blk-mq-dma.h | 11 +++++++++--
include/linux/blk_types.h | 2 ++
7 files changed, 51 insertions(+), 23 deletions(-)
--
2.47.3
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCHv2 1/2] blk-integrity: enable p2p source and destination
2025-09-02 20:01 [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup Keith Busch
@ 2025-09-02 20:01 ` Keith Busch
2025-09-02 20:01 ` [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags Keith Busch
2025-09-03 7:55 ` [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup Martin K. Petersen
2 siblings, 0 replies; 7+ messages in thread
From: Keith Busch @ 2025-09-02 20:01 UTC (permalink / raw)
To: linux-block, linux-nvme
Cc: hch, axboe, martin.petersen, jgg, leon, Keith Busch
From: Keith Busch <kbusch@kernel.org>
Set the extraction flags to allow p2p pages for the metadata buffer if
the block device allows it. Similar to data payloads, ensure the bio
does not allow merging if we see a p2p page.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
block/bio-integrity.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 6b077ca937f6b..d3618ed106f4e 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -230,7 +230,8 @@ static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
}
static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
- int nr_vecs, ssize_t bytes, ssize_t offset)
+ int nr_vecs, ssize_t bytes, ssize_t offset,
+ bool *is_p2p)
{
unsigned int nr_bvecs = 0;
int i, j;
@@ -251,6 +252,9 @@ static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
bytes -= next;
}
+ if (is_pci_p2pdma_page(pages[i]))
+ *is_p2p = true;
+
bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
offset = 0;
nr_bvecs++;
@@ -265,10 +269,11 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
+ iov_iter_extraction_t extraction_flags = 0;
size_t offset, bytes = iter->count;
+ bool copy, is_p2p = false;
unsigned int nr_bvecs;
int ret, nr_vecs;
- bool copy;
if (bio_integrity(bio))
return -EINVAL;
@@ -286,15 +291,23 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
}
copy = !iov_iter_is_aligned(iter, align, align);
- ret = iov_iter_extract_pages(iter, &pages, bytes, nr_vecs, 0, &offset);
+
+ if (blk_queue_pci_p2pdma(q))
+ extraction_flags |= ITER_ALLOW_P2PDMA;
+
+ ret = iov_iter_extract_pages(iter, &pages, bytes, nr_vecs,
+ extraction_flags, &offset);
if (unlikely(ret < 0))
goto free_bvec;
- nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset);
+ nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset,
+ &is_p2p);
if (pages != stack_pages)
kvfree(pages);
if (nr_bvecs > queue_max_integrity_segments(q))
copy = true;
+ if (is_p2p)
+ bio->bi_opf |= REQ_NOMERGE;
if (copy)
ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes);
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags
2025-09-02 20:01 [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup Keith Busch
2025-09-02 20:01 ` [PATCHv2 1/2] blk-integrity: enable p2p source and destination Keith Busch
@ 2025-09-02 20:01 ` Keith Busch
2025-09-03 6:09 ` Christoph Hellwig
` (2 more replies)
2025-09-03 7:55 ` [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup Martin K. Petersen
2 siblings, 3 replies; 7+ messages in thread
From: Keith Busch @ 2025-09-02 20:01 UTC (permalink / raw)
To: linux-block, linux-nvme
Cc: hch, axboe, martin.petersen, jgg, leon, Keith Busch
From: Keith Busch <kbusch@kernel.org>
We need to consider data and metadata dma mapping types separately. The
request and bio integrity payload have enough flag bits to internally
track the mapping type for each. Use these so the caller doesn't need to
track them, and provide separate request and integrity helpers to the
common code. This will make it easier to scale new mappings, like the
proposed MMIO attribute, without burdening the caller to track such
things.
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
block/blk-mq-dma.c | 4 ++++
drivers/nvme/host/pci.c | 21 ++++-----------------
include/linux/bio-integrity.h | 1 +
include/linux/blk-integrity.h | 14 ++++++++++++++
include/linux/blk-mq-dma.h | 11 +++++++++--
include/linux/blk_types.h | 2 ++
6 files changed, 34 insertions(+), 19 deletions(-)
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 660b5e200ccf6..449950029872a 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -174,6 +174,10 @@ static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev,
switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
phys_to_page(vec.paddr))) {
case PCI_P2PDMA_MAP_BUS_ADDR:
+ if (iter->iter.is_integrity)
+ bio_integrity(req->bio)->bip_flags |= BIP_P2P_DMA;
+ else
+ req->cmd_flags |= REQ_P2PDMA;
return blk_dma_map_bus(iter, &vec);
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d8a9dee55de33..28e203b894eb1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -260,12 +260,6 @@ enum nvme_iod_flags {
/* single segment dma mapping */
IOD_SINGLE_SEGMENT = 1U << 2,
- /* DMA mapped with PCI_P2PDMA_MAP_BUS_ADDR */
- IOD_P2P_BUS_ADDR = 1U << 3,
-
- /* Metadata DMA mapped with PCI_P2PDMA_MAP_BUS_ADDR */
- IOD_META_P2P_BUS_ADDR = 1U << 4,
-
/* Metadata using non-coalesced MPTR */
IOD_SINGLE_META_SEGMENT = 1U << 5,
};
@@ -737,9 +731,8 @@ static void nvme_unmap_metadata(struct request *req)
return;
}
- if (!blk_rq_dma_unmap(req, dma_dev, &iod->meta_dma_state,
- iod->meta_total_len,
- iod->flags & IOD_META_P2P_BUS_ADDR)) {
+ if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
+ iod->meta_total_len)) {
if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
nvme_free_sgls(req, sge, &sge[1]);
else
@@ -766,8 +759,7 @@ static void nvme_unmap_data(struct request *req)
return;
}
- if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
- iod->flags & IOD_P2P_BUS_ADDR)) {
+ if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
nvme_free_sgls(req, iod->descriptors[0],
&iod->cmd.common.dptr.sgl);
@@ -1043,9 +1035,6 @@ static blk_status_t nvme_map_data(struct request *req)
if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
return iter.status;
- if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
- iod->flags |= IOD_P2P_BUS_ADDR;
-
if (use_sgl == SGL_FORCED ||
(use_sgl == SGL_SUPPORTED &&
(sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
@@ -1068,9 +1057,7 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
&iod->meta_dma_state, &iter))
return iter.status;
- if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
- iod->flags |= IOD_META_P2P_BUS_ADDR;
- else if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
+ if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
entries = 1;
/*
diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h
index 0a25716820fe0..851254f36eb36 100644
--- a/include/linux/bio-integrity.h
+++ b/include/linux/bio-integrity.h
@@ -13,6 +13,7 @@ enum bip_flags {
BIP_CHECK_GUARD = 1 << 5, /* guard check */
BIP_CHECK_REFTAG = 1 << 6, /* reftag check */
BIP_CHECK_APPTAG = 1 << 7, /* apptag check */
+ BIP_P2P_DMA = 1 << 8, /* using P2P address */
};
struct bio_integrity_payload {
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index 78fe2459e6612..4746f3e6ad36d 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -27,6 +27,15 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
#ifdef CONFIG_BLK_DEV_INTEGRITY
int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
+
+static inline bool blk_rq_integrity_dma_unmap(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ size_t mapped_len)
+{
+ return blk_dma_unmap(req, dma_dev, state, mapped_len,
+ bio_integrity(req->bio)->bip_flags & BIP_P2P_DMA);
+}
+
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
ssize_t bytes);
@@ -115,6 +124,11 @@ static inline int blk_rq_map_integrity_sg(struct request *q,
{
return 0;
}
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len)
+{
+ return false;
+}
static inline int blk_rq_integrity_map_user(struct request *rq,
void __user *ubuf,
ssize_t bytes)
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index 0f45ea110ca12..51829958d8729 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -43,7 +43,7 @@ static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
}
/**
- * blk_rq_dma_unmap - try to DMA unmap a request
+ * blk_dma_unmap - try to DMA unmap a request
* @req: request to unmap
* @dma_dev: device to unmap from
* @state: DMA IOVA state
@@ -53,7 +53,7 @@ static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
* Returns %false if the callers need to manually unmap every DMA segment
* mapped using @iter or %true if no work is left to be done.
*/
-static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+static inline bool blk_dma_unmap(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, size_t mapped_len, bool is_p2p)
{
if (is_p2p)
@@ -68,4 +68,11 @@ static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
return !dma_need_unmap(dma_dev);
}
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len)
+{
+ return blk_dma_unmap(req, dma_dev, state, mapped_len,
+ req->cmd_flags & REQ_P2PDMA);
+}
+
#endif /* BLK_MQ_DMA_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 930daff207df2..09b99d52fd365 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -386,6 +386,7 @@ enum req_flag_bits {
__REQ_DRV, /* for driver use */
__REQ_FS_PRIVATE, /* for file system (submitter) use */
__REQ_ATOMIC, /* for atomic write operations */
+ __REQ_P2PDMA, /* contains P2P DMA pages */
/*
* Command specific flags, keep last:
*/
@@ -418,6 +419,7 @@ enum req_flag_bits {
#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
+#define REQ_P2PDMA (__force blk_opf_t)(1ULL << __REQ_P2PDMA)
#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags
2025-09-02 20:01 ` [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags Keith Busch
@ 2025-09-03 6:09 ` Christoph Hellwig
2025-09-03 8:20 ` Leon Romanovsky
2025-09-03 11:02 ` kernel test robot
2 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2025-09-03 6:09 UTC (permalink / raw)
To: Keith Busch
Cc: linux-block, linux-nvme, hch, axboe, martin.petersen, jgg, leon,
Keith Busch
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup
2025-09-02 20:01 [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup Keith Busch
2025-09-02 20:01 ` [PATCHv2 1/2] blk-integrity: enable p2p source and destination Keith Busch
2025-09-02 20:01 ` [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags Keith Busch
@ 2025-09-03 7:55 ` Martin K. Petersen
2 siblings, 0 replies; 7+ messages in thread
From: Martin K. Petersen @ 2025-09-03 7:55 UTC (permalink / raw)
To: Keith Busch
Cc: linux-block, linux-nvme, hch, axboe, martin.petersen, jgg, leon,
Keith Busch
Keith,
> This series moves the p2p dma tracking from the caller to the block
> layer, and makes it possible to actually use p2p for metadata
> payloads.
Looks good to me.
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
--
Martin K. Petersen
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags
2025-09-02 20:01 ` [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags Keith Busch
2025-09-03 6:09 ` Christoph Hellwig
@ 2025-09-03 8:20 ` Leon Romanovsky
2025-09-03 11:02 ` kernel test robot
2 siblings, 0 replies; 7+ messages in thread
From: Leon Romanovsky @ 2025-09-03 8:20 UTC (permalink / raw)
To: Keith Busch
Cc: linux-block, linux-nvme, hch, axboe, martin.petersen, jgg,
Keith Busch
On Tue, Sep 02, 2025 at 01:01:21PM -0700, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
>
> We need to consider data and metadata dma mapping types separately. The
> request and bio integrity payload have enough flag bits to internally
> track the mapping type for each. Use these so the caller doesn't need to
> track them, and provide separate request and integrity helpers to the
> common code. This will make it easier to scale new mappings, like the
> proposed MMIO attribute, without burdening the caller to track such
> things.
>
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---
> block/blk-mq-dma.c | 4 ++++
> drivers/nvme/host/pci.c | 21 ++++-----------------
> include/linux/bio-integrity.h | 1 +
> include/linux/blk-integrity.h | 14 ++++++++++++++
> include/linux/blk-mq-dma.h | 11 +++++++++--
> include/linux/blk_types.h | 2 ++
> 6 files changed, 34 insertions(+), 19 deletions(-)
>
Thanks,
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags
2025-09-02 20:01 ` [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags Keith Busch
2025-09-03 6:09 ` Christoph Hellwig
2025-09-03 8:20 ` Leon Romanovsky
@ 2025-09-03 11:02 ` kernel test robot
2 siblings, 0 replies; 7+ messages in thread
From: kernel test robot @ 2025-09-03 11:02 UTC (permalink / raw)
To: Keith Busch, linux-block, linux-nvme
Cc: oe-kbuild-all, hch, axboe, martin.petersen, jgg, leon,
Keith Busch
Hi Keith,
kernel test robot noticed the following build errors:
[auto build test ERROR on axboe-block/for-next]
[also build test ERROR on next-20250903]
[cannot apply to linus/master linux-nvme/for-next hch-configfs/for-next v6.17-rc4]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Keith-Busch/blk-integrity-enable-p2p-source-and-destination/20250903-040417
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
patch link: https://lore.kernel.org/r/20250902200121.3665600-3-kbusch%40meta.com
patch subject: [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags
config: microblaze-allnoconfig (https://download.01.org/0day-ci/archive/20250903/202509031816.qq7ODYRv-lkp@intel.com/config)
compiler: microblaze-linux-gcc (GCC) 15.1.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250903/202509031816.qq7ODYRv-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202509031816.qq7ODYRv-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from block/bdev.c:15:
>> include/linux/blk-integrity.h:127:20: error: redefinition of 'blk_rq_dma_unmap'
127 | static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
| ^~~~~~~~~~~~~~~~
In file included from include/linux/blk-integrity.h:7:
include/linux/blk-mq-dma.h:71:20: note: previous definition of 'blk_rq_dma_unmap' with type 'bool(struct request *, struct device *, struct dma_iova_state *, size_t)' {aka '_Bool(struct request *, struct device *, struct dma_iova_state *, unsigned int)'}
71 | static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
| ^~~~~~~~~~~~~~~~
vim +/blk_rq_dma_unmap +127 include/linux/blk-integrity.h
101
102 /*
103 * Return the current bvec that contains the integrity data. bip_iter may be
104 * advanced to iterate over the integrity data.
105 */
106 static inline struct bio_vec rq_integrity_vec(struct request *rq)
107 {
108 return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
109 rq->bio->bi_integrity->bip_iter);
110 }
111 #else /* CONFIG_BLK_DEV_INTEGRITY */
112 static inline int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
113 struct logical_block_metadata_cap __user *argp)
114 {
115 return -ENOIOCTLCMD;
116 }
117 static inline int blk_rq_count_integrity_sg(struct request_queue *q,
118 struct bio *b)
119 {
120 return 0;
121 }
122 static inline int blk_rq_map_integrity_sg(struct request *q,
123 struct scatterlist *s)
124 {
125 return 0;
126 }
> 127 static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
128 struct dma_iova_state *state, size_t mapped_len)
129 {
130 return false;
131 }
132 static inline int blk_rq_integrity_map_user(struct request *rq,
133 void __user *ubuf,
134 ssize_t bytes)
135 {
136 return -EINVAL;
137 }
138 static inline bool blk_rq_integrity_dma_map_iter_start(struct request *req,
139 struct device *dma_dev, struct dma_iova_state *state,
140 struct blk_dma_iter *iter)
141 {
142 return false;
143 }
144 static inline bool blk_rq_integrity_dma_map_iter_next(struct request *req,
145 struct device *dma_dev, struct blk_dma_iter *iter)
146 {
147 return false;
148 }
149 static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
150 {
151 return NULL;
152 }
153 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
154 {
155 return NULL;
156 }
157 static inline bool
158 blk_integrity_queue_supports_integrity(struct request_queue *q)
159 {
160 return false;
161 }
162 static inline unsigned short
163 queue_max_integrity_segments(const struct request_queue *q)
164 {
165 return 0;
166 }
167
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2025-09-03 11:03 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-09-02 20:01 [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup Keith Busch
2025-09-02 20:01 ` [PATCHv2 1/2] blk-integrity: enable p2p source and destination Keith Busch
2025-09-02 20:01 ` [PATCHv2 2/2] blk-mq-dma: bring back p2p request flags Keith Busch
2025-09-03 6:09 ` Christoph Hellwig
2025-09-03 8:20 ` Leon Romanovsky
2025-09-03 11:02 ` kernel test robot
2025-09-03 7:55 ` [PATCHv2 0/2] blk-mq-dma: p2p cleanups and integrity fixup Martin K. Petersen
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).