From: Leon Romanovsky <leon@kernel.org>
To: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Leon Romanovsky <leonro@nvidia.com>,
Jason Gunthorpe <jgg@nvidia.com>,
Abdiel Janulgue <abdiel.janulgue@gmail.com>,
Alexander Potapenko <glider@google.com>,
Alex Gaynor <alex.gaynor@gmail.com>,
Andrew Morton <akpm@linux-foundation.org>,
Christoph Hellwig <hch@lst.de>,
Danilo Krummrich <dakr@kernel.org>,
iommu@lists.linux.dev, Jason Wang <jasowang@redhat.com>,
Jens Axboe <axboe@kernel.dk>, Joerg Roedel <joro@8bytes.org>,
Jonathan Corbet <corbet@lwn.net>, Juergen Gross <jgross@suse.com>,
kasan-dev@googlegroups.com, Keith Busch <kbusch@kernel.org>,
linux-block@vger.kernel.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-nvme@lists.infradead.org, linuxppc-dev@lists.ozlabs.org,
linux-trace-kernel@vger.kernel.org,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Masami Hiramatsu <mhiramat@kernel.org>,
Michael Ellerman <mpe@ellerman.id.au>,
"Michael S. Tsirkin" <mst@redhat.com>,
Miguel Ojeda <ojeda@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
rust-for-linux@vger.kernel.org, Sagi Grimberg <sagi@grimberg.me>,
Stefano Stabellini <sstabellini@kernel.org>,
Steven Rostedt <rostedt@goodmis.org>,
virtualization@lists.linux.dev, Will Deacon <will@kernel.org>,
xen-devel@lists.xenproject.org
Subject: [PATCH v4 15/16] block-dma: properly take MMIO path
Date: Tue, 19 Aug 2025 20:36:59 +0300 [thread overview]
Message-ID: <642dbeb7aa94257eaea71ec63c06e3f939270023.1755624249.git.leon@kernel.org> (raw)
In-Reply-To: <cover.1755624249.git.leon@kernel.org>
From: Leon Romanovsky <leonro@nvidia.com>
Make sure that the CPU is not synced and that the IOMMU is configured to
take the MMIO path by providing the newly introduced DMA_ATTR_MMIO attribute.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
block/blk-mq-dma.c | 13 +++++++++++--
include/linux/blk-mq-dma.h | 6 +++++-
include/linux/blk_types.h | 2 ++
3 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 37e2142be4f7..d415088ed9fd 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -87,8 +87,13 @@ static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
struct blk_dma_iter *iter, struct phys_vec *vec)
{
+ unsigned int attrs = 0;
+
+ if (req->cmd_flags & REQ_MMIO)
+ attrs = DMA_ATTR_MMIO;
+
iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
- rq_dma_dir(req), 0);
+ rq_dma_dir(req), attrs);
if (dma_mapping_error(dma_dev, iter->addr)) {
iter->status = BLK_STS_RESOURCE;
return false;
@@ -103,14 +108,17 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
{
enum dma_data_direction dir = rq_dma_dir(req);
unsigned int mapped = 0;
+ unsigned int attrs = 0;
int error;
iter->addr = state->addr;
iter->len = dma_iova_size(state);
+ if (req->cmd_flags & REQ_MMIO)
+ attrs = DMA_ATTR_MMIO;
do {
error = dma_iova_link(dma_dev, state, vec->paddr, mapped,
- vec->len, dir, 0);
+ vec->len, dir, attrs);
if (error)
break;
mapped += vec->len;
@@ -176,6 +184,7 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
* same as non-P2P transfers below and during unmap.
*/
req->cmd_flags &= ~REQ_P2PDMA;
+ req->cmd_flags |= REQ_MMIO;
break;
default:
iter->status = BLK_STS_INVAL;
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index c26a01aeae00..6c55f5e58511 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -48,12 +48,16 @@ static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, size_t mapped_len)
{
+ unsigned int attrs = 0;
+
if (req->cmd_flags & REQ_P2PDMA)
return true;
if (dma_use_iova(state)) {
+ if (req->cmd_flags & REQ_MMIO)
+ attrs = DMA_ATTR_MMIO;
dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
- 0);
+ attrs);
return true;
}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 09b99d52fd36..283058bcb5b1 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -387,6 +387,7 @@ enum req_flag_bits {
__REQ_FS_PRIVATE, /* for file system (submitter) use */
__REQ_ATOMIC, /* for atomic write operations */
__REQ_P2PDMA, /* contains P2P DMA pages */
+ __REQ_MMIO, /* contains MMIO memory */
/*
* Command specific flags, keep last:
*/
@@ -420,6 +421,7 @@ enum req_flag_bits {
#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
#define REQ_P2PDMA (__force blk_opf_t)(1ULL << __REQ_P2PDMA)
+#define REQ_MMIO (__force blk_opf_t)(1ULL << __REQ_MMIO)
#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
--
2.50.1
next prev parent reply other threads:[~2025-08-19 17:39 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-08-19 17:36 [PATCH v4 00/16] dma-mapping: migrate to physical address-based API Leon Romanovsky
2025-08-19 17:36 ` [PATCH v4 01/16] dma-mapping: introduce new DMA attribute to indicate MMIO memory Leon Romanovsky
2025-08-28 13:03 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 02/16] iommu/dma: implement DMA_ATTR_MMIO for dma_iova_link() Leon Romanovsky
2025-08-19 17:36 ` [PATCH v4 03/16] dma-debug: refactor to use physical addresses for page mapping Leon Romanovsky
2025-08-28 13:19 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 04/16] dma-mapping: rename trace_dma_*map_page to trace_dma_*map_phys Leon Romanovsky
2025-08-28 13:27 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 05/16] iommu/dma: rename iommu_dma_*map_page to iommu_dma_*map_phys Leon Romanovsky
2025-08-28 13:38 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 06/16] iommu/dma: extend iommu_dma_*map_phys API to handle MMIO memory Leon Romanovsky
2025-08-28 13:49 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 07/16] dma-mapping: convert dma_direct_*map_page to be phys_addr_t based Leon Romanovsky
2025-08-28 14:19 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 08/16] kmsan: convert kmsan_handle_dma to use physical addresses Leon Romanovsky
2025-08-28 15:00 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 09/16] dma-mapping: handle MMIO flow in dma_map|unmap_page Leon Romanovsky
2025-08-28 15:17 ` Jason Gunthorpe
2025-08-31 13:12 ` Leon Romanovsky
2025-08-19 17:36 ` [PATCH v4 10/16] xen: swiotlb: Open code map_resource callback Leon Romanovsky
2025-08-28 15:18 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 11/16] dma-mapping: export new dma_*map_phys() interface Leon Romanovsky
2025-08-19 18:22 ` Keith Busch
2025-08-28 16:01 ` Jason Gunthorpe
2025-08-19 17:36 ` [PATCH v4 12/16] mm/hmm: migrate to physical address-based DMA mapping API Leon Romanovsky
2025-08-19 17:36 ` [PATCH v4 13/16] mm/hmm: properly take MMIO path Leon Romanovsky
2025-08-19 17:36 ` [PATCH v4 14/16] block-dma: migrate to dma_map_phys instead of map_page Leon Romanovsky
2025-08-19 18:20 ` Keith Busch
2025-08-19 18:49 ` Leon Romanovsky
2025-09-02 20:49 ` Marek Szyprowski
2025-09-02 21:59 ` Keith Busch
2025-09-02 23:24 ` Jason Gunthorpe
2025-08-19 17:36 ` Leon Romanovsky [this message]
2025-08-19 18:24 ` [PATCH v4 15/16] block-dma: properly take MMIO path Keith Busch
2025-08-28 15:19 ` Keith Busch
2025-08-28 16:54 ` Leon Romanovsky
2025-08-28 17:15 ` Keith Busch
2025-08-28 18:41 ` Jason Gunthorpe
2025-08-28 19:10 ` Keith Busch
2025-08-28 19:18 ` Jason Gunthorpe
2025-08-28 20:54 ` Keith Busch
2025-08-28 23:45 ` Jason Gunthorpe
2025-08-29 12:35 ` Keith Busch
2025-08-19 17:37 ` [PATCH v4 16/16] nvme-pci: unmap MMIO pages with appropriate interface Leon Romanovsky
2025-08-19 19:58 ` Keith Busch
2025-08-28 11:57 ` [PATCH v4 00/16] dma-mapping: migrate to physical address-based API Leon Romanovsky
2025-09-01 21:47 ` Marek Szyprowski
2025-09-01 22:23 ` Jason Gunthorpe
2025-09-02 9:29 ` Leon Romanovsky
2025-08-29 13:16 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=642dbeb7aa94257eaea71ec63c06e3f939270023.1755624249.git.leon@kernel.org \
--to=leon@kernel.org \
--cc=abdiel.janulgue@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=alex.gaynor@gmail.com \
--cc=axboe@kernel.dk \
--cc=corbet@lwn.net \
--cc=dakr@kernel.org \
--cc=glider@google.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux.dev \
--cc=jasowang@redhat.com \
--cc=jgg@nvidia.com \
--cc=jgross@suse.com \
--cc=joro@8bytes.org \
--cc=kasan-dev@googlegroups.com \
--cc=kbusch@kernel.org \
--cc=leonro@nvidia.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvme@lists.infradead.org \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=m.szyprowski@samsung.com \
--cc=maddy@linux.ibm.com \
--cc=mhiramat@kernel.org \
--cc=mpe@ellerman.id.au \
--cc=mst@redhat.com \
--cc=ojeda@kernel.org \
--cc=robin.murphy@arm.com \
--cc=rostedt@goodmis.org \
--cc=rust-for-linux@vger.kernel.org \
--cc=sagi@grimberg.me \
--cc=sstabellini@kernel.org \
--cc=virtualization@lists.linux.dev \
--cc=will@kernel.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).