From: Leon Romanovsky <leon@kernel.org>
To: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Leon Romanovsky <leonro@nvidia.com>,
Jason Gunthorpe <jgg@nvidia.com>,
Abdiel Janulgue <abdiel.janulgue@gmail.com>,
Alexander Potapenko <glider@google.com>,
Alex Gaynor <alex.gaynor@gmail.com>,
Andrew Morton <akpm@linux-foundation.org>,
Christoph Hellwig <hch@lst.de>,
Danilo Krummrich <dakr@kernel.org>,
iommu@lists.linux.dev, Jason Wang <jasowang@redhat.com>,
Jens Axboe <axboe@kernel.dk>, Joerg Roedel <joro@8bytes.org>,
Jonathan Corbet <corbet@lwn.net>, Juergen Gross <jgross@suse.com>,
kasan-dev@googlegroups.com, Keith Busch <kbusch@kernel.org>,
linux-block@vger.kernel.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-nvme@lists.infradead.org, linuxppc-dev@lists.ozlabs.org,
linux-trace-kernel@vger.kernel.org,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Masami Hiramatsu <mhiramat@kernel.org>,
Michael Ellerman <mpe@ellerman.id.au>,
"Michael S. Tsirkin" <mst@redhat.com>,
Miguel Ojeda <ojeda@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
rust-for-linux@vger.kernel.org, Sagi Grimberg <sagi@grimberg.me>,
Stefano Stabellini <sstabellini@kernel.org>,
Steven Rostedt <rostedt@goodmis.org>,
virtualization@lists.linux.dev, Will Deacon <will@kernel.org>,
xen-devel@lists.xenproject.org
Subject: [PATCH v1 08/16] kmsan: convert kmsan_handle_dma to use physical addresses
Date: Mon, 4 Aug 2025 15:42:42 +0300 [thread overview]
Message-ID: <5b40377b621e49ff4107fa10646c828ccc94e53e.1754292567.git.leon@kernel.org> (raw)
In-Reply-To: <cover.1754292567.git.leon@kernel.org>
From: Leon Romanovsky <leonro@nvidia.com>
Convert the KMSAN DMA handling function from page-based to physical
address-based interface.
The refactoring renames kmsan_handle_dma() parameters from accepting
(struct page *page, size_t offset, size_t size) to (phys_addr_t phys,
size_t size). A pfn_valid() check is added so that KMSAN skips
addresses that are not backed by a struct page.
As part of this change, support for highmem addresses is implemented
using kmap_local_page() to handle both lowmem and highmem regions
properly. All callers throughout the codebase are updated to use the
new phys_addr_t based interface.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/virtio/virtio_ring.c | 4 ++--
include/linux/kmsan.h | 12 +++++++-----
kernel/dma/mapping.c | 2 +-
mm/kmsan/hooks.c | 36 +++++++++++++++++++++++++++++-------
tools/virtio/linux/kmsan.h | 2 +-
5 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index f5062061c4084..c147145a65930 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -378,7 +378,7 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
* is initialized by the hardware. Explicitly check/unpoison it
* depending on the direction.
*/
- kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
+ kmsan_handle_dma(sg_phys(sg), sg->length, direction);
*addr = (dma_addr_t)sg_phys(sg);
return 0;
}
@@ -3157,7 +3157,7 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
struct vring_virtqueue *vq = to_vvq(_vq);
if (!vq->use_dma_api) {
- kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
+ kmsan_handle_dma(virt_to_phys(ptr), size, dir);
return (dma_addr_t)virt_to_phys(ptr);
}
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index 2b1432cc16d59..6f27b9824ef77 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -182,8 +182,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
/**
* kmsan_handle_dma() - Handle a DMA data transfer.
- * @page: first page of the buffer.
- * @offset: offset of the buffer within the first page.
+ * @phys: physical address of the buffer.
* @size: buffer size.
* @dir: one of possible dma_data_direction values.
*
@@ -191,8 +190,11 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
* * checks the buffer, if it is copied to device;
* * initializes the buffer, if it is copied from device;
* * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ *
+ * The function handles page lookup internally and supports both lowmem
+ * and highmem addresses.
*/
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
enum dma_data_direction dir);
/**
@@ -372,8 +374,8 @@ static inline void kmsan_iounmap_page_range(unsigned long start,
{
}
-static inline void kmsan_handle_dma(struct page *page, size_t offset,
- size_t size, enum dma_data_direction dir)
+static inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
{
}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 80481a873340a..709405d46b2b4 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -172,7 +172,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
else
addr = ops->map_page(dev, page, offset, size, dir, attrs);
- kmsan_handle_dma(page, offset, size, dir);
+ kmsan_handle_dma(phys, size, dir);
trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
debug_dma_map_phys(dev, phys, size, dir, addr, attrs);
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 97de3d6194f07..eab7912a3bf05 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -336,25 +336,48 @@ static void kmsan_handle_dma_page(const void *addr, size_t size,
 }
 /* Helper function to handle DMA data transfers. */
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
 		      enum dma_data_direction dir)
 {
 	u64 page_offset, to_go, addr;
+	struct page *page;
+	void *kaddr;
-	if (PageHighMem(page))
+	/* Skip addresses not backed by a struct page. */
+	if (!pfn_valid(PHYS_PFN(phys)))
 		return;
-	addr = (u64)page_address(page) + offset;
+
+	page = phys_to_page(phys);
+	page_offset = offset_in_page(phys);
+
 	/*
 	 * The kernel may occasionally give us adjacent DMA pages not belonging
 	 * to the same allocation. Process them separately to avoid triggering
 	 * internal KMSAN checks.
 	 */
 	while (size > 0) {
-		page_offset = offset_in_page(addr);
 		to_go = min(PAGE_SIZE - page_offset, (u64)size);
+
+		if (PageHighMem(page))
+			/* Handle highmem pages using kmap */
+			kaddr = kmap_local_page(page);
+		else
+			/* Lowmem pages can be accessed directly */
+			kaddr = page_address(page);
+
+		addr = (u64)kaddr + page_offset;
 		kmsan_handle_dma_page((void *)addr, to_go, dir);
-		addr += to_go;
+
+		if (PageHighMem(page))
+			/* kunmap_local() takes the mapped address, not the page */
+			kunmap_local(kaddr);
+
+		phys += to_go;
 		size -= to_go;
+
+		/* Move to next page if needed */
+		if (size > 0) {
+			page = phys_to_page(phys);
+			page_offset = offset_in_page(phys);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(kmsan_handle_dma);
EXPORT_SYMBOL_GPL(kmsan_handle_dma);
@@ -366,8 +389,7 @@ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
int i;
for_each_sg(sg, item, nents, i)
- kmsan_handle_dma(sg_page(item), item->offset, item->length,
- dir);
+ kmsan_handle_dma(sg_phys(item), item->length, dir);
}
/* Functions from kmsan-checks.h follow. */
diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h
index 272b5aa285d5a..6cd2e3efd03dc 100644
--- a/tools/virtio/linux/kmsan.h
+++ b/tools/virtio/linux/kmsan.h
@@ -4,7 +4,7 @@
#include <linux/gfp.h>
-inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
enum dma_data_direction dir)
{
}
--
2.50.1
next prev parent reply other threads:[~2025-08-04 12:43 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-08-04 12:42 [PATCH v1 00/16] dma-mapping: migrate to physical address-based API Leon Romanovsky
2025-08-04 12:42 ` [PATCH v1 01/16] dma-mapping: introduce new DMA attribute to indicate MMIO memory Leon Romanovsky
2025-08-06 17:31 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 02/16] iommu/dma: handle MMIO path in dma_iova_link Leon Romanovsky
2025-08-06 18:10 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 03/16] dma-debug: refactor to use physical addresses for page mapping Leon Romanovsky
2025-08-06 18:26 ` Jason Gunthorpe
2025-08-06 18:38 ` Leon Romanovsky
2025-08-04 12:42 ` [PATCH v1 04/16] dma-mapping: rename trace_dma_*map_page to trace_dma_*map_phys Leon Romanovsky
2025-08-04 12:42 ` [PATCH v1 05/16] iommu/dma: rename iommu_dma_*map_page to iommu_dma_*map_phys Leon Romanovsky
2025-08-06 18:44 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 06/16] iommu/dma: extend iommu_dma_*map_phys API to handle MMIO memory Leon Romanovsky
2025-08-07 12:07 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 07/16] dma-mapping: convert dma_direct_*map_page to be phys_addr_t based Leon Romanovsky
2025-08-07 12:13 ` Jason Gunthorpe
2025-08-04 12:42 ` Leon Romanovsky [this message]
2025-08-07 12:21 ` [PATCH v1 08/16] kmsan: convert kmsan_handle_dma to use physical addresses Jason Gunthorpe
2025-08-13 15:07 ` Leon Romanovsky
2025-08-14 12:13 ` Jason Gunthorpe
2025-08-14 12:35 ` Leon Romanovsky
2025-08-14 12:44 ` Jason Gunthorpe
2025-08-14 13:31 ` Leon Romanovsky
2025-08-14 14:14 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 09/16] dma-mapping: handle MMIO flow in dma_map|unmap_page Leon Romanovsky
2025-08-07 13:08 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 10/16] xen: swiotlb: Open code map_resource callback Leon Romanovsky
2025-08-07 14:40 ` Jürgen Groß
2025-08-04 12:42 ` [PATCH v1 11/16] dma-mapping: export new dma_*map_phys() interface Leon Romanovsky
2025-08-07 13:38 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 12/16] mm/hmm: migrate to physical address-based DMA mapping API Leon Romanovsky
2025-08-07 13:14 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 13/16] mm/hmm: properly take MMIO path Leon Romanovsky
2025-08-07 13:14 ` Jason Gunthorpe
2025-08-04 12:42 ` [PATCH v1 14/16] block-dma: migrate to dma_map_phys instead of map_page Leon Romanovsky
2025-08-04 12:42 ` [PATCH v1 15/16] block-dma: properly take MMIO path Leon Romanovsky
2025-08-04 12:42 ` [PATCH v1 16/16] nvme-pci: unmap MMIO pages with appropriate interface Leon Romanovsky
2025-08-07 13:45 ` Jason Gunthorpe
2025-08-13 15:37 ` Leon Romanovsky
2025-08-07 14:19 ` [PATCH v1 00/16] dma-mapping: migrate to physical address-based API Jason Gunthorpe
2025-08-08 18:51 ` Marek Szyprowski
2025-08-09 13:34 ` Jason Gunthorpe
2025-08-09 16:53 ` Demi Marie Obenour
2025-08-10 17:02 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5b40377b621e49ff4107fa10646c828ccc94e53e.1754292567.git.leon@kernel.org \
--to=leon@kernel.org \
--cc=abdiel.janulgue@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=alex.gaynor@gmail.com \
--cc=axboe@kernel.dk \
--cc=corbet@lwn.net \
--cc=dakr@kernel.org \
--cc=glider@google.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux.dev \
--cc=jasowang@redhat.com \
--cc=jgg@nvidia.com \
--cc=jgross@suse.com \
--cc=joro@8bytes.org \
--cc=kasan-dev@googlegroups.com \
--cc=kbusch@kernel.org \
--cc=leonro@nvidia.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvme@lists.infradead.org \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=m.szyprowski@samsung.com \
--cc=maddy@linux.ibm.com \
--cc=mhiramat@kernel.org \
--cc=mpe@ellerman.id.au \
--cc=mst@redhat.com \
--cc=ojeda@kernel.org \
--cc=robin.murphy@arm.com \
--cc=rostedt@goodmis.org \
--cc=rust-for-linux@vger.kernel.org \
--cc=sagi@grimberg.me \
--cc=sstabellini@kernel.org \
--cc=virtualization@lists.linux.dev \
--cc=will@kernel.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).