From: Jason Gunthorpe <jgg@nvidia.com>
To: "Alex Williamson" <alex@shazbot.org>,
"Christian König" <christian.koenig@amd.com>,
dri-devel@lists.freedesktop.org, iommu@lists.linux.dev,
"Joerg Roedel" <joro@8bytes.org>,
"Kevin Tian" <kevin.tian@intel.com>,
kvm@vger.kernel.org, linaro-mm-sig@lists.linaro.org,
linux-kselftest@vger.kernel.org, linux-media@vger.kernel.org,
"Robin Murphy" <robin.murphy@arm.com>,
"Shuah Khan" <shuah@kernel.org>,
"Sumit Semwal" <sumit.semwal@linaro.org>,
"Will Deacon" <will@kernel.org>
Cc: Krishnakant Jaju <kjaju@nvidia.com>,
Leon Romanovsky <leon@kernel.org>, Matt Ochs <mochs@nvidia.com>,
Nicolin Chen <nicolinc@nvidia.com>,
patches@lists.linux.dev, Simona Vetter <simona.vetter@ffwll.ch>,
Vivek Kasireddy <vivek.kasireddy@intel.com>,
Xu Yilun <yilun.xu@linux.intel.com>
Subject: [PATCH 6/9] iommufd: Have pfn_reader process DMABUF iopt_pages
Date: Fri, 7 Nov 2025 12:49:38 -0400 [thread overview]
Message-ID: <6-v1-af84a3ab44f5+f68-iommufd_buf_jgg@nvidia.com> (raw)
In-Reply-To: <0-v1-af84a3ab44f5+f68-iommufd_buf_jgg@nvidia.com>
Make another sub-implementation of pfn_reader for DMABUF. This version
will fill the batch using the struct phys_vec recorded during the
attachment.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/iommu/iommufd/pages.c | 70 +++++++++++++++++++++++++++++++----
1 file changed, 63 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index e71796b0c4bf44..935184dc68be32 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -1076,6 +1076,40 @@ static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
return iopt_pages_update_pinned(pages, npages, inc, user);
}
+struct pfn_reader_dmabuf {
+ struct dma_buf_phys_vec phys;
+ unsigned long start_offset;
+};
+
+static int pfn_reader_dmabuf_init(struct pfn_reader_dmabuf *dmabuf,
+ struct iopt_pages *pages)
+{
+ /* Callers must not get here if the dmabuf was already revoked */
+ if (WARN_ON(iopt_dmabuf_revoked(pages)))
+ return -EINVAL;
+
+ dmabuf->phys = pages->dmabuf.phys;
+ dmabuf->start_offset = pages->dmabuf.start;
+ return 0;
+}
+
+static int pfn_reader_fill_dmabuf(struct pfn_reader_dmabuf *dmabuf,
+ struct pfn_batch *batch,
+ unsigned long start_index,
+ unsigned long last_index)
+{
+ unsigned long start = dmabuf->start_offset + start_index * PAGE_SIZE;
+
+ /*
+ * This works in PAGE_SIZE indexes, if the dmabuf is sliced and
+ * starts/ends at a sub page offset then the batch to domain code will
+ * adjust it.
+ */
+ batch_add_pfn_num(batch, PHYS_PFN(dmabuf->phys.paddr + start),
+ last_index - start_index + 1, BATCH_MMIO);
+ return 0;
+}
+
/*
* PFNs are stored in three places, in order of preference:
* - The iopt_pages xarray. This is only populated if there is a
@@ -1094,7 +1128,10 @@ struct pfn_reader {
unsigned long batch_end_index;
unsigned long last_index;
- struct pfn_reader_user user;
+ union {
+ struct pfn_reader_user user;
+ struct pfn_reader_dmabuf dmabuf;
+ };
};
static int pfn_reader_update_pinned(struct pfn_reader *pfns)
@@ -1130,7 +1167,7 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns)
{
struct interval_tree_double_span_iter *span = &pfns->span;
unsigned long start_index = pfns->batch_end_index;
- struct pfn_reader_user *user = &pfns->user;
+ struct pfn_reader_user *user;
unsigned long npages;
struct iopt_area *area;
int rc;
@@ -1162,8 +1199,13 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns)
return 0;
}
- if (start_index >= pfns->user.upages_end) {
- rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index,
+ if (iopt_is_dmabuf(pfns->pages))
+ return pfn_reader_fill_dmabuf(&pfns->dmabuf, &pfns->batch,
+ start_index, span->last_hole);
+
+ user = &pfns->user;
+ if (start_index >= user->upages_end) {
+ rc = pfn_reader_user_pin(user, pfns->pages, start_index,
span->last_hole);
if (rc)
return rc;
@@ -1231,7 +1273,10 @@ static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages,
pfns->batch_start_index = start_index;
pfns->batch_end_index = start_index;
pfns->last_index = last_index;
- pfn_reader_user_init(&pfns->user, pages);
+ if (iopt_is_dmabuf(pages))
+ pfn_reader_dmabuf_init(&pfns->dmabuf, pages);
+ else
+ pfn_reader_user_init(&pfns->user, pages);
rc = batch_init(&pfns->batch, last_index - start_index + 1);
if (rc)
return rc;
@@ -1252,8 +1297,12 @@ static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages,
static void pfn_reader_release_pins(struct pfn_reader *pfns)
{
struct iopt_pages *pages = pfns->pages;
- struct pfn_reader_user *user = &pfns->user;
+ struct pfn_reader_user *user;
+ if (iopt_is_dmabuf(pages))
+ return;
+
+ user = &pfns->user;
if (user->upages_end > pfns->batch_end_index) {
/* Any pages not transferred to the batch are just unpinned */
@@ -1283,7 +1332,8 @@ static void pfn_reader_destroy(struct pfn_reader *pfns)
struct iopt_pages *pages = pfns->pages;
pfn_reader_release_pins(pfns);
- pfn_reader_user_destroy(&pfns->user, pfns->pages);
+ if (!iopt_is_dmabuf(pfns->pages))
+ pfn_reader_user_destroy(&pfns->user, pfns->pages);
batch_destroy(&pfns->batch, NULL);
WARN_ON(pages->last_npinned != pages->npinned);
}
@@ -1687,6 +1737,12 @@ static void __iopt_area_unfill_domain(struct iopt_area *area,
lockdep_assert_held(&pages->mutex);
+ if (iopt_is_dmabuf(pages)) {
+ iopt_area_unmap_domain_range(area, domain, start_index,
+ last_index);
+ return;
+ }
+
/*
* For security we must not unpin something that is still DMA mapped,
* so this must unmap any IOVA before we go ahead and unpin the pages.
--
2.43.0
next prev parent reply other threads:[~2025-11-07 16:49 UTC|newest]
Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-07 16:49 [PATCH 0/9] Initial DMABUF support for iommufd Jason Gunthorpe
2025-11-07 16:49 ` [PATCH 1/9] vfio/pci: Add vfio_pci_dma_buf_iommufd_map() Jason Gunthorpe
2025-11-20 7:49 ` Tian, Kevin
2025-11-20 17:34 ` Nicolin Chen
2025-11-07 16:49 ` [PATCH 2/9] iommufd: Add DMABUF to iopt_pages Jason Gunthorpe
2025-11-07 18:02 ` Nicolin Chen
2025-11-20 7:55 ` Tian, Kevin
2025-11-21 14:27 ` Jason Gunthorpe
2025-11-07 16:49 ` [PATCH 3/9] iommufd: Do not map/unmap revoked DMABUFs Jason Gunthorpe
2025-11-07 18:30 ` Nicolin Chen
2025-11-20 7:56 ` Tian, Kevin
2025-11-07 16:49 ` [PATCH 4/9] iommufd: Allow a DMABUF to be revoked Jason Gunthorpe
2025-11-13 23:26 ` Nicolin Chen
2025-11-20 7:58 ` Tian, Kevin
2025-11-07 16:49 ` [PATCH 5/9] iommufd: Allow MMIO pages in a batch Jason Gunthorpe
2025-11-13 23:28 ` Nicolin Chen
2025-11-20 7:59 ` Tian, Kevin
2025-11-20 14:59 ` Jason Gunthorpe
2025-11-07 16:49 ` Jason Gunthorpe [this message]
2025-11-13 23:39 ` [PATCH 6/9] iommufd: Have pfn_reader process DMABUF iopt_pages Nicolin Chen
2025-11-18 19:38 ` Jason Gunthorpe
2025-11-20 8:04 ` Tian, Kevin
2025-11-21 0:47 ` Jason Gunthorpe
2025-11-21 14:33 ` Jason Gunthorpe
2025-11-07 16:49 ` [PATCH 7/9] iommufd: Have iopt_map_file_pages convert the fd to a file Jason Gunthorpe
2025-11-13 23:43 ` Nicolin Chen
2025-11-20 8:05 ` Tian, Kevin
2025-11-07 16:49 ` [PATCH 8/9] iommufd: Accept a DMABUF through IOMMU_IOAS_MAP_FILE Jason Gunthorpe
2025-11-14 0:05 ` Nicolin Chen
2025-11-18 19:44 ` Jason Gunthorpe
2025-11-18 19:57 ` Nicolin Chen
2025-11-20 8:06 ` Tian, Kevin
2025-11-07 16:49 ` [PATCH 9/9] iommufd/selftest: Add some tests for the dmabuf flow Jason Gunthorpe
2025-11-07 19:43 ` Nicolin Chen
2025-11-18 19:25 ` Jason Gunthorpe
2025-11-20 8:06 ` Tian, Kevin
2025-11-07 17:52 ` [PATCH 0/9] Initial DMABUF support for iommufd Nicolin Chen
2025-11-13 6:33 ` Shuai Xue
2025-11-13 7:34 ` Nicolin Chen
2025-11-13 11:32 ` Shuai Xue
2025-11-13 17:44 ` Nicolin Chen
2025-11-13 18:37 ` Alex Williamson
2025-11-17 15:50 ` Jason Gunthorpe
2025-11-18 5:37 ` Kasireddy, Vivek
2025-11-18 14:59 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=6-v1-af84a3ab44f5+f68-iommufd_buf_jgg@nvidia.com \
--to=jgg@nvidia.com \
--cc=alex@shazbot.org \
--cc=christian.koenig@amd.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=iommu@lists.linux.dev \
--cc=joro@8bytes.org \
--cc=kevin.tian@intel.com \
--cc=kjaju@nvidia.com \
--cc=kvm@vger.kernel.org \
--cc=leon@kernel.org \
--cc=linaro-mm-sig@lists.linaro.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=linux-media@vger.kernel.org \
--cc=mochs@nvidia.com \
--cc=nicolinc@nvidia.com \
--cc=patches@lists.linux.dev \
--cc=robin.murphy@arm.com \
--cc=shuah@kernel.org \
--cc=simona.vetter@ffwll.ch \
--cc=sumit.semwal@linaro.org \
--cc=vivek.kasireddy@intel.com \
--cc=will@kernel.org \
--cc=yilun.xu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox