From: Leon Romanovsky <leon@kernel.org>
To: Alex Williamson <alex.williamson@redhat.com>
Cc: "Leon Romanovsky" <leonro@nvidia.com>,
"Christoph Hellwig" <hch@lst.de>,
"Jason Gunthorpe" <jgg@nvidia.com>,
"Andrew Morton" <akpm@linux-foundation.org>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Christian König" <christian.koenig@amd.com>,
dri-devel@lists.freedesktop.org, iommu@lists.linux.dev,
"Jens Axboe" <axboe@kernel.dk>,
"Jérôme Glisse" <jglisse@redhat.com>,
"Joerg Roedel" <joro@8bytes.org>,
kvm@vger.kernel.org, linaro-mm-sig@lists.linaro.org,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-media@vger.kernel.org, linux-mm@kvack.org,
linux-pci@vger.kernel.org,
"Logan Gunthorpe" <logang@deltatee.com>,
"Marek Szyprowski" <m.szyprowski@samsung.com>,
"Robin Murphy" <robin.murphy@arm.com>,
"Sumit Semwal" <sumit.semwal@linaro.org>,
"Vivek Kasireddy" <vivek.kasireddy@intel.com>,
"Will Deacon" <will@kernel.org>
Subject: [PATCH 02/10] PCI/P2PDMA: Introduce p2pdma_provider structure for cleaner abstraction
Date: Wed, 23 Jul 2025 16:00:03 +0300
Message-ID: <c2307cb4c3f1af46da138f3410738754691fbb3d.1753274085.git.leonro@nvidia.com>
In-Reply-To: <cover.1753274085.git.leonro@nvidia.com>
From: Leon Romanovsky <leonro@nvidia.com>

Extract the core P2PDMA provider information (device owner and bus
offset) from the dev_pagemap into a dedicated p2pdma_provider structure.
This creates a cleaner separation between the memory management layer
and the P2PDMA functionality.

The new p2pdma_provider structure contains:
- owner: pointer to the providing device
- bus_offset: the offset that converts a CPU physical address within the
  BAR into the address peer devices use for non-host (bus) transactions,
  as illustrated in the sketch after this list
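
Since bus_offset is the provider's entire translation state, a minimal
standalone sketch of the arithmetic may help (the BAR addresses below
are made up; in the patch the two inputs come from pci_bus_address()
and pci_resource_start(), and the translation itself is applied in
pci_p2pdma_bus_addr_map()):

#include <inttypes.h>
#include <stdio.h>

/* Userspace model of struct p2pdma_provider's bus_offset (owner omitted). */
struct provider {
	uint64_t bus_offset;
};

int main(void)
{
	/* Hypothetical BAR: the CPU sees it at one physical address,
	 * peer devices on the PCI bus see it at another. */
	uint64_t resource_start = 0xfe000000;	/* pci_resource_start() */
	uint64_t bus_addr = 0x80000000;		/* pci_bus_address() */

	struct provider mem = {
		/* Wraps modulo 2^64 when negative, exactly like u64. */
		.bus_offset = bus_addr - resource_start,
	};

	/* Translate a CPU physical address inside the BAR into the
	 * address a peer must use, as pci_p2pdma_bus_addr_map() does;
	 * this prints 0x80001000. */
	uint64_t paddr = resource_start + 0x1000;
	printf("bus address = 0x%" PRIx64 "\n", paddr + mem.bus_offset);
	return 0;
}
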
This refactoring simplifies the P2PDMA state management by removing the
need to access pgmap internals directly. The pci_p2pdma_map_state now
stores a pointer to the provider instead of the pgmap, making the API
more explicit and easier to understand. The same-provider check also
moves from the pci_p2pdma_state() inline into
__pci_p2pdma_update_state(), which now returns early when the page
belongs to the provider already cached in the state.
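
For context, a hedged sketch of the caller pattern this API serves; the
wrapper below is illustrative and not part of this patch, but it only
uses calls visible in the diff (pci_p2pdma_state(),
pci_p2pdma_bus_addr_map()) plus the standard DMA API:

#include <linux/dma-mapping.h>
#include <linux/pci-p2pdma.h>

/* Illustrative only: map one page for DMA by @dev, reusing the cached
 * state.  With this patch, __pci_p2pdma_update_state() returns early
 * when the page belongs to the same provider as the previous call, so
 * the map-type lookup runs once per provider rather than once per page.
 */
static dma_addr_t demo_map_page(struct pci_p2pdma_map_state *state,
				struct device *dev, struct page *page)
{
	switch (pci_p2pdma_state(state, dev, page)) {
	case PCI_P2PDMA_MAP_BUS_ADDR:
		/* Peer-to-peer through the switch: apply the provider's
		 * bus_offset to the CPU physical address. */
		return pci_p2pdma_bus_addr_map(state, page_to_phys(page));
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
	case PCI_P2PDMA_MAP_NONE:
		/* Routed through the host bridge, or ordinary memory:
		 * map it like any other page. */
		return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	default:
		/* PCI_P2PDMA_MAP_NOT_SUPPORTED (or unknown). */
		return DMA_MAPPING_ERROR;
	}
}
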
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/pci/p2pdma.c | 42 +++++++++++++++++++++-----------------
include/linux/pci-p2pdma.h | 18 ++++++++++++----
2 files changed, 37 insertions(+), 23 deletions(-)
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index fe347ed7fd8f4..5a310026bd24f 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -28,9 +28,8 @@ struct pci_p2pdma {
};
struct pci_p2pdma_pagemap {
- struct pci_dev *provider;
- u64 bus_offset;
struct dev_pagemap pgmap;
+ struct p2pdma_provider mem;
};
static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
@@ -204,8 +203,8 @@ static void p2pdma_page_free(struct page *page)
{
struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
/* safe to dereference while a reference is held to the percpu ref */
- struct pci_p2pdma *p2pdma =
- rcu_dereference_protected(pgmap->provider->p2pdma, 1);
+ struct pci_p2pdma *p2pdma = rcu_dereference_protected(
+ to_pci_dev(pgmap->mem.owner)->p2pdma, 1);
struct percpu_ref *ref;
gen_pool_free_owner(p2pdma->pool, (uintptr_t)page_to_virt(page),
@@ -270,14 +269,15 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
static void pci_p2pdma_unmap_mappings(void *data)
{
- struct pci_dev *pdev = data;
+ struct pci_p2pdma_pagemap *p2p_pgmap = data;
/*
* Removing the alloc attribute from sysfs will call
* unmap_mapping_range() on the inode, teardown any existing userspace
* mappings and prevent new ones from being created.
*/
- sysfs_remove_file_from_group(&pdev->dev.kobj, &p2pmem_alloc_attr.attr,
+ sysfs_remove_file_from_group(&p2p_pgmap->mem.owner->kobj,
+ &p2pmem_alloc_attr.attr,
p2pmem_group.name);
}
@@ -328,10 +328,9 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
pgmap->nr_range = 1;
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
pgmap->ops = &p2pdma_pgmap_ops;
-
- p2p_pgmap->provider = pdev;
- p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
- pci_resource_start(pdev, bar);
+ p2p_pgmap->mem.owner = &pdev->dev;
+ p2p_pgmap->mem.bus_offset =
+ pci_bus_address(pdev, bar) - pci_resource_start(pdev, bar);
addr = devm_memremap_pages(&pdev->dev, pgmap);
if (IS_ERR(addr)) {
@@ -340,7 +339,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
}
error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_unmap_mappings,
- pdev);
+ p2p_pgmap);
if (error)
goto pages_free;
@@ -973,16 +972,16 @@ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
-static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
- struct device *dev)
+static enum pci_p2pdma_map_type
+pci_p2pdma_map_type(struct p2pdma_provider *provider, struct device *dev)
{
enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
- struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
+ struct pci_dev *pdev = to_pci_dev(provider->owner);
struct pci_dev *client;
struct pci_p2pdma *p2pdma;
int dist;
- if (!provider->p2pdma)
+ if (!pdev->p2pdma)
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
if (!dev_is_pci(dev))
@@ -991,7 +990,7 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
client = to_pci_dev(dev);
rcu_read_lock();
- p2pdma = rcu_dereference(provider->p2pdma);
+ p2pdma = rcu_dereference(pdev->p2pdma);
if (p2pdma)
type = xa_to_value(xa_load(&p2pdma->map_types,
@@ -999,7 +998,7 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
rcu_read_unlock();
if (type == PCI_P2PDMA_MAP_UNKNOWN)
- return calc_map_type_and_dist(provider, client, &dist, true);
+ return calc_map_type_and_dist(pdev, client, &dist, true);
return type;
}
@@ -1007,8 +1006,13 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
struct device *dev, struct page *page)
{
- state->pgmap = page_pgmap(page);
- state->map = pci_p2pdma_map_type(state->pgmap, dev);
+ struct pci_p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(page_pgmap(page));
+
+ if (state->mem == &p2p_pgmap->mem)
+ return;
+
+ state->mem = &p2p_pgmap->mem;
+ state->map = pci_p2pdma_map_type(&p2p_pgmap->mem, dev);
}
/**
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index b502fc8b49bf9..27a2c399f47da 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -16,6 +16,16 @@
struct block_device;
struct scatterlist;
+/**
+ * struct p2pdma_provider
+ *
+ * A p2pdma provider is a range of MMIO address space available to the CPU.
+ */
+struct p2pdma_provider {
+ struct device *owner;
+ u64 bus_offset;
+};
+
#ifdef CONFIG_PCI_P2PDMA
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
u64 offset);
@@ -144,10 +154,11 @@ enum pci_p2pdma_map_type {
};
struct pci_p2pdma_map_state {
- struct dev_pagemap *pgmap;
+ struct p2pdma_provider *mem;
enum pci_p2pdma_map_type map;
};
+
/* helper for pci_p2pdma_state(), do not use directly */
void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
struct device *dev, struct page *page);
@@ -166,8 +177,7 @@ pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev,
struct page *page)
{
if (IS_ENABLED(CONFIG_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
- if (state->pgmap != page_pgmap(page))
- __pci_p2pdma_update_state(state, dev, page);
+ __pci_p2pdma_update_state(state, dev, page);
return state->map;
}
return PCI_P2PDMA_MAP_NONE;
@@ -185,7 +195,7 @@ static inline dma_addr_t
pci_p2pdma_bus_addr_map(struct pci_p2pdma_map_state *state, phys_addr_t paddr)
{
WARN_ON_ONCE(state->map != PCI_P2PDMA_MAP_BUS_ADDR);
- return paddr + to_p2p_pgmap(state->pgmap)->bus_offset;
+ return paddr + state->mem->bus_offset;
}
#endif /* _LINUX_PCI_P2P_H */
--
2.50.1