Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Vivek Kasireddy <vivek.kasireddy@intel.com>
To: dri-devel@lists.freedesktop.org, intel-xe@lists.freedesktop.org,
	linux-media@vger.kernel.org, linaro-mm-sig@lists.linaro.org
Cc: "Vivek Kasireddy" <vivek.kasireddy@intel.com>,
	"Jason Gunthorpe" <jgg@nvidia.com>,
	"Christian Koenig" <christian.koenig@amd.com>,
	"Sumit Semwal" <sumit.semwal@linaro.org>,
	"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	"Simona Vetter" <simona.vetter@ffwll.ch>
Subject: [RFC 4/8] vfio/pci/dmabuf: Add support for IOV interconnect
Date: Tue, 14 Oct 2025 00:08:54 -0700	[thread overview]
Message-ID: <20251014071243.811884-5-vivek.kasireddy@intel.com> (raw)
In-Reply-To: <20251014071243.811884-1-vivek.kasireddy@intel.com>

Add support for IOV interconnect by providing ops for map/unmap and
match interconnect. Note that the xarray is populated with entries
of type struct range. The range struct contains the start and end
addresses of the memory region.

Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Simona Vetter <simona.vetter@ffwll.ch>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/vfio/pci/vfio_pci_dmabuf.c | 141 ++++++++++++++++++++++++++++-
 1 file changed, 140 insertions(+), 1 deletion(-)

diff --git a/drivers/vfio/pci/vfio_pci_dmabuf.c b/drivers/vfio/pci/vfio_pci_dmabuf.c
index eaba010777f3..c45c1a7923f8 100644
--- a/drivers/vfio/pci/vfio_pci_dmabuf.c
+++ b/drivers/vfio/pci/vfio_pci_dmabuf.c
@@ -4,6 +4,7 @@
 #include <linux/dma-buf.h>
 #include <linux/pci-p2pdma.h>
 #include <linux/dma-resv.h>
+#include <linux/range.h>
 
 #include "vfio_pci_priv.h"
 
@@ -16,15 +17,138 @@ struct vfio_pci_dma_buf {
 	size_t size;
 	struct phys_vec *phys_vec;
 	struct p2pdma_provider *provider;
+	struct dma_buf_iov_interconnect *iov_ic;
 	u32 nr_ranges;
 	u8 revoked : 1;
 };
 
+static int
+vfio_pci_create_iov_match(struct vfio_pci_dma_buf *priv,
+			  struct vfio_device_feature_dma_buf *dma_buf)
+{
+	struct dma_buf_iov_interconnect *iov_ic;
+
+	iov_ic = kzalloc(sizeof(*iov_ic), GFP_KERNEL);
+	if (!iov_ic)
+		return -ENOMEM;
+
+	iov_ic->base.type = DMA_BUF_INTERCONNECT_IOV;
+	iov_ic->pdev = priv->vdev->pdev;
+	iov_ic->bar = dma_buf->region_index;
+
+	priv->iov_ic = iov_ic;
+	return 0;
+}
+
+static int vfio_pci_map_iov_interconnect(struct vfio_pci_dma_buf *priv,
+					 struct xarray *ranges)
+{
+	struct phys_vec *phys_vec = priv->phys_vec;
+	struct range *range;
+	unsigned long i;
+	void *entry;
+	int ret;
+
+	range = kmalloc_array(priv->nr_ranges, sizeof(*range), GFP_KERNEL);
+	if (!range)
+		return -ENOMEM;
+
+	for (i = 0; i < priv->nr_ranges; i++) {
+		entry = &range[i];
+		range[i].start = phys_vec[i].paddr;
+		range[i].end = phys_vec[i].paddr + phys_vec[i].len - 1;
+
+		entry = xa_store(ranges, i, entry, GFP_KERNEL);
+		if (xa_is_err(entry)) {
+			ret = xa_err(entry);
+			goto err_free_range;
+		}
+	}
+	return 0;
+
+err_free_range:
+	kfree(range);
+	return ret;
+}
+
+static int vfio_pci_map_interconnect(struct dma_buf_attachment *attachment,
+				     struct dma_buf_ranges *ranges)
+{
+	enum dma_buf_interconnect_type type = attachment->interconnect.type;
+	struct vfio_pci_dma_buf *priv = attachment->dmabuf->priv;
+	int ret = -EINVAL;
+
+	ranges->nranges = priv->nr_ranges;
+
+	if (type == DMA_BUF_INTERCONNECT_IOV)
+		ret = vfio_pci_map_iov_interconnect(priv, &ranges->ranges);
+	return ret;
+}
+
+static void vfio_pci_unmap_interconnect(struct dma_buf_attachment *attachment,
+					struct dma_buf_ranges *ranges)
+{
+	void *entry;
+
+	entry = xa_load(&ranges->ranges, 0);
+	kfree(entry);
+}
+
+static bool
+vfio_pci_match_iov_interconnect(const struct dma_buf_interconnect *exp,
+				const struct dma_buf_interconnect *imp)
+{
+	const struct dma_buf_iov_interconnect *exp_ic =
+		container_of(exp, struct dma_buf_iov_interconnect, base);
+	const struct dma_buf_iov_interconnect *imp_ic =
+		container_of(imp, struct dma_buf_iov_interconnect, base);
+
+	return imp_ic->pdev == pci_physfn(exp_ic->pdev) &&
+	       imp_ic->bar == exp_ic->bar;
+}
+
+static bool
+vfio_pci_match_interconnect(const struct dma_buf_interconnect *exp,
+			    const struct dma_buf_interconnect *imp)
+{
+	enum dma_buf_interconnect_type type = exp->type;
+
+	switch (type) {
+	case DMA_BUF_INTERCONNECT_IOV:
+		return vfio_pci_match_iov_interconnect(exp, imp);
+	default:
+		return false;
+	}
+}
+
+static bool
+vfio_pci_match_interconnects(struct vfio_pci_dma_buf *priv,
+			     struct dma_buf_attachment *attachment)
+{
+	const struct dma_buf_attach_ops *aops = attachment->importer_ops;
+	struct pci_dev *pdev = priv->vdev->pdev;
+	unsigned int bar = priv->iov_ic->bar;
+	const struct dma_buf_interconnect_match supports_ics[] = {
+		CREATE_IOV_INTERCONNECT(pdev, bar),
+	};
+
+	if (attachment->allow_ic) {
+		if (aops->supports_interconnects(attachment, supports_ics,
+						 ARRAY_SIZE(supports_ics)))
+			return true;
+	}
+	return false;
+}
+
 static int vfio_pci_dma_buf_attach(struct dma_buf *dmabuf,
 				   struct dma_buf_attachment *attachment)
 {
 	struct vfio_pci_dma_buf *priv = dmabuf->priv;
 
+	if (vfio_pci_match_interconnects(priv, attachment)) {
+		return 0;
+	}
+
 	if (!attachment->peer2peer)
 		return -EOPNOTSUPP;
 
@@ -189,6 +313,7 @@ vfio_pci_dma_buf_map(struct dma_buf_attachment *attachment,
 	return ERR_PTR(ret);
 }
 
+
 static void vfio_pci_dma_buf_unmap(struct dma_buf_attachment *attachment,
 				   struct sg_table *sgt,
 				   enum dma_data_direction dir)
@@ -228,15 +353,23 @@ static void vfio_pci_dma_buf_release(struct dma_buf *dmabuf)
 		vfio_device_put_registration(&priv->vdev->vdev);
 	}
 	kfree(priv->phys_vec);
+	kfree(priv->iov_ic);
 	kfree(priv);
 }
 
+static const struct dma_buf_interconnect_ops vfio_pci_interconnect_ops = {
+	.match_interconnect = vfio_pci_match_interconnect,
+	.map_interconnect = vfio_pci_map_interconnect,
+	.unmap_interconnect = vfio_pci_unmap_interconnect,
+};
+
 static const struct dma_buf_ops vfio_pci_dmabuf_ops = {
 	.attach = vfio_pci_dma_buf_attach,
 	.detach = vfio_pci_dma_buf_detach,
 	.map_dma_buf = vfio_pci_dma_buf_map,
 	.release = vfio_pci_dma_buf_release,
 	.unmap_dma_buf = vfio_pci_dma_buf_unmap,
+	.interconnect_ops = &vfio_pci_interconnect_ops,
 };
 
 static void dma_ranges_to_p2p_phys(struct vfio_pci_dma_buf *priv,
@@ -365,6 +498,10 @@ int vfio_pci_core_feature_dma_buf(struct vfio_pci_core_device *vdev, u32 flags,
 		goto err_free_phys;
 	}
 
+	ret = vfio_pci_create_iov_match(priv, &get_dma_buf);
+	if (ret)
+		goto err_dev_put;
+
 	exp_info.ops = &vfio_pci_dmabuf_ops;
 	exp_info.size = priv->size;
 	exp_info.flags = get_dma_buf.open_flags;
@@ -373,7 +510,7 @@ int vfio_pci_core_feature_dma_buf(struct vfio_pci_core_device *vdev, u32 flags,
 	priv->dmabuf = dma_buf_export(&exp_info);
 	if (IS_ERR(priv->dmabuf)) {
 		ret = PTR_ERR(priv->dmabuf);
-		goto err_dev_put;
+		goto err_free_iov;
 	}
 
 	/* dma_buf_put() now frees priv */
@@ -391,6 +528,8 @@ int vfio_pci_core_feature_dma_buf(struct vfio_pci_core_device *vdev, u32 flags,
 	 */
 	return dma_buf_fd(priv->dmabuf, get_dma_buf.open_flags);
 
+err_free_iov:
+	kfree(priv->iov_ic);
 err_dev_put:
 	vfio_device_put_registration(&vdev->vdev);
 err_free_phys:
-- 
2.50.1


  parent reply	other threads:[~2025-10-14  7:14 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-10-14  7:08 [RFC 0/8] dma-buf: Add support for mapping dmabufs via interconnects Vivek Kasireddy
2025-10-14  7:08 ` [RFC 1/8] dma-buf: Add support for map/unmap APIs for interconnects Vivek Kasireddy
2025-10-20  9:34   ` Thomas Hellström
2025-10-21  5:45     ` Kasireddy, Vivek
2025-10-28 13:58   ` Christian König
2025-10-28 14:05     ` Jason Gunthorpe
2025-10-28 14:14       ` Christian König
2025-10-28 14:44         ` Jason Gunthorpe
2025-10-14  7:08 ` [RFC 2/8] dma-buf: Add a helper to match interconnects between exporter/importer Vivek Kasireddy
2025-10-17 15:58   ` Kasireddy, Vivek
2025-10-14  7:08 ` [RFC 3/8] dma-buf: Add support for IOV interconnect Vivek Kasireddy
2025-10-14  7:08 ` Vivek Kasireddy [this message]
2025-10-14  7:08 ` [RFC 5/8] drm/xe/dma_buf: " Vivek Kasireddy
2025-10-14  7:08 ` [RFC 6/8] drm/xe/pf: Add a helper function to get a VF's backing object in LMEM Vivek Kasireddy
2025-10-14  7:08 ` [RFC 7/8] drm/xe/bo: Create new dma_addr array for dmabuf BOs associated with VFs Vivek Kasireddy
2025-10-14  7:08 ` [RFC 8/8] drm/xe/pt: Add an additional check for dmabuf BOs while doing bind Vivek Kasireddy

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251014071243.811884-5-vivek.kasireddy@intel.com \
    --to=vivek.kasireddy@intel.com \
    --cc=christian.koenig@amd.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=jgg@nvidia.com \
    --cc=linaro-mm-sig@lists.linaro.org \
    --cc=linux-media@vger.kernel.org \
    --cc=simona.vetter@ffwll.ch \
    --cc=sumit.semwal@linaro.org \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox