Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	"Matthew Brost" <matthew.brost@intel.com>,
	"Maarten Lankhorst" <maarten.lankhorst@linux.intel.com>,
	"Christian König" <christian.koenig@amd.com>,
	"Kasireddy Vivek" <vivek.kasireddy@intel.com>,
	"Simona Vetter" <simona.vetter@ffwll.ch>,
	"Jason Gunthorpe" <jgg@nvidia.com>,
	dri-devel@lists.freedesktop.org, linaro-mm-sig@lists.linaro.org
Subject: [RFC PATCH v2 2/2] drm/xe/dma-buf: Add generic interconnect support framework
Date: Fri, 26 Sep 2025 10:46:24 +0200	[thread overview]
Message-ID: <20250926084624.2288-3-thomas.hellstrom@linux.intel.com> (raw)
In-Reply-To: <20250926084624.2288-1-thomas.hellstrom@linux.intel.com>

Negotiate to use an xe-specific interconnect.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/xe/tests/xe_dma_buf.c | 12 ++---
 drivers/gpu/drm/xe/xe_dma_buf.c       | 73 ++++++++++++++++++++++++---
 drivers/gpu/drm/xe/xe_dma_buf.h       |  1 -
 drivers/gpu/drm/xe/xe_interconnect.h  | 31 ++++++++++++
 4 files changed, 104 insertions(+), 13 deletions(-)
 create mode 100644 drivers/gpu/drm/xe/xe_interconnect.h

diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 5df98de5ba3c..8eaea6c2a3b7 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -210,9 +210,9 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
  */
 static const struct dma_buf_test_params test_params[] = {
 	{.mem_mask = XE_BO_FLAG_VRAM0,
-	 .attach_ops = &xe_dma_buf_attach_ops},
+	 .attach_ops = &xe_dma_buf_attach_ops.dma_ops},
 	{.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
-	 .attach_ops = &xe_dma_buf_attach_ops,
+	 .attach_ops = &xe_dma_buf_attach_ops.dma_ops,
 	 .force_different_devices = true},
 
 	{.mem_mask = XE_BO_FLAG_VRAM0,
@@ -226,9 +226,9 @@ static const struct dma_buf_test_params test_params[] = {
 	 .force_different_devices = true},
 
 	{.mem_mask = XE_BO_FLAG_SYSTEM,
-	 .attach_ops = &xe_dma_buf_attach_ops},
+	 .attach_ops = &xe_dma_buf_attach_ops.dma_ops},
 	{.mem_mask = XE_BO_FLAG_SYSTEM,
-	 .attach_ops = &xe_dma_buf_attach_ops,
+	 .attach_ops = &xe_dma_buf_attach_ops.dma_ops,
 	 .force_different_devices = true},
 
 	{.mem_mask = XE_BO_FLAG_SYSTEM,
@@ -242,10 +242,10 @@ static const struct dma_buf_test_params test_params[] = {
 	 .force_different_devices = true},
 
 	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
-	 .attach_ops = &xe_dma_buf_attach_ops},
+	 .attach_ops = &xe_dma_buf_attach_ops.dma_ops},
 	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
 		     XE_BO_FLAG_NEEDS_CPU_ACCESS,
-	 .attach_ops = &xe_dma_buf_attach_ops,
+	 .attach_ops = &xe_dma_buf_attach_ops.dma_ops,
 	 .force_different_devices = true},
 
 	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 54e42960daad..ffb00d54bb9e 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -16,18 +16,49 @@
 #include "tests/xe_test.h"
 #include "xe_bo.h"
 #include "xe_device.h"
+#include "xe_interconnect.h"
 #include "xe_pm.h"
 #include "xe_ttm_vram_mgr.h"
 #include "xe_vm.h"
 
 MODULE_IMPORT_NS("DMA_BUF");
 
+struct xe_dma_buf_attach_ops {
+	struct dma_buf_attach_ops dma_ops;
+	struct xe_interconnect_attach_ops ic_ops;
+};
+
+static const struct xe_dma_buf_attach_ops *
+to_xe_dma_buf_attach_ops(struct dma_buf_attachment *attach)
+{
+	const struct dma_buf_attach_ops *aops = attach->importer_ops;
+	const struct dma_buf_interconnect_attach_ops *iaops;
+
+	if (!aops || !aops->supports_interconnect)
+		return NULL;
+
+	iaops = aops->supports_interconnect(attach, xe_interconnect);
+	return iaops ? container_of(iaops, struct xe_dma_buf_attach_ops, ic_ops.base) : NULL;
+}
+
 static int xe_dma_buf_attach(struct dma_buf *dmabuf,
 			     struct dma_buf_attachment *attach)
 {
 	struct drm_gem_object *obj = attach->dmabuf->priv;
+	const struct xe_dma_buf_attach_ops *xe_attach_ops =
+		to_xe_dma_buf_attach_ops(attach);
+
+	if (xe_attach_ops && xe_attach_ops->ic_ops.allow_ic) {
+		struct xe_interconnect_attach *xe_attach = kzalloc(sizeof(*xe_attach), GFP_KERNEL);
+
+		if (xe_attach) {
+			xe_attach->base.interconnect = xe_interconnect;
+			xe_attach->sg_list_replacement = NULL;
+			attach->interconnect_attach = &xe_attach->base;
+		}
+	}
 
-	if (attach->peer2peer &&
+	if (!attach->interconnect_attach && attach->peer2peer &&
 	    pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
 		attach->peer2peer = false;
 
@@ -43,6 +74,7 @@ static void xe_dma_buf_detach(struct dma_buf *dmabuf,
 {
 	struct drm_gem_object *obj = attach->dmabuf->priv;
 
+	kfree(attach->interconnect_attach);
 	xe_pm_runtime_put(to_xe_device(obj->dev));
 }
 
@@ -135,6 +167,11 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
 
 	case XE_PL_VRAM0:
 	case XE_PL_VRAM1:
+		if (attach->interconnect_attach &&
+		    attach->interconnect_attach->interconnect == xe_interconnect) {
+			/* Map using something other than an sg-list */
+			;
+		}
 		r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
 					      bo->ttm.resource, 0,
 					      bo->ttm.base.size, attach->dev,
@@ -285,9 +322,28 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
 	XE_WARN_ON(xe_bo_evict(bo, exec));
 }
 
-static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
-	.allow_peer2peer = true,
-	.move_notify = xe_dma_buf_move_notify
+static const struct dma_buf_interconnect_attach_ops *
+xe_dma_buf_supports_interconnect(struct dma_buf_attachment *attach,
+				 const struct dma_buf_interconnect *interconnect)
+{
+	if (interconnect == xe_interconnect) {
+		return &container_of(attach->importer_ops,
+				     const struct xe_dma_buf_attach_ops,
+				     dma_ops)->ic_ops.base;
+	}
+
+	return NULL;
+}
+
+static const struct xe_dma_buf_attach_ops xe_dma_buf_attach_ops = {
+	.dma_ops = {
+		.allow_peer2peer = true,
+		.move_notify = xe_dma_buf_move_notify,
+		.supports_interconnect = xe_dma_buf_supports_interconnect,
+	},
+	.ic_ops = {
+		.allow_ic = true,
+	}
 };
 
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
@@ -336,12 +392,11 @@ struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
 	if (IS_ERR(bo))
 		return ERR_CAST(bo);
 
-	attach_ops = &xe_dma_buf_attach_ops;
+	attach_ops = &xe_dma_buf_attach_ops.dma_ops;
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
 	if (test)
 		attach_ops = test->attach_ops;
 #endif
-
 	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
 	if (IS_ERR(attach)) {
 		obj = ERR_CAST(attach);
@@ -364,6 +419,12 @@ struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
 	return obj;
 }
 
+static const struct dma_buf_interconnect _xe_interconnect = {
+	.name = "xe_interconnect",
+};
+
+const struct dma_buf_interconnect *xe_interconnect = &_xe_interconnect;
+
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
 #include "tests/xe_dma_buf.c"
 #endif
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.h b/drivers/gpu/drm/xe/xe_dma_buf.h
index 861dd28a862c..6b381ce4b7c1 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.h
+++ b/drivers/gpu/drm/xe/xe_dma_buf.h
@@ -11,5 +11,4 @@
 struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags);
 struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
 					   struct dma_buf *dma_buf);
-
 #endif
diff --git a/drivers/gpu/drm/xe/xe_interconnect.h b/drivers/gpu/drm/xe/xe_interconnect.h
new file mode 100644
index 000000000000..2b8bc9bf1c8d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_interconnect.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef _XE_INTERCONNECT_H_
+#define _XE_INTERCONNECT_H_
+
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+
+struct device_private_address;
+
+/* This file needs to be shared between the importer and exporter of the interconnect */
+
+extern const struct dma_buf_interconnect *xe_interconnect;
+
+struct xe_interconnect_attach_ops {
+	struct dma_buf_interconnect_attach_ops base;
+	/*
+	 * Here interconnect-private stuff can be added.
+	 * Like a function to check interconnect possibility.
+	 */
+	bool allow_ic;
+};
+
+struct xe_interconnect_attach {
+	struct dma_buf_interconnect_attach base;
+	struct device_private_address *sg_list_replacement;
+};
+
+#endif
-- 
2.51.0


  parent reply	other threads:[~2025-09-26  8:46 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-09-26  8:46 [RFC PATCH v2 0/2] dma-buf private interconnect POC Thomas Hellström
2025-09-26  8:46 ` [RFC PATCH v2 1/2] dma-buf: Add support for private interconnects Thomas Hellström
2025-09-26 12:56   ` Christian König
2025-09-26 13:51     ` Thomas Hellström
2025-09-26 14:41       ` Jason Gunthorpe
2025-09-26 14:51         ` Christian König
2025-09-26 16:00           ` Jason Gunthorpe
2025-09-29  8:16             ` Thomas Hellström
2025-09-29  8:20               ` Christian König
2025-09-29  8:25                 ` Thomas Hellström
2025-09-29 12:27                   ` Jason Gunthorpe
2025-09-29  8:16             ` Christian König
2025-09-29 12:45               ` Jason Gunthorpe
2025-09-29 16:02                 ` Thomas Hellström
2025-09-29 16:13                   ` Jason Gunthorpe
2025-09-26  8:46 ` Thomas Hellström [this message]
2025-09-26  9:34 ` ✗ CI.checkpatch: warning for dma-buf private interconnect POC (rev2) Patchwork
2025-09-26  9:35 ` ✓ CI.KUnit: success " Patchwork
2025-09-26  9:50 ` ✗ CI.checksparse: warning " Patchwork
2025-09-26 10:11 ` ✓ Xe.CI.BAT: success " Patchwork
2025-09-26 14:23 ` ✗ Xe.CI.Full: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250926084624.2288-3-thomas.hellstrom@linux.intel.com \
    --to=thomas.hellstrom@linux.intel.com \
    --cc=christian.koenig@amd.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=jgg@nvidia.com \
    --cc=linaro-mm-sig@lists.linaro.org \
    --cc=maarten.lankhorst@linux.intel.com \
    --cc=matthew.brost@intel.com \
    --cc=simona.vetter@ffwll.ch \
    --cc=vivek.kasireddy@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox