Intel-XE Archive on lore.kernel.org
From: Matthew Auld <matthew.auld@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: Matthew Brost <matthew.brost@intel.com>
Subject: [PATCH v2 7/7] drm/xe/configfs: add disable_mem_copy knob
Date: Mon, 20 Oct 2025 13:54:39 +0100
Message-ID: <20251020125431.41153-16-matthew.auld@intel.com>
In-Reply-To: <20251020125431.41153-9-matthew.auld@intel.com>

For easier experimentation/comparison, allow turning off the newer
MEM_COPY path without needing to apply manual hacks and build a new
kernel. The knob needs to be configured before the device is probed.
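
For example, a possible flow, assuming the device at 0000:03:00.0 is
not yet bound to the driver (the BDF here is illustrative):

  # create the per-device config group, named after the PCI address
  mkdir -p /sys/kernel/config/xe/0000:03:00.0/
  # force the older XY_FAST_COPY path for this device
  echo 1 > /sys/kernel/config/xe/0000:03:00.0/disable_mem_copy
  # (re)bind the device so the setting is applied at probe time
  echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind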

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_configfs.c | 65 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_configfs.h |  2 +
 drivers/gpu/drm/xe/xe_pci.c      |  4 +-
 3 files changed, 70 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index c1419a270fa4..df459daa0f07 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -236,6 +236,15 @@
  *
  * This setting only takes effect when probing the device.
  *
+ * Disable MEM_COPY path
+ * ---------------------
+ * Setting this forces the migration code to use the older XY_FAST_COPY instruction instead of
+ * MEM_COPY. Intended only for experimentation/comparison.
+ *
+ *	# echo 1 > /sys/kernel/config/xe/0000:03:00.0/disable_mem_copy
+ *
+ * This setting only takes effect when probing the device.
+ *
  * Remove devices
  * ==============
  *
@@ -264,6 +273,9 @@ struct xe_config_group_device {
 		struct {
 			unsigned int max_vfs;
 		} sriov;
+		struct {
+			bool disable_mem_copy;
+		} migrate;
 	} config;
 
 	/* protects attributes */
@@ -282,6 +294,9 @@ static const struct xe_config_device device_defaults = {
 	.sriov = {
 		.max_vfs = UINT_MAX,
 	},
+	.migrate = {
+		.disable_mem_copy = false,
+	},
 };
 
 static void set_device_defaults(struct xe_config_device *config)
@@ -809,8 +824,35 @@ static ssize_t ctx_restore_post_bb_store(struct config_item *item,
 	return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
 }
 
+static ssize_t disable_mem_copy_show(struct config_item *item, char *page)
+{
+	struct xe_config_device *dev = to_xe_config_device(item);
+
+	return sprintf(page, "%d\n", dev->migrate.disable_mem_copy);
+}
+
+static ssize_t disable_mem_copy_store(struct config_item *item, const char *page, size_t len)
+{
+	struct xe_config_group_device *dev = to_xe_config_group_device(item);
+	bool val;
+	int ret;
+
+	ret = kstrtobool(page, &val);
+	if (ret)
+		return ret;
+
+	guard(mutex)(&dev->lock);
+	if (is_bound(dev))
+		return -EBUSY;
+
+	dev->config.migrate.disable_mem_copy = val;
+
+	return len;
+}
+
 CONFIGFS_ATTR(, ctx_restore_mid_bb);
 CONFIGFS_ATTR(, ctx_restore_post_bb);
+CONFIGFS_ATTR(, disable_mem_copy);
 CONFIGFS_ATTR(, enable_psmi);
 CONFIGFS_ATTR(, engines_allowed);
 CONFIGFS_ATTR(, gt_types_allowed);
@@ -819,6 +861,7 @@ CONFIGFS_ATTR(, survivability_mode);
 static struct configfs_attribute *xe_config_device_attrs[] = {
 	&attr_ctx_restore_mid_bb,
 	&attr_ctx_restore_post_bb,
+	&attr_disable_mem_copy,
 	&attr_enable_psmi,
 	&attr_engines_allowed,
 	&attr_gt_types_allowed,
@@ -1065,6 +1108,7 @@ static void dump_custom_dev_config(struct pci_dev *pdev,
 	PRI_CUSTOM_ATTR("%llx", engines_allowed);
 	PRI_CUSTOM_ATTR("%d", enable_psmi);
 	PRI_CUSTOM_ATTR("%d", survivability_mode);
+	PRI_CUSTOM_ATTR("%d", migrate.disable_mem_copy);
 
 #undef PRI_CUSTOM_ATTR
 }
@@ -1242,6 +1286,27 @@ u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
 	return len;
 }
 
+/**
+ * xe_configfs_migrate_disable_mem_copy() - Get the configfs disable_mem_copy setting
+ * @pdev: pci device
+ *
+ * Return: True if the MEM_COPY path should be disabled, forcing the migration code to fall back
+ * to the older XY_FAST_COPY instruction. False otherwise.
+ */
+bool xe_configfs_migrate_disable_mem_copy(struct pci_dev *pdev)
+{
+	struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+	bool disable_mem_copy;
+
+	if (!dev)
+		return false;
+
+	disable_mem_copy = dev->config.migrate.disable_mem_copy;
+	config_group_put(&dev->group);
+
+	return disable_mem_copy;
+}
+
 #ifdef CONFIG_PCI_IOV
 /**
  * xe_configfs_get_max_vfs() - Get number of VFs that could be managed
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
index fed57be0b90e..01f285ce56f2 100644
--- a/drivers/gpu/drm/xe/xe_configfs.h
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -25,6 +25,7 @@ u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_clas
 				       const u32 **cs);
 u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
 					const u32 **cs);
+bool xe_configfs_migrate_disable_mem_copy(struct pci_dev *pdev);
 #ifdef CONFIG_PCI_IOV
 unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev);
 #endif
@@ -42,6 +43,7 @@ static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum
 static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
 						      const u32 **cs) { return 0; }
 static inline unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev) { return UINT_MAX; }
+static inline bool xe_configfs_migrate_disable_mem_copy(struct pci_dev *pdev) { return false; }
 #endif
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 8458d4ae8ee7..9f8a2a298216 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -660,7 +660,9 @@ static int xe_info_init_early(struct xe_device *xe,
 	xe->info.has_pxp = desc->has_pxp;
 	xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) &&
 		desc->has_sriov;
-	xe->info.has_mem_copy_instr = desc->has_mem_copy_instr;
+	xe->info.has_mem_copy_instr =
+		desc->has_mem_copy_instr &&
+		!xe_configfs_migrate_disable_mem_copy(to_pci_dev(xe->drm.dev));
 	xe->info.skip_guc_pc = desc->skip_guc_pc;
 	xe->info.skip_mtcfg = desc->skip_mtcfg;
 	xe->info.skip_pcode = desc->skip_pcode;
-- 
2.51.0


Thread overview: 15+ messages
2025-10-20 12:54 [PATCH v2 0/7] Some migration fixes/improvements Matthew Auld
2025-10-20 12:54 ` [PATCH v2 1/7] drm/xe/migrate: rework size restrictions for sram pte emit Matthew Auld
2025-10-20 12:54 ` [PATCH v2 2/7] drm/xe/migrate: fix chunk handling for 2M page emit Matthew Auld
2025-10-20 12:54 ` [PATCH v2 3/7] drm/xe/migrate: fix batch buffer sizing Matthew Auld
2025-10-20 12:54 ` [PATCH v2 4/7] drm/xe/migrate: trim " Matthew Auld
2025-10-20 12:54 ` [PATCH v2 5/7] drm/xe/migrate: support MEM_COPY instruction Matthew Auld
2025-10-20 18:41   ` Matthew Brost
2025-10-20 12:54 ` [PATCH v2 6/7] drm/xe/migrate: skip bounce buffer path on xe2 Matthew Auld
2025-10-20 18:52   ` Matthew Brost
2025-10-21  9:23     ` Matthew Auld
2025-10-20 12:54 ` Matthew Auld [this message]
2025-10-20 22:49   ` [PATCH v2 7/7] drm/xe/configfs: add disable_mem_copy knob Matthew Brost
2025-10-21  2:18   ` Lucas De Marchi
2025-10-21  9:06     ` Matthew Auld
2025-10-20 13:04 ` ✗ CI.KUnit: failure for Some migration fixes/improvements (rev2) Patchwork
