public inbox for igt-dev@lists.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH v2 i-g-t] lib/intel_pat: use kernel debugfs as authoritative PAT source for Xe
@ 2026-02-24 17:07 himanshu.girotra
  2026-02-24 19:00 ` Wang, X
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: himanshu.girotra @ 2026-02-24 17:07 UTC (permalink / raw)
  To: matthew.d.roper, x.wang, igt-dev

From: Himanshu Girotra <himanshu.girotra@intel.com>

IGT should treat the kernel as authoritative for PAT configuration
rather than replicating platform-specific logic and workaround
adjustments in hardcoded tables, which is error-prone as PAT layouts
vary across platforms.

For Xe devices, query pat_sw_config from debugfs instead of using
hardcoded PAT indices. Remove the Xe-only hardcoded entries and
retain the i915 fallback for older platforms.

Drop the now-redundant max_index assert in pat_sanity().

v2: Drop redundant index asserts; instead validate actual PAT register contents for correct cache types (Matt Roper)

Cc: Matt Roper <matthew.d.roper@intel.com>
Cc: Xin Wang <x.wang@intel.com>
Signed-off-by: Himanshu Girotra <himanshu.girotra@intel.com>
---
 lib/intel_pat.c      | 37 ++++++++++----------
 tests/intel/xe_pat.c | 81 ++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 94 insertions(+), 24 deletions(-)

diff --git a/lib/intel_pat.c b/lib/intel_pat.c
index 9815efc18..8660a2515 100644
--- a/lib/intel_pat.c
+++ b/lib/intel_pat.c
@@ -96,24 +96,27 @@ int32_t xe_get_pat_sw_config(int drm_fd, struct intel_pat_cache *xe_pat_cache)
 
 static void intel_get_pat_idx(int fd, struct intel_pat_cache *pat)
 {
-	uint16_t dev_id = intel_get_drm_devid(fd);
+	uint16_t dev_id;
+
+	/*
+	 * For Xe driver, query the kernel's PAT software configuration
+	 * via debugfs. The kernel is the authoritative source for PAT
+	 * indices, accounting for platform-specific workarounds
+	 * (e.g. Wa_16023588340) at runtime.
+	 */
+	if (is_xe_device(fd)) {
+		int32_t parsed = xe_get_pat_sw_config(fd, pat);
+
+		igt_assert_f(parsed > 0,
+			     "Failed to get PAT sw_config from debugfs (parsed=%d)\n",
+			     parsed);
+		return;
+	}
 
-	if (intel_graphics_ver(dev_id) == IP_VER(35, 11)) {
-		pat->uc = 3;
-		pat->wb = 2;
-		pat->max_index = 31;
-	} else if (intel_get_device_info(dev_id)->graphics_ver == 30 ||
-		   intel_get_device_info(dev_id)->graphics_ver == 20) {
-		pat->uc = 3;
-		pat->wt = 15; /* Compressed + WB-transient */
-		pat->wb = 2;
-		pat->uc_comp = 12; /* Compressed + UC, XE2 and later */
-		pat->max_index = 31;
-
-		/* Wa_16023588340: CLOS3 entries at end of table are unusable */
-		if (intel_graphics_ver(dev_id) == IP_VER(20, 1))
-			pat->max_index -= 4;
-	} else if (IS_METEORLAKE(dev_id)) {
+	/* i915 fallback: hardcoded PAT indices */
+	dev_id = intel_get_drm_devid(fd);
+
+	if (IS_METEORLAKE(dev_id)) {
 		pat->uc = 2;
 		pat->wt = 1;
 		pat->wb = 3;
diff --git a/tests/intel/xe_pat.c b/tests/intel/xe_pat.c
index 21547c84e..6ad6adab7 100644
--- a/tests/intel/xe_pat.c
+++ b/tests/intel/xe_pat.c
@@ -103,6 +103,57 @@ static void userptr_coh_none(int fd)
 #define COH_MODE_1WAY		2
 #define COH_MODE_2WAY		3
 
+/* Pre-Xe2 PAT bit fields (from kernel xe_pat.c) */
+#define XELP_MEM_TYPE_MASK	GENMASK(1, 0)
+
+static bool pat_entry_is_uc(unsigned int gfx_ver, uint32_t pat)
+{
+	if (gfx_ver >= IP_VER(20, 0))
+		return REG_FIELD_GET(XE2_L3_POLICY, pat) == L3_CACHE_POLICY_UC &&
+		       REG_FIELD_GET(XE2_L4_POLICY, pat) == L4_CACHE_POLICY_UC;
+
+	if (gfx_ver >= IP_VER(12, 70))
+		return REG_FIELD_GET(XE2_L4_POLICY, pat) == L4_CACHE_POLICY_UC;
+
+	return REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat) == 0;
+}
+
+static bool pat_entry_is_wb(unsigned int gfx_ver, uint32_t pat)
+{
+	if (gfx_ver >= IP_VER(20, 0)) {
+		uint32_t l3 = REG_FIELD_GET(XE2_L3_POLICY, pat);
+
+		return l3 == L3_CACHE_POLICY_WB || l3 == L3_CACHE_POLICY_XD;
+	}
+
+	if (gfx_ver >= IP_VER(12, 70))
+		return REG_FIELD_GET(XE2_L4_POLICY, pat) == L4_CACHE_POLICY_WB;
+
+	return REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat) == 3;
+}
+
+static bool pat_entry_is_wt(unsigned int gfx_ver, uint32_t pat)
+{
+	if (gfx_ver >= IP_VER(20, 0))
+		return REG_FIELD_GET(XE2_L3_POLICY, pat) == L3_CACHE_POLICY_XD &&
+		       REG_FIELD_GET(XE2_L4_POLICY, pat) == L4_CACHE_POLICY_WT;
+
+	if (gfx_ver >= IP_VER(12, 70))
+		return REG_FIELD_GET(XE2_L4_POLICY, pat) == L4_CACHE_POLICY_WT;
+
+	return REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat) == 2;
+}
+
+static bool pat_entry_is_uc_comp(unsigned int gfx_ver, uint32_t pat)
+{
+	if (gfx_ver >= IP_VER(20, 0))
+		return !!(pat & XE2_COMP_EN) &&
+		       REG_FIELD_GET(XE2_L3_POLICY, pat) == L3_CACHE_POLICY_UC &&
+		       REG_FIELD_GET(XE2_L4_POLICY, pat) == L4_CACHE_POLICY_UC;
+
+	return false;
+}
+
 static int xe_fetch_pat_sw_config(int fd, struct intel_pat_cache *pat_sw_config)
 {
 	int32_t parsed = xe_get_pat_sw_config(fd, pat_sw_config);
@@ -120,13 +171,14 @@ static int xe_fetch_pat_sw_config(int fd, struct intel_pat_cache *pat_sw_config)
 static void pat_sanity(int fd)
 {
 	uint16_t dev_id = intel_get_drm_devid(fd);
+	unsigned int gfx_ver = intel_graphics_ver(dev_id);
 	struct intel_pat_cache pat_sw_config = {};
 	int32_t parsed;
 	bool has_uc_comp = false, has_wt = false;
 
 	parsed = xe_fetch_pat_sw_config(fd, &pat_sw_config);
 
-	if (intel_graphics_ver(dev_id) >= IP_VER(20, 0)) {
+	if (gfx_ver >= IP_VER(20, 0)) {
 		for (int i = 0; i < parsed; i++) {
 			uint32_t pat = pat_sw_config.entries[i].pat;
 			if (pat_sw_config.entries[i].rsvd)
@@ -144,13 +196,28 @@ static void pat_sanity(int fd)
 	} else {
 		has_wt = true;
 	}
-	igt_assert_eq(pat_sw_config.max_index, intel_get_max_pat_index(fd));
-	igt_assert_eq(pat_sw_config.uc, intel_get_pat_idx_uc(fd));
-	igt_assert_eq(pat_sw_config.wb, intel_get_pat_idx_wb(fd));
+
+	/*
+	 * Validate that the selected PAT indices actually have the expected
+	 * cache types rather than comparing against hardcoded values.
+	 */
+	igt_assert_f(pat_entry_is_uc(gfx_ver, pat_sw_config.entries[pat_sw_config.uc].pat),
+		     "UC index %d does not point to an uncached entry (pat=0x%x)\n",
+		     pat_sw_config.uc, pat_sw_config.entries[pat_sw_config.uc].pat);
+	igt_assert_f(pat_entry_is_wb(gfx_ver, pat_sw_config.entries[pat_sw_config.wb].pat),
+		     "WB index %d does not point to a WB/XA/XD entry (pat=0x%x)\n",
+		     pat_sw_config.wb, pat_sw_config.entries[pat_sw_config.wb].pat);
 	if (has_wt)
-		igt_assert_eq(pat_sw_config.wt, intel_get_pat_idx_wt(fd));
-	if (has_uc_comp)
-		igt_assert_eq(pat_sw_config.uc_comp, intel_get_pat_idx_uc_comp(fd));
+		igt_assert_f(pat_entry_is_wt(gfx_ver, pat_sw_config.entries[pat_sw_config.wt].pat),
+			     "WT index %d does not point to a WT entry (pat=0x%x)\n",
+			     pat_sw_config.wt, pat_sw_config.entries[pat_sw_config.wt].pat);
+	if (has_uc_comp) {
+		uint32_t uc_comp_pat = pat_sw_config.entries[pat_sw_config.uc_comp].pat;
+
+		igt_assert_f(pat_entry_is_uc_comp(gfx_ver, uc_comp_pat),
+			     "UC_COMP index %d does not point to a compressed UC entry (pat=0x%x)\n",
+			     pat_sw_config.uc_comp, uc_comp_pat);
+	}
 }
 
 /**
-- 
2.50.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2026-02-27 17:45 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-24 17:07 [PATCH v2 i-g-t] lib/intel_pat: use kernel debugfs as authoritative PAT source for Xe himanshu.girotra
2026-02-24 19:00 ` Wang, X
2026-02-27 17:45   ` Matt Roper
2026-02-25  0:26 ` ✗ Xe.CI.BAT: failure for lib/intel_pat: use kernel debugfs as authoritative PAT source for Xe (rev2) Patchwork
2026-02-25  1:20 ` ✗ i915.CI.BAT: " Patchwork
2026-02-25  8:18 ` ✗ Xe.CI.FULL: " Patchwork
2026-02-27 17:37 ` [PATCH v2 i-g-t] lib/intel_pat: use kernel debugfs as authoritative PAT source for Xe Matt Roper

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox