From: Brian Nguyen <brian3.nguyen@intel.com>
To: igt-dev@lists.freedesktop.org
Cc: x.wang@intel.com, Brian Nguyen <brian3.nguyen@intel.com>
Subject: [PATCH 3/4] tests/xe: Add transient display PRL skip subtest
Date: Mon, 6 Apr 2026 18:42:30 +0000
Message-ID: <20260406184226.1294486-9-brian3.nguyen@intel.com>
In-Reply-To: <20260406184226.1294486-6-brian3.nguyen@intel.com>
Page reclamation may be suppressed for various reasons. One such case is a
VMA bound with a transient display (XD) PAT index: such ranges do not use
page reclamation. Add a subtest that verifies this behavior.
Signed-off-by: Brian Nguyen <brian3.nguyen@intel.com>
Suggested-by: Xin Wang <x.wang@intel.com>
---
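A quick way to exercise just the new subtest, as a sketch that assumes the
test binary keeps the xe_page_reclaim name from this series and lives in the
usual meson output directory:

    ./build/tests/xe_page_reclaim --run-subtest pat-index-xd

On platforms without an XD PAT entry (anything before Xe2), the subtest is
expected to skip via the igt_require_f() check rather than fail.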
tests/intel/xe_page_reclaim.c | 87 +++++++++++++++++++++++++++++++++++
1 file changed, 87 insertions(+)
diff --git a/tests/intel/xe_page_reclaim.c b/tests/intel/xe_page_reclaim.c
index 7741063d2..6cc29d8d3 100644
--- a/tests/intel/xe_page_reclaim.c
+++ b/tests/intel/xe_page_reclaim.c
@@ -4,7 +4,10 @@
*/
#include <fcntl.h>
+#include <linux_scaffold.h>
+#include "igt_syncobj.h"
+#include "intel_pat.h"
#include "ioctl_wrappers.h"
#include "xe/xe_gt.h"
#include "xe/xe_ioctl.h"
@@ -44,6 +47,33 @@ static struct xe_prl_stats get_prl_stats(int fd, int gt)
	return stats;
}
+#define XE2_L3_POLICY GENMASK(5, 4)
+#define L3_CACHE_POLICY_XD 1
+
+static int get_xd_pat_idx(int fd)
+{
+	uint16_t dev_id = intel_get_drm_devid(fd);
+	struct intel_pat_cache pat_config = {};
+	int32_t parsed;
+	int i;
+
+	if (intel_graphics_ver(dev_id) < IP_VER(20, 0))
+		return -1;
+
+	parsed = xe_get_pat_sw_config(fd, &pat_config, 0);
+	if (parsed <= 0)
+		return -1;
+
+	for (i = 0; i < parsed; i++) {
+		if (pat_config.entries[i].rsvd)
+			continue;
+		if (FIELD_GET(XE2_L3_POLICY, pat_config.entries[i].pat) == L3_CACHE_POLICY_XD)
+			return i;
+	}
+
+	return -1;
+}
+
static void log_prl_stat_diff(struct xe_prl_stats *stats_before, struct xe_prl_stats *stats_after)
{
	igt_debug("PRL stats diff: 4K: %d->%d, 64K: %d->%d, 2M: %d -> %d, issued: %d->%d, aborted: %d->%d\n",
@@ -535,7 +565,61 @@ static void test_binds_1g_partial(int fd)
	stats_before = get_prl_stats(fd, 0);
	vma_range_list_with_unbind_and_offsets(fd, sizes, count, (1ull << 30), SZ_1G + SZ_2M, offsets);
	stats_after = get_prl_stats(fd, 0);
+	compare_prl_stats(&stats_before, &stats_after, &expected_stats);
+}
+/**
+ * SUBTEST: pat-index-xd
+ * Description: Bind a BO with an XD (transient display) PAT index and verify
+ *		that page reclamation skips the transient-display pages on
+ *		Xe2+ platforms.
+ */
+static void test_pat_index_xd(int fd)
+{
+	struct xe_prl_stats stats_before, stats_after, expected_stats = { 0 };
+	uint32_t vm, bo;
+	uint64_t size = SZ_4K;
+	uint64_t addr = 1ull << 30;
+	int pat_idx_xd, err;
+	struct drm_xe_sync sync = {
+		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
+	};
+
+	pat_idx_xd = get_xd_pat_idx(fd);
+	igt_require_f(pat_idx_xd >= 0, "XD PAT index not available on this platform\n");
+
+	vm = xe_vm_create(fd, 0, 0);
+	bo = xe_bo_create_caching(fd, 0, size, system_memory(fd), 0,
+				  DRM_XE_GEM_CPU_CACHING_WC);
+
+	/* Bind with XD PAT index - synchronous operation */
+	sync.handle = syncobj_create(fd, 0);
+	err = __xe_vm_bind(fd, vm, 0, bo, 0, addr,
+			   size, DRM_XE_VM_BIND_OP_MAP, 0, &sync, 1, 0,
+			   pat_idx_xd, 0);
+	igt_assert_eq(err, 0);
+	igt_assert(syncobj_wait(fd, &sync.handle, 1, INT64_MAX, 0, NULL));
+	syncobj_destroy(fd, sync.handle);
+
+	/*
+	 * Page reclamation should skip over the XD PAT VMA pages: they remain
+	 * valid and are handled elsewhere, so no invalidation entries are
+	 * required. A PRL is still issued so that valid PRL entries from
+	 * other VMAs are not squashed.
+	 */
+	expected_stats.prl_4k_entry_count = 0;
+	expected_stats.prl_64k_entry_count = 0;
+	expected_stats.prl_2m_entry_count = 0;
+	expected_stats.prl_issued_count = 1;
+	expected_stats.prl_aborted_count = 0;
+
+	stats_before = get_prl_stats(fd, 0);
+	xe_vm_unbind_sync(fd, vm, 0, addr, size);
+	stats_after = get_prl_stats(fd, 0);
+
+	gem_close(fd, bo);
+	xe_vm_destroy(fd, vm);
	compare_prl_stats(&stats_before, &stats_after, &expected_stats);
}
@@ -587,6 +671,9 @@ int igt_main()
	igt_subtest("binds-1g-partial")
		test_binds_1g_partial(fd);
+	igt_subtest("pat-index-xd")
+		test_pat_index_xd(fd);
+
	igt_fixture()
		drm_close_driver(fd);
}
--
2.43.0
Thread overview: 11+ messages
2026-04-06 18:42 [PATCH 0/4] tests/xe: Add xe_page_reclaim test suite Brian Nguyen
2026-04-06 18:42 ` [PATCH 1/4] tests/xe: Add page reclaim test Brian Nguyen
2026-04-06 18:42 ` [PATCH 2/4] tests/xe: Add random page reclaim subtest Brian Nguyen
2026-04-06 18:42 ` Brian Nguyen [this message]
2026-04-06 18:42 ` [PATCH 4/4] tests/xe: Add large VMA range tests for better coverage Brian Nguyen
2026-04-06 19:29 ` ✓ Xe.CI.BAT: success for tests/xe: Add xe_page_reclaim test suite Patchwork
2026-04-06 19:45 ` ✓ i915.CI.BAT: " Patchwork
2026-04-06 21:45 ` ✓ i915.CI.Full: " Patchwork
2026-04-07 0:23 ` ✗ Xe.CI.FULL: failure " Patchwork
2026-04-07 19:15 ` [PATCH 0/4] " Summers, Stuart
2026-04-07 22:02 ` Nguyen, Brian3