From: nishit.sharma@intel.com
To: igt-dev@lists.freedesktop.org, priyanka.dandamudi@intel.com
Subject: [PATCH i-g-t 3/3] tests/intel/xe_exec_store: Extending test scope for PCIE6 relax ordering
Date: Fri, 20 Feb 2026 09:30:41 +0000 [thread overview]
Message-ID: <20260220093041.1911492-4-nishit.sharma@intel.com> (raw)
In-Reply-To: <20260220093041.1911492-1-nishit.sharma@intel.com>
From: Nishit Sharma <nishit.sharma@intel.com>
This is an extension of the PCIe relaxed ordering validation to run on all
possible memory regions available in a device. Currently only the VRAM region
is utilized for memory operations on discrete platforms. This extension
utilizes both the System and VRAM regions of a discrete GPU for performing
memory operations.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
tests/intel/xe_exec_store.c | 107 ++++++++++++++++++++++--------------
1 file changed, 66 insertions(+), 41 deletions(-)
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index 5930f16e5..f65c22779 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -114,7 +114,7 @@ static void persistance_batch(struct data *data, uint64_t addr)
* Description: Test to verify store dword on all available engines.
*/
static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instance *eci,
- uint16_t dev_id)
+ uint16_t dev_id, uint32_t region)
{
struct drm_xe_sync sync[2] = {
{ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
@@ -143,8 +143,7 @@ static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instanc
bo_size = sizeof(*data);
bo_size = xe_bb_size(fd, bo_size);
- bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id),
+ bo = xe_bo_create(fd, vm, bo_size, region,
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
@@ -193,7 +192,7 @@ static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instanc
* @page-sized: page-sized
*/
static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
- unsigned int flags)
+ unsigned int flags, uint32_t region)
{
struct drm_xe_sync sync[2] = {
{ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
@@ -225,8 +224,7 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
sync[0].handle = syncobj_create(fd, 0);
for (i = 0; i < count; i++) {
- bo[i] = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id),
+ bo[i] = xe_bo_create(fd, vm, bo_size, region,
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
bo_map[i] = xe_bo_map(fd, bo[i], bo_size);
dst_offset[i] = intel_allocator_alloc_with_strategy(ahnd, bo[i],
@@ -282,7 +280,7 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
* SUBTEST: persistent
* Description: Validate MI_PRT_BATCH_BUFFER_START functionality
*/
-static void persistent(int fd)
+static void persistent(int fd, uint32_t region)
{
struct drm_xe_sync sync = {
.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
@@ -309,12 +307,10 @@ static void persistent(int fd)
batch_size = xe_bb_size(fd, batch_size);
engine = xe_engine(fd, 1);
- sd_batch = xe_bo_create(fd, vm, batch_size,
- vram_if_possible(fd, engine->instance.gt_id),
- DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
- prt_batch = xe_bo_create(fd, vm, batch_size,
- vram_if_possible(fd, engine->instance.gt_id),
- DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ sd_batch = xe_bo_create(fd, vm, batch_size, region,
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ prt_batch = xe_bo_create(fd, vm, batch_size, region,
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
xe_vm_bind_sync(fd, vm, sd_batch, 0, addr, batch_size);
sd_data = xe_bo_map(fd, sd_batch, batch_size);
@@ -419,7 +415,7 @@ static void long_shader(int fd, struct drm_xe_engine_class_instance *hwe,
* Test category: functionality test
*
*/
-static void mem_transection_ordering(int fd, size_t bo_size, bool fence)
+static void mem_transaction_ordering(int fd, size_t bo_size, bool fence, uint32_t region)
{
struct drm_xe_engine_class_instance inst = {
.engine_class = DRM_XE_ENGINE_CLASS_COPY,
@@ -456,8 +452,7 @@ static void mem_transection_ordering(int fd, size_t bo_size, bool fence)
sync[0].handle = syncobj_create(fd, 0);
for (i = 0; i < count; i++) {
- bo[i] = xe_bo_create_caching(fd, vm, bo_size, system_memory(fd), 0,
- DRM_XE_GEM_CPU_CACHING_WC);
+ bo[i] = xe_bo_create_caching(fd, vm, bo_size, region, 0, DRM_XE_GEM_CPU_CACHING_WC);
bo_map[i] = xe_bo_map(fd, bo[i], bo_size);
offset[i] = intel_allocator_alloc_with_strategy(ahnd, bo[i],
bo_size, 0,
@@ -493,7 +488,6 @@ static void mem_transection_ordering(int fd, size_t bo_size, bool fence)
}
if (fence)
batch_map[b++] = MI_MEM_FENCE | MI_WRITE_FENCE;
-
batch_map[b++] = MI_BATCH_BUFFER_END;
sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
sync[1].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
@@ -538,44 +532,72 @@ int igt_main()
struct drm_xe_engine_class_instance *hwe;
int fd;
uint16_t dev_id;
+ uint32_t region;
+ uint64_t memreg;
struct drm_xe_engine *engine;
igt_fixture() {
fd = drm_open_driver(DRIVER_XE);
xe_device_get(fd);
dev_id = intel_get_drm_devid(fd);
+ memreg = all_memory_regions(fd);
}
- igt_subtest("basic-store") {
- engine = xe_engine(fd, 1);
- basic_inst(fd, STORE, &engine->instance, dev_id);
+ igt_subtest_with_dynamic("basic-store") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ igt_dynamic_f("region-%s", xe_region_name(region)) {
+ engine = xe_engine(fd, 1);
+ basic_inst(fd, STORE, &engine->instance, dev_id, region);
+ }
+ }
}
- igt_subtest("basic-cond-batch") {
- engine = xe_engine(fd, 1);
- basic_inst(fd, COND_BATCH, &engine->instance, dev_id);
+ igt_subtest_with_dynamic("basic-cond-batch") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ igt_dynamic_f("region-%s", xe_region_name(region)) {
+ engine = xe_engine(fd, 1);
+ basic_inst(fd, COND_BATCH, &engine->instance, dev_id, region);
+ }
+ }
}
igt_subtest_with_dynamic("basic-all") {
- xe_for_each_engine(fd, hwe) {
- igt_dynamic_f("Engine-%s-Instance-%d-Tile-%d",
- xe_engine_class_string(hwe->engine_class),
- hwe->engine_instance,
- hwe->gt_id);
- basic_inst(fd, STORE, hwe, dev_id);
+ xe_for_each_mem_region(fd, memreg, region) {
+ xe_for_each_engine(fd, hwe) {
+ igt_dynamic_f("Engine-%s-Instance-%d-Tile-%d-Region-%s",
+ xe_engine_class_string(hwe->engine_class),
+ hwe->engine_instance,
+ hwe->gt_id,
+ xe_region_name(region))
+ basic_inst(fd, STORE, hwe, dev_id, region);
+ }
}
}
- igt_subtest("cachelines")
- xe_for_each_engine(fd, hwe)
- store_cachelines(fd, hwe, 0);
+ igt_subtest_with_dynamic("cachelines") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ xe_for_each_engine(fd, hwe) {
+ igt_dynamic_f("engine-%s-instance-%d-region-%s", xe_engine_class_string(hwe->engine_class), hwe->engine_instance, xe_region_name(region))
+ store_cachelines(fd, hwe, 0, region);
+ }
+ }
+ }
- igt_subtest("page-sized")
- xe_for_each_engine(fd, hwe)
- store_cachelines(fd, hwe, PAGES);
+ igt_subtest_with_dynamic("page-sized") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ xe_for_each_engine(fd, hwe) {
+ igt_dynamic_f("engine-%s-instance-%d-region-%s", xe_engine_class_string(hwe->engine_class), hwe->engine_instance, xe_region_name(region))
+ store_cachelines(fd, hwe, PAGES, region);
+ }
+ }
+ }
- igt_subtest("persistent")
- persistent(fd);
+ igt_subtest_with_dynamic("persistent") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ igt_dynamic_f("region-%s", xe_region_name(region))
+ persistent(fd, region);
+ }
+ }
igt_subtest_with_dynamic("long-shader-bb-check") {
struct igt_collection *set;
@@ -604,6 +626,7 @@ int igt_main()
igt_collection_destroy(set);
}
+ igt_describe("Verify memory relax ordering using copy/write operations");
igt_subtest_with_dynamic("mem-write-ordering-check") {
struct {
size_t size;
@@ -614,10 +637,12 @@ int igt_main()
{ SZ_8M, "8M" },
};
- for (size_t i = 0; i < ARRAY_SIZE(sizes); i++) {
- igt_dynamic_f("size-%s", sizes[i].label) {
- mem_transection_ordering(fd, sizes[i].size, true);
- mem_transection_ordering(fd, sizes[i].size, false);
+ xe_for_each_mem_region(fd, memreg, region) {
+ for (size_t i = 0; i < ARRAY_SIZE(sizes); i++) {
+ igt_dynamic_f("region-%s-size-%s", xe_region_name(region), sizes[i].label) {
+ mem_transaction_ordering(fd, sizes[i].size, true, region);
+ mem_transaction_ordering(fd, sizes[i].size, false, region);
+ }
}
}
}
--
2.34.1
next prev parent reply other threads:[~2026-02-20 9:30 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-20 9:30 [PATCH i-g-t 0/3] Add memory write ordering verification nishit.sharma
2026-02-20 9:30 ` [PATCH i-g-t 1/3] tests/intel/xe_exec_store: Validate PCIe6 relax ordering nishit.sharma
2026-02-24 4:25 ` Dandamudi, Priyanka
2026-02-20 9:30 ` [PATCH i-g-t 2/3] tests/intel/xe_exec_store: Enforce per-instruction copy limit for MEM_COPY nishit.sharma
2026-02-20 9:30 ` nishit.sharma [this message]
2026-02-24 4:26 ` [PATCH i-g-t 3/3] tests/intel/xe_exec_store: Extending test scope for PCIE6 relax ordering Dandamudi, Priyanka
2026-02-20 10:06 ` ✓ Xe.CI.BAT: success for Add memory write ordering verification Patchwork
2026-02-20 10:23 ` ✓ i915.CI.BAT: " Patchwork
2026-02-20 12:26 ` ✓ i915.CI.Full: " Patchwork
2026-02-20 21:44 ` ✗ Xe.CI.FULL: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260220093041.1911492-4-nishit.sharma@intel.com \
--to=nishit.sharma@intel.com \
--cc=igt-dev@lists.freedesktop.org \
--cc=priyanka.dandamudi@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox