From: nishit.sharma@intel.com
To: igt-dev@lists.freedesktop.org, priyanka.dandamudi@intel.com
Subject: [PATCH i-g-t 2/2] tests/intel/xe_exec_store: Extend test coverage to all memory regions
Date: Tue, 24 Feb 2026 05:33:24 +0000 [thread overview]
Message-ID: <20260224053324.2354159-3-nishit.sharma@intel.com> (raw)
In-Reply-To: <20260224053324.2354159-1-nishit.sharma@intel.com>
From: Nishit Sharma <nishit.sharma@intel.com>
Generalize the test to run on all available memory regions (e.g., system
memory and VRAM) on the device, instead of only VRAM. All relevant
subtests now iterate over each memory region, improving coverage and
validation.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
tests/intel/xe_exec_store.c | 106 ++++++++++++++++++++++--------------
1 file changed, 65 insertions(+), 41 deletions(-)
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index 1acaa5aaa..faf2c7fa8 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -114,7 +114,7 @@ static void persistance_batch(struct data *data, uint64_t addr)
* Description: Test to verify store dword on all available engines.
*/
static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instance *eci,
- uint16_t dev_id)
+ uint16_t dev_id, uint32_t region)
{
struct drm_xe_sync sync[2] = {
{ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
@@ -143,8 +143,7 @@ static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instanc
bo_size = sizeof(*data);
bo_size = xe_bb_size(fd, bo_size);
- bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id),
+ bo = xe_bo_create(fd, vm, bo_size, region,
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
@@ -193,7 +192,7 @@ static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instanc
* @page-sized: page-sized
*/
static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
- unsigned int flags)
+ unsigned int flags, uint32_t region)
{
struct drm_xe_sync sync[2] = {
{ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
@@ -225,8 +224,7 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
sync[0].handle = syncobj_create(fd, 0);
for (i = 0; i < count; i++) {
- bo[i] = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id),
+ bo[i] = xe_bo_create(fd, vm, bo_size, region,
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
bo_map[i] = xe_bo_map(fd, bo[i], bo_size);
dst_offset[i] = intel_allocator_alloc_with_strategy(ahnd, bo[i],
@@ -282,7 +280,7 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
* SUBTEST: persistent
* Description: Validate MI_PRT_BATCH_BUFFER_START functionality
*/
-static void persistent(int fd)
+static void persistent(int fd, uint32_t region)
{
struct drm_xe_sync sync = {
.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
@@ -309,12 +307,10 @@ static void persistent(int fd)
batch_size = xe_bb_size(fd, batch_size);
engine = xe_engine(fd, 1);
- sd_batch = xe_bo_create(fd, vm, batch_size,
- vram_if_possible(fd, engine->instance.gt_id),
- DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
- prt_batch = xe_bo_create(fd, vm, batch_size,
- vram_if_possible(fd, engine->instance.gt_id),
- DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ sd_batch = xe_bo_create(fd, vm, batch_size, region,
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ prt_batch = xe_bo_create(fd, vm, batch_size, region,
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
xe_vm_bind_sync(fd, vm, sd_batch, 0, addr, batch_size);
sd_data = xe_bo_map(fd, sd_batch, batch_size);
@@ -419,7 +415,7 @@ static void long_shader(int fd, struct drm_xe_engine_class_instance *hwe,
* Test category: functionality test
*
*/
-static void mem_transaction_ordering(int fd, size_t bo_size, bool fence)
+static void mem_transaction_ordering(int fd, size_t bo_size, bool fence, uint32_t region)
{
struct drm_xe_engine_class_instance inst = {
.engine_class = DRM_XE_ENGINE_CLASS_COPY,
@@ -456,8 +452,7 @@ static void mem_transaction_ordering(int fd, size_t bo_size, bool fence)
sync[0].handle = syncobj_create(fd, 0);
for (i = 0; i < count; i++) {
- bo[i] = xe_bo_create_caching(fd, vm, bo_size, system_memory(fd), 0,
- DRM_XE_GEM_CPU_CACHING_WC);
+ bo[i] = xe_bo_create_caching(fd, vm, bo_size, region, 0, DRM_XE_GEM_CPU_CACHING_WC);
bo_map[i] = xe_bo_map(fd, bo[i], bo_size);
offset[i] = intel_allocator_alloc_with_strategy(ahnd, bo[i],
bo_size, 0,
@@ -493,7 +488,6 @@ static void mem_transaction_ordering(int fd, size_t bo_size, bool fence)
}
if (fence)
batch_map[b++] = MI_MEM_FENCE | MI_WRITE_FENCE;
-
batch_map[b++] = MI_BATCH_BUFFER_END;
sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
sync[1].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
@@ -538,44 +532,72 @@ int igt_main()
struct drm_xe_engine_class_instance *hwe;
int fd;
uint16_t dev_id;
+ uint32_t region;
+ uint64_t memreg;
struct drm_xe_engine *engine;
igt_fixture() {
fd = drm_open_driver(DRIVER_XE);
xe_device_get(fd);
dev_id = intel_get_drm_devid(fd);
+ memreg = all_memory_regions(fd);
}
- igt_subtest("basic-store") {
- engine = xe_engine(fd, 1);
- basic_inst(fd, STORE, &engine->instance, dev_id);
+ igt_subtest_with_dynamic("basic-store") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ igt_dynamic_f("region-%s", xe_region_name(region)) {
+ engine = xe_engine(fd, 1);
+ basic_inst(fd, STORE, &engine->instance, dev_id, region);
+ }
+ }
}
- igt_subtest("basic-cond-batch") {
- engine = xe_engine(fd, 1);
- basic_inst(fd, COND_BATCH, &engine->instance, dev_id);
+ igt_subtest_with_dynamic("basic-cond-batch") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ igt_dynamic_f("region-%s", xe_region_name(region)) {
+ engine = xe_engine(fd, 1);
+ basic_inst(fd, COND_BATCH, &engine->instance, dev_id, region);
+ }
+ }
}
igt_subtest_with_dynamic("basic-all") {
- xe_for_each_engine(fd, hwe) {
- igt_dynamic_f("Engine-%s-Instance-%d-Tile-%d",
- xe_engine_class_string(hwe->engine_class),
- hwe->engine_instance,
- hwe->gt_id);
- basic_inst(fd, STORE, hwe, dev_id);
+ xe_for_each_mem_region(fd, memreg, region) {
+ xe_for_each_engine(fd, hwe) {
+ igt_dynamic_f("Engine-%s-Instance-%d-Tile-%d-Region-%s",
+ xe_engine_class_string(hwe->engine_class),
+ hwe->engine_instance,
+ hwe->gt_id,
+ xe_region_name(region))
+ basic_inst(fd, STORE, hwe, dev_id, region);
+ }
}
}
- igt_subtest("cachelines")
- xe_for_each_engine(fd, hwe)
- store_cachelines(fd, hwe, 0);
+ igt_subtest_with_dynamic("cachelines") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ xe_for_each_engine(fd, hwe) {
+ igt_dynamic_f("region-%s", xe_region_name(region))
+ store_cachelines(fd, hwe, 0, region);
+ }
+ }
+ }
- igt_subtest("page-sized")
- xe_for_each_engine(fd, hwe)
- store_cachelines(fd, hwe, PAGES);
+ igt_subtest_with_dynamic("page-sized") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ xe_for_each_engine(fd, hwe) {
+ igt_dynamic_f("region-%s", xe_region_name(region))
+ store_cachelines(fd, hwe, PAGES, region);
+ }
+ }
+ }
- igt_subtest("persistent")
- persistent(fd);
+ igt_subtest_with_dynamic("persistent") {
+ xe_for_each_mem_region(fd, memreg, region) {
+ igt_dynamic_f("region-%s", xe_region_name(region))
+ persistent(fd, region);
+ }
+ }
igt_subtest_with_dynamic("long-shader-bb-check") {
struct igt_collection *set;
@@ -615,10 +637,12 @@ int igt_main()
{ SZ_8M, "8M" },
};
- for (size_t i = 0; i < ARRAY_SIZE(sizes); i++) {
- igt_dynamic_f("size-%s", sizes[i].label) {
- mem_transaction_ordering(fd, sizes[i].size, true);
- mem_transaction_ordering(fd, sizes[i].size, false);
+ xe_for_each_mem_region(fd, memreg, region) {
+ for (size_t i = 0; i < ARRAY_SIZE(sizes); i++) {
+ igt_dynamic_f("region-%s", xe_region_name(region)) {
+ mem_transaction_ordering(fd, sizes[i].size, true, region);
+ mem_transaction_ordering(fd, sizes[i].size, false, region);
+ }
}
}
}
--
2.34.1
next prev parent reply other threads:[~2026-02-24 5:33 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-24 5:33 [PATCH i-g-t 0/2] Add memory write ordering verification nishit.sharma
2026-02-24 5:33 ` [PATCH i-g-t 1/2] tests/intel/xe_exec_store: Validate PCIe6 relax ordering nishit.sharma
2026-02-24 8:55 ` Dandamudi, Priyanka
2026-02-24 5:33 ` nishit.sharma [this message]
2026-02-24 8:55 ` [PATCH i-g-t 2/2] tests/intel/xe_exec_store: Extend test coverage to all memory regions Dandamudi, Priyanka
2026-02-24 6:20 ` ✓ Xe.CI.BAT: success for Add memory write ordering verification (rev2) Patchwork
2026-02-24 6:32 ` ✓ i915.CI.BAT: " Patchwork
2026-02-24 9:23 ` ✗ i915.CI.Full: failure " Patchwork
2026-02-24 17:29 ` ✗ Xe.CI.FULL: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260224053324.2354159-3-nishit.sharma@intel.com \
--to=nishit.sharma@intel.com \
--cc=igt-dev@lists.freedesktop.org \
--cc=priyanka.dandamudi@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox