* [PATCH] tests/xe: Add System Allocator test
@ 2024-05-21 4:18 Matthew Brost
0 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2024-05-21 4:18 UTC (permalink / raw)
To: intel-xe, igt-dev; +Cc: Matthew Brost
IGT for pending SVM implementation in Xe.
Various system allocation types (malloc, mmap, mmap flags, huge pages,
different sizes, different alignments), mixing runtime allocations,
unmapping corners, testing invalid faults, and eviction have been
tested. Testing scales from single thread to multiple threads and
multiple processes. Most tests pass on PVC (though a few intermittent
KMD bugs still need to be tracked down).
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
include/drm-uapi/xe_drm.h | 1 +
lib/xe/xe_ioctl.c | 12 +
lib/xe/xe_ioctl.h | 1 +
tests/intel/xe_exec_system_allocator.c | 1281 ++++++++++++++++++++++++
tests/meson.build | 1 +
5 files changed, 1296 insertions(+)
create mode 100644 tests/intel/xe_exec_system_allocator.c
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 0b709b3746..69c8792bbc 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -973,6 +973,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+#define DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR (1 << 4)
/** @flags: Bind flags */
__u32 flags;
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 94cf4c9fdc..a437fd828a 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -443,6 +443,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
return __xe_bo_map(fd, bo, size, PROT_WRITE);
}
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
+{
+ uint64_t mmo;
+ void *map;
+
+ mmo = xe_bo_mmap_offset(fd, bo);
+ map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
+ igt_assert(map != MAP_FAILED);
+
+ return map;
+}
+
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
{
return __xe_bo_map(fd, bo, size, prot);
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index d0e6c4910b..2c7506caaf 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -85,6 +85,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
void *xe_bo_map(int fd, uint32_t bo, size_t size);
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
int __xe_exec(int fd, struct drm_xe_exec *exec);
void xe_exec(int fd, struct drm_xe_exec *exec);
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
new file mode 100644
index 0000000000..7b85f85e5e
--- /dev/null
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -0,0 +1,1281 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+/**
+ * TEST: Basic tests for execbuf functionality using system allocator
+ * Category: Hardware building block
+ * Sub-category: execbuf
+ * Functionality: fault mode, system allocator
+ * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
+ */
+
+#include <fcntl.h>
+#include <linux/mman.h>
+
+#include "igt.h"
+#include "lib/igt_syncobj.h"
+#include "lib/intel_reg.h"
+#include "xe_drm.h"
+
+#include "xe/xe_ioctl.h"
+#include "xe/xe_query.h"
+#include <string.h>
+
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define QUARTER_SEC MS_TO_NS(250)
+#define FIVE_SEC MS_TO_NS(5000)
+
+struct batch_data {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+#define WRITE_VALUE(data__, i__) ({ \
+ (data__)->expected_data = rand() << 12 | (i__); \
+ (data__)->expected_data; \
+})
+#define READ_VALUE(data__, i__) ((data__)->expected_data)
+
+static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
+ batch[(*idx)++] = sdi_addr;
+ batch[(*idx)++] = sdi_addr >> 32;
+ batch[(*idx)++] = wdata;
+ batch[(*idx)++] = MI_BATCH_BUFFER_END;
+}
+
+static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride)
+{
+ int i, n_writes = alloc_size / stride;
+
+ for (i = 0; i < n_writes; ++i) {
+ struct batch_data *data = ptr + i * stride;
+
+ igt_assert_eq(data->data, READ_VALUE(data, i));
+ }
+}
+
+static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
+ uint64_t alloc_size, uint64_t stride)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 0,
+ .exec_queue_id = exec_queue,
+ .syncs = to_user_pointer(&sync),
+ };
+ uint64_t addr = to_user_pointer(ptr);
+ int i, ret, n_writes = alloc_size / stride;
+ u64 *exec_ufence = NULL;
+ int64_t timeout = FIVE_SEC;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ sync[0].addr = to_user_pointer(exec_ufence);
+
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
+ igt_assert(b <= ARRAY_SIZE(data->batch));
+ }
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ exec.address = batch_addr;
+ if (i + 1 == n_writes)
+ exec.num_syncs = 1;
+ xe_exec(fd, &exec);
+ }
+
+ ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
+ &timeout);
+ if (ret) {
+ printf("FAIL EXEC_UFENCE: 0x%016llx\n", sync[0].addr);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+
+ printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
+ printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
+ printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
+ (((u64)data->batch[2]) << 32) | data->batch[1]);
+ }
+ igt_assert_eq(ret, 0);
+ }
+ munmap(exec_ufence, SZ_4K);
+}
+
+#define bind_system_allocator(__sync, __num_sync) \
+ __xe_vm_bind_assert(fd, vm, 0, \
+ 0, 0, 0, 0x1ull << 56, \
+ DRM_XE_VM_BIND_OP_MAP, \
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR, \
+ (__sync), (__num_sync), 0, 0)
+
+#define unbind_system_allocator() \
+ __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << 56, \
+ DRM_XE_VM_BIND_OP_UNMAP, 0, \
+ NULL, 0, 0, 0, 0)
+
+#define odd(__i) (__i & 1)
+
+#define MIX_BO_ALLOC (0x1 << 0)
+
+#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
+
+struct process_data {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ bool go;
+};
+
+/**
+ * SUBTEST: unaligned-alloc
+ * Description: allocate unaligned sizes of memory
+ * Test category: functionality test
+ *
+ * SUBTEST: evict-malloc
+ * Description: trigger eviction of VRAM allocated via malloc
+ * Test category: functionality test
+ *
+ * SUBTEST: evict-malloc-mix-bo
+ * Description: trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: functionality test
+ *
+ * SUBTEST: processes-evict-malloc
+ * Description: multi-process trigger eviction of VRAM allocated via malloc
+ * Test category: stress test
+ *
+ * SUBTEST: processes-evict-malloc-mix-bo
+ * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: stress test
+ */
+
+static void
+evict(int fd, struct drm_xe_engine_class_instance *eci, uint64_t total_alloc,
+ uint64_t alloc_size, uint64_t stride, pthread_barrier_t *barrier,
+ unsigned int flags)
+{
+ uint32_t vm, exec_queue;
+ int num_allocs = (9 * (total_alloc / alloc_size)) / 8;
+ void **allocs;
+ uint32_t *bos = NULL;
+ int i;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ bind_system_allocator(NULL, 0);
+
+ allocs = malloc(sizeof(*allocs) * num_allocs);
+ igt_assert(allocs);
+ memset(allocs, 0, sizeof(*allocs) * num_allocs);
+
+ if (flags & MIX_BO_ALLOC) {
+ bos = malloc(sizeof(*bos) * num_allocs);
+ igt_assert(bos);
+ memset(bos, 0, sizeof(*bos) * num_allocs);
+ }
+
+ for (i = 0; i < num_allocs; ++i) {
+ void *alloc;
+
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ if (flags & MIX_BO_ALLOC && odd(i)) {
+ uint32_t bo_flags =
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+
+ bos[i] = xe_bo_create(fd, vm, alloc_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ alloc = xe_bo_map_fixed(fd, bos[i], alloc_size,
+ to_user_pointer(alloc));
+ xe_vm_bind_async(fd, vm, 0, bos[i], 0,
+ to_user_pointer(alloc),
+ alloc_size, 0, 0);
+ }
+ allocs[i] = alloc;
+
+ touch_all_pages(fd, exec_queue, allocs[i], alloc_size, stride);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ for (i = 0; i < num_allocs; ++i) {
+ check_all_pages(allocs[i], alloc_size, stride);
+ if (bos && bos[i]) {
+ munmap(allocs[i], alloc_size);
+ gem_close(fd, bos[i]);
+ } else {
+ free(allocs[i]);
+ }
+ }
+ if (bos)
+ free(bos);
+ free(allocs);
+ xe_exec_queue_destroy(fd, exec_queue);
+ xe_vm_destroy(fd, vm);
+}
+
+static void wait_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ while (!pdata->go)
+ pthread_cond_wait(&pdata->cond, &pdata->mutex);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void process_evict(struct drm_xe_engine_class_instance *hwe,
+ uint64_t total_alloc, uint64_t alloc_size,
+ uint64_t stride, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ evict(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
+ flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void init_pdata(struct process_data *pdata, int n_engine)
+{
+ pthread_mutexattr_t mutex_attr;
+ pthread_condattr_t cond_attr;
+ pthread_barrierattr_t barrier_attr;
+
+ pthread_mutexattr_init(&mutex_attr);
+ pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&pdata->mutex, &mutex_attr);
+
+ pthread_condattr_init(&cond_attr);
+ pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
+ pthread_cond_init(&pdata->cond, &cond_attr);
+
+ pthread_barrierattr_init(&barrier_attr);
+ pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
+ pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
+
+ pdata->go = false;
+}
+
+static void signal_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ pdata->go = true;
+ pthread_cond_broadcast(&pdata->cond);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void
+processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_assert(hwe->gt_id < 2);
+ n_engine_gt[hwe->gt_id]++;
+ n_engine++;
+ }
+
+ init_pdata(pdata, n_engine);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process_evict(hwe,
+ xe_visible_vram_size(fd, hwe->gt_id) /
+ n_engine_gt[hwe->gt_id], alloc_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+#define CPU_FAULT (0x1 << 0)
+#define REMAP (0x1 << 1)
+#define MIDDLE (0x1 << 2)
+
+/**
+ * SUBTEST: partial-munmap-cpu-fault
+ * Description: munmap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-munmap-no-cpu-fault
+ * Description: munmap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-cpu-fault
+ * Description: remap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-no-cpu-fault
+ * Description: remap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-cpu-fault
+ * Description: munmap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-no-cpu-fault
+ * Description: munmap middle with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-cpu-fault
+ * Description: remap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-no-cpu-fault
+ * Description: remap middle with no cpu access in between
+ * Test category: functionality test
+ */
+
+static void
+partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+ } *data;
+ size_t bo_size = SZ_2M, unmap_offset = 0;
+ uint32_t vm, exec_queue;
+ u64 *exec_ufence = NULL;
+ int i;
+ void *old, *new = NULL;
+
+ if (flags & MIDDLE)
+ unmap_offset = bo_size / 4;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+
+ data = aligned_alloc(bo_size, bo_size);
+ igt_assert(data);
+
+ data = mmap(data, bo_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ memset(data, 0, bo_size);
+ old = data;
+
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ data[0].vm_sync = 0;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ if (!i)
+ data = old + unmap_offset + bo_size / 2;
+ }
+
+ data = old;
+ exec.exec_queue_id = exec_queue;
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ sync[0].addr = new ? to_user_pointer(new) :
+ to_user_pointer(exec_ufence);
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+
+ xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
+ exec_queue, FIVE_SEC);
+ if (i || (flags & CPU_FAULT))
+ igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
+ exec_ufence[0] = 0;
+
+ if (!i) {
+ data = old + unmap_offset + bo_size / 2;
+ munmap(old + unmap_offset, bo_size / 2);
+ if (flags & REMAP) {
+ new = mmap(old + unmap_offset, bo_size / 2,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
+ MAP_LOCKED, -1, 0);
+ igt_assert(new != MAP_FAILED);
+ }
+ }
+ }
+
+ xe_exec_queue_destroy(fd, exec_queue);
+ munmap(exec_ufence, SZ_4K);
+ munmap(old, bo_size);
+ if (new)
+ munmap(new, bo_size / 2);
+ xe_vm_destroy(fd, vm);
+}
+
+#define MAX_N_EXEC_QUEUES 16
+
+#define MMAP (0x1 << 0)
+#define NEW (0x1 << 1)
+#define BO_UNMAP (0x1 << 2)
+#define FREE (0x1 << 3)
+#define BUSY (0x1 << 4)
+#define BO_MAP (0x1 << 5)
+#define RACE (0x1 << 6)
+#define SKIP_MEMSET (0x1 << 7)
+#define FAULT (0x1 << 8)
+#define FILE_BACKED (0x1 << 9)
+#define LOCK (0x1 << 10)
+#define MMAP_SHARED (0x1 << 11)
+#define HUGE_PAGE (0x1 << 12)
+
+/**
+ * SUBTEST: once-%s
+ * Description: Run %arg[1] system allocator test only once
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-%s
+ * Description: Run %arg[1] system allocator test twice
+ * Test category: functionality test
+ *
+ * SUBTEST: many-%s
+ * Description: Run %arg[1] system allocator test many times
+ * Test category: stress test
+ *
+ * SUBTEST: many-stride-%s
+ * Description: Run %arg[1] system allocator test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: many-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-%s
+ * Description: Run %arg[1] system allocator test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-many-%s
+ * Description: Run %arg[1] system allocator threaded test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-%s
+ * Description: Run %arg[1] system allocator threaded test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-shared-vm-many-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-%s
+ * Description: Run %arg[1] system allocator multi-process test many times
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-stride-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
+ *
+ * SUBTEST: fault
+ * Description: use a bad system allocator address resulting in a fault
+ * Test category: bad input
+ *
+ * arg[1]:
+ *
+ * @malloc: malloc single buffer for all execs
+ * @malloc-mlock: malloc and mlock single buffer for all execs
+ * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
+ * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
+ * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
+ * @mmap: mmap single buffer for all execs
+ * @mmap-huge: mmap huge page single buffer for all execs
+ * @mmap-shared: mmap shared single buffer for all execs
+ * @mmap-mlock: mmap and mlock single buffer for all execs
+ * @mmap-file: mmap single buffer, with file backing, for all execs
+ * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
+ * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
+ * @free: malloc and free buffer for each exec
+ * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
+ * @new: malloc a new buffer for each exec
+ * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
+ * @new-bo-map: malloc a new buffer or map BO for each exec
+ * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
+ * @mmap-free: mmap and free buffer for each exec
+ * @mmap-free-huge: mmap huge page and free buffer for each exec
+ * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
+ * @mmap-new: mmap a new buffer for each exec
+ * @mmap-new-huge: mmap huge page a new buffer for each exec
+ * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
+ * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
+ * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
+ * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
+ * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
+ * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
+ * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
+ * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
+ * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
+ * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
+ * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
+ * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
+ * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
+ * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
+ * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
+ * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
+ * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
+ * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ */
+
+static void
+test_exec(int fd, struct drm_xe_engine_class_instance *eci,
+ int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, uint32_t vm, unsigned int flags)
+{
+ uint64_t addr;
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ uint32_t exec_queues[MAX_N_EXEC_QUEUES];
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+ } *data;
+ uint32_t bo_flags;
+ uint32_t bo = 0;
+ void **pending_free;
+ u64 *exec_ufence = NULL;
+ int i, b, file_fd = -1;
+ bool free_vm = false;
+ size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
+
+ igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
+
+ if (flags & NEW && !(flags & FREE)) {
+ pending_free = malloc(sizeof(*pending_free) * n_execs);
+ igt_assert(pending_free);
+ memset(pending_free, 0, sizeof(*pending_free) * n_execs);
+ }
+
+ if (!vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ free_vm = true;
+ }
+ if (!bo_size) {
+ if (!stride) {
+ bo_size = sizeof(*data) * n_execs;
+ bo_size = xe_bb_size(fd, bo_size);
+ } else {
+ bo_size = stride * n_execs * sizeof(*data);
+ bo_size = xe_bb_size(fd, bo_size);
+ }
+ }
+ if (flags & HUGE_PAGE) {
+ aligned_size = ALIGN(aligned_size, SZ_2M);
+ bo_size = ALIGN(bo_size, SZ_2M);
+ }
+
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ if (flags & MMAP) {
+ int mmap_flags = MAP_FIXED;
+
+ if (flags & MMAP_SHARED)
+ mmap_flags |= MAP_SHARED;
+ else
+ mmap_flags |= MAP_PRIVATE;
+
+ if (flags & HUGE_PAGE)
+ mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
+
+ if (flags & FILE_BACKED) {
+ char name[1024];
+
+ igt_assert(!(flags & NEW));
+
+ sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
+ getpid());
+ file_fd = open(name, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(file_fd, 0, bo_size);
+ } else {
+ mmap_flags |= MAP_ANONYMOUS;
+ }
+
+ data = mmap(data, bo_size, PROT_READ |
+ PROT_WRITE, mmap_flags, file_fd, 0);
+ igt_assert(data != MAP_FAILED);
+ }
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ if (flags & LOCK) {
+ igt_assert(!(flags & NEW));
+ mlock(data, bo_size);
+ }
+
+ for (i = 0; i < n_exec_queues; i++)
+ exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (free_vm) {
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ }
+ data[0].vm_sync = 0;
+
+ addr = to_user_pointer(data);
+
+ if (flags & BO_UNMAP) {
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id), bo_flags);
+ xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
+
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR, sync,
+ 1, 0, 0);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
+ FIVE_SEC);
+ data[0].vm_sync = 0;
+ gem_close(fd, bo);
+ bo = 0;
+ }
+
+ if (!(flags & RACE)) {
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ }
+
+ for (i = 0; i < n_execs; i++) {
+ int idx = !stride ? i : i * stride;
+ uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int e = i % n_exec_queues, err;
+ bool fault_inject = (FAULT & flags) && i == n_execs / 2;
+ bool fault_injected = (FAULT & flags) && i > n_execs;
+
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+
+ if (!exec_ufence)
+ data[idx].exec_sync = 0;
+
+ sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
+ addr + (char *)&data[idx].exec_sync - (char *)data;
+
+ exec.exec_queue_id = exec_queues[e];
+ if (fault_inject)
+ exec.address = batch_addr * 2;
+ else
+ exec.address = batch_addr;
+
+ if (fault_injected) {
+ err = __xe_exec(fd, &exec);
+ igt_assert(err == -ENOENT);
+ } else {
+ xe_exec(fd, &exec);
+ }
+
+ if (fault_inject || fault_injected) {
+ int64_t timeout = QUARTER_SEC;
+
+ err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync,
+ USER_FENCE_VALUE,
+ exec_queues[e], &timeout);
+ igt_assert(err == -ETIME || err == -EIO);
+ } else {
+ xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync, USER_FENCE_VALUE,
+ exec_queues[e], FIVE_SEC);
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ }
+
+ if (exec_ufence)
+ exec_ufence[0] = 0;
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & NEW) {
+ if (flags & MMAP) {
+ if (flags & FREE)
+ munmap(data, bo_size);
+ else
+ pending_free[i] = data;
+ data = mmap(NULL, bo_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ } else if (flags & BO_MAP && (i % 2)) {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ data = aligned_alloc(aligned_size, bo_size);
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ data = xe_bo_map_fixed(fd, bo, bo_size,
+ to_user_pointer(data));
+
+ xe_vm_bind_async(fd, vm, 0, bo, 0,
+ to_user_pointer(data),
+ bo_size, 0, 0);
+ } else {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ bo = 0;
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ }
+ addr = to_user_pointer(data);
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ }
+ }
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & BUSY)
+ igt_assert_eq(unbind_system_allocator(), -EBUSY);
+
+ for (i = 0; i < n_exec_queues; i++)
+ xe_exec_queue_destroy(fd, exec_queues[i]);
+
+ if (exec_ufence)
+ munmap(exec_ufence, SZ_4K);
+
+ if (flags & LOCK)
+ munlock(data, bo_size);
+
+ if (file_fd != -1)
+ close(file_fd);
+
+ if (flags & NEW && !(flags & FREE)) {
+ for (i = 0; i < n_execs; i++) {
+ if (!pending_free[i])
+ continue;
+
+ if (flags & MMAP)
+ munmap(pending_free[i], bo_size);
+ else
+ free(pending_free[i]);
+ }
+ free(pending_free);
+ } else {
+ if (flags & MMAP)
+ munmap(data, bo_size);
+ else
+ free(data);
+ }
+ if (free_vm)
+ xe_vm_destroy(fd, vm);
+}
+
+struct thread_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ int fd;
+ struct drm_xe_engine_class_instance *eci;
+ int n_exec_queues;
+ int n_execs;
+ size_t bo_size;
+ size_t stride;
+ uint32_t vm;
+ unsigned int flags;
+ bool *go;
+};
+
+static void *thread(void *data)
+{
+ struct thread_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
+ t->bo_size, t->stride, t->vm, t->flags);
+
+ return NULL;
+}
+
+static void
+threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags, bool shared_vm)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct thread_data *threads_data;
+ int n_engines = 0, i = 0;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t vm = 0;
+ bool go = false;
+
+ if (FILE_BACKED & flags)
+ return;
+
+ if (shared_vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ bind_system_allocator(NULL, 0);
+ }
+
+ xe_for_each_engine(fd, hwe)
+ ++n_engines;
+
+ threads_data = calloc(n_engines, sizeof(*threads_data));
+ igt_assert(threads_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ threads_data[i].mutex = &mutex;
+ threads_data[i].cond = &cond;
+ threads_data[i].fd = fd;
+ threads_data[i].eci = hwe;
+ threads_data[i].n_exec_queues = n_exec_queues;
+ threads_data[i].n_execs = n_execs;
+ threads_data[i].bo_size = bo_size;
+ threads_data[i].stride = stride;
+ threads_data[i].vm = vm;
+ threads_data[i].flags = flags;
+ threads_data[i].go = &go;
+ pthread_create(&threads_data[i].thread, 0, thread,
+ &threads_data[i]);
+ ++i;
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_engines; ++i)
+ pthread_join(threads_data[i].thread, NULL);
+
+ if (shared_vm) {
+ int ret;
+
+ if (flags & MMAP) {
+ int tries = 300;
+
+ while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
+ sleep(.01);
+ --tries;
+ }
+ igt_assert_eq(ret, 0);
+ }
+ xe_vm_destroy(fd, vm);
+ }
+ free(threads_data);
+}
+
+static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
+ int n_execs, size_t bo_size, size_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ test_exec(fd, hwe, n_exec_queues, n_execs,
+ bo_size, stride, 0, flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process(hwe, n_exec_queues, n_execs, bo_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct section {
+ const char *name;
+ unsigned int flags;
+};
+
+igt_main
+{
+ struct drm_xe_engine_class_instance *hwe;
+ const struct section sections[] = {
+ { "malloc", 0 },
+ { "malloc-mlock", LOCK },
+ { "malloc-race", RACE },
+ { "malloc-busy", BUSY },
+ { "malloc-bo-unmap", BO_UNMAP },
+ { "mmap", MMAP },
+ { "mmap-huge", MMAP | HUGE_PAGE },
+ { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
+ { "mmap-mlock", MMAP | LOCK },
+ { "mmap-file", MMAP | FILE_BACKED },
+ { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
+ { "mmap-race", MMAP | RACE },
+ { "free", NEW | FREE },
+ { "free-race", NEW | FREE | RACE },
+ { "new", NEW },
+ { "new-race", NEW | RACE },
+ { "new-bo-map", NEW | BO_MAP },
+ { "new-busy", NEW | BUSY },
+ { "mmap-free", MMAP | NEW | FREE },
+ { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race", MMAP | NEW | FREE | RACE },
+ { "mmap-new", MMAP | NEW },
+ { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race", MMAP | NEW | RACE },
+ { "malloc-nomemset", SKIP_MEMSET },
+ { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
+ { "malloc-race-nomemset", SKIP_MEMSET | RACE },
+ { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
+ { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
+ { "mmap-nomemset", SKIP_MEMSET | MMAP },
+ { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
+ { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
+ { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
+ { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
+ { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
+ { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
+ { "free-nomemset", SKIP_MEMSET | NEW | FREE },
+ { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
+ { "new-nomemset", SKIP_MEMSET | NEW },
+ { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
+ { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
+ { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
+ { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
+ { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
+ { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
+ { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
+ { NULL },
+ };
+ const struct section psections[] = {
+ { "munmap-cpu-fault", CPU_FAULT },
+ { "munmap-no-cpu-fault", 0 },
+ { "remap-cpu-fault", CPU_FAULT | REMAP },
+ { "remap-no-cpu-fault", REMAP },
+ { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
+ { "middle-munmap-no-cpu-fault", MIDDLE },
+ { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
+ { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
+ { NULL },
+ };
+ const struct section esections[] = {
+ { "malloc", 0 },
+ { "malloc-mix-bo", MIX_BO_ALLOC },
+ { NULL },
+ };
+ int fd;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_XE);
+ igt_require(xe_supports_faults(fd));
+ }
+
+ for (const struct section *s = sections; s->name; s++) {
+ igt_subtest_f("once-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, 0, 0, 0, s->flags);
+
+ igt_subtest_f("twice-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, 0, 0, 0, s->flags);
+
+ igt_subtest_f("many-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 0, 0, s->flags);
+
+ igt_subtest_f("many-stride-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 256, 0, s->flags);
+
+ igt_subtest_f("many-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, 0, 0, 0, s->flags);
+
+ igt_subtest_f("many-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, s->flags);
+
+ igt_subtest_f("many-large-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, s->flags);
+
+ igt_subtest_f("threads-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, false);
+
+ igt_subtest_f("threads-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-shared-vm-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("process-many-%s", s->name)
+ processes(fd, 1, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-stride-%s", s->name)
+ processes(fd, 1, 128, 0, 256, s->flags);
+
+ igt_subtest_f("process-many-execqueues-%s", s->name)
+ processes(fd, 16, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-large-%s", s->name)
+ processes(fd, 1, 128, SZ_2M, 0, s->flags);
+
+ igt_subtest_f("process-many-large-execqueues-%s", s->name)
+ processes(fd, 16, 128, SZ_2M, 0, s->flags);
+ }
+
+ igt_subtest_f("fault")
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, FAULT);
+
+ for (const struct section *s = psections; s->name; s++) {
+ igt_subtest_f("partial-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ partial(fd, hwe, s->flags);
+ }
+
+ igt_subtest_f("unaligned-alloc")
+ xe_for_each_engine(fd, hwe) {
+ evict(fd, hwe, (SZ_1M + SZ_512K) * 8,
+ SZ_1M + SZ_512K, SZ_4K, NULL, 0);
+ break;
+ }
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("evict-%s", s->name)
+ xe_for_each_engine(fd, hwe) {
+ evict(fd, hwe, xe_visible_vram_size(fd, hwe->gt_id),
+ SZ_8M, SZ_1M, NULL, s->flags);
+ break;
+ }
+ }
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("processes-evict-%s", s->name)
+ processes_evict(fd, SZ_8M, SZ_1M, s->flags);
+ }
+
+ igt_fixture
+ drm_close_driver(fd);
+}
diff --git a/tests/meson.build b/tests/meson.build
index 65b8bf23b9..0e6e19ae68 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -293,6 +293,7 @@ intel_xe_progs = [
'xe_exec_queue_property',
'xe_exec_reset',
'xe_exec_store',
+ 'xe_exec_system_allocator',
'xe_exec_threads',
'xe_exercise_blt',
'xe_gpgpu_fill',
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH] tests/xe: Add system_allocator test
@ 2024-08-21 1:41 Matthew Brost
0 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2024-08-21 1:41 UTC (permalink / raw)
To: igt-dev
Do not review, public checkpoint on progress.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
include/drm-uapi/xe_drm.h | 1 +
lib/xe/xe_ioctl.c | 12 +
lib/xe/xe_ioctl.h | 1 +
tests/intel/xe_exec_system_allocator.c | 1439 ++++++++++++++++++++++++
tests/meson.build | 1 +
5 files changed, 1454 insertions(+)
create mode 100644 tests/intel/xe_exec_system_allocator.c
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 29425d7fdc..f4a4b78dd4 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -994,6 +994,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+#define DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR (1 << 4)
/** @flags: Bind flags */
__u32 flags;
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index ae43ffd15e..9eb73918b9 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -424,6 +424,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
return __xe_bo_map(fd, bo, size, PROT_WRITE);
}
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
+{
+ uint64_t mmo;
+ void *map;
+
+ mmo = xe_bo_mmap_offset(fd, bo);
+ map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
+ igt_assert(map != MAP_FAILED);
+
+ return map;
+}
+
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
{
return __xe_bo_map(fd, bo, size, prot);
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index b27c0053f0..cfa4f63560 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -81,6 +81,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
void *xe_bo_map(int fd, uint32_t bo, size_t size);
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
int __xe_exec(int fd, struct drm_xe_exec *exec);
void xe_exec(int fd, struct drm_xe_exec *exec);
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
new file mode 100644
index 0000000000..ef13d13442
--- /dev/null
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -0,0 +1,1439 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+/**
+ * TEST: Basic tests for execbuf functionality using system allocator
+ * Category: Hardware building block
+ * Sub-category: execbuf
+ * Functionality: fault mode, system allocator
+ * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
+ */
+
+#include <fcntl.h>
+#include <linux/mman.h>
+#include <time.h>
+
+#include "igt.h"
+#include "lib/igt_syncobj.h"
+#include "lib/intel_reg.h"
+#include "xe_drm.h"
+
+#include "xe/xe_ioctl.h"
+#include "xe/xe_query.h"
+#include <string.h>
+
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define QUARTER_SEC MS_TO_NS(250)
+#define FIVE_SEC MS_TO_NS(5000)
+
+struct batch_data {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+#define WRITE_VALUE(data__, i__) ({ \
+ (data__)->expected_data = rand() << 12 | (i__); \
+ (data__)->expected_data; \
+})
+#define READ_VALUE(data__, i__) ((data__)->expected_data)
+
+static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
+ batch[(*idx)++] = sdi_addr;
+ batch[(*idx)++] = sdi_addr >> 32;
+ batch[(*idx)++] = wdata;
+ batch[(*idx)++] = MI_BATCH_BUFFER_END;
+}
+
+static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride)
+{
+ int i, n_writes = alloc_size / stride;
+
+ for (i = 0; i < n_writes; ++i) {
+ struct batch_data *data = ptr + i * stride;
+
+ igt_assert_eq(data->data, READ_VALUE(data, i));
+ }
+}
+
+struct thread_check_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ void *ptr;
+ uint64_t alloc_size;
+ uint64_t stride;
+ bool *go;
+};
+
+static void *thread_check(void *data)
+{
+ struct thread_check_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ check_all_pages(t->ptr, t->alloc_size, t->stride);
+
+ return NULL;
+}
+
+/*
+ * Partition checking of results in chunks which causes multiple threads to
+ * fault same VRAM allocation in parallel.
+ */
+static void
+check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
+ int n_threads)
+{
+ struct thread_check_data *threads_check_data;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ int i;
+ bool go = false;
+
+ threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
+ igt_assert(threads_check_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+
+ for (i = 0; i < n_threads; ++i) {
+ threads_check_data[i].mutex = &mutex;
+ threads_check_data[i].cond = &cond;
+ threads_check_data[i].ptr = ptr + stride * i;
+ threads_check_data[i].alloc_size = alloc_size;
+ threads_check_data[i].stride = n_threads * stride;
+ threads_check_data[i].go = &go;
+
+ pthread_create(&threads_check_data[i].thread, 0, thread_check,
+ &threads_check_data[i]);
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_threads; ++i)
+ pthread_join(threads_check_data[i].thread, NULL);
+ free(threads_check_data);
+}
+
+static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
+ uint64_t alloc_size, uint64_t stride,
+ struct timespec *tv, uint64_t *submit)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 0,
+ .exec_queue_id = exec_queue,
+ .syncs = to_user_pointer(&sync),
+ };
+ uint64_t addr = to_user_pointer(ptr);
+ int i, ret, n_writes = alloc_size / stride;
+ u64 *exec_ufence = NULL;
+ int64_t timeout = FIVE_SEC;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ sync[0].addr = to_user_pointer(exec_ufence);
+
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
+ igt_assert(b <= ARRAY_SIZE(data->batch));
+ }
+
+ igt_nsec_elapsed(tv);
+ *submit = igt_nsec_elapsed(tv);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ exec.address = batch_addr;
+ if (i + 1 == n_writes)
+ exec.num_syncs = 1;
+ xe_exec(fd, &exec);
+ }
+
+ ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
+ &timeout);
+ if (ret) {
+ printf("FAIL EXEC_UFENCE: 0x%016llx\n", sync[0].addr);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+
+ printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
+ printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
+ printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
+ (((u64)data->batch[2]) << 32) | data->batch[1]);
+ }
+ igt_assert_eq(ret, 0);
+ }
+ munmap(exec_ufence, SZ_4K);
+}
+
+#define bind_system_allocator(__sync, __num_sync) \
+ __xe_vm_bind_assert(fd, vm, 0, \
+ 0, 0, 0, 0x1ull << 56, \
+ DRM_XE_VM_BIND_OP_MAP, \
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR, \
+ (__sync), (__num_sync), 0, 0)
+
+#define unbind_system_allocator() \
+ __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << 56, \
+ DRM_XE_VM_BIND_OP_UNMAP, 0, \
+ NULL, 0, 0, 0, 0)
+
+#define odd(__i) (__i & 1)
+
+#define MIX_BO_ALLOC (0x1 << 0)
+#define BENCHMARK (0x1 << 1)
+#define CPU_FAULT_THREADS (0x1 << 2)
+
+#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
+
+struct process_data {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ bool go;
+};
+
+/**
+ * SUBTEST: unaligned-alloc
+ * Description: allocate unaligned sizes of memory
+ * Test category: functionality test
+ *
+ * SUBTEST: fault-benchmark
+ * Description: Benchmark how long GPU / CPU take
+ * Test category: performance test
+ *
+ * SUBTEST: fault-threads-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: evict-malloc
+ * Description: trigger eviction of VRAM allocated via malloc
+ * Test category: functionality test
+ *
+ * SUBTEST: evict-malloc-mix-bo
+ * Description: trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: functionality test
+ *
+ * SUBTEST: processes-evict-malloc
+ * Description: multi-process trigger eviction of VRAM allocated via malloc
+ * Test category: stress test
+ *
+ * SUBTEST: processes-evict-malloc-mix-bo
+ * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: stress test
+ */
+
+static void
+many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
+ uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
+ pthread_barrier_t *barrier, unsigned int flags)
+{
+ uint32_t vm, exec_queue;
+ int num_allocs = flags & BENCHMARK ? 1 :
+ (9 * (total_alloc / alloc_size)) / 8;
+ void **allocs;
+ uint32_t *bos = NULL;
+ struct timespec tv = {};
+ uint64_t submit, read, elapsed;
+ int i;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ bind_system_allocator(NULL, 0);
+
+ allocs = malloc(sizeof(*allocs) * num_allocs);
+ igt_assert(allocs);
+ memset(allocs, 0, sizeof(*allocs) * num_allocs);
+
+ if (flags & MIX_BO_ALLOC) {
+ bos = malloc(sizeof(*bos) * num_allocs);
+ igt_assert(bos);
+ memset(bos, 0, sizeof(*bos) * num_allocs);
+ }
+
+ for (i = 0; i < num_allocs; ++i) {
+ void *alloc;
+
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ if (flags & MIX_BO_ALLOC && odd(i)) {
+ uint32_t bo_flags =
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+
+ bos[i] = xe_bo_create(fd, vm, alloc_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ alloc = xe_bo_map_fixed(fd, bos[i], alloc_size,
+ to_user_pointer(alloc));
+ xe_vm_bind_async(fd, vm, 0, bos[i], 0,
+ to_user_pointer(alloc),
+ alloc_size, 0, 0);
+ }
+ allocs[i] = alloc;
+
+ touch_all_pages(fd, exec_queue, allocs[i], alloc_size, stride,
+ &tv, &submit);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ for (i = 0; i < num_allocs; ++i) {
+ if (flags & BENCHMARK)
+ read = igt_nsec_elapsed(&tv);
+#define NUM_CHECK_THREADS 8
+ if (flags & CPU_FAULT_THREADS)
+ check_all_pages_threads(allocs[i], alloc_size, stride,
+ NUM_CHECK_THREADS);
+ else
+ check_all_pages(allocs[i], alloc_size, stride);
+ if (flags & BENCHMARK) {
+ elapsed = igt_nsec_elapsed(&tv);
+ printf("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
+ 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
+ 1e-3 * (elapsed - submit),
+ 1e-3 * (elapsed - read));
+ }
+ if (bos && bos[i]) {
+ munmap(allocs[i], alloc_size);
+ gem_close(fd, bos[i]);
+ } else {
+ free(allocs[i]);
+ }
+ }
+ if (bos)
+ free(bos);
+ free(allocs);
+ xe_exec_queue_destroy(fd, exec_queue);
+ xe_vm_destroy(fd, vm);
+}
+
+static void wait_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ while (!pdata->go)
+ pthread_cond_wait(&pdata->cond, &pdata->mutex);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void process_evict(struct drm_xe_engine_class_instance *hwe,
+ uint64_t total_alloc, uint64_t alloc_size,
+ uint64_t stride, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
+ flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void init_pdata(struct process_data *pdata, int n_engine)
+{
+ pthread_mutexattr_t mutex_attr;
+ pthread_condattr_t cond_attr;
+ pthread_barrierattr_t barrier_attr;
+
+ pthread_mutexattr_init(&mutex_attr);
+ pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&pdata->mutex, &mutex_attr);
+
+ pthread_condattr_init(&cond_attr);
+ pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
+ pthread_cond_init(&pdata->cond, &cond_attr);
+
+ pthread_barrierattr_init(&barrier_attr);
+ pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
+ pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
+
+ pdata->go = false;
+}
+
+static void signal_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ pdata->go = true;
+ pthread_cond_broadcast(&pdata->cond);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void
+processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_assert(hwe->gt_id < 2);
+ n_engine_gt[hwe->gt_id]++;
+ n_engine++;
+ }
+
+ init_pdata(pdata, n_engine);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process_evict(hwe,
+ xe_visible_vram_size(fd, hwe->gt_id) /
+ n_engine_gt[hwe->gt_id], alloc_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+#define CPU_FAULT (0x1 << 0)
+#define REMAP (0x1 << 1)
+#define MIDDLE (0x1 << 2)
+
+/**
+ * SUBTEST: partial-munmap-cpu-fault
+ * Description: munmap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-munmap-no-cpu-fault
+ * Description: munmap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-cpu-fault
+ * Description: remap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-no-cpu-fault
+ * Description: remap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-cpu-fault
+ * Description: munmap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-no-cpu-fault
+ * Description: munmap middle with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-cpu-fault
+ * Description: remap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-no-cpu-fault
+ * Description: remap middle with no cpu access in between
+ * Test category: functionality test
+ */
+
+static void
+partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+ } *data;
+ size_t bo_size = SZ_2M, unmap_offset = 0;
+ uint32_t vm, exec_queue;
+ u64 *exec_ufence = NULL;
+ int i;
+ void *old, *new = NULL;
+
+ if (flags & MIDDLE)
+ unmap_offset = bo_size / 4;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+
+ data = aligned_alloc(bo_size, bo_size);
+ igt_assert(data);
+
+ data = mmap(data, bo_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ memset(data, 0, bo_size);
+ old = data;
+
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ data[0].vm_sync = 0;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ if (!i)
+ data = old + unmap_offset + bo_size / 2;
+ }
+
+ data = old;
+ exec.exec_queue_id = exec_queue;
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ sync[0].addr = new ? to_user_pointer(new) :
+ to_user_pointer(exec_ufence);
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+
+ xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
+ exec_queue, FIVE_SEC);
+ if (i || (flags & CPU_FAULT))
+ igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
+ exec_ufence[0] = 0;
+
+ if (!i) {
+ data = old + unmap_offset + bo_size / 2;
+ munmap(old + unmap_offset, bo_size / 2);
+ if (flags & REMAP) {
+ new = mmap(old + unmap_offset, bo_size / 2,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
+ MAP_LOCKED, -1, 0);
+ igt_assert(new != MAP_FAILED);
+ }
+ }
+ }
+
+ xe_exec_queue_destroy(fd, exec_queue);
+ munmap(exec_ufence, SZ_4K);
+ munmap(old, bo_size);
+ if (new)
+ munmap(new, bo_size / 2);
+ xe_vm_destroy(fd, vm);
+}
+
+#define MAX_N_EXEC_QUEUES 16
+
+#define MMAP (0x1 << 0)
+#define NEW (0x1 << 1)
+#define BO_UNMAP (0x1 << 2)
+#define FREE (0x1 << 3)
+#define BUSY (0x1 << 4)
+#define BO_MAP (0x1 << 5)
+#define RACE (0x1 << 6)
+#define SKIP_MEMSET (0x1 << 7)
+#define FAULT (0x1 << 8)
+#define FILE_BACKED (0x1 << 9)
+#define LOCK (0x1 << 10)
+#define MMAP_SHARED (0x1 << 11)
+#define HUGE_PAGE (0x1 << 12)
+#define SHARED_ALLOC (0x1 << 13)
+
+/**
+ * SUBTEST: once-%s
+ * Description: Run %arg[1] system allocator test only once
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-%s
+ * Description: Run %arg[1] system allocator test twice
+ * Test category: functionality test
+ *
+ * SUBTEST: many-%s
+ * Description: Run %arg[1] system allocator test many times
+ * Test category: stress test
+ *
+ * SUBTEST: many-stride-%s
+ * Description: Run %arg[1] system allocator test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: many-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-%s
+ * Description: Run %arg[1] system allocator test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-many-%s
+ * Description: Run %arg[1] system allocator threaded test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-%s
+ * Description: Run %arg[1] system allocator threaded test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-shared-vm-many-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-%s
+ * Description: Run %arg[1] system allocator multi-process test many times
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-stride-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
+ *
+ * SUBTEST: fault
+ * Description: use a bad system allocator address resulting in a fault
+ * Test category: bad input
+ *
+ * arg[1]:
+ *
+ * @malloc: malloc single buffer for all execs
+ * @malloc-mlock: malloc and mlock single buffer for all execs
+ * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
+ * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
+ * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
+ * @mmap: mmap single buffer for all execs
+ * @mmap-huge: mmap huge page single buffer for all execs
+ * @mmap-shared: mmap shared single buffer for all execs
+ * @mmap-mlock: mmap and mlock single buffer for all execs
+ * @mmap-file: mmap single buffer, with file backing, for all execs
+ * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
+ * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
+ * @free: malloc and free buffer for each exec
+ * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
+ * @new: malloc a new buffer for each exec
+ * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
+ * @new-bo-map: malloc a new buffer or map BO for each exec
+ * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
+ * @mmap-free: mmap and free buffer for each exec
+ * @mmap-free-huge: mmap huge page and free buffer for each exec
+ * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
+ * @mmap-new: mmap a new buffer for each exec
+ * @mmap-new-huge: mmap huge page a new buffer for each exec
+ * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
+ * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
+ * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
+ * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
+ * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
+ * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
+ * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
+ * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
+ * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
+ * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
+ * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
+ * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
+ * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
+ * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
+ * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
+ * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
+ * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
+ * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
+ * Test category: stress test
+ */
+
+struct test_exec_data {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+static void
+test_exec(int fd, struct drm_xe_engine_class_instance *eci,
+ int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, uint32_t vm, void *alloc, unsigned int flags)
+{
+ uint64_t addr;
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ uint32_t exec_queues[MAX_N_EXEC_QUEUES];
+ struct test_exec_data *data;
+ uint32_t bo_flags;
+ uint32_t bo = 0;
+ void **pending_free;
+ u64 *exec_ufence = NULL;
+ int i, b, file_fd = -1;
+ bool free_vm = false;
+ size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
+
+ if (flags & SHARED_ALLOC)
+ return;
+
+ igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
+
+ if (flags & NEW && !(flags & FREE)) {
+ pending_free = malloc(sizeof(*pending_free) * n_execs);
+ igt_assert(pending_free);
+ memset(pending_free, 0, sizeof(*pending_free) * n_execs);
+ }
+
+ if (!vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ free_vm = true;
+ }
+ if (!bo_size) {
+ if (!stride) {
+ bo_size = sizeof(*data) * n_execs;
+ bo_size = xe_bb_size(fd, bo_size);
+ } else {
+ bo_size = stride * n_execs * sizeof(*data);
+ bo_size = xe_bb_size(fd, bo_size);
+ }
+ }
+ if (flags & HUGE_PAGE) {
+ aligned_size = ALIGN(aligned_size, SZ_2M);
+ bo_size = ALIGN(bo_size, SZ_2M);
+ }
+
+ if (alloc) {
+ data = alloc;
+ } else {
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ if (flags & MMAP) {
+ int mmap_flags = MAP_FIXED;
+
+ if (flags & MMAP_SHARED)
+ mmap_flags |= MAP_SHARED;
+ else
+ mmap_flags |= MAP_PRIVATE;
+
+ if (flags & HUGE_PAGE)
+ mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
+
+ if (flags & FILE_BACKED) {
+ char name[1024];
+
+ igt_assert(!(flags & NEW));
+
+ sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
+ getpid());
+ file_fd = open(name, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(file_fd, 0, bo_size);
+ } else {
+ mmap_flags |= MAP_ANONYMOUS;
+ }
+
+ data = mmap(data, bo_size, PROT_READ |
+ PROT_WRITE, mmap_flags, file_fd, 0);
+ igt_assert(data != MAP_FAILED);
+ }
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ if (flags & LOCK) {
+ igt_assert(!(flags & NEW));
+ mlock(data, bo_size);
+ }
+ }
+
+ for (i = 0; i < n_exec_queues; i++)
+ exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (free_vm) {
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ }
+ data[0].vm_sync = 0;
+
+ addr = to_user_pointer(data);
+
+ if (flags & BO_UNMAP) {
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id), bo_flags);
+ xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
+
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR, sync,
+ 1, 0, 0);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
+ FIVE_SEC);
+ data[0].vm_sync = 0;
+ gem_close(fd, bo);
+ bo = 0;
+ }
+
+ if (!(flags & RACE)) {
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ }
+
+ for (i = 0; i < n_execs; i++) {
+ int idx = !stride ? i : i * stride;
+ uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int e = i % n_exec_queues, err;
+ bool fault_inject = (FAULT & flags) && i == n_execs / 2;
+ bool fault_injected = (FAULT & flags) && i > n_execs;
+
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+
+ if (!exec_ufence)
+ data[idx].exec_sync = 0;
+
+ sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
+ addr + (char *)&data[idx].exec_sync - (char *)data;
+
+ exec.exec_queue_id = exec_queues[e];
+ if (fault_inject)
+ exec.address = batch_addr * 2;
+ else
+ exec.address = batch_addr;
+
+ if (fault_injected) {
+ err = __xe_exec(fd, &exec);
+ igt_assert(err == -ENOENT);
+ } else {
+ xe_exec(fd, &exec);
+ }
+
+ if (fault_inject || fault_injected) {
+ int64_t timeout = QUARTER_SEC;
+
+ err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync,
+ USER_FENCE_VALUE,
+ exec_queues[e], &timeout);
+ igt_assert(err == -ETIME || err == -EIO);
+ } else {
+ xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync, USER_FENCE_VALUE,
+ exec_queues[e], FIVE_SEC);
+ if (flags & LOCK && !i)
+ munlock(data, bo_size);
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ }
+
+ if (exec_ufence)
+ exec_ufence[0] = 0;
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & NEW) {
+ if (flags & MMAP) {
+ if (flags & FREE)
+ munmap(data, bo_size);
+ else
+ pending_free[i] = data;
+ data = mmap(NULL, bo_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ } else if (flags & BO_MAP && (i % 2)) {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ data = aligned_alloc(aligned_size, bo_size);
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ data = xe_bo_map_fixed(fd, bo, bo_size,
+ to_user_pointer(data));
+
+ xe_vm_bind_async(fd, vm, 0, bo, 0,
+ to_user_pointer(data),
+ bo_size, 0, 0);
+ } else {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ bo = 0;
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ }
+ addr = to_user_pointer(data);
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ }
+ }
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & BUSY)
+ igt_assert_eq(unbind_system_allocator(), -EBUSY);
+
+ for (i = 0; i < n_exec_queues; i++)
+ xe_exec_queue_destroy(fd, exec_queues[i]);
+
+ if (exec_ufence)
+ munmap(exec_ufence, SZ_4K);
+
+ if (flags & LOCK)
+ munlock(data, bo_size);
+
+ if (file_fd != -1)
+ close(file_fd);
+
+ if (flags & NEW && !(flags & FREE)) {
+ for (i = 0; i < n_execs; i++) {
+ if (!pending_free[i])
+ continue;
+
+ if (flags & MMAP)
+ munmap(pending_free[i], bo_size);
+ else
+ free(pending_free[i]);
+ }
+ free(pending_free);
+ } else {
+ if (flags & MMAP)
+ munmap(data, bo_size);
+ else if (!alloc)
+ free(data);
+ }
+ if (free_vm)
+ xe_vm_destroy(fd, vm);
+}
+
+struct thread_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ int fd;
+ struct drm_xe_engine_class_instance *eci;
+ int n_exec_queues;
+ int n_execs;
+ size_t bo_size;
+ size_t stride;
+ uint32_t vm;
+ unsigned int flags;
+ void *alloc;
+ bool *go;
+};
+
+static void *thread(void *data)
+{
+ struct thread_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
+ t->bo_size, t->stride, t->vm, t->alloc, t->flags);
+
+ return NULL;
+}
+
+static void
+threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags, bool shared_vm)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct thread_data *threads_data;
+ int n_engines = 0, i = 0;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t vm = 0;
+ bool go = false;
+ void *alloc = NULL;
+
+ if (FILE_BACKED & flags)
+ return;
+
+ xe_for_each_engine(fd, hwe)
+ ++n_engines;
+
+ if (shared_vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ bind_system_allocator(NULL, 0);
+ if (flags & SHARED_ALLOC) {
+ uint64_t alloc_size;
+
+ igt_assert(stride);
+
+ alloc_size = sizeof(struct test_exec_data) * stride *
+ n_execs * n_engines;
+ alloc_size = xe_bb_size(fd, alloc_size);
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ memset(alloc, 0, alloc_size);
+ flags &= ~SHARED_ALLOC;
+ }
+ } else if (flags & SHARED_ALLOC) {
+ return;
+ }
+
+ threads_data = calloc(n_engines, sizeof(*threads_data));
+ igt_assert(threads_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ threads_data[i].mutex = &mutex;
+ threads_data[i].cond = &cond;
+ threads_data[i].fd = fd;
+ threads_data[i].eci = hwe;
+ threads_data[i].n_exec_queues = n_exec_queues;
+ threads_data[i].n_execs = n_execs;
+ threads_data[i].bo_size = bo_size;
+ threads_data[i].stride = stride;
+ threads_data[i].vm = vm;
+ threads_data[i].flags = flags;
+ threads_data[i].alloc = alloc ? alloc + i *
+ sizeof(struct test_exec_data) : NULL;
+ threads_data[i].go = &go;
+ pthread_create(&threads_data[i].thread, 0, thread,
+ &threads_data[i]);
+ ++i;
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_engines; ++i)
+ pthread_join(threads_data[i].thread, NULL);
+
+ if (shared_vm) {
+ int ret;
+
+ if (flags & MMAP) {
+ int tries = 300;
+
+ while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
+ sleep(.01);
+ --tries;
+ }
+ igt_assert_eq(ret, 0);
+ }
+ xe_vm_destroy(fd, vm);
+ if (alloc)
+ free(alloc);
+ }
+ free(threads_data);
+}
+
+static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
+ int n_execs, size_t bo_size, size_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ test_exec(fd, hwe, n_exec_queues, n_execs,
+ bo_size, stride, 0, NULL, flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process(hwe, n_exec_queues, n_execs, bo_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct section {
+ const char *name;
+ unsigned int flags;
+};
+
+igt_main
+{
+ struct drm_xe_engine_class_instance *hwe;
+ const struct section sections[] = {
+ { "malloc", 0 },
+ { "malloc-mlock", LOCK },
+ { "malloc-race", RACE },
+ { "malloc-busy", BUSY },
+ { "malloc-bo-unmap", BO_UNMAP },
+ { "mmap", MMAP },
+ { "mmap-huge", MMAP | HUGE_PAGE },
+ { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
+ { "mmap-mlock", MMAP | LOCK },
+ { "mmap-file", MMAP | FILE_BACKED },
+ { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
+ { "mmap-race", MMAP | RACE },
+ { "free", NEW | FREE },
+ { "free-race", NEW | FREE | RACE },
+ { "new", NEW },
+ { "new-race", NEW | RACE },
+ { "new-bo-map", NEW | BO_MAP },
+ { "new-busy", NEW | BUSY },
+ { "mmap-free", MMAP | NEW | FREE },
+ { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race", MMAP | NEW | FREE | RACE },
+ { "mmap-new", MMAP | NEW },
+ { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race", MMAP | NEW | RACE },
+ { "malloc-nomemset", SKIP_MEMSET },
+ { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
+ { "malloc-race-nomemset", SKIP_MEMSET | RACE },
+ { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
+ { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
+ { "mmap-nomemset", SKIP_MEMSET | MMAP },
+ { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
+ { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
+ { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
+ { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
+ { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
+ { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
+ { "free-nomemset", SKIP_MEMSET | NEW | FREE },
+ { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
+ { "new-nomemset", SKIP_MEMSET | NEW },
+ { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
+ { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
+ { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
+ { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
+ { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
+ { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
+ { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
+ { NULL },
+ };
+ const struct section psections[] = {
+ { "munmap-cpu-fault", CPU_FAULT },
+ { "munmap-no-cpu-fault", 0 },
+ { "remap-cpu-fault", CPU_FAULT | REMAP },
+ { "remap-no-cpu-fault", REMAP },
+ { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
+ { "middle-munmap-no-cpu-fault", MIDDLE },
+ { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
+ { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
+ { NULL },
+ };
+ const struct section esections[] = {
+ { "malloc", 0 },
+ { "malloc-mix-bo", MIX_BO_ALLOC },
+ { NULL },
+ };
+ int fd;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_XE);
+ igt_require(xe_supports_faults(fd));
+ }
+
+ for (const struct section *s = sections; s->name; s++) {
+ igt_subtest_f("once-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("twice-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("many-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("many-stride-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL, s->flags);
+
+ igt_subtest_f("many-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("many-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("many-large-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("threads-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, false);
+
+ igt_subtest_f("threads-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-shared-vm-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("process-many-%s", s->name)
+ processes(fd, 1, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-stride-%s", s->name)
+ processes(fd, 1, 128, 0, 256, s->flags);
+
+ igt_subtest_f("process-many-execqueues-%s", s->name)
+ processes(fd, 16, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-large-%s", s->name)
+ processes(fd, 1, 128, SZ_2M, 0, s->flags);
+
+ igt_subtest_f("process-many-large-execqueues-%s", s->name)
+ processes(fd, 16, 128, SZ_2M, 0, s->flags);
+ }
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
+ threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
+
+ igt_subtest_f("fault")
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, FAULT);
+
+ for (const struct section *s = psections; s->name; s++) {
+ igt_subtest_f("partial-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ partial(fd, hwe, s->flags);
+ }
+
+ igt_subtest_f("unaligned-alloc")
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
+ SZ_1M + SZ_512K, SZ_4K, NULL, 0);
+ break;
+ }
+
+ igt_subtest_f("fault-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK);
+
+ igt_subtest_f("fault-threads-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_THREADS);
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("evict-%s", s->name)
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe,
+ xe_visible_vram_size(fd, hwe->gt_id),
+ SZ_8M, SZ_1M, NULL, s->flags);
+ break;
+ }
+ }
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("processes-evict-%s", s->name)
+ processes_evict(fd, SZ_8M, SZ_1M, s->flags);
+ }
+
+ igt_fixture
+ drm_close_driver(fd);
+}
diff --git a/tests/meson.build b/tests/meson.build
index 00556c9d61..31d0acd6a7 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -291,6 +291,7 @@ intel_xe_progs = [
'xe_exec_reset',
'xe_exec_sip',
'xe_exec_store',
+ 'xe_exec_system_allocator',
'xe_exec_threads',
'xe_exercise_blt',
'xe_gpgpu_fill',
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH] tests/xe: Add system_allocator test
@ 2024-08-27 23:16 Matthew Brost
0 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2024-08-27 23:16 UTC (permalink / raw)
To: igt-dev
Test various uses of system allocator in single thread, multiple
threads, and multiple processes.
Features tested:
- Malloc with various size
- Mmap with various sizes and flags including file backed mappings
- Mixing BO allocations with system allocator
- Various page sizes
- Dynamically freeing / unmapping memory
- Sharing VM across threads
- Faults racing on different hardware engines / GTs / Tiles
- GPU faults and CPU faults racing
- CPU faults on multiple threads racing
- GPU faults of memory not faulted in by CPU
- Partial unmap of allocations
- Attempting to unmap system allocations when GPU has mappings
- Eviction of both system allocations and BOs
Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
include/drm-uapi/xe_drm.h | 1 +
lib/xe/xe_ioctl.c | 12 +
lib/xe/xe_ioctl.h | 1 +
tests/intel/xe_exec_system_allocator.c | 1449 ++++++++++++++++++++++++
tests/meson.build | 1 +
5 files changed, 1464 insertions(+)
create mode 100644 tests/intel/xe_exec_system_allocator.c
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 29425d7fdc..f4a4b78dd4 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -994,6 +994,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+#define DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR (1 << 4)
/** @flags: Bind flags */
__u32 flags;
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index ae43ffd15e..9eb73918b9 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -424,6 +424,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
return __xe_bo_map(fd, bo, size, PROT_WRITE);
}
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
+{
+ uint64_t mmo;
+ void *map;
+
+ mmo = xe_bo_mmap_offset(fd, bo);
+ map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
+ igt_assert(map != MAP_FAILED);
+
+ return map;
+}
+
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
{
return __xe_bo_map(fd, bo, size, prot);
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index b27c0053f0..cfa4f63560 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -81,6 +81,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
void *xe_bo_map(int fd, uint32_t bo, size_t size);
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
int __xe_exec(int fd, struct drm_xe_exec *exec);
void xe_exec(int fd, struct drm_xe_exec *exec);
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
new file mode 100644
index 0000000000..23c8ace150
--- /dev/null
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -0,0 +1,1449 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+/**
+ * TEST: Basic tests for execbuf functionality using system allocator
+ * Category: Hardware building block
+ * Mega feature: Shared virtual memory
+ * Sub-category: execbuf
+ * Functionality: fault mode, system allocator
+ * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
+ */
+
+#include <fcntl.h>
+#include <linux/mman.h>
+#include <time.h>
+
+#include "igt.h"
+#include "lib/igt_syncobj.h"
+#include "lib/intel_reg.h"
+#include "xe_drm.h"
+
+#include "xe/xe_ioctl.h"
+#include "xe/xe_query.h"
+#include <string.h>
+
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define QUARTER_SEC (NSEC_PER_SEC / 4)
+#define FIVE_SEC (5 * NSEC_PER_SEC)
+
+struct batch_data {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+#define WRITE_VALUE(data__, i__) ({ \
+ (data__)->expected_data = rand() << 12 | (i__); \
+ (data__)->expected_data; \
+})
+#define READ_VALUE(data__, i__) ((data__)->expected_data)
+
+static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
+ batch[(*idx)++] = sdi_addr;
+ batch[(*idx)++] = sdi_addr >> 32;
+ batch[(*idx)++] = wdata;
+ batch[(*idx)++] = MI_BATCH_BUFFER_END;
+}
+
+static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride)
+{
+ int i, n_writes = alloc_size / stride;
+
+ for (i = 0; i < n_writes; ++i) {
+ struct batch_data *data = ptr + i * stride;
+
+ igt_assert_eq(data->data, READ_VALUE(data, i));
+ }
+}
+
+struct thread_check_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ void *ptr;
+ uint64_t alloc_size;
+ uint64_t stride;
+ bool *go;
+};
+
+static void *thread_check(void *data)
+{
+ struct thread_check_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ check_all_pages(t->ptr, t->alloc_size, t->stride);
+
+ return NULL;
+}
+
+/*
+ * Partition checking of results in chunks which causes multiple threads to
+ * fault same VRAM allocation in parallel.
+ */
+static void
+check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
+ int n_threads)
+{
+ struct thread_check_data *threads_check_data;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ int i;
+ bool go = false;
+
+ threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
+ igt_assert(threads_check_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+
+ for (i = 0; i < n_threads; ++i) {
+ threads_check_data[i].mutex = &mutex;
+ threads_check_data[i].cond = &cond;
+ threads_check_data[i].ptr = ptr + stride * i;
+ threads_check_data[i].alloc_size = alloc_size;
+ threads_check_data[i].stride = n_threads * stride;
+ threads_check_data[i].go = &go;
+
+ pthread_create(&threads_check_data[i].thread, 0, thread_check,
+ &threads_check_data[i]);
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_threads; ++i)
+ pthread_join(threads_check_data[i].thread, NULL);
+ free(threads_check_data);
+}
+
+static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
+ uint64_t alloc_size, uint64_t stride,
+ struct timespec *tv, uint64_t *submit)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 0,
+ .exec_queue_id = exec_queue,
+ .syncs = to_user_pointer(&sync),
+ };
+ uint64_t addr = to_user_pointer(ptr);
+ int i, ret, n_writes = alloc_size / stride;
+ u64 *exec_ufence = NULL;
+ int64_t timeout = FIVE_SEC;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ sync[0].addr = to_user_pointer(exec_ufence);
+
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
+ igt_assert(b <= ARRAY_SIZE(data->batch));
+ }
+
+ igt_nsec_elapsed(tv);
+ *submit = igt_nsec_elapsed(tv);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ exec.address = batch_addr;
+ if (i + 1 == n_writes)
+ exec.num_syncs = 1;
+ xe_exec(fd, &exec);
+ }
+
+ ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
+ &timeout);
+ if (ret) {
+ printf("FAIL EXEC_UFENCE: 0x%016llx\n", sync[0].addr);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+
+ printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
+ printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
+ printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
+ (((u64)data->batch[2]) << 32) | data->batch[1]);
+ }
+ igt_assert_eq(ret, 0);
+ }
+ munmap(exec_ufence, SZ_4K);
+}
+
+static int va_bits;
+
+#define bind_system_allocator(__sync, __num_sync) \
+ __xe_vm_bind_assert(fd, vm, 0, \
+ 0, 0, 0, 0x1ull << va_bits, \
+ DRM_XE_VM_BIND_OP_MAP, \
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR, \
+ (__sync), (__num_sync), 0, 0)
+
+#define unbind_system_allocator() \
+ __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits, \
+ DRM_XE_VM_BIND_OP_UNMAP, 0, \
+ NULL, 0, 0, 0, 0)
+
+#define odd(__i) (__i & 1)
+
+#define MIX_BO_ALLOC (0x1 << 0)
+#define BENCHMARK (0x1 << 1)
+#define CPU_FAULT_THREADS (0x1 << 2)
+
+#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
+
+struct process_data {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ bool go;
+};
+
+/**
+ * SUBTEST: unaligned-alloc
+ * Description: allocate unaligned sizes of memory
+ * Test category: functionality test
+ *
+ * SUBTEST: fault-benchmark
+ * Description: Benchmark how long GPU / CPU take
+ * Test category: performance test
+ *
+ * SUBTEST: fault-threads-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: evict-malloc
+ * Description: trigger eviction of VRAM allocated via malloc
+ * Test category: functionality test
+ *
+ * SUBTEST: evict-malloc-mix-bo
+ * Description: trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: functionality test
+ *
+ * SUBTEST: processes-evict-malloc
+ * Description: multi-process trigger eviction of VRAM allocated via malloc
+ * Test category: stress test
+ *
+ * SUBTEST: processes-evict-malloc-mix-bo
+ * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: stress test
+ */
+
+static void
+many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
+ uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
+ pthread_barrier_t *barrier, unsigned int flags)
+{
+ uint32_t vm, exec_queue;
+ int num_allocs = flags & BENCHMARK ? 1 :
+ (9 * (total_alloc / alloc_size)) / 8;
+ void **allocs;
+ uint32_t *bos = NULL;
+ struct timespec tv = {};
+ uint64_t submit, read, elapsed;
+ int i;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ bind_system_allocator(NULL, 0);
+
+ allocs = malloc(sizeof(*allocs) * num_allocs);
+ igt_assert(allocs);
+ memset(allocs, 0, sizeof(*allocs) * num_allocs);
+
+ if (flags & MIX_BO_ALLOC) {
+ bos = malloc(sizeof(*bos) * num_allocs);
+ igt_assert(bos);
+ memset(bos, 0, sizeof(*bos) * num_allocs);
+ }
+
+ for (i = 0; i < num_allocs; ++i) {
+ void *alloc;
+
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ if (flags & MIX_BO_ALLOC && odd(i)) {
+ uint32_t bo_flags =
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+
+ bos[i] = xe_bo_create(fd, vm, alloc_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ alloc = xe_bo_map_fixed(fd, bos[i], alloc_size,
+ to_user_pointer(alloc));
+ xe_vm_bind_async(fd, vm, 0, bos[i], 0,
+ to_user_pointer(alloc),
+ alloc_size, 0, 0);
+ }
+ allocs[i] = alloc;
+
+ touch_all_pages(fd, exec_queue, allocs[i], alloc_size, stride,
+ &tv, &submit);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ for (i = 0; i < num_allocs; ++i) {
+ if (flags & BENCHMARK)
+ read = igt_nsec_elapsed(&tv);
+#define NUM_CHECK_THREADS 8
+ if (flags & CPU_FAULT_THREADS)
+ check_all_pages_threads(allocs[i], alloc_size, stride,
+ NUM_CHECK_THREADS);
+ else
+ check_all_pages(allocs[i], alloc_size, stride);
+ if (flags & BENCHMARK) {
+ elapsed = igt_nsec_elapsed(&tv);
+ printf("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
+ 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
+ 1e-3 * (elapsed - submit),
+ 1e-3 * (elapsed - read));
+ }
+ if (bos && bos[i]) {
+ munmap(allocs[i], alloc_size);
+ gem_close(fd, bos[i]);
+ } else {
+ free(allocs[i]);
+ }
+ }
+ if (bos)
+ free(bos);
+ free(allocs);
+ xe_exec_queue_destroy(fd, exec_queue);
+ xe_vm_destroy(fd, vm);
+}
+
+static void wait_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ while (!pdata->go)
+ pthread_cond_wait(&pdata->cond, &pdata->mutex);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void process_evict(struct drm_xe_engine_class_instance *hwe,
+ uint64_t total_alloc, uint64_t alloc_size,
+ uint64_t stride, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
+ flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void init_pdata(struct process_data *pdata, int n_engine)
+{
+ pthread_mutexattr_t mutex_attr;
+ pthread_condattr_t cond_attr;
+ pthread_barrierattr_t barrier_attr;
+
+ pthread_mutexattr_init(&mutex_attr);
+ pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&pdata->mutex, &mutex_attr);
+
+ pthread_condattr_init(&cond_attr);
+ pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
+ pthread_cond_init(&pdata->cond, &cond_attr);
+
+ pthread_barrierattr_init(&barrier_attr);
+ pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
+ pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
+
+ pdata->go = false;
+}
+
+static void signal_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ pdata->go = true;
+ pthread_cond_broadcast(&pdata->cond);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void
+processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_assert(hwe->gt_id < 2);
+ n_engine_gt[hwe->gt_id]++;
+ n_engine++;
+ }
+
+ init_pdata(pdata, n_engine);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process_evict(hwe,
+ xe_visible_vram_size(fd, hwe->gt_id) /
+ n_engine_gt[hwe->gt_id], alloc_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+#define CPU_FAULT (0x1 << 0)
+#define REMAP (0x1 << 1)
+#define MIDDLE (0x1 << 2)
+
+/**
+ * SUBTEST: partial-munmap-cpu-fault
+ * Description: munmap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-munmap-no-cpu-fault
+ * Description: munmap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-cpu-fault
+ * Description: remap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-no-cpu-fault
+ * Description: remap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-cpu-fault
+ * Description: munmap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-no-cpu-fault
+ * Description: munmap middle with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-cpu-fault
+ * Description: remap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-no-cpu-fault
+ * Description: remap middle with no cpu access in between
+ * Test category: functionality test
+ */
+
+static void
+partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+ } *data;
+ size_t bo_size = SZ_2M, unmap_offset = 0;
+ uint32_t vm, exec_queue;
+ u64 *exec_ufence = NULL;
+ int i;
+ void *old, *new = NULL;
+
+ if (flags & MIDDLE)
+ unmap_offset = bo_size / 4;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+
+ data = aligned_alloc(bo_size, bo_size);
+ igt_assert(data);
+
+ data = mmap(data, bo_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ memset(data, 0, bo_size);
+ old = data;
+
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ data[0].vm_sync = 0;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ if (!i)
+ data = old + unmap_offset + bo_size / 2;
+ }
+
+ data = old;
+ exec.exec_queue_id = exec_queue;
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ sync[0].addr = new ? to_user_pointer(new) :
+ to_user_pointer(exec_ufence);
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+
+ xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
+ exec_queue, FIVE_SEC);
+ if (i || (flags & CPU_FAULT))
+ igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
+ exec_ufence[0] = 0;
+
+ if (!i) {
+ data = old + unmap_offset + bo_size / 2;
+ munmap(old + unmap_offset, bo_size / 2);
+ if (flags & REMAP) {
+ new = mmap(old + unmap_offset, bo_size / 2,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
+ MAP_LOCKED, -1, 0);
+ igt_assert(new != MAP_FAILED);
+ }
+ }
+ }
+
+ xe_exec_queue_destroy(fd, exec_queue);
+ munmap(exec_ufence, SZ_4K);
+ munmap(old, bo_size);
+ if (new)
+ munmap(new, bo_size / 2);
+ xe_vm_destroy(fd, vm);
+}
+
+#define MAX_N_EXEC_QUEUES 16
+
+#define MMAP (0x1 << 0)
+#define NEW (0x1 << 1)
+#define BO_UNMAP (0x1 << 2)
+#define FREE (0x1 << 3)
+#define BUSY (0x1 << 4)
+#define BO_MAP (0x1 << 5)
+#define RACE (0x1 << 6)
+#define SKIP_MEMSET (0x1 << 7)
+#define FAULT (0x1 << 8)
+#define FILE_BACKED (0x1 << 9)
+#define LOCK (0x1 << 10)
+#define MMAP_SHARED (0x1 << 11)
+#define HUGE_PAGE (0x1 << 12)
+#define SHARED_ALLOC (0x1 << 13)
+
+/**
+ * SUBTEST: once-%s
+ * Description: Run %arg[1] system allocator test only once
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-%s
+ * Description: Run %arg[1] system allocator test twice
+ * Test category: functionality test
+ *
+ * SUBTEST: many-%s
+ * Description: Run %arg[1] system allocator test many times
+ * Test category: stress test
+ *
+ * SUBTEST: many-stride-%s
+ * Description: Run %arg[1] system allocator test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: many-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-%s
+ * Description: Run %arg[1] system allocator test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-many-%s
+ * Description: Run %arg[1] system allocator threaded test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-%s
+ * Description: Run %arg[1] system allocator threaded test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-shared-vm-many-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-%s
+ * Description: Run %arg[1] system allocator multi-process test many times
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-stride-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
+ *
+ * SUBTEST: fault
+ * Description: use a bad system allocator address resulting in a fault
+ * Test category: bad input
+ *
+ * arg[1]:
+ *
+ * @malloc: malloc single buffer for all execs
+ * @malloc-mlock: malloc and mlock single buffer for all execs
+ * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
+ * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
+ * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
+ * @mmap: mmap single buffer for all execs
+ * @mmap-huge: mmap huge page single buffer for all execs
+ * @mmap-shared: mmap shared single buffer for all execs
+ * @mmap-mlock: mmap and mlock single buffer for all execs
+ * @mmap-file: mmap single buffer, with file backing, for all execs
+ * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
+ * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
+ * @free: malloc and free buffer for each exec
+ * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
+ * @new: malloc a new buffer for each exec
+ * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
+ * @new-bo-map: malloc a new buffer or map BO for each exec
+ * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
+ * @mmap-free: mmap and free buffer for each exec
+ * @mmap-free-huge: mmap huge page and free buffer for each exec
+ * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
+ * @mmap-new: mmap a new buffer for each exec
+ * @mmap-new-huge: mmap huge page a new buffer for each exec
+ * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
+ * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
+ * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
+ * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
+ * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
+ * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
+ * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
+ * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
+ * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
+ * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
+ * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
+ * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
+ * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
+ * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
+ * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
+ * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
+ * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
+ * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
+ * Test category: stress test
+ */
+
+struct test_exec_data {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+static void
+test_exec(int fd, struct drm_xe_engine_class_instance *eci,
+ int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, uint32_t vm, void *alloc, unsigned int flags)
+{
+ uint64_t addr;
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ uint32_t exec_queues[MAX_N_EXEC_QUEUES];
+ struct test_exec_data *data;
+ uint32_t bo_flags;
+ uint32_t bo = 0;
+ void **pending_free;
+ u64 *exec_ufence = NULL;
+ int i, b, file_fd = -1;
+ bool free_vm = false;
+ size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
+
+ if (flags & SHARED_ALLOC)
+ return;
+
+ igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
+
+ if (flags & NEW && !(flags & FREE)) {
+ pending_free = malloc(sizeof(*pending_free) * n_execs);
+ igt_assert(pending_free);
+ memset(pending_free, 0, sizeof(*pending_free) * n_execs);
+ }
+
+ if (!vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ free_vm = true;
+ }
+ if (!bo_size) {
+ if (!stride) {
+ bo_size = sizeof(*data) * n_execs;
+ bo_size = xe_bb_size(fd, bo_size);
+ } else {
+ bo_size = stride * n_execs * sizeof(*data);
+ bo_size = xe_bb_size(fd, bo_size);
+ }
+ }
+ if (flags & HUGE_PAGE) {
+ aligned_size = ALIGN(aligned_size, SZ_2M);
+ bo_size = ALIGN(bo_size, SZ_2M);
+ }
+
+ if (alloc) {
+ data = alloc;
+ } else {
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ if (flags & MMAP) {
+ int mmap_flags = MAP_FIXED;
+
+ if (flags & MMAP_SHARED)
+ mmap_flags |= MAP_SHARED;
+ else
+ mmap_flags |= MAP_PRIVATE;
+
+ if (flags & HUGE_PAGE)
+ mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
+
+ if (flags & FILE_BACKED) {
+ char name[1024];
+
+ igt_assert(!(flags & NEW));
+
+ sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
+ getpid());
+ file_fd = open(name, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(file_fd, 0, bo_size);
+ } else {
+ mmap_flags |= MAP_ANONYMOUS;
+ }
+
+ data = mmap(data, bo_size, PROT_READ |
+ PROT_WRITE, mmap_flags, file_fd, 0);
+ igt_assert(data != MAP_FAILED);
+ }
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ if (flags & LOCK) {
+ igt_assert(!(flags & NEW));
+ mlock(data, bo_size);
+ }
+ }
+
+ for (i = 0; i < n_exec_queues; i++)
+ exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (free_vm) {
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ }
+ data[0].vm_sync = 0;
+
+ addr = to_user_pointer(data);
+
+ if (flags & BO_UNMAP) {
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id), bo_flags);
+ xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
+
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR, sync,
+ 1, 0, 0);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
+ FIVE_SEC);
+ data[0].vm_sync = 0;
+ gem_close(fd, bo);
+ bo = 0;
+ }
+
+ if (!(flags & RACE)) {
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ }
+
+ for (i = 0; i < n_execs; i++) {
+ int idx = !stride ? i : i * stride;
+ uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int e = i % n_exec_queues, err;
+ bool fault_inject = (FAULT & flags) && i == n_execs / 2;
+ bool fault_injected = (FAULT & flags) && i > n_execs;
+
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+
+ if (!exec_ufence)
+ data[idx].exec_sync = 0;
+
+ sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
+ addr + (char *)&data[idx].exec_sync - (char *)data;
+
+ exec.exec_queue_id = exec_queues[e];
+ if (fault_inject)
+ exec.address = batch_addr * 2;
+ else
+ exec.address = batch_addr;
+
+ if (fault_injected) {
+ err = __xe_exec(fd, &exec);
+ igt_assert(err == -ENOENT);
+ } else {
+ xe_exec(fd, &exec);
+ }
+
+ if (fault_inject || fault_injected) {
+ int64_t timeout = QUARTER_SEC;
+
+ err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync,
+ USER_FENCE_VALUE,
+ exec_queues[e], &timeout);
+ igt_assert(err == -ETIME || err == -EIO);
+ } else {
+ xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync, USER_FENCE_VALUE,
+ exec_queues[e], FIVE_SEC);
+ if (flags & LOCK && !i)
+ munlock(data, bo_size);
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ }
+
+ if (exec_ufence)
+ exec_ufence[0] = 0;
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & NEW) {
+ if (flags & MMAP) {
+ if (flags & FREE)
+ munmap(data, bo_size);
+ else
+ pending_free[i] = data;
+ data = mmap(NULL, bo_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ } else if (flags & BO_MAP && (i % 2)) {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ data = aligned_alloc(aligned_size, bo_size);
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ data = xe_bo_map_fixed(fd, bo, bo_size,
+ to_user_pointer(data));
+
+ xe_vm_bind_async(fd, vm, 0, bo, 0,
+ to_user_pointer(data),
+ bo_size, 0, 0);
+ } else {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ bo = 0;
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ }
+ addr = to_user_pointer(data);
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ }
+ }
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & BUSY)
+ igt_assert_eq(unbind_system_allocator(), -EBUSY);
+
+ for (i = 0; i < n_exec_queues; i++)
+ xe_exec_queue_destroy(fd, exec_queues[i]);
+
+ if (exec_ufence)
+ munmap(exec_ufence, SZ_4K);
+
+ if (flags & LOCK)
+ munlock(data, bo_size);
+
+ if (file_fd != -1)
+ close(file_fd);
+
+ if (flags & NEW && !(flags & FREE)) {
+ for (i = 0; i < n_execs; i++) {
+ if (!pending_free[i])
+ continue;
+
+ if (flags & MMAP)
+ munmap(pending_free[i], bo_size);
+ else
+ free(pending_free[i]);
+ }
+ free(pending_free);
+ } else {
+ if (flags & MMAP)
+ munmap(data, bo_size);
+ else if (!alloc)
+ free(data);
+ }
+ if (free_vm)
+ xe_vm_destroy(fd, vm);
+}
+
+struct thread_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ int fd;
+ struct drm_xe_engine_class_instance *eci;
+ int n_exec_queues;
+ int n_execs;
+ size_t bo_size;
+ size_t stride;
+ uint32_t vm;
+ unsigned int flags;
+ void *alloc;
+ bool *go;
+};
+
+static void *thread(void *data)
+{
+ struct thread_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
+ t->bo_size, t->stride, t->vm, t->alloc, t->flags);
+
+ return NULL;
+}
+
+static void
+threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags, bool shared_vm)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct thread_data *threads_data;
+ int n_engines = 0, i = 0;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t vm = 0;
+ bool go = false;
+ void *alloc = NULL;
+
+ if (FILE_BACKED & flags)
+ return;
+
+ xe_for_each_engine(fd, hwe)
+ ++n_engines;
+
+ if (shared_vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ bind_system_allocator(NULL, 0);
+ if (flags & SHARED_ALLOC) {
+ uint64_t alloc_size;
+
+ igt_assert(stride);
+
+ alloc_size = sizeof(struct test_exec_data) * stride *
+ n_execs * n_engines;
+ alloc_size = xe_bb_size(fd, alloc_size);
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ memset(alloc, 0, alloc_size);
+ flags &= ~SHARED_ALLOC;
+ }
+ } else if (flags & SHARED_ALLOC) {
+ return;
+ }
+
+ threads_data = calloc(n_engines, sizeof(*threads_data));
+ igt_assert(threads_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ threads_data[i].mutex = &mutex;
+ threads_data[i].cond = &cond;
+ threads_data[i].fd = fd;
+ threads_data[i].eci = hwe;
+ threads_data[i].n_exec_queues = n_exec_queues;
+ threads_data[i].n_execs = n_execs;
+ threads_data[i].bo_size = bo_size;
+ threads_data[i].stride = stride;
+ threads_data[i].vm = vm;
+ threads_data[i].flags = flags;
+ threads_data[i].alloc = alloc ? alloc + i *
+ sizeof(struct test_exec_data) : NULL;
+ threads_data[i].go = &go;
+ pthread_create(&threads_data[i].thread, 0, thread,
+ &threads_data[i]);
+ ++i;
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_engines; ++i)
+ pthread_join(threads_data[i].thread, NULL);
+
+ if (shared_vm) {
+ int ret;
+
+ if (flags & MMAP) {
+ int tries = 300;
+
+ while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
+ sleep(.01);
+ --tries;
+ }
+ igt_assert_eq(ret, 0);
+ }
+ xe_vm_destroy(fd, vm);
+ if (alloc)
+ free(alloc);
+ }
+ free(threads_data);
+}
+
+static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
+ int n_execs, size_t bo_size, size_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ test_exec(fd, hwe, n_exec_queues, n_execs,
+ bo_size, stride, 0, NULL, flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process(hwe, n_exec_queues, n_execs, bo_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct section {
+ const char *name;
+ unsigned int flags;
+};
+
+igt_main
+{
+ struct drm_xe_engine_class_instance *hwe;
+ const struct section sections[] = {
+ { "malloc", 0 },
+ { "malloc-mlock", LOCK },
+ { "malloc-race", RACE },
+ { "malloc-busy", BUSY },
+ { "malloc-bo-unmap", BO_UNMAP },
+ { "mmap", MMAP },
+ { "mmap-huge", MMAP | HUGE_PAGE },
+ { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
+ { "mmap-mlock", MMAP | LOCK },
+ { "mmap-file", MMAP | FILE_BACKED },
+ { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
+ { "mmap-race", MMAP | RACE },
+ { "free", NEW | FREE },
+ { "free-race", NEW | FREE | RACE },
+ { "new", NEW },
+ { "new-race", NEW | RACE },
+ { "new-bo-map", NEW | BO_MAP },
+ { "new-busy", NEW | BUSY },
+ { "mmap-free", MMAP | NEW | FREE },
+ { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race", MMAP | NEW | FREE | RACE },
+ { "mmap-new", MMAP | NEW },
+ { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race", MMAP | NEW | RACE },
+ { "malloc-nomemset", SKIP_MEMSET },
+ { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
+ { "malloc-race-nomemset", SKIP_MEMSET | RACE },
+ { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
+ { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
+ { "mmap-nomemset", SKIP_MEMSET | MMAP },
+ { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
+ { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
+ { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
+ { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
+ { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
+ { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
+ { "free-nomemset", SKIP_MEMSET | NEW | FREE },
+ { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
+ { "new-nomemset", SKIP_MEMSET | NEW },
+ { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
+ { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
+ { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
+ { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
+ { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
+ { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
+ { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
+ { NULL },
+ };
+ const struct section psections[] = {
+ { "munmap-cpu-fault", CPU_FAULT },
+ { "munmap-no-cpu-fault", 0 },
+ { "remap-cpu-fault", CPU_FAULT | REMAP },
+ { "remap-no-cpu-fault", REMAP },
+ { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
+ { "middle-munmap-no-cpu-fault", MIDDLE },
+ { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
+ { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
+ { NULL },
+ };
+ const struct section esections[] = {
+ { "malloc", 0 },
+ { "malloc-mix-bo", MIX_BO_ALLOC },
+ { NULL },
+ };
+ int fd;
+
+ igt_fixture {
+ struct xe_device *xe;
+
+ fd = drm_open_driver(DRIVER_XE);
+ igt_require(xe_supports_faults(fd));
+
+ xe = xe_device_get(fd);
+ va_bits = xe->va_bits;
+ }
+
+ for (const struct section *s = sections; s->name; s++) {
+ igt_subtest_f("once-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("twice-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("many-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("many-stride-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL, s->flags);
+
+ igt_subtest_f("many-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("many-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("many-large-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL, s->flags);
+
+ igt_subtest_f("threads-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, false);
+
+ igt_subtest_f("threads-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-shared-vm-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("process-many-%s", s->name)
+ processes(fd, 1, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-stride-%s", s->name)
+ processes(fd, 1, 128, 0, 256, s->flags);
+
+ igt_subtest_f("process-many-execqueues-%s", s->name)
+ processes(fd, 16, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-large-%s", s->name)
+ processes(fd, 1, 128, SZ_2M, 0, s->flags);
+
+ igt_subtest_f("process-many-large-execqueues-%s", s->name)
+ processes(fd, 16, 128, SZ_2M, 0, s->flags);
+ }
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
+ threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
+
+ igt_subtest_f("fault")
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, FAULT);
+
+ for (const struct section *s = psections; s->name; s++) {
+ igt_subtest_f("partial-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ partial(fd, hwe, s->flags);
+ }
+
+ igt_subtest_f("unaligned-alloc")
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
+ SZ_1M + SZ_512K, SZ_4K, NULL, 0);
+ break;
+ }
+
+ igt_subtest_f("fault-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK);
+
+ igt_subtest_f("fault-threads-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_THREADS);
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("evict-%s", s->name)
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe,
+ xe_visible_vram_size(fd, hwe->gt_id),
+ SZ_8M, SZ_1M, NULL, s->flags);
+ break;
+ }
+ }
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("processes-evict-%s", s->name)
+ processes_evict(fd, SZ_8M, SZ_1M, s->flags);
+ }
+
+ igt_fixture {
+ xe_device_put(fd);
+ drm_close_driver(fd);
+ }
+}
diff --git a/tests/meson.build b/tests/meson.build
index 00556c9d61..31d0acd6a7 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -291,6 +291,7 @@ intel_xe_progs = [
'xe_exec_reset',
'xe_exec_sip',
'xe_exec_store',
+ 'xe_exec_system_allocator',
'xe_exec_threads',
'xe_exercise_blt',
'xe_gpgpu_fill',
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH] tests/xe: Add system_allocator test
@ 2024-10-16 3:04 Matthew Brost
0 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2024-10-16 3:04 UTC (permalink / raw)
To: igt-dev
Test various uses of system allocator in single thread, multiple
threads, and multiple processes.
Features tested:
- Malloc with various size
- Mmap with various sizes and flags including file backed mappings
- Mixing BO allocations with system allocator
- Various page sizes
- Dynamically freeing / unmapping memory
- Sharing VM across threads
- Faults racing on different hardware engines / GTs / Tiles
- GPU faults and CPU faults racing
- CPU faults on multiple threads racing
- CPU faults on multiple process racing
- GPU faults of memory not faulted in by CPU
- Partial unmap of allocations
- Attempting to unmap system allocations when GPU has mappings
- Eviction of both system allocations and BOs
- Forking child processes and reading data from VRAM
- mremap data in VRAM
- Protection changes
- Multiple faults per execbuf
Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
include/drm-uapi/xe_drm.h | 1 +
lib/xe/xe_ioctl.c | 12 +
lib/xe/xe_ioctl.h | 1 +
tests/intel/xe_exec_system_allocator.c | 1772 ++++++++++++++++++++++++
tests/meson.build | 1 +
5 files changed, 1787 insertions(+)
create mode 100644 tests/intel/xe_exec_system_allocator.c
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index f0a450db95..ca57c57ef9 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -994,6 +994,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+#define DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR (1 << 4)
/** @flags: Bind flags */
__u32 flags;
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 6d83889188..1d416db5e4 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -436,6 +436,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
return __xe_bo_map(fd, bo, size, PROT_WRITE);
}
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
+{
+ uint64_t mmo;
+ void *map;
+
+ mmo = xe_bo_mmap_offset(fd, bo);
+ map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
+ igt_assert(map != MAP_FAILED);
+
+ return map;
+}
+
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
{
return __xe_bo_map(fd, bo, size, prot);
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index 18cc2b72b2..98b31d71c8 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -86,6 +86,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
void *xe_bo_map(int fd, uint32_t bo, size_t size);
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
int __xe_exec(int fd, struct drm_xe_exec *exec);
void xe_exec(int fd, struct drm_xe_exec *exec);
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
new file mode 100644
index 0000000000..46f8a3ecaf
--- /dev/null
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -0,0 +1,1772 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+/**
+ * TEST: Basic tests for execbuf functionality using system allocator
+ * Category: Hardware building block
+ * Mega feature: Shared virtual memory
+ * Sub-category: execbuf
+ * Functionality: fault mode, system allocator
+ * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
+ */
+
+#include <fcntl.h>
+#include <linux/mman.h>
+#include <time.h>
+
+#include "igt.h"
+#include "lib/igt_syncobj.h"
+#include "lib/intel_reg.h"
+#include "xe_drm.h"
+
+#include "xe/xe_ioctl.h"
+#include "xe/xe_query.h"
+#include <string.h>
+
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define QUARTER_SEC (NSEC_PER_SEC / 4)
+#define FIVE_SEC (5LL * NSEC_PER_SEC)
+
+struct batch_data {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+#define WRITE_VALUE(data__, i__) ({ \
+ if (!(data__)->expected_data) \
+ (data__)->expected_data = rand() << 12 | (i__); \
+ (data__)->expected_data; \
+})
+#define READ_VALUE(data__, i__) ((data__)->expected_data)
+
+static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
+ batch[(*idx)++] = sdi_addr;
+ batch[(*idx)++] = sdi_addr >> 32;
+ batch[(*idx)++] = wdata;
+}
+
+static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ __write_dword(batch, sdi_addr, wdata, idx);
+ batch[(*idx)++] = MI_BATCH_BUFFER_END;
+}
+
+static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride,
+ pthread_barrier_t *barrier)
+{
+ int i, n_writes = alloc_size / stride;
+
+ for (i = 0; i < n_writes; ++i) {
+ struct batch_data *data = ptr + i * stride;
+
+ igt_assert_eq(data->data, READ_VALUE(data, i));
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+ }
+}
+
+#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
+
+struct process_data {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ bool go;
+};
+
+static void wait_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ while (!pdata->go)
+ pthread_cond_wait(&pdata->cond, &pdata->mutex);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void init_pdata(struct process_data *pdata, int n_engine)
+{
+ pthread_mutexattr_t mutex_attr;
+ pthread_condattr_t cond_attr;
+ pthread_barrierattr_t barrier_attr;
+
+ pthread_mutexattr_init(&mutex_attr);
+ pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&pdata->mutex, &mutex_attr);
+
+ pthread_condattr_init(&cond_attr);
+ pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
+ pthread_cond_init(&pdata->cond, &cond_attr);
+
+ pthread_barrierattr_init(&barrier_attr);
+ pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
+ pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
+
+ pdata->go = false;
+}
+
+static void signal_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ pdata->go = true;
+ pthread_cond_broadcast(&pdata->cond);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+/* many_alloc flags */
+#define MIX_BO_ALLOC (0x1 << 0)
+#define BENCHMARK (0x1 << 1)
+#define CPU_FAULT_THREADS (0x1 << 2)
+#define CPU_FAULT_PROCESS (0x1 << 3)
+#define CPU_FAULT_SAME_PAGE (0x1 << 4)
+
+static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ if (flags & CPU_FAULT_SAME_PAGE)
+ check_all_pages(ptr, alloc_size, stride, &pdata->barrier);
+ else
+ check_all_pages(ptr, alloc_size, stride, NULL);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+check_all_pages_process(void *ptr, uint64_t alloc_size, uint64_t stride,
+ int n_process, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd, i;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, n_process);
+
+ for (i = 0; i < n_process; ++i) {
+ igt_fork(child, 1)
+ if (flags & CPU_FAULT_SAME_PAGE)
+ process_check(ptr, alloc_size, stride, flags);
+ else
+ process_check(ptr + stride * i, alloc_size,
+ stride * n_process, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct thread_check_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ pthread_barrier_t *barrier;
+ void *ptr;
+ uint64_t alloc_size;
+ uint64_t stride;
+ bool *go;
+};
+
+static void *thread_check(void *data)
+{
+ struct thread_check_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ check_all_pages(t->ptr, t->alloc_size, t->stride, t->barrier);
+
+ return NULL;
+}
+
+/*
+ * Partition checking of results in chunks which causes multiple threads to
+ * fault same VRAM allocation in parallel.
+ */
+static void
+check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
+ int n_threads, unsigned int flags)
+{
+ struct thread_check_data *threads_check_data;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ int i;
+ bool go = false;
+
+ threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
+ igt_assert(threads_check_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+ pthread_barrier_init(&barrier, 0, n_threads);
+
+ for (i = 0; i < n_threads; ++i) {
+ threads_check_data[i].mutex = &mutex;
+ threads_check_data[i].cond = &cond;
+ if (flags & CPU_FAULT_SAME_PAGE) {
+ threads_check_data[i].barrier = &barrier;
+ threads_check_data[i].ptr = ptr;
+ threads_check_data[i].alloc_size = alloc_size;
+ threads_check_data[i].stride = stride;
+ } else {
+ threads_check_data[i].barrier = NULL;
+ threads_check_data[i].ptr = ptr + stride * i;
+ threads_check_data[i].alloc_size = alloc_size;
+ threads_check_data[i].stride = n_threads * stride;
+ }
+ threads_check_data[i].go = &go;
+
+ pthread_create(&threads_check_data[i].thread, 0, thread_check,
+ &threads_check_data[i]);
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_threads; ++i)
+ pthread_join(threads_check_data[i].thread, NULL);
+ free(threads_check_data);
+}
+
+static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
+ uint64_t alloc_size, uint64_t stride,
+ struct timespec *tv, uint64_t *submit)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 0,
+ .exec_queue_id = exec_queue,
+ .syncs = to_user_pointer(&sync),
+ };
+ uint64_t addr = to_user_pointer(ptr);
+ int i, ret, n_writes = alloc_size / stride;
+ u64 *exec_ufence = NULL;
+ int64_t timeout = FIVE_SEC;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ sync[0].addr = to_user_pointer(exec_ufence);
+
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
+ igt_assert(b <= ARRAY_SIZE(data->batch));
+ }
+
+ igt_nsec_elapsed(tv);
+ *submit = igt_nsec_elapsed(tv);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ exec.address = batch_addr;
+ if (i + 1 == n_writes)
+ exec.num_syncs = 1;
+ xe_exec(fd, &exec);
+ }
+
+ ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
+ &timeout);
+ if (ret) {
+ printf("FAIL EXEC_UFENCE_ADDR: 0x%016llx\n", sync[0].addr);
+ printf("FAIL EXEC_UFENCE: EXPEXCTED=0x%016llx, ACTUAL=0x%016lx\n",
+ USER_FENCE_VALUE, exec_ufence[0]);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+
+ printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
+ printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
+ printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
+ (((u64)data->batch[2]) << 32) | data->batch[1]);
+ printf("FAIL DARA: EXPEXCTED=0x%08x, ACTUAL=0x%08x\n",
+ data->expected_data, data->data);
+ }
+ igt_assert_eq(ret, 0);
+ }
+ munmap(exec_ufence, SZ_4K);
+}
+
+static int va_bits;
+
+#define bind_system_allocator(__sync, __num_sync) \
+ __xe_vm_bind_assert(fd, vm, 0, \
+ 0, 0, 0, 0x1ull << va_bits, \
+ DRM_XE_VM_BIND_OP_MAP, \
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR, \
+ (__sync), (__num_sync), 0, 0)
+
+#define unbind_system_allocator() \
+ __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits, \
+ DRM_XE_VM_BIND_OP_UNMAP, 0, \
+ NULL, 0, 0, 0, 0)
+
+#define odd(__i) (__i & 1)
+
+/**
+ * SUBTEST: unaligned-alloc
+ * Description: allocate unaligned sizes of memory
+ * Test category: functionality test
+ *
+ * SUBTEST: fault-benchmark
+ * Description: Benchmark how long GPU / CPU take
+ * Test category: performance test
+ *
+ * SUBTEST: fault-threads-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-threads-same-page-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple threads, hammer same page
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-process-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple process
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-process-same-page-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple process, hammer same page
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: evict-malloc
+ * Description: trigger eviction of VRAM allocated via malloc
+ * Test category: functionality test
+ *
+ * SUBTEST: evict-malloc-mix-bo
+ * Description: trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: functionality test
+ *
+ * SUBTEST: processes-evict-malloc
+ * Description: multi-process trigger eviction of VRAM allocated via malloc
+ * Test category: stress test
+ *
+ * SUBTEST: processes-evict-malloc-mix-bo
+ * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: stress test
+ */
+
+static void
+many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
+ uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
+ pthread_barrier_t *barrier, unsigned int flags)
+{
+ uint32_t vm, exec_queue;
+ int num_allocs = flags & BENCHMARK ? 1 :
+ (9 * (total_alloc / alloc_size)) / 8;
+ void **allocs;
+ uint32_t *bos = NULL;
+ struct timespec tv = {};
+ uint64_t submit, read, elapsed;
+ int i;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ bind_system_allocator(NULL, 0);
+
+ allocs = malloc(sizeof(*allocs) * num_allocs);
+ igt_assert(allocs);
+ memset(allocs, 0, sizeof(*allocs) * num_allocs);
+
+ if (flags & MIX_BO_ALLOC) {
+ bos = malloc(sizeof(*bos) * num_allocs);
+ igt_assert(bos);
+ memset(bos, 0, sizeof(*bos) * num_allocs);
+ }
+
+ for (i = 0; i < num_allocs; ++i) {
+ void *alloc;
+
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ if (flags & MIX_BO_ALLOC && odd(i)) {
+ uint32_t bo_flags =
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+
+ bos[i] = xe_bo_create(fd, vm, alloc_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ alloc = xe_bo_map_fixed(fd, bos[i], alloc_size,
+ to_user_pointer(alloc));
+ xe_vm_bind_async(fd, vm, 0, bos[i], 0,
+ to_user_pointer(alloc),
+ alloc_size, 0, 0);
+ }
+ allocs[i] = alloc;
+
+ touch_all_pages(fd, exec_queue, allocs[i], alloc_size, stride,
+ &tv, &submit);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ for (i = 0; i < num_allocs; ++i) {
+ if (flags & BENCHMARK)
+ read = igt_nsec_elapsed(&tv);
+#define NUM_CHECK_THREADS 8
+ if (flags & CPU_FAULT_PROCESS)
+ check_all_pages_process(allocs[i], alloc_size, stride,
+ NUM_CHECK_THREADS, flags);
+ else if (flags & CPU_FAULT_THREADS)
+ check_all_pages_threads(allocs[i], alloc_size, stride,
+ NUM_CHECK_THREADS, flags);
+ else
+ check_all_pages(allocs[i], alloc_size, stride, NULL);
+ if (flags & BENCHMARK) {
+ elapsed = igt_nsec_elapsed(&tv);
+ printf("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
+ 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
+ 1e-3 * (elapsed - submit),
+ 1e-3 * (elapsed - read));
+ }
+ if (bos && bos[i]) {
+ munmap(allocs[i], alloc_size);
+ gem_close(fd, bos[i]);
+ } else {
+ free(allocs[i]);
+ }
+ }
+ if (bos)
+ free(bos);
+ free(allocs);
+ xe_exec_queue_destroy(fd, exec_queue);
+ xe_vm_destroy(fd, vm);
+}
+
+static void process_evict(struct drm_xe_engine_class_instance *hwe,
+ uint64_t total_alloc, uint64_t alloc_size,
+ uint64_t stride, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
+ flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_assert(hwe->gt_id < 2);
+ n_engine_gt[hwe->gt_id]++;
+ n_engine++;
+ }
+
+ init_pdata(pdata, n_engine);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process_evict(hwe,
+ xe_visible_vram_size(fd, hwe->gt_id) /
+ n_engine_gt[hwe->gt_id], alloc_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+#define CPU_FAULT (0x1 << 0)
+#define REMAP (0x1 << 1)
+#define MIDDLE (0x1 << 2)
+
+/**
+ * SUBTEST: partial-munmap-cpu-fault
+ * Description: munmap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-munmap-no-cpu-fault
+ * Description: munmap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-cpu-fault
+ * Description: remap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-no-cpu-fault
+ * Description: remap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-cpu-fault
+ * Description: munmap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-no-cpu-fault
+ * Description: munmap middle with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-cpu-fault
+ * Description: remap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-no-cpu-fault
+ * Description: remap middle with no cpu access in between
+ * Test category: functionality test
+ */
+
+static void
+partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+ } *data;
+ size_t bo_size = SZ_2M, unmap_offset = 0;
+ uint32_t vm, exec_queue;
+ u64 *exec_ufence = NULL;
+ int i;
+ void *old, *new = NULL;
+
+ if (flags & MIDDLE)
+ unmap_offset = bo_size / 4;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+
+ data = aligned_alloc(bo_size, bo_size);
+ igt_assert(data);
+
+ data = mmap(data, bo_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ memset(data, 0, bo_size);
+ old = data;
+
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ data[0].vm_sync = 0;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ if (!i)
+ data = old + unmap_offset + bo_size / 2;
+ }
+
+ data = old;
+ exec.exec_queue_id = exec_queue;
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ sync[0].addr = new ? to_user_pointer(new) :
+ to_user_pointer(exec_ufence);
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+
+ xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
+ exec_queue, FIVE_SEC);
+ if (i || (flags & CPU_FAULT))
+ igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
+ exec_ufence[0] = 0;
+
+ if (!i) {
+ data = old + unmap_offset + bo_size / 2;
+ munmap(old + unmap_offset, bo_size / 2);
+ if (flags & REMAP) {
+ new = mmap(old + unmap_offset, bo_size / 2,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
+ MAP_LOCKED, -1, 0);
+ igt_assert(new != MAP_FAILED);
+ }
+ }
+ }
+
+ xe_exec_queue_destroy(fd, exec_queue);
+ munmap(exec_ufence, SZ_4K);
+ munmap(old, bo_size);
+ if (new)
+ munmap(new, bo_size / 2);
+ xe_vm_destroy(fd, vm);
+}
+
+#define MAX_N_EXEC_QUEUES 16
+
+#define MMAP (0x1 << 0)
+#define NEW (0x1 << 1)
+#define BO_UNMAP (0x1 << 2)
+#define FREE (0x1 << 3)
+#define BUSY (0x1 << 4)
+#define BO_MAP (0x1 << 5)
+#define RACE (0x1 << 6)
+#define SKIP_MEMSET (0x1 << 7)
+#define FAULT (0x1 << 8)
+#define FILE_BACKED (0x1 << 9)
+#define LOCK (0x1 << 10)
+#define MMAP_SHARED (0x1 << 11)
+#define HUGE_PAGE (0x1 << 12)
+#define SHARED_ALLOC (0x1 << 13)
+#define FORK_READ (0x1 << 14)
+#define FORK_READ_AFTER (0x1 << 15)
+#define MREMAP (0x1 << 16)
+#define DONTUNMAP (0x1 << 17)
+#define READ_ONLY_REMAP (0x1 << 18)
+#define SYNC_EXEC (0x1 << 19)
+#define EVERY_OTHER_CHECK (0x1 << 20)
+#define MULTI_FAULT (0x1 << 21)
+
+#define N_MULTI_FAULT 4
+
+/**
+ * SUBTEST: once-%s
+ * Description: Run %arg[1] system allocator test only once
+ * Test category: functionality test
+ *
+ * SUBTEST: once-large-%s
+ * Description: Run %arg[1] system allocator test only once with large allocation
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-%s
+ * Description: Run %arg[1] system allocator test twice
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-large-%s
+ * Description: Run %arg[1] system allocator test twice with large allocation
+ * Test category: functionality test
+ *
+ * SUBTEST: many-%s
+ * Description: Run %arg[1] system allocator test many times
+ * Test category: stress test
+ *
+ * SUBTEST: many-stride-%s
+ * Description: Run %arg[1] system allocator test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: many-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-%s
+ * Description: Run %arg[1] system allocator test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-many-%s
+ * Description: Run %arg[1] system allocator threaded test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-%s
+ * Description: Run %arg[1] system allocator threaded test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-shared-vm-many-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-%s
+ * Description: Run %arg[1] system allocator multi-process test many times
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-stride-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
+ *
+ * SUBTEST: fault
+ * Description: use a bad system allocator address resulting in a fault
+ * Test category: bad input
+ *
+ * arg[1]:
+ *
+ * @malloc: malloc single buffer for all execs, issue a command which will trigger multiple faults
+ * @malloc-multi-fault: malloc single buffer for all execs
+ * @malloc-fork-read: malloc single buffer for all execs, fork a process to read test output
+ * @malloc-fork-read-after: malloc single buffer for all execs, fork a process to read test output, check again after fork returns in parent
+ * @malloc-mlock: malloc and mlock single buffer for all execs
+ * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
+ * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
+ * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
+ * @mmap: mmap single buffer for all execs
+ * @mmap-remap: mmap and mremap a buffer for all execs
+ * @mmap-remap-dontunmap: mmap and mremap a buffer with dontunmap flag for all execs
+ * @mmap-remap-ro: mmap and mremap a read-only buffer for all execs
+ * @mmap-remap-ro-dontunmap: mmap and mremap a read-only buffer with dontunmap flag for all execs
+ * @mmap-remap-eocheck: mmap and mremap a buffer for all execs, check data every other loop iteration
+ * @mmap-remap-dontunmap-eocheck: mmap and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-remap-ro-eocheck: mmap and mremap a read-only buffer for all execs, check data every other loop iteration
+ * @mmap-remap-ro-dontunmap-eocheck: mmap and mremap a read-only buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-huge: mmap huge page single buffer for all execs
+ * @mmap-shared: mmap shared single buffer for all execs
+ * @mmap-shared-remap: mmap shared and mremap a buffer for all execs
+ * @mmap-shared-remap-dontunmap: mmap shared and mremap a buffer with dontunmap flag for all execs
+ * @mmap-shared-remap-eocheck: mmap shared and mremap a buffer for all execs, check data every other loop iteration
+ * @mmap-shared-remap-dontunmap-eocheck: mmap shared and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-mlock: mmap and mlock single buffer for all execs
+ * @mmap-file: mmap single buffer, with file backing, for all execs
+ * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
+ * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
+ * @free: malloc and free buffer for each exec
+ * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
+ * @new: malloc a new buffer for each exec
+ * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
+ * @new-bo-map: malloc a new buffer or map BO for each exec
+ * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
+ * @mmap-free: mmap and free buffer for each exec
+ * @mmap-free-huge: mmap huge page and free buffer for each exec
+ * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
+ * @mmap-new: mmap a new buffer for each exec
+ * @mmap-new-huge: mmap huge page a new buffer for each exec
+ * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
+ * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
+ * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
+ * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
+ * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
+ * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
+ * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
+ * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
+ * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
+ * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
+ * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
+ * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
+ * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
+ * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
+ * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
+ * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
+ * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
+ * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc-sync
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses, syncing on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc-race
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
+ * Test category: stress test
+ */
+
+struct test_exec_data {
+ uint32_t batch[32];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+static void
+test_exec(int fd, struct drm_xe_engine_class_instance *eci,
+ int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
+ unsigned int flags)
+{
+ uint64_t addr;
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ uint32_t exec_queues[MAX_N_EXEC_QUEUES];
+ struct test_exec_data *data, *next_data = NULL;
+ uint32_t bo_flags;
+ uint32_t bo = 0;
+ void **pending_free;
+ u64 *exec_ufence = NULL;
+ int i, j, b, file_fd = -1, prev_idx;
+ bool free_vm = false;
+ size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
+ size_t orig_size = bo_size;
+
+ if (flags & MULTI_FAULT) {
+ if (!bo_size)
+ return;
+
+ bo_size *= N_MULTI_FAULT;
+ }
+
+ if (flags & SHARED_ALLOC)
+ return;
+
+ if (flags & EVERY_OTHER_CHECK && odd(n_execs))
+ return;
+
+ if (flags & EVERY_OTHER_CHECK)
+ igt_assert(flags & MREMAP);
+
+ igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
+
+ if (flags & NEW && !(flags & FREE)) {
+ pending_free = malloc(sizeof(*pending_free) * n_execs);
+ igt_assert(pending_free);
+ memset(pending_free, 0, sizeof(*pending_free) * n_execs);
+ }
+
+ if (!vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ free_vm = true;
+ }
+ if (!bo_size) {
+ if (!stride) {
+ bo_size = sizeof(*data) * n_execs;
+ bo_size = xe_bb_size(fd, bo_size);
+ } else {
+ bo_size = stride * n_execs * sizeof(*data);
+ bo_size = xe_bb_size(fd, bo_size);
+ }
+ }
+ if (flags & HUGE_PAGE) {
+ aligned_size = ALIGN(aligned_size, SZ_2M);
+ bo_size = ALIGN(bo_size, SZ_2M);
+ }
+
+ if (alloc) {
+ data = alloc;
+ } else {
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ if (flags & MMAP) {
+ int mmap_flags = MAP_FIXED;
+
+ if (flags & MMAP_SHARED)
+ mmap_flags |= MAP_SHARED;
+ else
+ mmap_flags |= MAP_PRIVATE;
+
+ if (flags & HUGE_PAGE)
+ mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
+
+ if (flags & FILE_BACKED) {
+ char name[1024];
+
+ igt_assert(!(flags & NEW));
+
+ sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
+ getpid());
+ file_fd = open(name, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(file_fd, 0, bo_size);
+ } else {
+ mmap_flags |= MAP_ANONYMOUS;
+ }
+
+ data = mmap(data, bo_size, PROT_READ |
+ PROT_WRITE, mmap_flags, file_fd, 0);
+ igt_assert(data != MAP_FAILED);
+ }
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ if (flags & LOCK) {
+ igt_assert(!(flags & NEW));
+ mlock(data, bo_size);
+ }
+ }
+
+ for (i = 0; i < n_exec_queues; i++)
+ exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (free_vm) {
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ }
+ data[0].vm_sync = 0;
+
+ addr = to_user_pointer(data);
+
+ if (flags & BO_UNMAP) {
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id), bo_flags);
+ xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
+
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR, sync,
+ 1, 0, 0);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
+ FIVE_SEC);
+ data[0].vm_sync = 0;
+ gem_close(fd, bo);
+ bo = 0;
+ }
+
+ if (!(flags & RACE)) {
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ }
+
+ for (i = 0; i < n_execs; i++) {
+ int idx = !stride ? i : i * stride, next_idx = !stride
+ ? (i + 1) : (i + 1) * stride;
+ uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int e = i % n_exec_queues, err;
+ bool fault_inject = (FAULT & flags) && i == n_execs / 2;
+ bool fault_injected = (FAULT & flags) && i > n_execs;
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ if (flags & MULTI_FAULT) {
+ b = 0;
+ for (j = 0; j < N_MULTI_FAULT - 1; ++j)
+ __write_dword(data[idx].batch,
+ sdi_addr + j * orig_size,
+ WRITE_VALUE(&data[idx], idx), &b);
+ write_dword(data[idx].batch, sdi_addr + j * orig_size,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+ } else if (!(flags & EVERY_OTHER_CHECK)) {
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+ } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+
+ next_data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(next_data);
+
+ b = 0;
+ write_dword(data[next_idx].batch,
+ to_user_pointer(next_data) +
+ (char *)&data[next_idx].data - (char *)data,
+ WRITE_VALUE(&data[next_idx], next_idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
+ }
+
+ if (!exec_ufence)
+ data[idx].exec_sync = 0;
+
+ sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
+ addr + (char *)&data[idx].exec_sync - (char *)data;
+
+ exec.exec_queue_id = exec_queues[e];
+ if (fault_inject)
+ exec.address = batch_addr * 2;
+ else
+ exec.address = batch_addr;
+
+ if (fault_injected) {
+ err = __xe_exec(fd, &exec);
+ igt_assert(err == -ENOENT);
+ } else {
+ xe_exec(fd, &exec);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ if (fault_inject || fault_injected) {
+ int64_t timeout = QUARTER_SEC;
+
+ err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync,
+ USER_FENCE_VALUE,
+ exec_queues[e], &timeout);
+ igt_assert(err == -ETIME || err == -EIO);
+ } else {
+ xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync, USER_FENCE_VALUE,
+ exec_queues[e], FIVE_SEC);
+ if (flags & LOCK && !i)
+ munlock(data, bo_size);
+
+ if (flags & MREMAP) {
+ void *old = data;
+ int remap_flags = MREMAP_MAYMOVE | MREMAP_FIXED;
+
+ if (flags & DONTUNMAP)
+ remap_flags |= MREMAP_DONTUNMAP;
+
+ if (flags & READ_ONLY_REMAP)
+ igt_assert(!mprotect(old, bo_size,
+ PROT_READ));
+
+ if (!next_data)
+ data = aligned_alloc(aligned_size, bo_size);
+ else
+ data = next_data;
+ next_data = NULL;
+ igt_assert(data);
+
+ data = mremap(old, bo_size, bo_size,
+ remap_flags, data);
+ igt_assert(data != MAP_FAILED);
+
+ if (flags & READ_ONLY_REMAP)
+ igt_assert(!mprotect(data, bo_size,
+ PROT_READ |
+ PROT_WRITE));
+
+ addr = to_user_pointer(data);
+ if (flags & DONTUNMAP)
+ munmap(old, bo_size);
+ }
+
+ if (!(flags & EVERY_OTHER_CHECK) || odd(i)) {
+ if (flags & FORK_READ) {
+ igt_fork(child, 1)
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ if (!(flags & FORK_READ_AFTER))
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ igt_waitchildren();
+ if (flags & FORK_READ_AFTER)
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ } else {
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+
+ if (flags & MULTI_FAULT) {
+ for (j = 1; j < N_MULTI_FAULT; ++j) {
+ struct test_exec_data *__data =
+ ((void *)data) + j * orig_size;
+
+ igt_assert_eq(__data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ }
+ }
+ }
+ if (flags & EVERY_OTHER_CHECK)
+ igt_assert_eq(data[prev_idx].data,
+ READ_VALUE(&data[prev_idx], idx));
+ }
+ }
+
+ if (exec_ufence)
+ exec_ufence[0] = 0;
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & NEW) {
+ if (flags & MMAP) {
+ if (flags & FREE)
+ munmap(data, bo_size);
+ else
+ pending_free[i] = data;
+ data = mmap(NULL, bo_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ } else if (flags & BO_MAP && (i % 2)) {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ data = aligned_alloc(aligned_size, bo_size);
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ data = xe_bo_map_fixed(fd, bo, bo_size,
+ to_user_pointer(data));
+
+ xe_vm_bind_async(fd, vm, 0, bo, 0,
+ to_user_pointer(data),
+ bo_size, 0, 0);
+ } else {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ bo = 0;
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ }
+ addr = to_user_pointer(data);
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ }
+
+ prev_idx = idx;
+ }
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & BUSY)
+ igt_assert_eq(unbind_system_allocator(), -EBUSY);
+
+ for (i = 0; i < n_exec_queues; i++)
+ xe_exec_queue_destroy(fd, exec_queues[i]);
+
+ if (exec_ufence)
+ munmap(exec_ufence, SZ_4K);
+
+ if (flags & LOCK)
+ munlock(data, bo_size);
+
+ if (file_fd != -1)
+ close(file_fd);
+
+ if (flags & NEW && !(flags & FREE)) {
+ for (i = 0; i < n_execs; i++) {
+ if (!pending_free[i])
+ continue;
+
+ if (flags & MMAP)
+ munmap(pending_free[i], bo_size);
+ else
+ free(pending_free[i]);
+ }
+ free(pending_free);
+ } else {
+ if (flags & MMAP)
+ munmap(data, bo_size);
+ else if (!alloc)
+ free(data);
+ }
+ if (free_vm)
+ xe_vm_destroy(fd, vm);
+}
+
+struct thread_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ pthread_barrier_t *barrier;
+ int fd;
+ struct drm_xe_engine_class_instance *eci;
+ int n_exec_queues;
+ int n_execs;
+ size_t bo_size;
+ size_t stride;
+ uint32_t vm;
+ unsigned int flags;
+ void *alloc;
+ bool *go;
+};
+
+static void *thread(void *data)
+{
+ struct thread_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
+ t->bo_size, t->stride, t->vm, t->alloc, t->barrier,
+ t->flags);
+
+ return NULL;
+}
+
+static void
+threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags, bool shared_vm)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct thread_data *threads_data;
+ int n_engines = 0, i = 0;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ uint32_t vm = 0;
+ bool go = false;
+ void *alloc = NULL;
+
+ if ((FILE_BACKED | FORK_READ) & flags)
+ return;
+
+ xe_for_each_engine(fd, hwe)
+ ++n_engines;
+
+ if (shared_vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ bind_system_allocator(NULL, 0);
+ }
+
+ if (flags & SHARED_ALLOC) {
+ uint64_t alloc_size;
+
+ igt_assert(stride);
+
+ alloc_size = sizeof(struct test_exec_data) * stride *
+ n_execs * n_engines;
+ alloc_size = xe_bb_size(fd, alloc_size);
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ memset(alloc, 0, alloc_size);
+ flags &= ~SHARED_ALLOC;
+ }
+
+ threads_data = calloc(n_engines, sizeof(*threads_data));
+ igt_assert(threads_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+ pthread_barrier_init(&barrier, 0, n_engines);
+
+ xe_for_each_engine(fd, hwe) {
+ threads_data[i].mutex = &mutex;
+ threads_data[i].cond = &cond;
+ threads_data[i].barrier = (flags & SYNC_EXEC) ? &barrier : NULL;
+ threads_data[i].fd = fd;
+ threads_data[i].eci = hwe;
+ threads_data[i].n_exec_queues = n_exec_queues;
+ threads_data[i].n_execs = n_execs;
+ threads_data[i].bo_size = bo_size;
+ threads_data[i].stride = stride;
+ threads_data[i].vm = vm;
+ threads_data[i].flags = flags;
+ threads_data[i].alloc = alloc ? alloc + i *
+ sizeof(struct test_exec_data) : NULL;
+ threads_data[i].go = &go;
+ pthread_create(&threads_data[i].thread, 0, thread,
+ &threads_data[i]);
+ ++i;
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_engines; ++i)
+ pthread_join(threads_data[i].thread, NULL);
+
+ if (shared_vm) {
+ int ret;
+
+ if (flags & MMAP) {
+ int tries = 300;
+
+ while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
+ sleep(.01);
+ --tries;
+ }
+ igt_assert_eq(ret, 0);
+ }
+ xe_vm_destroy(fd, vm);
+ if (alloc)
+ free(alloc);
+ }
+ free(threads_data);
+}
+
+static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
+ int n_execs, size_t bo_size, size_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ test_exec(fd, hwe, n_exec_queues, n_execs,
+ bo_size, stride, 0, NULL, NULL, flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int map_fd;
+
+ if (flags & FORK_READ)
+ return;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process(hwe, n_exec_queues, n_execs, bo_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct section {
+ const char *name;
+ unsigned int flags;
+};
+
+igt_main
+{
+ struct drm_xe_engine_class_instance *hwe;
+ const struct section sections[] = {
+ { "malloc", 0 },
+ { "malloc-multi-fault", MULTI_FAULT },
+ { "malloc-fork-read", FORK_READ },
+ { "malloc-fork-read-after", FORK_READ | FORK_READ_AFTER },
+ { "malloc-mlock", LOCK },
+ { "malloc-race", RACE },
+ { "malloc-busy", BUSY },
+ { "malloc-bo-unmap", BO_UNMAP },
+ { "mmap", MMAP },
+ { "mmap-remap", MMAP | MREMAP },
+ { "mmap-remap-dontunmap", MMAP | MREMAP | DONTUNMAP },
+ { "mmap-remap-ro", MMAP | MREMAP | READ_ONLY_REMAP },
+ { "mmap-remap-ro-dontunmap", MMAP | MREMAP | DONTUNMAP |
+ READ_ONLY_REMAP },
+ { "mmap-remap-eocheck", MMAP | MREMAP | EVERY_OTHER_CHECK },
+ { "mmap-remap-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
+ EVERY_OTHER_CHECK },
+ { "mmap-remap-ro-eocheck", MMAP | MREMAP | READ_ONLY_REMAP |
+ EVERY_OTHER_CHECK },
+ { "mmap-remap-ro-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
+ READ_ONLY_REMAP | EVERY_OTHER_CHECK },
+ { "mmap-huge", MMAP | HUGE_PAGE },
+ { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
+ { "mmap-shared-remap", MMAP | LOCK | MMAP_SHARED | MREMAP },
+ { "mmap-shared-remap-dontunmap", MMAP | LOCK | MMAP_SHARED |
+ MREMAP | DONTUNMAP },
+ { "mmap-shared-remap-eocheck", MMAP | LOCK | MMAP_SHARED |
+ MREMAP | EVERY_OTHER_CHECK },
+ { "mmap-shared-remap-dontunmap-eocheck", MMAP | LOCK |
+ MMAP_SHARED | MREMAP | DONTUNMAP | EVERY_OTHER_CHECK },
+ { "mmap-mlock", MMAP | LOCK },
+ { "mmap-file", MMAP | FILE_BACKED },
+ { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
+ { "mmap-race", MMAP | RACE },
+ { "free", NEW | FREE },
+ { "free-race", NEW | FREE | RACE },
+ { "new", NEW },
+ { "new-race", NEW | RACE },
+ { "new-bo-map", NEW | BO_MAP },
+ { "new-busy", NEW | BUSY },
+ { "mmap-free", MMAP | NEW | FREE },
+ { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race", MMAP | NEW | FREE | RACE },
+ { "mmap-new", MMAP | NEW },
+ { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race", MMAP | NEW | RACE },
+ { "malloc-nomemset", SKIP_MEMSET },
+ { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
+ { "malloc-race-nomemset", SKIP_MEMSET | RACE },
+ { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
+ { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
+ { "mmap-nomemset", SKIP_MEMSET | MMAP },
+ { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
+ { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
+ { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
+ { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
+ { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
+ { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
+ { "free-nomemset", SKIP_MEMSET | NEW | FREE },
+ { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
+ { "new-nomemset", SKIP_MEMSET | NEW },
+ { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
+ { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
+ { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
+ { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
+ { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
+ { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
+ { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
+ { NULL },
+ };
+ const struct section psections[] = {
+ { "munmap-cpu-fault", CPU_FAULT },
+ { "munmap-no-cpu-fault", 0 },
+ { "remap-cpu-fault", CPU_FAULT | REMAP },
+ { "remap-no-cpu-fault", REMAP },
+ { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
+ { "middle-munmap-no-cpu-fault", MIDDLE },
+ { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
+ { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
+ { NULL },
+ };
+ const struct section esections[] = {
+ { "malloc", 0 },
+ { "malloc-mix-bo", MIX_BO_ALLOC },
+ { NULL },
+ };
+ int fd;
+
+ igt_fixture {
+ struct xe_device *xe;
+
+ fd = drm_open_driver(DRIVER_XE);
+ igt_require(!xe_supports_faults(fd));
+
+ xe = xe_device_get(fd);
+ va_bits = xe->va_bits;
+ }
+
+ for (const struct section *s = sections; s->name; s++) {
+ igt_subtest_f("once-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("once-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("twice-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("twice-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-stride-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-large-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("threads-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, false);
+
+ igt_subtest_f("threads-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-shared-vm-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("process-many-%s", s->name)
+ processes(fd, 1, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-stride-%s", s->name)
+ processes(fd, 1, 128, 0, 256, s->flags);
+
+ igt_subtest_f("process-many-execqueues-%s", s->name)
+ processes(fd, 16, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-large-%s", s->name)
+ processes(fd, 1, 128, SZ_2M, 0, s->flags);
+
+ igt_subtest_f("process-many-large-execqueues-%s", s->name)
+ processes(fd, 16, 128, SZ_2M, 0, s->flags);
+ }
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
+ threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC, false);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc-sync")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC | SYNC_EXEC, false);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc-race")
+ threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, false);
+
+ igt_subtest_f("fault")
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, NULL,
+ FAULT);
+
+ for (const struct section *s = psections; s->name; s++) {
+ igt_subtest_f("partial-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ partial(fd, hwe, s->flags);
+ }
+
+ igt_subtest_f("unaligned-alloc")
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
+ SZ_1M + SZ_512K, SZ_4K, NULL, 0);
+ break;
+ }
+
+ igt_subtest_f("fault-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK);
+
+ igt_subtest_f("fault-threads-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_THREADS);
+
+ igt_subtest_f("fault-threads-same-page-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_THREADS |
+ CPU_FAULT_SAME_PAGE);
+
+ igt_subtest_f("fault-process-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_PROCESS);
+
+ igt_subtest_f("fault-process-same-page-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_PROCESS |
+ CPU_FAULT_SAME_PAGE);
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("evict-%s", s->name)
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe,
+ xe_visible_vram_size(fd, hwe->gt_id),
+ SZ_8M, SZ_1M, NULL, s->flags);
+ break;
+ }
+ }
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("processes-evict-%s", s->name)
+ processes_evict(fd, SZ_8M, SZ_1M, s->flags);
+ }
+
+ igt_fixture {
+ xe_device_put(fd);
+ drm_close_driver(fd);
+ }
+}
diff --git a/tests/meson.build b/tests/meson.build
index 34b87b125b..03eef24bf6 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -292,6 +292,7 @@ intel_xe_progs = [
'xe_exec_reset',
'xe_exec_sip',
'xe_exec_store',
+ 'xe_exec_system_allocator',
'xe_exec_threads',
'xe_exercise_blt',
'xe_fault_injection',
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH] tests/xe: Add system_allocator test
@ 2025-04-16 2:20 Matthew Brost
2025-04-16 17:09 ` Thomas Hellström
2025-04-18 15:47 ` Francois Dugast
0 siblings, 2 replies; 16+ messages in thread
From: Matthew Brost @ 2025-04-16 2:20 UTC (permalink / raw)
To: igt-dev
Test various uses of system allocator in single thread, multiple
threads, and multiple processes.
Features tested:
- Malloc with various size
- Mmap with various sizes and flags including file backed mappings
- Mixing BO allocations with system allocator
- Various page sizes
- Dynamically freeing / unmapping memory
- Sharing VM across threads
- Faults racing on different hardware engines / GTs / Tiles
- GPU faults and CPU faults racing
- CPU faults on multiple threads racing
- CPU faults on multiple process racing
- GPU faults of memory not faulted in by CPU
- Partial unmap of allocations
- Attempting to unmap system allocations when GPU has mappings
- Eviction of both system allocations and BOs
- Forking child processes and reading data from VRAM
- mremap data in VRAM
- Protection changes
- Multiple faults per execbuf
Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
v2:
- Rebase
- Fix memory allocation to not interfear with malloc (Thomas)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
include/drm-uapi/xe_drm.h | 57 +-
lib/xe/xe_ioctl.c | 12 +
lib/xe/xe_ioctl.h | 1 +
tests/intel/xe_exec_system_allocator.c | 1832 ++++++++++++++++++++++++
tests/meson.build | 1 +
5 files changed, 1896 insertions(+), 7 deletions(-)
create mode 100644 tests/intel/xe_exec_system_allocator.c
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 154f947ef0..9c08738c3b 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -3,8 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#ifndef _XE_DRM_H_
-#define _XE_DRM_H_
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
#include "drm.h"
@@ -134,7 +134,7 @@ extern "C" {
* redefine the interface more easily than an ever growing struct of
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
- * the boundary with pointers encapsulated inside u64.
+ * the __user boundary with pointers encapsulated inside u64.
*
* Example chaining:
*
@@ -393,6 +393,10 @@ struct drm_xe_query_mem_regions {
*
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
* has usable VRAM
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
+ * has low latency hint support
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
+ * device has CPU address mirroring support
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -409,6 +413,8 @@ struct drm_xe_query_config {
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
@@ -911,7 +917,11 @@ struct drm_xe_gem_mmap_offset {
* struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
*
* The @flags can be:
- * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
+ * space of the VM to scratch page. A vm_bind would overwrite the scratch
+ * page mapping. This flag is mutually exclusive with the
+ * %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception of on x2 and
+ * xe3 platform.
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
@@ -987,6 +997,12 @@ struct drm_xe_vm_destroy {
* - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
* reject the binding if the encryption key is no longer valid. This
* flag has no effect on BOs that are not marked as using PXP.
+ * - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
+ * set, no mappings are created rather the range is reserved for CPU address
+ * mirroring which will be populated on GPU page faults or prefetches. Only
+ * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
+ * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
+ * handle MBZ, and the BO offset MBZ.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -1039,7 +1055,9 @@ struct drm_xe_vm_bind_op {
* on the @pat_index. For such mappings there is no actual memory being
* mapped (the address in the PTE is invalid), so the various PAT memory
* attributes likely do not apply. Simply leaving as zero is one
- * option (still a valid pat_index).
+ * option (still a valid pat_index). Same applies to
+ * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mapping
+ * there is no actual memory being mapped.
*/
__u16 pat_index;
@@ -1055,6 +1073,14 @@ struct drm_xe_vm_bind_op {
/** @userptr: user pointer to bind on */
__u64 userptr;
+
+ /**
+ * @cpu_addr_mirror_offset: Offset from GPU @addr to create
+ * CPU address mirror mappings. MBZ with current level of
+ * support (e.g. 1 to 1 mapping between GPU and CPU mappings
+ * only supported).
+ */
+ __s64 cpu_addr_mirror_offset;
};
/**
@@ -1078,6 +1104,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
+#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
/** @flags: Bind flags */
__u32 flags;
@@ -1205,6 +1232,21 @@ struct drm_xe_vm_bind {
* };
* ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
*
+ * Allow users to provide a hint to kernel for cases demanding low latency
+ * profile. Please note it will have impact on power consumption. User can
+ * indicate low latency hint with flag while creating exec queue as
+ * mentioned below,
+ *
+ * struct drm_xe_exec_queue_create exec_queue_create = {
+ * .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
+ * .extensions = 0,
+ * .vm_id = vm,
+ * .num_bb_per_exec = 1,
+ * .num_eng_per_bb = 1,
+ * .instances = to_user_pointer(&instance),
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *
*/
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
@@ -1223,7 +1265,8 @@ struct drm_xe_exec_queue_create {
/** @vm_id: VM to use for this exec queue */
__u32 vm_id;
- /** @flags: MBZ */
+#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
+ /** @flags: flags to use for this exec queue */
__u32 flags;
/** @exec_queue_id: Returned exec queue ID */
@@ -1926,4 +1969,4 @@ struct drm_xe_query_eu_stall {
}
#endif
-#endif /* _XE_DRM_H_ */
+#endif /* _UAPI_XE_DRM_H_ */
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index fb8c4aef13..785fc9184c 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -440,6 +440,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
return __xe_bo_map(fd, bo, size, PROT_WRITE);
}
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
+{
+ uint64_t mmo;
+ void *map;
+
+ mmo = xe_bo_mmap_offset(fd, bo);
+ map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
+ igt_assert(map != MAP_FAILED);
+
+ return map;
+}
+
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
{
return __xe_bo_map(fd, bo, size, prot);
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index 9bdf73b2bd..554a33c9cd 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -86,6 +86,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
void *xe_bo_map(int fd, uint32_t bo, size_t size);
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
int __xe_exec(int fd, struct drm_xe_exec *exec);
void xe_exec(int fd, struct drm_xe_exec *exec);
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
new file mode 100644
index 0000000000..14fa59353e
--- /dev/null
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -0,0 +1,1832 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+/**
+ * TEST: Basic tests for execbuf functionality using system allocator
+ * Category: Hardware building block
+ * Mega feature: Shared virtual memory
+ * Sub-category: execbuf
+ * Functionality: fault mode, system allocator
+ * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
+ */
+
+#include <fcntl.h>
+#include <linux/mman.h>
+#include <time.h>
+
+#include "igt.h"
+#include "lib/igt_syncobj.h"
+#include "lib/intel_reg.h"
+#include "xe_drm.h"
+
+#include "xe/xe_ioctl.h"
+#include "xe/xe_query.h"
+#include <string.h>
+
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define QUARTER_SEC (NSEC_PER_SEC / 4)
+#define FIVE_SEC (5LL * NSEC_PER_SEC)
+
+struct batch_data {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+#define WRITE_VALUE(data__, i__) ({ \
+ if (!(data__)->expected_data) \
+ (data__)->expected_data = rand() << 12 | (i__); \
+ (data__)->expected_data; \
+})
+#define READ_VALUE(data__, i__) ((data__)->expected_data)
+
+static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
+ batch[(*idx)++] = sdi_addr;
+ batch[(*idx)++] = sdi_addr >> 32;
+ batch[(*idx)++] = wdata;
+}
+
+static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ __write_dword(batch, sdi_addr, wdata, idx);
+ batch[(*idx)++] = MI_BATCH_BUFFER_END;
+}
+
+static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride,
+ pthread_barrier_t *barrier)
+{
+ int i, n_writes = alloc_size / stride;
+
+ for (i = 0; i < n_writes; ++i) {
+ struct batch_data *data = ptr + i * stride;
+
+ igt_assert_eq(data->data, READ_VALUE(data, i));
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+ }
+}
+
+#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
+
+struct process_data {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ bool go;
+};
+
+static void wait_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ while (!pdata->go)
+ pthread_cond_wait(&pdata->cond, &pdata->mutex);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void init_pdata(struct process_data *pdata, int n_engine)
+{
+ pthread_mutexattr_t mutex_attr;
+ pthread_condattr_t cond_attr;
+ pthread_barrierattr_t barrier_attr;
+
+ pthread_mutexattr_init(&mutex_attr);
+ pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&pdata->mutex, &mutex_attr);
+
+ pthread_condattr_init(&cond_attr);
+ pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
+ pthread_cond_init(&pdata->cond, &cond_attr);
+
+ pthread_barrierattr_init(&barrier_attr);
+ pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
+ pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
+
+ pdata->go = false;
+}
+
+static void signal_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ pdata->go = true;
+ pthread_cond_broadcast(&pdata->cond);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+/* many_alloc flags */
+#define MIX_BO_ALLOC (0x1 << 0)
+#define BENCHMARK (0x1 << 1)
+#define CPU_FAULT_THREADS (0x1 << 2)
+#define CPU_FAULT_PROCESS (0x1 << 3)
+#define CPU_FAULT_SAME_PAGE (0x1 << 4)
+
+static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ if (flags & CPU_FAULT_SAME_PAGE)
+ check_all_pages(ptr, alloc_size, stride, &pdata->barrier);
+ else
+ check_all_pages(ptr, alloc_size, stride, NULL);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+check_all_pages_process(void *ptr, uint64_t alloc_size, uint64_t stride,
+ int n_process, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd, i;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, n_process);
+
+ for (i = 0; i < n_process; ++i) {
+ igt_fork(child, 1)
+ if (flags & CPU_FAULT_SAME_PAGE)
+ process_check(ptr, alloc_size, stride, flags);
+ else
+ process_check(ptr + stride * i, alloc_size,
+ stride * n_process, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct thread_check_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ pthread_barrier_t *barrier;
+ void *ptr;
+ uint64_t alloc_size;
+ uint64_t stride;
+ bool *go;
+};
+
+static void *thread_check(void *data)
+{
+ struct thread_check_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ check_all_pages(t->ptr, t->alloc_size, t->stride, t->barrier);
+
+ return NULL;
+}
+
+/*
+ * Partition checking of results in chunks which causes multiple threads to
+ * fault same VRAM allocation in parallel.
+ */
+static void
+check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
+ int n_threads, unsigned int flags)
+{
+ struct thread_check_data *threads_check_data;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ int i;
+ bool go = false;
+
+ threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
+ igt_assert(threads_check_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+ pthread_barrier_init(&barrier, 0, n_threads);
+
+ for (i = 0; i < n_threads; ++i) {
+ threads_check_data[i].mutex = &mutex;
+ threads_check_data[i].cond = &cond;
+ if (flags & CPU_FAULT_SAME_PAGE) {
+ threads_check_data[i].barrier = &barrier;
+ threads_check_data[i].ptr = ptr;
+ threads_check_data[i].alloc_size = alloc_size;
+ threads_check_data[i].stride = stride;
+ } else {
+ threads_check_data[i].barrier = NULL;
+ threads_check_data[i].ptr = ptr + stride * i;
+ threads_check_data[i].alloc_size = alloc_size;
+ threads_check_data[i].stride = n_threads * stride;
+ }
+ threads_check_data[i].go = &go;
+
+ pthread_create(&threads_check_data[i].thread, 0, thread_check,
+ &threads_check_data[i]);
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_threads; ++i)
+ pthread_join(threads_check_data[i].thread, NULL);
+ free(threads_check_data);
+}
+
+static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
+ uint64_t alloc_size, uint64_t stride,
+ struct timespec *tv, uint64_t *submit)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 0,
+ .exec_queue_id = exec_queue,
+ .syncs = to_user_pointer(&sync),
+ };
+ uint64_t addr = to_user_pointer(ptr);
+ int i, ret, n_writes = alloc_size / stride;
+ u64 *exec_ufence = NULL;
+ int64_t timeout = FIVE_SEC;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ sync[0].addr = to_user_pointer(exec_ufence);
+
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
+ igt_assert(b <= ARRAY_SIZE(data->batch));
+ }
+
+ igt_nsec_elapsed(tv);
+ *submit = igt_nsec_elapsed(tv);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ exec.address = batch_addr;
+ if (i + 1 == n_writes)
+ exec.num_syncs = 1;
+ xe_exec(fd, &exec);
+ }
+
+ ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
+ &timeout);
+ if (ret) {
+ printf("FAIL EXEC_UFENCE_ADDR: 0x%016llx\n", sync[0].addr);
+ printf("FAIL EXEC_UFENCE: EXPEXCTED=0x%016llx, ACTUAL=0x%016lx\n",
+ USER_FENCE_VALUE, exec_ufence[0]);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+
+ printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
+ printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
+ printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
+ (((u64)data->batch[2]) << 32) | data->batch[1]);
+ printf("FAIL DARA: EXPEXCTED=0x%08x, ACTUAL=0x%08x\n",
+ data->expected_data, data->data);
+ }
+ igt_assert_eq(ret, 0);
+ }
+ munmap(exec_ufence, SZ_4K);
+}
+
+static int va_bits;
+
+#define bind_system_allocator(__sync, __num_sync) \
+ __xe_vm_bind_assert(fd, vm, 0, \
+ 0, 0, 0, 0x1ull << va_bits, \
+ DRM_XE_VM_BIND_OP_MAP, \
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, \
+ (__sync), (__num_sync), 0, 0)
+
+#define unbind_system_allocator() \
+ __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits, \
+ DRM_XE_VM_BIND_OP_UNMAP, 0, \
+ NULL, 0, 0, 0, 0)
+
+#define odd(__i) (__i & 1)
+
+struct aligned_alloc_type {
+ void *__ptr;
+ void *ptr;
+ size_t __size;
+ size_t size;
+};
+
+static struct aligned_alloc_type __aligned_alloc(size_t alignment, size_t size)
+{
+ struct aligned_alloc_type aligned_alloc_type;
+
+ aligned_alloc_type.__ptr = mmap(NULL, alignment + size, PROT_NONE, MAP_PRIVATE |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(aligned_alloc_type.__ptr != MAP_FAILED);
+
+ aligned_alloc_type.ptr = (void *)ALIGN((uint64_t)aligned_alloc_type.__ptr, alignment);
+ aligned_alloc_type.size = size;
+ aligned_alloc_type.__size = size + alignment;
+
+ return aligned_alloc_type;
+}
+
+static void __aligned_free(struct aligned_alloc_type *aligned_alloc_type)
+{
+ munmap(aligned_alloc_type->__ptr, aligned_alloc_type->__size);
+}
+
+static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_type)
+{
+ size_t begin_size = (size_t)(aligned_alloc_type->ptr - aligned_alloc_type->__ptr);
+
+ if (begin_size)
+ munmap(aligned_alloc_type->__ptr, begin_size);
+ if (aligned_alloc_type->__size - aligned_alloc_type->size - begin_size)
+ munmap(aligned_alloc_type->ptr + aligned_alloc_type->size,
+ aligned_alloc_type->__size - aligned_alloc_type->size - begin_size);
+}
+
+/**
+ * SUBTEST: unaligned-alloc
+ * Description: allocate unaligned sizes of memory
+ * Test category: functionality test
+ *
+ * SUBTEST: fault-benchmark
+ * Description: Benchmark how long GPU / CPU take
+ * Test category: performance test
+ *
+ * SUBTEST: fault-threads-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-threads-same-page-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple threads, hammer same page
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-process-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple process
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-process-same-page-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple process, hammer same page
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: evict-malloc
+ * Description: trigger eviction of VRAM allocated via malloc
+ * Test category: functionality test
+ *
+ * SUBTEST: evict-malloc-mix-bo
+ * Description: trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: functionality test
+ *
+ * SUBTEST: processes-evict-malloc
+ * Description: multi-process trigger eviction of VRAM allocated via malloc
+ * Test category: stress test
+ *
+ * SUBTEST: processes-evict-malloc-mix-bo
+ * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: stress test
+ */
+
+static void
+many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
+ uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
+ pthread_barrier_t *barrier, unsigned int flags)
+{
+ uint32_t vm, exec_queue;
+ int num_allocs = flags & BENCHMARK ? 1 :
+ (9 * (total_alloc / alloc_size)) / 8;
+ struct aligned_alloc_type *allocs;
+ uint32_t *bos = NULL;
+ struct timespec tv = {};
+ uint64_t submit, read, elapsed;
+ int i;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ bind_system_allocator(NULL, 0);
+
+ allocs = malloc(sizeof(*allocs) * num_allocs);
+ igt_assert(allocs);
+ memset(allocs, 0, sizeof(*allocs) * num_allocs);
+
+ if (flags & MIX_BO_ALLOC) {
+ bos = malloc(sizeof(*bos) * num_allocs);
+ igt_assert(bos);
+ memset(bos, 0, sizeof(*bos) * num_allocs);
+ }
+
+ for (i = 0; i < num_allocs; ++i) {
+ struct aligned_alloc_type alloc;
+
+ if (flags & MIX_BO_ALLOC && odd(i)) {
+ uint32_t bo_flags =
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+
+ alloc = __aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc.ptr);
+
+ bos[i] = xe_bo_create(fd, vm, alloc_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ alloc.ptr = xe_bo_map_fixed(fd, bos[i], alloc_size,
+ to_user_pointer(alloc.ptr));
+ xe_vm_bind_async(fd, vm, 0, bos[i], 0,
+ to_user_pointer(alloc.ptr),
+ alloc_size, 0, 0);
+ } else {
+ alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc.ptr);
+ }
+ allocs[i] = alloc;
+
+ touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
+ &tv, &submit);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ for (i = 0; i < num_allocs; ++i) {
+ if (flags & BENCHMARK)
+ read = igt_nsec_elapsed(&tv);
+#define NUM_CHECK_THREADS 8
+ if (flags & CPU_FAULT_PROCESS)
+ check_all_pages_process(allocs[i].ptr, alloc_size, stride,
+ NUM_CHECK_THREADS, flags);
+ else if (flags & CPU_FAULT_THREADS)
+ check_all_pages_threads(allocs[i].ptr, alloc_size, stride,
+ NUM_CHECK_THREADS, flags);
+ else
+ check_all_pages(allocs[i].ptr, alloc_size, stride, NULL);
+ if (flags & BENCHMARK) {
+ elapsed = igt_nsec_elapsed(&tv);
+ printf("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
+ 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
+ 1e-3 * (elapsed - submit),
+ 1e-3 * (elapsed - read));
+ }
+ if (bos && bos[i]) {
+ __aligned_free(allocs + i);
+ gem_close(fd, bos[i]);
+ } else {
+ free(allocs[i].ptr);
+ }
+ }
+ if (bos)
+ free(bos);
+ free(allocs);
+ xe_exec_queue_destroy(fd, exec_queue);
+ xe_vm_destroy(fd, vm);
+}
+
+static void process_evict(struct drm_xe_engine_class_instance *hwe,
+ uint64_t total_alloc, uint64_t alloc_size,
+ uint64_t stride, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
+ flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
+ int map_fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_assert(hwe->gt_id < 2);
+ n_engine_gt[hwe->gt_id]++;
+ n_engine++;
+ }
+
+ init_pdata(pdata, n_engine);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process_evict(hwe,
+ xe_visible_vram_size(fd, hwe->gt_id) /
+ n_engine_gt[hwe->gt_id], alloc_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+#define CPU_FAULT (0x1 << 0)
+#define REMAP (0x1 << 1)
+#define MIDDLE (0x1 << 2)
+
+/**
+ * SUBTEST: partial-munmap-cpu-fault
+ * Description: munmap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-munmap-no-cpu-fault
+ * Description: munmap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-cpu-fault
+ * Description: remap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-no-cpu-fault
+ * Description: remap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-cpu-fault
+ * Description: munmap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-no-cpu-fault
+ * Description: munmap middle with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-cpu-fault
+ * Description: remap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-no-cpu-fault
+ * Description: remap middle with no cpu access in between
+ * Test category: functionality test
+ */
+
+static void
+partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+ } *data;
+ size_t bo_size = SZ_2M, unmap_offset = 0;
+ uint32_t vm, exec_queue;
+ u64 *exec_ufence = NULL;
+ int i;
+ void *old, *new = NULL;
+ struct aligned_alloc_type alloc;
+
+ if (flags & MIDDLE)
+ unmap_offset = bo_size / 4;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+
+ alloc = __aligned_alloc(bo_size, bo_size);
+ igt_assert(alloc.ptr);
+
+ data = mmap(alloc.ptr, bo_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ memset(data, 0, bo_size);
+ old = data;
+
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ data[0].vm_sync = 0;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ if (!i)
+ data = old + unmap_offset + bo_size / 2;
+ }
+
+ data = old;
+ exec.exec_queue_id = exec_queue;
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ sync[0].addr = new ? to_user_pointer(new) :
+ to_user_pointer(exec_ufence);
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+
+ xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
+ exec_queue, FIVE_SEC);
+ if (i || (flags & CPU_FAULT))
+ igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
+ exec_ufence[0] = 0;
+
+ if (!i) {
+ data = old + unmap_offset + bo_size / 2;
+ munmap(old + unmap_offset, bo_size / 2);
+ if (flags & REMAP) {
+ new = mmap(old + unmap_offset, bo_size / 2,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
+ MAP_LOCKED, -1, 0);
+ igt_assert(new != MAP_FAILED);
+ }
+ }
+ }
+
+ xe_exec_queue_destroy(fd, exec_queue);
+ munmap(exec_ufence, SZ_4K);
+ __aligned_free(&alloc);
+ if (new)
+ munmap(new, bo_size / 2);
+ xe_vm_destroy(fd, vm);
+}
+
+#define MAX_N_EXEC_QUEUES 16
+
+#define MMAP (0x1 << 0)
+#define NEW (0x1 << 1)
+#define BO_UNMAP (0x1 << 2)
+#define FREE (0x1 << 3)
+#define BUSY (0x1 << 4)
+#define BO_MAP (0x1 << 5)
+#define RACE (0x1 << 6)
+#define SKIP_MEMSET (0x1 << 7)
+#define FAULT (0x1 << 8)
+#define FILE_BACKED (0x1 << 9)
+#define LOCK (0x1 << 10)
+#define MMAP_SHARED (0x1 << 11)
+#define HUGE_PAGE (0x1 << 12)
+#define SHARED_ALLOC (0x1 << 13)
+#define FORK_READ (0x1 << 14)
+#define FORK_READ_AFTER (0x1 << 15)
+#define MREMAP (0x1 << 16)
+#define DONTUNMAP (0x1 << 17)
+#define READ_ONLY_REMAP (0x1 << 18)
+#define SYNC_EXEC (0x1 << 19)
+#define EVERY_OTHER_CHECK (0x1 << 20)
+#define MULTI_FAULT (0x1 << 21)
+
+#define N_MULTI_FAULT 4
+
+/**
+ * SUBTEST: once-%s
+ * Description: Run %arg[1] system allocator test only once
+ * Test category: functionality test
+ *
+ * SUBTEST: once-large-%s
+ * Description: Run %arg[1] system allocator test only once with large allocation
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-%s
+ * Description: Run %arg[1] system allocator test twice
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-large-%s
+ * Description: Run %arg[1] system allocator test twice with large allocation
+ * Test category: functionality test
+ *
+ * SUBTEST: many-%s
+ * Description: Run %arg[1] system allocator test many times
+ * Test category: stress test
+ *
+ * SUBTEST: many-stride-%s
+ * Description: Run %arg[1] system allocator test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: many-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-%s
+ * Description: Run %arg[1] system allocator test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-many-%s
+ * Description: Run %arg[1] system allocator threaded test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-%s
+ * Description: Run %arg[1] system allocator threaded test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-shared-vm-many-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-%s
+ * Description: Run %arg[1] system allocator multi-process test many times
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-stride-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
+ *
+ * SUBTEST: fault
+ * Description: use a bad system allocator address resulting in a fault
+ * Test category: bad input
+ *
+ * arg[1]:
+ *
+ * @malloc: malloc single buffer for all execs, issue a command which will trigger multiple faults
+ * @malloc-multi-fault: malloc single buffer for all execs
+ * @malloc-fork-read: malloc single buffer for all execs, fork a process to read test output
+ * @malloc-fork-read-after: malloc single buffer for all execs, fork a process to read test output, check again after fork returns in parent
+ * @malloc-mlock: malloc and mlock single buffer for all execs
+ * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
+ * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
+ * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
+ * @mmap: mmap single buffer for all execs
+ * @mmap-remap: mmap and mremap a buffer for all execs
+ * @mmap-remap-dontunmap: mmap and mremap a buffer with dontunmap flag for all execs
+ * @mmap-remap-ro: mmap and mremap a read-only buffer for all execs
+ * @mmap-remap-ro-dontunmap: mmap and mremap a read-only buffer with dontunmap flag for all execs
+ * @mmap-remap-eocheck: mmap and mremap a buffer for all execs, check data every other loop iteration
+ * @mmap-remap-dontunmap-eocheck: mmap and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-remap-ro-eocheck: mmap and mremap a read-only buffer for all execs, check data every other loop iteration
+ * @mmap-remap-ro-dontunmap-eocheck: mmap and mremap a read-only buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-huge: mmap huge page single buffer for all execs
+ * @mmap-shared: mmap shared single buffer for all execs
+ * @mmap-shared-remap: mmap shared and mremap a buffer for all execs
+ * @mmap-shared-remap-dontunmap: mmap shared and mremap a buffer with dontunmap flag for all execs
+ * @mmap-shared-remap-eocheck: mmap shared and mremap a buffer for all execs, check data every other loop iteration
+ * @mmap-shared-remap-dontunmap-eocheck: mmap shared and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-mlock: mmap and mlock single buffer for all execs
+ * @mmap-file: mmap single buffer, with file backing, for all execs
+ * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
+ * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
+ * @free: malloc and free buffer for each exec
+ * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
+ * @new: malloc a new buffer for each exec
+ * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
+ * @new-bo-map: malloc a new buffer or map BO for each exec
+ * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
+ * @mmap-free: mmap and free buffer for each exec
+ * @mmap-free-huge: mmap huge page and free buffer for each exec
+ * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
+ * @mmap-new: mmap a new buffer for each exec
+ * @mmap-new-huge: mmap huge page a new buffer for each exec
+ * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
+ * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
+ * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
+ * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
+ * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
+ * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
+ * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
+ * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
+ * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
+ * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
+ * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
+ * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
+ * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
+ * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
+ * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
+ * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
+ * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
+ * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc-sync
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses, syncing on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc-race
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
+ * Test category: stress test
+ */
+
+struct test_exec_data {
+ uint32_t batch[32];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+static void
+test_exec(int fd, struct drm_xe_engine_class_instance *eci,
+ int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
+ unsigned int flags)
+{
+ uint64_t addr;
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ uint32_t exec_queues[MAX_N_EXEC_QUEUES];
+ struct test_exec_data *data, *next_data = NULL;
+ uint32_t bo_flags;
+ uint32_t bo = 0;
+ void **pending_free;
+ u64 *exec_ufence = NULL;
+ int i, j, b, file_fd = -1, prev_idx;
+ bool free_vm = false;
+ size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
+ size_t orig_size = bo_size;
+ struct aligned_alloc_type aligned_alloc_type;
+
+ if (flags & MULTI_FAULT) {
+ if (!bo_size)
+ return;
+
+ bo_size *= N_MULTI_FAULT;
+ }
+
+ if (flags & SHARED_ALLOC)
+ return;
+
+ if (flags & EVERY_OTHER_CHECK && odd(n_execs))
+ return;
+
+ if (flags & EVERY_OTHER_CHECK)
+ igt_assert(flags & MREMAP);
+
+ igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
+
+ if (flags & NEW && !(flags & FREE)) {
+ pending_free = malloc(sizeof(*pending_free) * n_execs);
+ igt_assert(pending_free);
+ memset(pending_free, 0, sizeof(*pending_free) * n_execs);
+ }
+
+ if (!vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ free_vm = true;
+ }
+ if (!bo_size) {
+ if (!stride) {
+ bo_size = sizeof(*data) * n_execs;
+ bo_size = xe_bb_size(fd, bo_size);
+ } else {
+ bo_size = stride * n_execs * sizeof(*data);
+ bo_size = xe_bb_size(fd, bo_size);
+ }
+ }
+ if (flags & HUGE_PAGE) {
+ aligned_size = ALIGN(aligned_size, SZ_2M);
+ bo_size = ALIGN(bo_size, SZ_2M);
+ }
+
+ if (alloc) {
+ data = alloc;
+ } else {
+ if (flags & MMAP) {
+ int mmap_flags = MAP_FIXED;
+
+ aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
+ data = aligned_alloc_type.ptr;
+ igt_assert(data);
+ __aligned_partial_free(&aligned_alloc_type);
+
+ if (flags & MMAP_SHARED)
+ mmap_flags |= MAP_SHARED;
+ else
+ mmap_flags |= MAP_PRIVATE;
+
+ if (flags & HUGE_PAGE)
+ mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
+
+ if (flags & FILE_BACKED) {
+ char name[1024];
+
+ igt_assert(!(flags & NEW));
+
+ sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
+ getpid());
+ file_fd = open(name, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(file_fd, 0, bo_size);
+ } else {
+ mmap_flags |= MAP_ANONYMOUS;
+ }
+
+ data = mmap(data, bo_size, PROT_READ |
+ PROT_WRITE, mmap_flags, file_fd, 0);
+ igt_assert(data != MAP_FAILED);
+ } else {
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ }
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ if (flags & LOCK) {
+ igt_assert(!(flags & NEW));
+ mlock(data, bo_size);
+ }
+ }
+
+ for (i = 0; i < n_exec_queues; i++)
+ exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (free_vm) {
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ }
+ data[0].vm_sync = 0;
+
+ addr = to_user_pointer(data);
+
+ if (flags & BO_UNMAP) {
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id), bo_flags);
+ xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
+
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
+ 1, 0, 0);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
+ FIVE_SEC);
+ data[0].vm_sync = 0;
+ gem_close(fd, bo);
+ bo = 0;
+ }
+
+ if (!(flags & RACE)) {
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ }
+
+ for (i = 0; i < n_execs; i++) {
+ int idx = !stride ? i : i * stride, next_idx = !stride
+ ? (i + 1) : (i + 1) * stride;
+ uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int e = i % n_exec_queues, err;
+ bool fault_inject = (FAULT & flags) && i == n_execs / 2;
+ bool fault_injected = (FAULT & flags) && i > n_execs;
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ if (flags & MULTI_FAULT) {
+ b = 0;
+ for (j = 0; j < N_MULTI_FAULT - 1; ++j)
+ __write_dword(data[idx].batch,
+ sdi_addr + j * orig_size,
+ WRITE_VALUE(&data[idx], idx), &b);
+ write_dword(data[idx].batch, sdi_addr + j * orig_size,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+ } else if (!(flags & EVERY_OTHER_CHECK)) {
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+ } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+
+ aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
+ next_data = aligned_alloc_type.ptr;
+ igt_assert(next_data);
+ __aligned_partial_free(&aligned_alloc_type);
+
+ b = 0;
+ write_dword(data[next_idx].batch,
+ to_user_pointer(next_data) +
+ (char *)&data[next_idx].data - (char *)data,
+ WRITE_VALUE(&data[next_idx], next_idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
+ }
+
+ if (!exec_ufence)
+ data[idx].exec_sync = 0;
+
+ sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
+ addr + (char *)&data[idx].exec_sync - (char *)data;
+
+ exec.exec_queue_id = exec_queues[e];
+ if (fault_inject)
+ exec.address = batch_addr * 2;
+ else
+ exec.address = batch_addr;
+
+ if (fault_injected) {
+ err = __xe_exec(fd, &exec);
+ igt_assert(err == -ENOENT);
+ } else {
+ xe_exec(fd, &exec);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ if (fault_inject || fault_injected) {
+ int64_t timeout = QUARTER_SEC;
+
+ err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync,
+ USER_FENCE_VALUE,
+ exec_queues[e], &timeout);
+ igt_assert(err == -ETIME || err == -EIO);
+ } else {
+ xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync, USER_FENCE_VALUE,
+ exec_queues[e], FIVE_SEC);
+ if (flags & LOCK && !i)
+ munlock(data, bo_size);
+
+ if (flags & MREMAP) {
+ void *old = data;
+ int remap_flags = MREMAP_MAYMOVE | MREMAP_FIXED;
+
+ if (flags & DONTUNMAP)
+ remap_flags |= MREMAP_DONTUNMAP;
+
+ if (flags & READ_ONLY_REMAP)
+ igt_assert(!mprotect(old, bo_size,
+ PROT_READ));
+
+ if (!next_data) {
+ aligned_alloc_type = __aligned_alloc(aligned_size,
+ bo_size);
+ data = aligned_alloc_type.ptr;
+ __aligned_partial_free(&aligned_alloc_type);
+ } else {
+ data = next_data;
+ }
+ next_data = NULL;
+ igt_assert(data);
+
+ data = mremap(old, bo_size, bo_size,
+ remap_flags, data);
+ igt_assert(data != MAP_FAILED);
+
+ if (flags & READ_ONLY_REMAP)
+ igt_assert(!mprotect(data, bo_size,
+ PROT_READ |
+ PROT_WRITE));
+
+ addr = to_user_pointer(data);
+ if (flags & DONTUNMAP)
+ munmap(old, bo_size);
+ }
+
+ if (!(flags & EVERY_OTHER_CHECK) || odd(i)) {
+ if (flags & FORK_READ) {
+ igt_fork(child, 1)
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ if (!(flags & FORK_READ_AFTER))
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ igt_waitchildren();
+ if (flags & FORK_READ_AFTER)
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ } else {
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx], idx));
+
+ if (flags & MULTI_FAULT) {
+ for (j = 1; j < N_MULTI_FAULT; ++j) {
+ struct test_exec_data *__data =
+ ((void *)data) + j * orig_size;
+
+ igt_assert_eq(__data[idx].data,
+ READ_VALUE(&data[idx], idx));
+ }
+ }
+ }
+ if (flags & EVERY_OTHER_CHECK)
+ igt_assert_eq(data[prev_idx].data,
+ READ_VALUE(&data[prev_idx], idx));
+ }
+ }
+
+ if (exec_ufence)
+ exec_ufence[0] = 0;
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & NEW) {
+ if (flags & MMAP) {
+ if (flags & FREE)
+ munmap(data, bo_size);
+ else
+ pending_free[i] = data;
+ data = mmap(NULL, bo_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ } else if (flags & BO_MAP && (i % 2)) {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+
+ aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
+ data = aligned_alloc_type.ptr;
+ igt_assert(data);
+ __aligned_partial_free(&aligned_alloc_type);
+
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ data = xe_bo_map_fixed(fd, bo, bo_size,
+ to_user_pointer(data));
+
+ xe_vm_bind_async(fd, vm, 0, bo, 0,
+ to_user_pointer(data),
+ bo_size, 0, 0);
+ } else {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ bo = 0;
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ }
+ addr = to_user_pointer(data);
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ }
+
+ prev_idx = idx;
+ }
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & BUSY)
+ igt_assert_eq(unbind_system_allocator(), -EBUSY);
+
+ for (i = 0; i < n_exec_queues; i++)
+ xe_exec_queue_destroy(fd, exec_queues[i]);
+
+ if (exec_ufence)
+ munmap(exec_ufence, SZ_4K);
+
+ if (flags & LOCK)
+ munlock(data, bo_size);
+
+ if (file_fd != -1)
+ close(file_fd);
+
+ if (flags & NEW && !(flags & FREE)) {
+ for (i = 0; i < n_execs; i++) {
+ if (!pending_free[i])
+ continue;
+
+ if (flags & MMAP)
+ munmap(pending_free[i], bo_size);
+ else
+ free(pending_free[i]);
+ }
+ free(pending_free);
+ } else {
+ if (flags & MMAP)
+ munmap(data, bo_size);
+ else if (!alloc)
+ free(data);
+ }
+ if (free_vm)
+ xe_vm_destroy(fd, vm);
+}
+
+struct thread_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ pthread_barrier_t *barrier;
+ int fd;
+ struct drm_xe_engine_class_instance *eci;
+ int n_exec_queues;
+ int n_execs;
+ size_t bo_size;
+ size_t stride;
+ uint32_t vm;
+ unsigned int flags;
+ void *alloc;
+ bool *go;
+};
+
+static void *thread(void *data)
+{
+ struct thread_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
+ t->bo_size, t->stride, t->vm, t->alloc, t->barrier,
+ t->flags);
+
+ return NULL;
+}
+
+static void
+threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags, bool shared_vm)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct thread_data *threads_data;
+ int n_engines = 0, i = 0;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ uint32_t vm = 0;
+ bool go = false;
+ void *alloc = NULL;
+
+ if ((FILE_BACKED | FORK_READ) & flags)
+ return;
+
+ xe_for_each_engine(fd, hwe)
+ ++n_engines;
+
+ if (shared_vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ bind_system_allocator(NULL, 0);
+ }
+
+ if (flags & SHARED_ALLOC) {
+ uint64_t alloc_size;
+
+ igt_assert(stride);
+
+ alloc_size = sizeof(struct test_exec_data) * stride *
+ n_execs * n_engines;
+ alloc_size = xe_bb_size(fd, alloc_size);
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ memset(alloc, 0, alloc_size);
+ flags &= ~SHARED_ALLOC;
+ }
+
+ threads_data = calloc(n_engines, sizeof(*threads_data));
+ igt_assert(threads_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+ pthread_barrier_init(&barrier, 0, n_engines);
+
+ xe_for_each_engine(fd, hwe) {
+ threads_data[i].mutex = &mutex;
+ threads_data[i].cond = &cond;
+ threads_data[i].barrier = (flags & SYNC_EXEC) ? &barrier : NULL;
+ threads_data[i].fd = fd;
+ threads_data[i].eci = hwe;
+ threads_data[i].n_exec_queues = n_exec_queues;
+ threads_data[i].n_execs = n_execs;
+ threads_data[i].bo_size = bo_size;
+ threads_data[i].stride = stride;
+ threads_data[i].vm = vm;
+ threads_data[i].flags = flags;
+ threads_data[i].alloc = alloc ? alloc + i *
+ sizeof(struct test_exec_data) : NULL;
+ threads_data[i].go = &go;
+ pthread_create(&threads_data[i].thread, 0, thread,
+ &threads_data[i]);
+ ++i;
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_engines; ++i)
+ pthread_join(threads_data[i].thread, NULL);
+
+ if (shared_vm) {
+ int ret;
+
+ if (flags & MMAP) {
+ int tries = 300;
+
+ while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
+ sleep(.01);
+ --tries;
+ }
+ igt_assert_eq(ret, 0);
+ }
+ xe_vm_destroy(fd, vm);
+ if (alloc)
+ free(alloc);
+ }
+ free(threads_data);
+}
+
+static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
+ int n_execs, size_t bo_size, size_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(SYNC_FILE, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ test_exec(fd, hwe, n_exec_queues, n_execs,
+ bo_size, stride, 0, NULL, NULL, flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int map_fd;
+
+ if (flags & FORK_READ)
+ return;
+
+ map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process(hwe, n_exec_queues, n_execs, bo_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct section {
+ const char *name;
+ unsigned int flags;
+};
+
+igt_main
+{
+ struct drm_xe_engine_class_instance *hwe;
+ const struct section sections[] = {
+ { "malloc", 0 },
+ { "malloc-multi-fault", MULTI_FAULT },
+ { "malloc-fork-read", FORK_READ },
+ { "malloc-fork-read-after", FORK_READ | FORK_READ_AFTER },
+ { "malloc-mlock", LOCK },
+ { "malloc-race", RACE },
+ { "malloc-busy", BUSY },
+ { "malloc-bo-unmap", BO_UNMAP },
+ { "mmap", MMAP },
+ { "mmap-remap", MMAP | MREMAP },
+ { "mmap-remap-dontunmap", MMAP | MREMAP | DONTUNMAP },
+ { "mmap-remap-ro", MMAP | MREMAP | READ_ONLY_REMAP },
+ { "mmap-remap-ro-dontunmap", MMAP | MREMAP | DONTUNMAP |
+ READ_ONLY_REMAP },
+ { "mmap-remap-eocheck", MMAP | MREMAP | EVERY_OTHER_CHECK },
+ { "mmap-remap-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
+ EVERY_OTHER_CHECK },
+ { "mmap-remap-ro-eocheck", MMAP | MREMAP | READ_ONLY_REMAP |
+ EVERY_OTHER_CHECK },
+ { "mmap-remap-ro-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
+ READ_ONLY_REMAP | EVERY_OTHER_CHECK },
+ { "mmap-huge", MMAP | HUGE_PAGE },
+ { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
+ { "mmap-shared-remap", MMAP | LOCK | MMAP_SHARED | MREMAP },
+ { "mmap-shared-remap-dontunmap", MMAP | LOCK | MMAP_SHARED |
+ MREMAP | DONTUNMAP },
+ { "mmap-shared-remap-eocheck", MMAP | LOCK | MMAP_SHARED |
+ MREMAP | EVERY_OTHER_CHECK },
+ { "mmap-shared-remap-dontunmap-eocheck", MMAP | LOCK |
+ MMAP_SHARED | MREMAP | DONTUNMAP | EVERY_OTHER_CHECK },
+ { "mmap-mlock", MMAP | LOCK },
+ { "mmap-file", MMAP | FILE_BACKED },
+ { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
+ { "mmap-race", MMAP | RACE },
+ { "free", NEW | FREE },
+ { "free-race", NEW | FREE | RACE },
+ { "new", NEW },
+ { "new-race", NEW | RACE },
+ { "new-bo-map", NEW | BO_MAP },
+ { "new-busy", NEW | BUSY },
+ { "mmap-free", MMAP | NEW | FREE },
+ { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race", MMAP | NEW | FREE | RACE },
+ { "mmap-new", MMAP | NEW },
+ { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race", MMAP | NEW | RACE },
+ { "malloc-nomemset", SKIP_MEMSET },
+ { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
+ { "malloc-race-nomemset", SKIP_MEMSET | RACE },
+ { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
+ { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
+ { "mmap-nomemset", SKIP_MEMSET | MMAP },
+ { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
+ { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
+ { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
+ { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
+ { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
+ { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
+ { "free-nomemset", SKIP_MEMSET | NEW | FREE },
+ { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
+ { "new-nomemset", SKIP_MEMSET | NEW },
+ { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
+ { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
+ { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
+ { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
+ { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
+ { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
+ { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
+ { NULL },
+ };
+ const struct section psections[] = {
+ { "munmap-cpu-fault", CPU_FAULT },
+ { "munmap-no-cpu-fault", 0 },
+ { "remap-cpu-fault", CPU_FAULT | REMAP },
+ { "remap-no-cpu-fault", REMAP },
+ { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
+ { "middle-munmap-no-cpu-fault", MIDDLE },
+ { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
+ { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
+ { NULL },
+ };
+ const struct section esections[] = {
+ { "malloc", 0 },
+ { "malloc-mix-bo", MIX_BO_ALLOC },
+ { NULL },
+ };
+ int fd;
+
+ igt_fixture {
+ struct xe_device *xe;
+
+ fd = drm_open_driver(DRIVER_XE);
+ igt_require(!xe_supports_faults(fd));
+
+ xe = xe_device_get(fd);
+ va_bits = xe->va_bits;
+ }
+
+ for (const struct section *s = sections; s->name; s++) {
+ igt_subtest_f("once-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("once-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("twice-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("twice-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-stride-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-large-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("threads-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, false);
+
+ igt_subtest_f("threads-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-shared-vm-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("process-many-%s", s->name)
+ processes(fd, 1, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-stride-%s", s->name)
+ processes(fd, 1, 128, 0, 256, s->flags);
+
+ igt_subtest_f("process-many-execqueues-%s", s->name)
+ processes(fd, 16, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-large-%s", s->name)
+ processes(fd, 1, 128, SZ_2M, 0, s->flags);
+
+ igt_subtest_f("process-many-large-execqueues-%s", s->name)
+ processes(fd, 16, 128, SZ_2M, 0, s->flags);
+ }
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
+ threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC, false);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc-sync")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC | SYNC_EXEC, false);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc-race")
+ threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, false);
+
+ igt_subtest_f("fault")
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, NULL,
+ FAULT);
+
+ for (const struct section *s = psections; s->name; s++) {
+ igt_subtest_f("partial-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ partial(fd, hwe, s->flags);
+ }
+
+ igt_subtest_f("unaligned-alloc")
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
+ SZ_1M + SZ_512K, SZ_4K, NULL, 0);
+ break;
+ }
+
+ igt_subtest_f("fault-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK);
+
+ igt_subtest_f("fault-threads-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_THREADS);
+
+ igt_subtest_f("fault-threads-same-page-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_THREADS |
+ CPU_FAULT_SAME_PAGE);
+
+ igt_subtest_f("fault-process-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_PROCESS);
+
+ igt_subtest_f("fault-process-same-page-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_PROCESS |
+ CPU_FAULT_SAME_PAGE);
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("evict-%s", s->name)
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe,
+ xe_visible_vram_size(fd, hwe->gt_id),
+ SZ_8M, SZ_1M, NULL, s->flags);
+ break;
+ }
+ }
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("processes-evict-%s", s->name)
+ processes_evict(fd, SZ_8M, SZ_1M, s->flags);
+ }
+
+ igt_fixture {
+ xe_device_put(fd);
+ drm_close_driver(fd);
+ }
+}
diff --git a/tests/meson.build b/tests/meson.build
index 9224145cf4..8c7b756716 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -295,6 +295,7 @@ intel_xe_progs = [
'xe_exec_reset',
'xe_exec_sip',
'xe_exec_store',
+ 'xe_exec_system_allocator',
'xe_exec_threads',
'xe_exercise_blt',
'xe_fault_injection',
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* Re: [PATCH] tests/xe: Add system_allocator test
2025-04-16 2:20 [PATCH] tests/xe: Add system_allocator test Matthew Brost
@ 2025-04-16 17:09 ` Thomas Hellström
2025-04-16 18:36 ` Matthew Brost
2025-04-18 15:47 ` Francois Dugast
1 sibling, 1 reply; 16+ messages in thread
From: Thomas Hellström @ 2025-04-16 17:09 UTC (permalink / raw)
To: Matthew Brost, igt-dev
Hi, Matt,
On Tue, 2025-04-15 at 19:20 -0700, Matthew Brost wrote:
> Test various uses of system allocator in single thread, multiple
> threads, and multiple processes.
>
> Features tested:
> - Malloc with various size
> - Mmap with various sizes and flags including file backed mappings
> - Mixing BO allocations with system allocator
> - Various page sizes
> - Dynamically freeing / unmapping memory
> - Sharing VM across threads
> - Faults racing on different hardware engines / GTs / Tiles
> - GPU faults and CPU faults racing
> - CPU faults on multiple threads racing
> - CPU faults on multiple process racing
> - GPU faults of memory not faulted in by CPU
> - Partial unmap of allocations
> - Attempting to unmap system allocations when GPU has mappings
> - Eviction of both system allocations and BOs
> - Forking child processes and reading data from VRAM
> - mremap data in VRAM
> - Protection changes
> - Multiple faults per execbuf
>
> Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
>
> v2:
> - Rebase
> - Fix memory allocation to not interfear with malloc (Thomas)
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Did you have a chance to look at the memory (or rather virtual
adddress) leaks on the tests as well?
With the old version it would exceed 1GB after some time, which I
believe caused some slowdown.
Unfortunately the igt library funcs also leaks memory so running the
test under valgrind was a bit noisy, but helped to some extent.
/Thomas
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH] tests/xe: Add system_allocator test
2025-04-16 17:09 ` Thomas Hellström
@ 2025-04-16 18:36 ` Matthew Brost
0 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2025-04-16 18:36 UTC (permalink / raw)
To: Thomas Hellström; +Cc: igt-dev
On Wed, Apr 16, 2025 at 07:09:40PM +0200, Thomas Hellström wrote:
> Hi, Matt,
>
> On Tue, 2025-04-15 at 19:20 -0700, Matthew Brost wrote:
> > Test various uses of system allocator in single thread, multiple
> > threads, and multiple processes.
> >
> > Features tested:
> > - Malloc with various size
> > - Mmap with various sizes and flags including file backed mappings
> > - Mixing BO allocations with system allocator
> > - Various page sizes
> > - Dynamically freeing / unmapping memory
> > - Sharing VM across threads
> > - Faults racing on different hardware engines / GTs / Tiles
> > - GPU faults and CPU faults racing
> > - CPU faults on multiple threads racing
> > - CPU faults on multiple process racing
> > - GPU faults of memory not faulted in by CPU
> > - Partial unmap of allocations
> > - Attempting to unmap system allocations when GPU has mappings
> > - Eviction of both system allocations and BOs
> > - Forking child processes and reading data from VRAM
> > - mremap data in VRAM
> > - Protection changes
> > - Multiple faults per execbuf
> >
> > Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
> >
> > v2:
> > - Rebase
> > - Fix memory allocation to not interfear with malloc (Thomas)
> >
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
>
> Did you have a chance to look at the memory (or rather virtual
> adddress) leaks on the tests as well?
> With the old version it would exceed 1GB after some time, which I
> believe caused some slowdown.
>
I have not looked at that - rather just fixed mmaping mallocs which
would eventually cause the test to fail with -ENOSPC (e.g. if just ran
xe_exec_system_alloc eventually everything started to fail whereas now I
can run xe_exec_system_alloc to completion).
I wonder if the way I changed memory allocation fixed the leaks, let me
try running under valgrind and see if anything pops.
Matt
> Unfortunately the igt library funcs also leaks memory so running the
> test under valgrind was a bit noisy, but helped to some extent.
>
> /Thomas
>
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH] tests/xe: Add system_allocator test
2025-04-16 2:20 [PATCH] tests/xe: Add system_allocator test Matthew Brost
2025-04-16 17:09 ` Thomas Hellström
@ 2025-04-18 15:47 ` Francois Dugast
2025-04-18 19:44 ` Matthew Brost
1 sibling, 1 reply; 16+ messages in thread
From: Francois Dugast @ 2025-04-18 15:47 UTC (permalink / raw)
To: Matthew Brost; +Cc: igt-dev
Hi Matt,
I am still going through your patch but sharing some comments already.
The sequence is neither complex nor too different from existing tests
but as it is a lot of multi-thread / multi-process code, I am trying
to come up with a suggestion to break it down. Might not be possible
though.
On Tue, Apr 15, 2025 at 07:20:40PM -0700, Matthew Brost wrote:
> Test various uses of system allocator in single thread, multiple
> threads, and multiple processes.
>
> Features tested:
> - Malloc with various size
> - Mmap with various sizes and flags including file backed mappings
> - Mixing BO allocations with system allocator
> - Various page sizes
> - Dynamically freeing / unmapping memory
> - Sharing VM across threads
> - Faults racing on different hardware engines / GTs / Tiles
> - GPU faults and CPU faults racing
> - CPU faults on multiple threads racing
> - CPU faults on multiple process racing
> - GPU faults of memory not faulted in by CPU
> - Partial unmap of allocations
> - Attempting to unmap system allocations when GPU has mappings
> - Eviction of both system allocations and BOs
> - Forking child processes and reading data from VRAM
> - mremap data in VRAM
> - Protection changes
> - Multiple faults per execbuf
>
> Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
>
> v2:
> - Rebase
> - Fix memory allocation to not interfear with malloc (Thomas)
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> include/drm-uapi/xe_drm.h | 57 +-
> lib/xe/xe_ioctl.c | 12 +
> lib/xe/xe_ioctl.h | 1 +
> tests/intel/xe_exec_system_allocator.c | 1832 ++++++++++++++++++++++++
> tests/meson.build | 1 +
> 5 files changed, 1896 insertions(+), 7 deletions(-)
> create mode 100644 tests/intel/xe_exec_system_allocator.c
>
> diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
> index 154f947ef0..9c08738c3b 100644
> --- a/include/drm-uapi/xe_drm.h
> +++ b/include/drm-uapi/xe_drm.h
> @@ -3,8 +3,8 @@
> * Copyright © 2023 Intel Corporation
> */
>
> -#ifndef _XE_DRM_H_
> -#define _XE_DRM_H_
> +#ifndef _UAPI_XE_DRM_H_
> +#define _UAPI_XE_DRM_H_
Nit: The header seems to have been copied directly from the kernel tree, instead
it should be generated with:
make headers_install
https://docs.kernel.org/kbuild/headers_install.html
>
> #include "drm.h"
>
> @@ -134,7 +134,7 @@ extern "C" {
> * redefine the interface more easily than an ever growing struct of
> * increasing complexity, and for large parts of that interface to be
> * entirely optional. The downside is more pointer chasing; chasing across
> - * the boundary with pointers encapsulated inside u64.
> + * the __user boundary with pointers encapsulated inside u64.
See above comment on make headers_install.
> *
> * Example chaining:
> *
> @@ -393,6 +393,10 @@ struct drm_xe_query_mem_regions {
> *
> * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
> * has usable VRAM
> + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
> + * has low latency hint support
> + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
> + * device has CPU address mirroring support
> * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
> * required by this device, typically SZ_4K or SZ_64K
> * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
> @@ -409,6 +413,8 @@ struct drm_xe_query_config {
> #define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
> #define DRM_XE_QUERY_CONFIG_FLAGS 1
> #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
> + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
> + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
> #define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
> #define DRM_XE_QUERY_CONFIG_VA_BITS 3
> #define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
> @@ -911,7 +917,11 @@ struct drm_xe_gem_mmap_offset {
> * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
> *
> * The @flags can be:
> - * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
> + * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
> + * space of the VM to scratch page. A vm_bind would overwrite the scratch
> + * page mapping. This flag is mutually exclusive with the
> + * %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception of on x2 and
> + * xe3 platform.
> * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
> * exec submissions to its exec_queues that don't have an upper time
> * limit on the job execution time. But exec submissions to these
> @@ -987,6 +997,12 @@ struct drm_xe_vm_destroy {
> * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
> * reject the binding if the encryption key is no longer valid. This
> * flag has no effect on BOs that are not marked as using PXP.
> + * - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
> + * set, no mappings are created rather the range is reserved for CPU address
> + * mirroring which will be populated on GPU page faults or prefetches. Only
> + * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
> + * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
> + * handle MBZ, and the BO offset MBZ.
> */
> struct drm_xe_vm_bind_op {
> /** @extensions: Pointer to the first extension struct, if any */
> @@ -1039,7 +1055,9 @@ struct drm_xe_vm_bind_op {
> * on the @pat_index. For such mappings there is no actual memory being
> * mapped (the address in the PTE is invalid), so the various PAT memory
> * attributes likely do not apply. Simply leaving as zero is one
> - * option (still a valid pat_index).
> + * option (still a valid pat_index). Same applies to
> + * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mapping
> + * there is no actual memory being mapped.
> */
> __u16 pat_index;
>
> @@ -1055,6 +1073,14 @@ struct drm_xe_vm_bind_op {
>
> /** @userptr: user pointer to bind on */
> __u64 userptr;
> +
> + /**
> + * @cpu_addr_mirror_offset: Offset from GPU @addr to create
> + * CPU address mirror mappings. MBZ with current level of
> + * support (e.g. 1 to 1 mapping between GPU and CPU mappings
> + * only supported).
> + */
> + __s64 cpu_addr_mirror_offset;
> };
>
> /**
> @@ -1078,6 +1104,7 @@ struct drm_xe_vm_bind_op {
> #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
> #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
> #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
> +#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
> /** @flags: Bind flags */
> __u32 flags;
>
> @@ -1205,6 +1232,21 @@ struct drm_xe_vm_bind {
> * };
> * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> *
> + * Allow users to provide a hint to kernel for cases demanding low latency
> + * profile. Please note it will have impact on power consumption. User can
> + * indicate low latency hint with flag while creating exec queue as
> + * mentioned below,
> + *
> + * struct drm_xe_exec_queue_create exec_queue_create = {
> + * .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
> + * .extensions = 0,
> + * .vm_id = vm,
> + * .num_bb_per_exec = 1,
> + * .num_eng_per_bb = 1,
> + * .instances = to_user_pointer(&instance),
> + * };
> + * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> + *
> */
> struct drm_xe_exec_queue_create {
> #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
> @@ -1223,7 +1265,8 @@ struct drm_xe_exec_queue_create {
> /** @vm_id: VM to use for this exec queue */
> __u32 vm_id;
>
> - /** @flags: MBZ */
> +#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
> + /** @flags: flags to use for this exec queue */
> __u32 flags;
>
> /** @exec_queue_id: Returned exec queue ID */
> @@ -1926,4 +1969,4 @@ struct drm_xe_query_eu_stall {
> }
> #endif
>
> -#endif /* _XE_DRM_H_ */
> +#endif /* _UAPI_XE_DRM_H_ */
> diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> index fb8c4aef13..785fc9184c 100644
> --- a/lib/xe/xe_ioctl.c
> +++ b/lib/xe/xe_ioctl.c
> @@ -440,6 +440,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
> return __xe_bo_map(fd, bo, size, PROT_WRITE);
> }
>
> +void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
> +{
> + uint64_t mmo;
> + void *map;
> +
> + mmo = xe_bo_mmap_offset(fd, bo);
> + map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
> + igt_assert(map != MAP_FAILED);
> +
> + return map;
> +}
> +
> void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
> {
> return __xe_bo_map(fd, bo, size, prot);
> diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
> index 9bdf73b2bd..554a33c9cd 100644
> --- a/lib/xe/xe_ioctl.h
> +++ b/lib/xe/xe_ioctl.h
> @@ -86,6 +86,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
> void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
> uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
> void *xe_bo_map(int fd, uint32_t bo, size_t size);
> +void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
> void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
> int __xe_exec(int fd, struct drm_xe_exec *exec);
> void xe_exec(int fd, struct drm_xe_exec *exec);
> diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
> new file mode 100644
> index 0000000000..14fa59353e
> --- /dev/null
> +++ b/tests/intel/xe_exec_system_allocator.c
> @@ -0,0 +1,1832 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +/**
> + * TEST: Basic tests for execbuf functionality using system allocator
> + * Category: Hardware building block
> + * Mega feature: Shared virtual memory
> + * Sub-category: execbuf
> + * Functionality: fault mode, system allocator
> + * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
> + */
> +
> +#include <fcntl.h>
> +#include <linux/mman.h>
> +#include <time.h>
> +
> +#include "igt.h"
> +#include "lib/igt_syncobj.h"
> +#include "lib/intel_reg.h"
> +#include "xe_drm.h"
> +
> +#include "xe/xe_ioctl.h"
> +#include "xe/xe_query.h"
> +#include <string.h>
> +
> +#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> +#define QUARTER_SEC (NSEC_PER_SEC / 4)
> +#define FIVE_SEC (5LL * NSEC_PER_SEC)
> +
> +struct batch_data {
> + uint32_t batch[16];
> + uint64_t pad;
> + uint32_t data;
> + uint32_t expected_data;
> +};
> +
> +#define WRITE_VALUE(data__, i__) ({ \
> + if (!(data__)->expected_data) \
> + (data__)->expected_data = rand() << 12 | (i__); \
> + (data__)->expected_data; \
> +})
> +#define READ_VALUE(data__, i__) ((data__)->expected_data)
> +
> +static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> + int *idx)
> +{
> + batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
> + batch[(*idx)++] = sdi_addr;
> + batch[(*idx)++] = sdi_addr >> 32;
> + batch[(*idx)++] = wdata;
> +}
> +
> +static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> + int *idx)
> +{
> + __write_dword(batch, sdi_addr, wdata, idx);
> + batch[(*idx)++] = MI_BATCH_BUFFER_END;
> +}
Slightly out of scope for this review but the 2 functions above might be
helpful under lib/ to prevent adding more duplications of the dword write
batch sequence.
> +
> +static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride,
> + pthread_barrier_t *barrier)
> +{
> + int i, n_writes = alloc_size / stride;
> +
> + for (i = 0; i < n_writes; ++i) {
> + struct batch_data *data = ptr + i * stride;
> +
> + igt_assert_eq(data->data, READ_VALUE(data, i));
> +
> + if (barrier)
> + pthread_barrier_wait(barrier);
> + }
> +}
> +
> +#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
Might be worth creating and propagating a unique file name at runtime, for
example with mkstemp(), in order to avoid potential concurrent accesses from
multiple instances of the test.
> +
> +struct process_data {
> + pthread_mutex_t mutex;
> + pthread_cond_t cond;
> + pthread_barrier_t barrier;
> + bool go;
> +};
> +
> +static void wait_pdata(struct process_data *pdata)
> +{
> + pthread_mutex_lock(&pdata->mutex);
> + while (!pdata->go)
> + pthread_cond_wait(&pdata->cond, &pdata->mutex);
> + pthread_mutex_unlock(&pdata->mutex);
> +}
> +
> +static void init_pdata(struct process_data *pdata, int n_engine)
> +{
> + pthread_mutexattr_t mutex_attr;
> + pthread_condattr_t cond_attr;
> + pthread_barrierattr_t barrier_attr;
> +
> + pthread_mutexattr_init(&mutex_attr);
> + pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
> + pthread_mutex_init(&pdata->mutex, &mutex_attr);
> +
> + pthread_condattr_init(&cond_attr);
> + pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
> + pthread_cond_init(&pdata->cond, &cond_attr);
> +
> + pthread_barrierattr_init(&barrier_attr);
> + pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
> + pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
> +
> + pdata->go = false;
> +}
> +
> +static void signal_pdata(struct process_data *pdata)
> +{
> + pthread_mutex_lock(&pdata->mutex);
> + pdata->go = true;
> + pthread_cond_broadcast(&pdata->cond);
> + pthread_mutex_unlock(&pdata->mutex);
> +}
> +
> +/* many_alloc flags */
> +#define MIX_BO_ALLOC (0x1 << 0)
> +#define BENCHMARK (0x1 << 1)
> +#define CPU_FAULT_THREADS (0x1 << 2)
> +#define CPU_FAULT_PROCESS (0x1 << 3)
> +#define CPU_FAULT_SAME_PAGE (0x1 << 4)
> +
> +static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
> + unsigned int flags)
> +{
> + struct process_data *pdata;
> + int map_fd;
> +
> + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> + PROT_WRITE, MAP_SHARED, map_fd, 0);
> + wait_pdata(pdata);
> +
> + if (flags & CPU_FAULT_SAME_PAGE)
> + check_all_pages(ptr, alloc_size, stride, &pdata->barrier);
> + else
> + check_all_pages(ptr, alloc_size, stride, NULL);
> +
> + close(map_fd);
> + munmap(pdata, sizeof(*pdata));
> +}
> +
> +static void
> +check_all_pages_process(void *ptr, uint64_t alloc_size, uint64_t stride,
> + int n_process, unsigned int flags)
> +{
> + struct process_data *pdata;
> + int map_fd, i;
> +
> + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> + posix_fallocate(map_fd, 0, sizeof(*pdata));
> + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> + PROT_WRITE, MAP_SHARED, map_fd, 0);
> +
> + init_pdata(pdata, n_process);
> +
> + for (i = 0; i < n_process; ++i) {
> + igt_fork(child, 1)
> + if (flags & CPU_FAULT_SAME_PAGE)
> + process_check(ptr, alloc_size, stride, flags);
> + else
> + process_check(ptr + stride * i, alloc_size,
> + stride * n_process, flags);
> + }
> +
> + signal_pdata(pdata);
> + igt_waitchildren();
> +
> + close(map_fd);
> + munmap(pdata, sizeof(*pdata));
> +}
> +
> +struct thread_check_data {
> + pthread_t thread;
> + pthread_mutex_t *mutex;
> + pthread_cond_t *cond;
> + pthread_barrier_t *barrier;
> + void *ptr;
> + uint64_t alloc_size;
> + uint64_t stride;
> + bool *go;
> +};
> +
> +static void *thread_check(void *data)
> +{
> + struct thread_check_data *t = data;
> +
> + pthread_mutex_lock(t->mutex);
> + while (!*t->go)
> + pthread_cond_wait(t->cond, t->mutex);
> + pthread_mutex_unlock(t->mutex);
> +
> + check_all_pages(t->ptr, t->alloc_size, t->stride, t->barrier);
> +
> + return NULL;
> +}
> +
> +/*
> + * Partition checking of results in chunks which causes multiple threads to
> + * fault same VRAM allocation in parallel.
> + */
> +static void
> +check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
> + int n_threads, unsigned int flags)
> +{
> + struct thread_check_data *threads_check_data;
> + pthread_mutex_t mutex;
> + pthread_cond_t cond;
> + pthread_barrier_t barrier;
> + int i;
> + bool go = false;
> +
> + threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
> + igt_assert(threads_check_data);
> +
> + pthread_mutex_init(&mutex, 0);
> + pthread_cond_init(&cond, 0);
> + pthread_barrier_init(&barrier, 0, n_threads);
> +
> + for (i = 0; i < n_threads; ++i) {
> + threads_check_data[i].mutex = &mutex;
> + threads_check_data[i].cond = &cond;
> + if (flags & CPU_FAULT_SAME_PAGE) {
> + threads_check_data[i].barrier = &barrier;
> + threads_check_data[i].ptr = ptr;
> + threads_check_data[i].alloc_size = alloc_size;
> + threads_check_data[i].stride = stride;
> + } else {
> + threads_check_data[i].barrier = NULL;
> + threads_check_data[i].ptr = ptr + stride * i;
> + threads_check_data[i].alloc_size = alloc_size;
> + threads_check_data[i].stride = n_threads * stride;
> + }
> + threads_check_data[i].go = &go;
> +
> + pthread_create(&threads_check_data[i].thread, 0, thread_check,
> + &threads_check_data[i]);
> + }
> +
> + pthread_mutex_lock(&mutex);
> + go = true;
> + pthread_cond_broadcast(&cond);
> + pthread_mutex_unlock(&mutex);
> +
> + for (i = 0; i < n_threads; ++i)
> + pthread_join(threads_check_data[i].thread, NULL);
> + free(threads_check_data);
> +}
> +
> +static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> + uint64_t alloc_size, uint64_t stride,
> + struct timespec *tv, uint64_t *submit)
> +{
> + struct drm_xe_sync sync[1] = {
> + { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
> + .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> + .timeline_value = USER_FENCE_VALUE },
> + };
> + struct drm_xe_exec exec = {
> + .num_batch_buffer = 1,
> + .num_syncs = 0,
> + .exec_queue_id = exec_queue,
> + .syncs = to_user_pointer(&sync),
> + };
> + uint64_t addr = to_user_pointer(ptr);
> + int i, ret, n_writes = alloc_size / stride;
> + u64 *exec_ufence = NULL;
> + int64_t timeout = FIVE_SEC;
> +
> + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> + PROT_WRITE, MAP_SHARED |
> + MAP_ANONYMOUS, -1, 0);
> + igt_assert(exec_ufence != MAP_FAILED);
> + memset(exec_ufence, 0, SZ_4K);
> + sync[0].addr = to_user_pointer(exec_ufence);
> +
> + for (i = 0; i < n_writes; ++i, addr += stride) {
> + struct batch_data *data = ptr + i * stride;
> + uint64_t sdi_offset = (char *)&data->data - (char *)data;
> + uint64_t sdi_addr = addr + sdi_offset;
> + int b = 0;
> +
> + write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
> + igt_assert(b <= ARRAY_SIZE(data->batch));
> + }
> +
> + igt_nsec_elapsed(tv);
> + *submit = igt_nsec_elapsed(tv);
> +
> + addr = to_user_pointer(ptr);
> + for (i = 0; i < n_writes; ++i, addr += stride) {
> + struct batch_data *data = ptr + i * stride;
> + uint64_t batch_offset = (char *)&data->batch - (char *)data;
> + uint64_t batch_addr = addr + batch_offset;
> +
> + exec.address = batch_addr;
> + if (i + 1 == n_writes)
> + exec.num_syncs = 1;
> + xe_exec(fd, &exec);
> + }
> +
> + ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
> + &timeout);
> + if (ret) {
> + printf("FAIL EXEC_UFENCE_ADDR: 0x%016llx\n", sync[0].addr);
> + printf("FAIL EXEC_UFENCE: EXPEXCTED=0x%016llx, ACTUAL=0x%016lx\n",
> + USER_FENCE_VALUE, exec_ufence[0]);
> +
> + addr = to_user_pointer(ptr);
> + for (i = 0; i < n_writes; ++i, addr += stride) {
> + struct batch_data *data = ptr + i * stride;
> + uint64_t batch_offset = (char *)&data->batch - (char *)data;
> + uint64_t batch_addr = addr + batch_offset;
> + uint64_t sdi_offset = (char *)&data->data - (char *)data;
> + uint64_t sdi_addr = addr + sdi_offset;
> +
> + printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
> + printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
> + printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
> + (((u64)data->batch[2]) << 32) | data->batch[1]);
> + printf("FAIL DARA: EXPEXCTED=0x%08x, ACTUAL=0x%08x\n",
> + data->expected_data, data->data);
> + }
> + igt_assert_eq(ret, 0);
> + }
> + munmap(exec_ufence, SZ_4K);
> +}
> +
> +static int va_bits;
> +
> +#define bind_system_allocator(__sync, __num_sync) \
> + __xe_vm_bind_assert(fd, vm, 0, \
> + 0, 0, 0, 0x1ull << va_bits, \
> + DRM_XE_VM_BIND_OP_MAP, \
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, \
> + (__sync), (__num_sync), 0, 0)
> +
> +#define unbind_system_allocator() \
> + __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits, \
> + DRM_XE_VM_BIND_OP_UNMAP, 0, \
> + NULL, 0, 0, 0, 0)
Is there a reason here to favor static variable + macros over helper function
with parameters?
> +
> +#define odd(__i) (__i & 1)
> +
> +struct aligned_alloc_type {
> + void *__ptr;
> + void *ptr;
> + size_t __size;
> + size_t size;
> +};
> +
> +static struct aligned_alloc_type __aligned_alloc(size_t alignment, size_t size)
> +{
> + struct aligned_alloc_type aligned_alloc_type;
> +
> + aligned_alloc_type.__ptr = mmap(NULL, alignment + size, PROT_NONE, MAP_PRIVATE |
> + MAP_ANONYMOUS, -1, 0);
> + igt_assert(aligned_alloc_type.__ptr != MAP_FAILED);
> +
> + aligned_alloc_type.ptr = (void *)ALIGN((uint64_t)aligned_alloc_type.__ptr, alignment);
> + aligned_alloc_type.size = size;
> + aligned_alloc_type.__size = size + alignment;
> +
> + return aligned_alloc_type;
> +}
> +
> +static void __aligned_free(struct aligned_alloc_type *aligned_alloc_type)
> +{
> + munmap(aligned_alloc_type->__ptr, aligned_alloc_type->__size);
> +}
> +
> +static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_type)
> +{
> + size_t begin_size = (size_t)(aligned_alloc_type->ptr - aligned_alloc_type->__ptr);
> +
> + if (begin_size)
> + munmap(aligned_alloc_type->__ptr, begin_size);
> + if (aligned_alloc_type->__size - aligned_alloc_type->size - begin_size)
> + munmap(aligned_alloc_type->ptr + aligned_alloc_type->size,
> + aligned_alloc_type->__size - aligned_alloc_type->size - begin_size);
> +}
> +
> +/**
> + * SUBTEST: unaligned-alloc
> + * Description: allocate unaligned sizes of memory
> + * Test category: functionality test
> + *
> + * SUBTEST: fault-benchmark
> + * Description: Benchmark how long GPU / CPU take
> + * Test category: performance test
> + *
> + * SUBTEST: fault-threads-benchmark
> + * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
> + * Test category: performance and functionality test
> + *
> + * SUBTEST: fault-threads-same-page-benchmark
> + * Description: Benchmark how long GPU / CPU take, reading results with multiple threads, hammer same page
> + * Test category: performance and functionality test
> + *
> + * SUBTEST: fault-process-benchmark
> + * Description: Benchmark how long GPU / CPU take, reading results with multiple process
> + * Test category: performance and functionality test
> + *
> + * SUBTEST: fault-process-same-page-benchmark
> + * Description: Benchmark how long GPU / CPU take, reading results with multiple process, hammer same page
> + * Test category: performance and functionality test
> + *
> + * SUBTEST: evict-malloc
> + * Description: trigger eviction of VRAM allocated via malloc
> + * Test category: functionality test
> + *
> + * SUBTEST: evict-malloc-mix-bo
> + * Description: trigger eviction of VRAM allocated via malloc and BO create
> + * Test category: functionality test
> + *
> + * SUBTEST: processes-evict-malloc
> + * Description: multi-process trigger eviction of VRAM allocated via malloc
> + * Test category: stress test
> + *
> + * SUBTEST: processes-evict-malloc-mix-bo
> + * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
> + * Test category: stress test
> + */
> +
> +static void
> +many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> + uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
> + pthread_barrier_t *barrier, unsigned int flags)
> +{
> + uint32_t vm, exec_queue;
> + int num_allocs = flags & BENCHMARK ? 1 :
> + (9 * (total_alloc / alloc_size)) / 8;
> + struct aligned_alloc_type *allocs;
> + uint32_t *bos = NULL;
> + struct timespec tv = {};
> + uint64_t submit, read, elapsed;
> + int i;
> +
> + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> +
> + bind_system_allocator(NULL, 0);
> +
> + allocs = malloc(sizeof(*allocs) * num_allocs);
> + igt_assert(allocs);
> + memset(allocs, 0, sizeof(*allocs) * num_allocs);
> +
> + if (flags & MIX_BO_ALLOC) {
> + bos = malloc(sizeof(*bos) * num_allocs);
> + igt_assert(bos);
> + memset(bos, 0, sizeof(*bos) * num_allocs);
> + }
> +
> + for (i = 0; i < num_allocs; ++i) {
> + struct aligned_alloc_type alloc;
> +
> + if (flags & MIX_BO_ALLOC && odd(i)) {
> + uint32_t bo_flags =
> + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> +
> + alloc = __aligned_alloc(SZ_2M, alloc_size);
> + igt_assert(alloc.ptr);
> +
> + bos[i] = xe_bo_create(fd, vm, alloc_size,
> + vram_if_possible(fd, eci->gt_id),
> + bo_flags);
> + alloc.ptr = xe_bo_map_fixed(fd, bos[i], alloc_size,
> + to_user_pointer(alloc.ptr));
> + xe_vm_bind_async(fd, vm, 0, bos[i], 0,
> + to_user_pointer(alloc.ptr),
> + alloc_size, 0, 0);
> + } else {
> + alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
> + igt_assert(alloc.ptr);
> + }
> + allocs[i] = alloc;
> +
> + touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> + &tv, &submit);
> + }
> +
> + if (barrier)
> + pthread_barrier_wait(barrier);
> +
> + for (i = 0; i < num_allocs; ++i) {
> + if (flags & BENCHMARK)
> + read = igt_nsec_elapsed(&tv);
> +#define NUM_CHECK_THREADS 8
> + if (flags & CPU_FAULT_PROCESS)
> + check_all_pages_process(allocs[i].ptr, alloc_size, stride,
> + NUM_CHECK_THREADS, flags);
> + else if (flags & CPU_FAULT_THREADS)
> + check_all_pages_threads(allocs[i].ptr, alloc_size, stride,
> + NUM_CHECK_THREADS, flags);
> + else
> + check_all_pages(allocs[i].ptr, alloc_size, stride, NULL);
> + if (flags & BENCHMARK) {
> + elapsed = igt_nsec_elapsed(&tv);
> + printf("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
> + 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
> + 1e-3 * (elapsed - submit),
> + 1e-3 * (elapsed - read));
> + }
> + if (bos && bos[i]) {
> + __aligned_free(allocs + i);
> + gem_close(fd, bos[i]);
> + } else {
> + free(allocs[i].ptr);
> + }
> + }
> + if (bos)
> + free(bos);
> + free(allocs);
> + xe_exec_queue_destroy(fd, exec_queue);
> + xe_vm_destroy(fd, vm);
> +}
> +
> +static void process_evict(struct drm_xe_engine_class_instance *hwe,
> + uint64_t total_alloc, uint64_t alloc_size,
> + uint64_t stride, unsigned int flags)
> +{
> + struct process_data *pdata;
> + int map_fd;
> + int fd;
> +
> + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> + PROT_WRITE, MAP_SHARED, map_fd, 0);
> + wait_pdata(pdata);
> +
> + fd = drm_open_driver(DRIVER_XE);
> + many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
> + flags);
> + drm_close_driver(fd);
> +
> + close(map_fd);
> + munmap(pdata, sizeof(*pdata));
> +}
> +
> +static void
> +processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
> + unsigned int flags)
> +{
> + struct drm_xe_engine_class_instance *hwe;
> + struct process_data *pdata;
> + int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
> + int map_fd;
> +
> + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> + posix_fallocate(map_fd, 0, sizeof(*pdata));
> + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> + PROT_WRITE, MAP_SHARED, map_fd, 0);
> +
> + xe_for_each_engine(fd, hwe) {
> + igt_assert(hwe->gt_id < 2);
> + n_engine_gt[hwe->gt_id]++;
> + n_engine++;
> + }
> +
> + init_pdata(pdata, n_engine);
> +
> + xe_for_each_engine(fd, hwe) {
> + igt_fork(child, 1)
> + process_evict(hwe,
> + xe_visible_vram_size(fd, hwe->gt_id) /
> + n_engine_gt[hwe->gt_id], alloc_size,
> + stride, flags);
> + }
> +
> + signal_pdata(pdata);
> + igt_waitchildren();
> +
> + close(map_fd);
> + munmap(pdata, sizeof(*pdata));
> +}
> +
> +#define CPU_FAULT (0x1 << 0)
> +#define REMAP (0x1 << 1)
> +#define MIDDLE (0x1 << 2)
> +
> +/**
> + * SUBTEST: partial-munmap-cpu-fault
> + * Description: munmap partially with cpu access in between
> + * Test category: functionality test
> + *
> + * SUBTEST: partial-munmap-no-cpu-fault
> + * Description: munmap partially with no cpu access in between
> + * Test category: functionality test
> + *
> + * SUBTEST: partial-remap-cpu-fault
> + * Description: remap partially with cpu access in between
> + * Test category: functionality test
> + *
> + * SUBTEST: partial-remap-no-cpu-fault
> + * Description: remap partially with no cpu access in between
> + * Test category: functionality test
> + *
> + * SUBTEST: partial-middle-munmap-cpu-fault
> + * Description: munmap middle with cpu access in between
> + * Test category: functionality test
> + *
> + * SUBTEST: partial-middle-munmap-no-cpu-fault
> + * Description: munmap middle with no cpu access in between
> + * Test category: functionality test
> + *
> + * SUBTEST: partial-middle-remap-cpu-fault
> + * Description: remap middle with cpu access in between
> + * Test category: functionality test
> + *
> + * SUBTEST: partial-middle-remap-no-cpu-fault
> + * Description: remap middle with no cpu access in between
> + * Test category: functionality test
> + */
> +
> +static void
> +partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> +{
> + struct drm_xe_sync sync[1] = {
> + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> + .timeline_value = USER_FENCE_VALUE },
> + };
> + struct drm_xe_exec exec = {
> + .num_batch_buffer = 1,
> + .num_syncs = 1,
> + .syncs = to_user_pointer(sync),
> + };
> + struct {
> + uint32_t batch[16];
> + uint64_t pad;
> + uint64_t vm_sync;
> + uint64_t exec_sync;
> + uint32_t data;
> + uint32_t expected_data;
> + } *data;
> + size_t bo_size = SZ_2M, unmap_offset = 0;
> + uint32_t vm, exec_queue;
> + u64 *exec_ufence = NULL;
> + int i;
> + void *old, *new = NULL;
> + struct aligned_alloc_type alloc;
> +
> + if (flags & MIDDLE)
> + unmap_offset = bo_size / 4;
> +
> + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> +
> + alloc = __aligned_alloc(bo_size, bo_size);
> + igt_assert(alloc.ptr);
> +
> + data = mmap(alloc.ptr, bo_size, PROT_READ | PROT_WRITE,
> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
> + igt_assert(data != MAP_FAILED);
> + memset(data, 0, bo_size);
> + old = data;
> +
> + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> +
> + sync[0].addr = to_user_pointer(&data[0].vm_sync);
> + bind_system_allocator(sync, 1);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> + data[0].vm_sync = 0;
> +
> + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> + PROT_WRITE, MAP_SHARED |
> + MAP_ANONYMOUS, -1, 0);
> + igt_assert(exec_ufence != MAP_FAILED);
> + memset(exec_ufence, 0, SZ_4K);
> +
> + for (i = 0; i < 2; i++) {
> + uint64_t addr = to_user_pointer(data);
> + uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> + uint64_t sdi_addr = addr + sdi_offset;
> + int b = 0;
> +
> + write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
> + igt_assert(b <= ARRAY_SIZE(data[i].batch));
> +
> + if (!i)
> + data = old + unmap_offset + bo_size / 2;
> + }
> +
> + data = old;
> + exec.exec_queue_id = exec_queue;
> +
> + for (i = 0; i < 2; i++) {
> + uint64_t addr = to_user_pointer(data);
> + uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> + uint64_t batch_addr = addr + batch_offset;
> +
> + sync[0].addr = new ? to_user_pointer(new) :
> + to_user_pointer(exec_ufence);
> + exec.address = batch_addr;
> + xe_exec(fd, &exec);
> +
> + xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
> + exec_queue, FIVE_SEC);
> + if (i || (flags & CPU_FAULT))
> + igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
> + exec_ufence[0] = 0;
> +
> + if (!i) {
> + data = old + unmap_offset + bo_size / 2;
> + munmap(old + unmap_offset, bo_size / 2);
> + if (flags & REMAP) {
> + new = mmap(old + unmap_offset, bo_size / 2,
> + PROT_READ | PROT_WRITE,
> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
> + MAP_LOCKED, -1, 0);
> + igt_assert(new != MAP_FAILED);
> + }
> + }
> + }
> +
> + xe_exec_queue_destroy(fd, exec_queue);
> + munmap(exec_ufence, SZ_4K);
> + __aligned_free(&alloc);
> + if (new)
> + munmap(new, bo_size / 2);
> + xe_vm_destroy(fd, vm);
> +}
> +
> +#define MAX_N_EXEC_QUEUES 16
> +
> +#define MMAP (0x1 << 0)
> +#define NEW (0x1 << 1)
> +#define BO_UNMAP (0x1 << 2)
> +#define FREE (0x1 << 3)
> +#define BUSY (0x1 << 4)
> +#define BO_MAP (0x1 << 5)
> +#define RACE (0x1 << 6)
> +#define SKIP_MEMSET (0x1 << 7)
> +#define FAULT (0x1 << 8)
> +#define FILE_BACKED (0x1 << 9)
> +#define LOCK (0x1 << 10)
> +#define MMAP_SHARED (0x1 << 11)
> +#define HUGE_PAGE (0x1 << 12)
> +#define SHARED_ALLOC (0x1 << 13)
> +#define FORK_READ (0x1 << 14)
> +#define FORK_READ_AFTER (0x1 << 15)
> +#define MREMAP (0x1 << 16)
> +#define DONTUNMAP (0x1 << 17)
> +#define READ_ONLY_REMAP (0x1 << 18)
> +#define SYNC_EXEC (0x1 << 19)
> +#define EVERY_OTHER_CHECK (0x1 << 20)
> +#define MULTI_FAULT (0x1 << 21)
> +
> +#define N_MULTI_FAULT 4
> +
> +/**
> + * SUBTEST: once-%s
> + * Description: Run %arg[1] system allocator test only once
> + * Test category: functionality test
> + *
> + * SUBTEST: once-large-%s
> + * Description: Run %arg[1] system allocator test only once with large allocation
> + * Test category: functionality test
> + *
> + * SUBTEST: twice-%s
> + * Description: Run %arg[1] system allocator test twice
> + * Test category: functionality test
> + *
> + * SUBTEST: twice-large-%s
> + * Description: Run %arg[1] system allocator test twice with large allocation
> + * Test category: functionality test
> + *
> + * SUBTEST: many-%s
> + * Description: Run %arg[1] system allocator test many times
> + * Test category: stress test
> + *
> + * SUBTEST: many-stride-%s
> + * Description: Run %arg[1] system allocator test many times with a stride on each exec
> + * Test category: stress test
> + *
> + * SUBTEST: many-execqueues-%s
> + * Description: Run %arg[1] system allocator test on many exec_queues
> + * Test category: stress test
> + *
> + * SUBTEST: many-large-%s
> + * Description: Run %arg[1] system allocator test many times with large allocations
> + * Test category: stress test
> + *
> + * SUBTEST: many-large-execqueues-%s
> + * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
> + *
> + * SUBTEST: threads-many-%s
> + * Description: Run %arg[1] system allocator threaded test many times
> + * Test category: stress test
> + *
> + * SUBTEST: threads-many-stride-%s
> + * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
> + * Test category: stress test
> + *
> + * SUBTEST: threads-many-execqueues-%s
> + * Description: Run %arg[1] system allocator threaded test on many exec_queues
> + * Test category: stress test
> + *
> + * SUBTEST: threads-many-large-%s
> + * Description: Run %arg[1] system allocator threaded test many times with large allocations
> + * Test category: stress test
> + *
> + * SUBTEST: threads-many-large-execqueues-%s
> + * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
> + *
> + * SUBTEST: threads-shared-vm-many-%s
> + * Description: Run %arg[1] system allocator threaded, shared vm test many times
> + * Test category: stress test
> + *
> + * SUBTEST: threads-shared-vm-many-stride-%s
> + * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
> + * Test category: stress test
> + *
> + * SUBTEST: threads-shared-vm-many-execqueues-%s
> + * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
> + * Test category: stress test
> + *
> + * SUBTEST: threads-shared-vm-many-large-%s
> + * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
> + * Test category: stress test
> + *
> + * SUBTEST: threads-shared-vm-many-large-execqueues-%s
> + * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
> + * Test category: stress test
> + *
> + * SUBTEST: process-many-%s
> + * Description: Run %arg[1] system allocator multi-process test many times
> + * Test category: stress test
> + *
> + * SUBTEST: process-many-stride-%s
> + * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
> + * Test category: stress test
> + *
> + * SUBTEST: process-many-execqueues-%s
> + * Description: Run %arg[1] system allocator multi-process test on many exec_queues
> + * Test category: stress test
> + *
> + * SUBTEST: process-many-large-%s
> + * Description: Run %arg[1] system allocator multi-process test many times with large allocations
> + * Test category: stress test
> + *
> + * SUBTEST: process-many-large-execqueues-%s
> + * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
> + *
> + * SUBTEST: fault
> + * Description: use a bad system allocator address resulting in a fault
> + * Test category: bad input
> + *
> + * arg[1]:
> + *
> + * @malloc: malloc single buffer for all execs, issue a command which will trigger multiple faults
> + * @malloc-multi-fault: malloc single buffer for all execs
> + * @malloc-fork-read: malloc single buffer for all execs, fork a process to read test output
> + * @malloc-fork-read-after: malloc single buffer for all execs, fork a process to read test output, check again after fork returns in parent
> + * @malloc-mlock: malloc and mlock single buffer for all execs
> + * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
> + * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
> + * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
> + * @mmap: mmap single buffer for all execs
> + * @mmap-remap: mmap and mremap a buffer for all execs
> + * @mmap-remap-dontunmap: mmap and mremap a buffer with dontunmap flag for all execs
> + * @mmap-remap-ro: mmap and mremap a read-only buffer for all execs
> + * @mmap-remap-ro-dontunmap: mmap and mremap a read-only buffer with dontunmap flag for all execs
> + * @mmap-remap-eocheck: mmap and mremap a buffer for all execs, check data every other loop iteration
> + * @mmap-remap-dontunmap-eocheck: mmap and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
> + * @mmap-remap-ro-eocheck: mmap and mremap a read-only buffer for all execs, check data every other loop iteration
> + * @mmap-remap-ro-dontunmap-eocheck: mmap and mremap a read-only buffer with dontunmap flag for all execs, check data every other loop iteration
> + * @mmap-huge: mmap huge page single buffer for all execs
> + * @mmap-shared: mmap shared single buffer for all execs
> + * @mmap-shared-remap: mmap shared and mremap a buffer for all execs
> + * @mmap-shared-remap-dontunmap: mmap shared and mremap a buffer with dontunmap flag for all execs
> + * @mmap-shared-remap-eocheck: mmap shared and mremap a buffer for all execs, check data every other loop iteration
> + * @mmap-shared-remap-dontunmap-eocheck: mmap shared and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
> + * @mmap-mlock: mmap and mlock single buffer for all execs
> + * @mmap-file: mmap single buffer, with file backing, for all execs
> + * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
> + * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
> + * @free: malloc and free buffer for each exec
> + * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
> + * @new: malloc a new buffer for each exec
> + * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
> + * @new-bo-map: malloc a new buffer or map BO for each exec
> + * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
> + * @mmap-free: mmap and free buffer for each exec
> + * @mmap-free-huge: mmap huge page and free buffer for each exec
> + * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
> + * @mmap-new: mmap a new buffer for each exec
> + * @mmap-new-huge: mmap huge page a new buffer for each exec
> + * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
> + * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
> + * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
> + * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
> + * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
> + * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
> + * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
> + * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
> + * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
> + * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
> + * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
> + * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
> + * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
> + * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
> + * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
> + * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
> + * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> + * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
> + * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
> + * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
> + * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
> + * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
> + * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
> + * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
> + * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> + *
> + * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
> + * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
> + * Test category: stress test
> + *
> + * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
> + * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
> + * Test category: stress test
> + *
> + * SUBTEST: threads-shared-alloc-many-stride-malloc
> + * Description: Create multiple threads with a faults on different hardware engines to same addresses
> + * Test category: stress test
> + *
> + * SUBTEST: threads-shared-alloc-many-stride-malloc-sync
> + * Description: Create multiple threads with a faults on different hardware engines to same addresses, syncing on each exec
> + * Test category: stress test
> + *
> + * SUBTEST: threads-shared-alloc-many-stride-malloc-race
> + * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
> + * Test category: stress test
> + */
> +
> +struct test_exec_data {
> + uint32_t batch[32];
> + uint64_t pad;
> + uint64_t vm_sync;
> + uint64_t exec_sync;
> + uint32_t data;
> + uint32_t expected_data;
> +};
> +
> +static void
> +test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> + int n_exec_queues, int n_execs, size_t bo_size,
> + size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
> + unsigned int flags)
> +{
> + uint64_t addr;
> + struct drm_xe_sync sync[1] = {
> + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> + .timeline_value = USER_FENCE_VALUE },
> + };
> + struct drm_xe_exec exec = {
> + .num_batch_buffer = 1,
> + .num_syncs = 1,
> + .syncs = to_user_pointer(sync),
> + };
> + uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> + struct test_exec_data *data, *next_data = NULL;
> + uint32_t bo_flags;
> + uint32_t bo = 0;
> + void **pending_free;
> + u64 *exec_ufence = NULL;
> + int i, j, b, file_fd = -1, prev_idx;
> + bool free_vm = false;
> + size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
> + size_t orig_size = bo_size;
> + struct aligned_alloc_type aligned_alloc_type;
> +
> + if (flags & MULTI_FAULT) {
> + if (!bo_size)
> + return;
> +
> + bo_size *= N_MULTI_FAULT;
> + }
> +
> + if (flags & SHARED_ALLOC)
> + return;
> +
> + if (flags & EVERY_OTHER_CHECK && odd(n_execs))
> + return;
> +
> + if (flags & EVERY_OTHER_CHECK)
> + igt_assert(flags & MREMAP);
> +
> + igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> +
> + if (flags & NEW && !(flags & FREE)) {
> + pending_free = malloc(sizeof(*pending_free) * n_execs);
> + igt_assert(pending_free);
> + memset(pending_free, 0, sizeof(*pending_free) * n_execs);
> + }
> +
> + if (!vm) {
> + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> + free_vm = true;
> + }
> + if (!bo_size) {
> + if (!stride) {
> + bo_size = sizeof(*data) * n_execs;
> + bo_size = xe_bb_size(fd, bo_size);
> + } else {
> + bo_size = stride * n_execs * sizeof(*data);
> + bo_size = xe_bb_size(fd, bo_size);
> + }
> + }
> + if (flags & HUGE_PAGE) {
> + aligned_size = ALIGN(aligned_size, SZ_2M);
> + bo_size = ALIGN(bo_size, SZ_2M);
> + }
> +
> + if (alloc) {
> + data = alloc;
> + } else {
> + if (flags & MMAP) {
> + int mmap_flags = MAP_FIXED;
> +
> + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> + data = aligned_alloc_type.ptr;
> + igt_assert(data);
> + __aligned_partial_free(&aligned_alloc_type);
> +
> + if (flags & MMAP_SHARED)
> + mmap_flags |= MAP_SHARED;
> + else
> + mmap_flags |= MAP_PRIVATE;
> +
> + if (flags & HUGE_PAGE)
> + mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
> +
> + if (flags & FILE_BACKED) {
> + char name[1024];
> +
> + igt_assert(!(flags & NEW));
> +
> + sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
> + getpid());
Might be another candidate to use mkstemp() or similar.
> + file_fd = open(name, O_RDWR | O_CREAT, 0x666);
> + posix_fallocate(file_fd, 0, bo_size);
> + } else {
> + mmap_flags |= MAP_ANONYMOUS;
> + }
> +
> + data = mmap(data, bo_size, PROT_READ |
> + PROT_WRITE, mmap_flags, file_fd, 0);
> + igt_assert(data != MAP_FAILED);
> + } else {
> + data = aligned_alloc(aligned_size, bo_size);
> + igt_assert(data);
> + }
> + if (!(flags & SKIP_MEMSET))
> + memset(data, 0, bo_size);
> + if (flags & LOCK) {
> + igt_assert(!(flags & NEW));
> + mlock(data, bo_size);
> + }
> + }
> +
> + for (i = 0; i < n_exec_queues; i++)
> + exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> +
> + sync[0].addr = to_user_pointer(&data[0].vm_sync);
> + if (free_vm) {
> + bind_system_allocator(sync, 1);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> + }
> + data[0].vm_sync = 0;
> +
> + addr = to_user_pointer(data);
> +
> + if (flags & BO_UNMAP) {
> + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> + bo = xe_bo_create(fd, vm, bo_size,
> + vram_if_possible(fd, eci->gt_id), bo_flags);
> + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
> +
> + __xe_vm_bind_assert(fd, vm, 0,
> + 0, 0, addr, bo_size,
> + DRM_XE_VM_BIND_OP_MAP,
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> + 1, 0, 0);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
> + FIVE_SEC);
> + data[0].vm_sync = 0;
> + gem_close(fd, bo);
> + bo = 0;
> + }
> +
> + if (!(flags & RACE)) {
> + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> + PROT_WRITE, MAP_SHARED |
> + MAP_ANONYMOUS, -1, 0);
> + igt_assert(exec_ufence != MAP_FAILED);
> + memset(exec_ufence, 0, SZ_4K);
> + }
> +
> + for (i = 0; i < n_execs; i++) {
> + int idx = !stride ? i : i * stride, next_idx = !stride
> + ? (i + 1) : (i + 1) * stride;
> + uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
> + uint64_t batch_addr = addr + batch_offset;
> + uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
> + uint64_t sdi_addr = addr + sdi_offset;
> + int e = i % n_exec_queues, err;
> + bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> + bool fault_injected = (FAULT & flags) && i > n_execs;
> +
> + if (barrier)
> + pthread_barrier_wait(barrier);
> +
> + if (flags & MULTI_FAULT) {
> + b = 0;
> + for (j = 0; j < N_MULTI_FAULT - 1; ++j)
> + __write_dword(data[idx].batch,
> + sdi_addr + j * orig_size,
> + WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(data[idx].batch, sdi_addr + j * orig_size,
> + WRITE_VALUE(&data[idx], idx), &b);
> + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> + } else if (!(flags & EVERY_OTHER_CHECK)) {
> + b = 0;
> + write_dword(data[idx].batch, sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b);
> + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> + } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
> + b = 0;
> + write_dword(data[idx].batch, sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b);
> + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> +
> + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> + next_data = aligned_alloc_type.ptr;
> + igt_assert(next_data);
> + __aligned_partial_free(&aligned_alloc_type);
> +
> + b = 0;
> + write_dword(data[next_idx].batch,
> + to_user_pointer(next_data) +
> + (char *)&data[next_idx].data - (char *)data,
> + WRITE_VALUE(&data[next_idx], next_idx), &b);
> + igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
> + }
> +
> + if (!exec_ufence)
> + data[idx].exec_sync = 0;
> +
> + sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
> + addr + (char *)&data[idx].exec_sync - (char *)data;
> +
> + exec.exec_queue_id = exec_queues[e];
> + if (fault_inject)
> + exec.address = batch_addr * 2;
> + else
> + exec.address = batch_addr;
> +
> + if (fault_injected) {
> + err = __xe_exec(fd, &exec);
> + igt_assert(err == -ENOENT);
> + } else {
> + xe_exec(fd, &exec);
> + }
> +
> + if (barrier)
> + pthread_barrier_wait(barrier);
> +
> + if (fault_inject || fault_injected) {
> + int64_t timeout = QUARTER_SEC;
> +
> + err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> + &data[idx].exec_sync,
> + USER_FENCE_VALUE,
> + exec_queues[e], &timeout);
> + igt_assert(err == -ETIME || err == -EIO);
> + } else {
> + xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> + &data[idx].exec_sync, USER_FENCE_VALUE,
> + exec_queues[e], FIVE_SEC);
> + if (flags & LOCK && !i)
> + munlock(data, bo_size);
> +
> + if (flags & MREMAP) {
> + void *old = data;
> + int remap_flags = MREMAP_MAYMOVE | MREMAP_FIXED;
> +
> + if (flags & DONTUNMAP)
> + remap_flags |= MREMAP_DONTUNMAP;
> +
> + if (flags & READ_ONLY_REMAP)
> + igt_assert(!mprotect(old, bo_size,
> + PROT_READ));
> +
> + if (!next_data) {
> + aligned_alloc_type = __aligned_alloc(aligned_size,
> + bo_size);
> + data = aligned_alloc_type.ptr;
> + __aligned_partial_free(&aligned_alloc_type);
> + } else {
> + data = next_data;
> + }
> + next_data = NULL;
> + igt_assert(data);
> +
> + data = mremap(old, bo_size, bo_size,
> + remap_flags, data);
> + igt_assert(data != MAP_FAILED);
> +
> + if (flags & READ_ONLY_REMAP)
> + igt_assert(!mprotect(data, bo_size,
> + PROT_READ |
> + PROT_WRITE));
> +
> + addr = to_user_pointer(data);
> + if (flags & DONTUNMAP)
> + munmap(old, bo_size);
> + }
> +
> + if (!(flags & EVERY_OTHER_CHECK) || odd(i)) {
> + if (flags & FORK_READ) {
> + igt_fork(child, 1)
> + igt_assert_eq(data[idx].data,
> + READ_VALUE(&data[idx], idx));
> + if (!(flags & FORK_READ_AFTER))
> + igt_assert_eq(data[idx].data,
> + READ_VALUE(&data[idx], idx));
> + igt_waitchildren();
> + if (flags & FORK_READ_AFTER)
> + igt_assert_eq(data[idx].data,
> + READ_VALUE(&data[idx], idx));
> + } else {
> + igt_assert_eq(data[idx].data,
> + READ_VALUE(&data[idx], idx));
> +
> + if (flags & MULTI_FAULT) {
> + for (j = 1; j < N_MULTI_FAULT; ++j) {
> + struct test_exec_data *__data =
> + ((void *)data) + j * orig_size;
> +
> + igt_assert_eq(__data[idx].data,
> + READ_VALUE(&data[idx], idx));
> + }
> + }
> + }
> + if (flags & EVERY_OTHER_CHECK)
> + igt_assert_eq(data[prev_idx].data,
> + READ_VALUE(&data[prev_idx], idx));
> + }
> + }
> +
> + if (exec_ufence)
> + exec_ufence[0] = 0;
> +
> + if (bo) {
> + __xe_vm_bind_assert(fd, vm, 0,
> + 0, 0, addr, bo_size,
> + DRM_XE_VM_BIND_OP_MAP,
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> + NULL, 0, 0, 0);
> + munmap(data, bo_size);
> + gem_close(fd, bo);
> + }
> +
> + if (flags & NEW) {
> + if (flags & MMAP) {
> + if (flags & FREE)
> + munmap(data, bo_size);
> + else
> + pending_free[i] = data;
> + data = mmap(NULL, bo_size, PROT_READ |
> + PROT_WRITE, MAP_SHARED |
> + MAP_ANONYMOUS, -1, 0);
> + igt_assert(data != MAP_FAILED);
> + } else if (flags & BO_MAP && (i % 2)) {
> + if (!bo) {
> + if (flags & FREE)
> + free(data);
> + else
> + pending_free[i] = data;
> + }
> +
> + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> + data = aligned_alloc_type.ptr;
> + igt_assert(data);
> + __aligned_partial_free(&aligned_alloc_type);
> +
> + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> + bo = xe_bo_create(fd, vm, bo_size,
> + vram_if_possible(fd, eci->gt_id),
> + bo_flags);
> + data = xe_bo_map_fixed(fd, bo, bo_size,
> + to_user_pointer(data));
> +
> + xe_vm_bind_async(fd, vm, 0, bo, 0,
> + to_user_pointer(data),
> + bo_size, 0, 0);
> + } else {
> + if (!bo) {
> + if (flags & FREE)
> + free(data);
> + else
> + pending_free[i] = data;
> + }
> + bo = 0;
> + data = aligned_alloc(aligned_size, bo_size);
Large memory leaks come from this ^ aligned_alloc(), see below.
> + igt_assert(data);
> + }
> + addr = to_user_pointer(data);
> + if (!(flags & SKIP_MEMSET))
> + memset(data, 0, bo_size);
> + }
> +
> + prev_idx = idx;
> + }
> +
> + if (bo) {
> + __xe_vm_bind_assert(fd, vm, 0,
> + 0, 0, addr, bo_size,
> + DRM_XE_VM_BIND_OP_MAP,
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> + NULL, 0, 0, 0);
> + munmap(data, bo_size);
> + gem_close(fd, bo);
> + }
> +
> + if (flags & BUSY)
> + igt_assert_eq(unbind_system_allocator(), -EBUSY);
> +
> + for (i = 0; i < n_exec_queues; i++)
> + xe_exec_queue_destroy(fd, exec_queues[i]);
> +
> + if (exec_ufence)
> + munmap(exec_ufence, SZ_4K);
> +
> + if (flags & LOCK)
> + munlock(data, bo_size);
> +
> + if (file_fd != -1)
> + close(file_fd);
> +
> + if (flags & NEW && !(flags & FREE)) {
> + for (i = 0; i < n_execs; i++) {
> + if (!pending_free[i])
> + continue;
> +
> + if (flags & MMAP)
> + munmap(pending_free[i], bo_size);
> + else
> + free(pending_free[i]);
> + }
> + free(pending_free);
> + } else {
> + if (flags & MMAP)
> + munmap(data, bo_size);
> + else if (!alloc)
> + free(data);
Something seems wrong with the flags logic when skipping this ^ free() for the
allocation pointed above.
Francois
> + }
> + if (free_vm)
> + xe_vm_destroy(fd, vm);
> +}
> +
> +struct thread_data {
> + pthread_t thread;
> + pthread_mutex_t *mutex;
> + pthread_cond_t *cond;
> + pthread_barrier_t *barrier;
> + int fd;
> + struct drm_xe_engine_class_instance *eci;
> + int n_exec_queues;
> + int n_execs;
> + size_t bo_size;
> + size_t stride;
> + uint32_t vm;
> + unsigned int flags;
> + void *alloc;
> + bool *go;
> +};
> +
> +static void *thread(void *data)
> +{
> + struct thread_data *t = data;
> +
> + pthread_mutex_lock(t->mutex);
> + while (!*t->go)
> + pthread_cond_wait(t->cond, t->mutex);
> + pthread_mutex_unlock(t->mutex);
> +
> + test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
> + t->bo_size, t->stride, t->vm, t->alloc, t->barrier,
> + t->flags);
> +
> + return NULL;
> +}
> +
> +static void
> +threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> + size_t stride, unsigned int flags, bool shared_vm)
> +{
> + struct drm_xe_engine_class_instance *hwe;
> + struct thread_data *threads_data;
> + int n_engines = 0, i = 0;
> + pthread_mutex_t mutex;
> + pthread_cond_t cond;
> + pthread_barrier_t barrier;
> + uint32_t vm = 0;
> + bool go = false;
> + void *alloc = NULL;
> +
> + if ((FILE_BACKED | FORK_READ) & flags)
> + return;
> +
> + xe_for_each_engine(fd, hwe)
> + ++n_engines;
> +
> + if (shared_vm) {
> + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> + bind_system_allocator(NULL, 0);
> + }
> +
> + if (flags & SHARED_ALLOC) {
> + uint64_t alloc_size;
> +
> + igt_assert(stride);
> +
> + alloc_size = sizeof(struct test_exec_data) * stride *
> + n_execs * n_engines;
> + alloc_size = xe_bb_size(fd, alloc_size);
> + alloc = aligned_alloc(SZ_2M, alloc_size);
> + igt_assert(alloc);
> +
> + memset(alloc, 0, alloc_size);
> + flags &= ~SHARED_ALLOC;
> + }
> +
> + threads_data = calloc(n_engines, sizeof(*threads_data));
> + igt_assert(threads_data);
> +
> + pthread_mutex_init(&mutex, 0);
> + pthread_cond_init(&cond, 0);
> + pthread_barrier_init(&barrier, 0, n_engines);
> +
> + xe_for_each_engine(fd, hwe) {
> + threads_data[i].mutex = &mutex;
> + threads_data[i].cond = &cond;
> + threads_data[i].barrier = (flags & SYNC_EXEC) ? &barrier : NULL;
> + threads_data[i].fd = fd;
> + threads_data[i].eci = hwe;
> + threads_data[i].n_exec_queues = n_exec_queues;
> + threads_data[i].n_execs = n_execs;
> + threads_data[i].bo_size = bo_size;
> + threads_data[i].stride = stride;
> + threads_data[i].vm = vm;
> + threads_data[i].flags = flags;
> + threads_data[i].alloc = alloc ? alloc + i *
> + sizeof(struct test_exec_data) : NULL;
> + threads_data[i].go = &go;
> + pthread_create(&threads_data[i].thread, 0, thread,
> + &threads_data[i]);
> + ++i;
> + }
> +
> + pthread_mutex_lock(&mutex);
> + go = true;
> + pthread_cond_broadcast(&cond);
> + pthread_mutex_unlock(&mutex);
> +
> + for (i = 0; i < n_engines; ++i)
> + pthread_join(threads_data[i].thread, NULL);
> +
> + if (shared_vm) {
> + int ret;
> +
> + if (flags & MMAP) {
> + int tries = 300;
> +
> + while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
> + sleep(.01);
> + --tries;
> + }
> + igt_assert_eq(ret, 0);
> + }
> + xe_vm_destroy(fd, vm);
> + if (alloc)
> + free(alloc);
> + }
> + free(threads_data);
> +}
> +
> +static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
> + int n_execs, size_t bo_size, size_t stride,
> + unsigned int flags)
> +{
> + struct process_data *pdata;
> + int map_fd;
> + int fd;
> +
> + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> + PROT_WRITE, MAP_SHARED, map_fd, 0);
> + wait_pdata(pdata);
> +
> + fd = drm_open_driver(DRIVER_XE);
> + test_exec(fd, hwe, n_exec_queues, n_execs,
> + bo_size, stride, 0, NULL, NULL, flags);
> + drm_close_driver(fd);
> +
> + close(map_fd);
> + munmap(pdata, sizeof(*pdata));
> +}
> +
> +static void
> +processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> + size_t stride, unsigned int flags)
> +{
> + struct drm_xe_engine_class_instance *hwe;
> + struct process_data *pdata;
> + int map_fd;
> +
> + if (flags & FORK_READ)
> + return;
> +
> + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> + posix_fallocate(map_fd, 0, sizeof(*pdata));
> + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> + PROT_WRITE, MAP_SHARED, map_fd, 0);
> +
> + init_pdata(pdata, 0);
> +
> + xe_for_each_engine(fd, hwe) {
> + igt_fork(child, 1)
> + process(hwe, n_exec_queues, n_execs, bo_size,
> + stride, flags);
> + }
> +
> + signal_pdata(pdata);
> + igt_waitchildren();
> +
> + close(map_fd);
> + munmap(pdata, sizeof(*pdata));
> +}
> +
> +struct section {
> + const char *name;
> + unsigned int flags;
> +};
> +
> +igt_main
> +{
> + struct drm_xe_engine_class_instance *hwe;
> + const struct section sections[] = {
> + { "malloc", 0 },
> + { "malloc-multi-fault", MULTI_FAULT },
> + { "malloc-fork-read", FORK_READ },
> + { "malloc-fork-read-after", FORK_READ | FORK_READ_AFTER },
> + { "malloc-mlock", LOCK },
> + { "malloc-race", RACE },
> + { "malloc-busy", BUSY },
> + { "malloc-bo-unmap", BO_UNMAP },
> + { "mmap", MMAP },
> + { "mmap-remap", MMAP | MREMAP },
> + { "mmap-remap-dontunmap", MMAP | MREMAP | DONTUNMAP },
> + { "mmap-remap-ro", MMAP | MREMAP | READ_ONLY_REMAP },
> + { "mmap-remap-ro-dontunmap", MMAP | MREMAP | DONTUNMAP |
> + READ_ONLY_REMAP },
> + { "mmap-remap-eocheck", MMAP | MREMAP | EVERY_OTHER_CHECK },
> + { "mmap-remap-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
> + EVERY_OTHER_CHECK },
> + { "mmap-remap-ro-eocheck", MMAP | MREMAP | READ_ONLY_REMAP |
> + EVERY_OTHER_CHECK },
> + { "mmap-remap-ro-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
> + READ_ONLY_REMAP | EVERY_OTHER_CHECK },
> + { "mmap-huge", MMAP | HUGE_PAGE },
> + { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
> + { "mmap-shared-remap", MMAP | LOCK | MMAP_SHARED | MREMAP },
> + { "mmap-shared-remap-dontunmap", MMAP | LOCK | MMAP_SHARED |
> + MREMAP | DONTUNMAP },
> + { "mmap-shared-remap-eocheck", MMAP | LOCK | MMAP_SHARED |
> + MREMAP | EVERY_OTHER_CHECK },
> + { "mmap-shared-remap-dontunmap-eocheck", MMAP | LOCK |
> + MMAP_SHARED | MREMAP | DONTUNMAP | EVERY_OTHER_CHECK },
> + { "mmap-mlock", MMAP | LOCK },
> + { "mmap-file", MMAP | FILE_BACKED },
> + { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
> + { "mmap-race", MMAP | RACE },
> + { "free", NEW | FREE },
> + { "free-race", NEW | FREE | RACE },
> + { "new", NEW },
> + { "new-race", NEW | RACE },
> + { "new-bo-map", NEW | BO_MAP },
> + { "new-busy", NEW | BUSY },
> + { "mmap-free", MMAP | NEW | FREE },
> + { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
> + { "mmap-free-race", MMAP | NEW | FREE | RACE },
> + { "mmap-new", MMAP | NEW },
> + { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
> + { "mmap-new-race", MMAP | NEW | RACE },
> + { "malloc-nomemset", SKIP_MEMSET },
> + { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
> + { "malloc-race-nomemset", SKIP_MEMSET | RACE },
> + { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
> + { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
> + { "mmap-nomemset", SKIP_MEMSET | MMAP },
> + { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
> + { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
> + { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
> + { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
> + { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
> + { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
> + { "free-nomemset", SKIP_MEMSET | NEW | FREE },
> + { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
> + { "new-nomemset", SKIP_MEMSET | NEW },
> + { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
> + { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
> + { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
> + { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
> + { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
> + { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
> + { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
> + { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
> + { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
> + { NULL },
> + };
> + const struct section psections[] = {
> + { "munmap-cpu-fault", CPU_FAULT },
> + { "munmap-no-cpu-fault", 0 },
> + { "remap-cpu-fault", CPU_FAULT | REMAP },
> + { "remap-no-cpu-fault", REMAP },
> + { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
> + { "middle-munmap-no-cpu-fault", MIDDLE },
> + { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
> + { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
> + { NULL },
> + };
> + const struct section esections[] = {
> + { "malloc", 0 },
> + { "malloc-mix-bo", MIX_BO_ALLOC },
> + { NULL },
> + };
> + int fd;
> +
> + igt_fixture {
> + struct xe_device *xe;
> +
> + fd = drm_open_driver(DRIVER_XE);
> + igt_require(!xe_supports_faults(fd));
> +
> + xe = xe_device_get(fd);
> + va_bits = xe->va_bits;
> + }
> +
> + for (const struct section *s = sections; s->name; s++) {
> + igt_subtest_f("once-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("once-large-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 1, SZ_2M, 0, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("twice-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("twice-large-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 2, SZ_2M, 0, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("many-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("many-stride-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("many-execqueues-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("many-large-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("many-large-execqueues-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL,
> + NULL, s->flags);
> +
> + igt_subtest_f("threads-many-%s", s->name)
> + threads(fd, 1, 128, 0, 0, s->flags, false);
> +
> + igt_subtest_f("threads-many-stride-%s", s->name)
> + threads(fd, 1, 128, 0, 256, s->flags, false);
> +
> + igt_subtest_f("threads-many-execqueues-%s", s->name)
> + threads(fd, 16, 128, 0, 0, s->flags, false);
> +
> + igt_subtest_f("threads-many-large-%s", s->name)
> + threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
> +
> + igt_subtest_f("threads-many-large-execqueues-%s", s->name)
> + threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
> +
> + igt_subtest_f("threads-shared-vm-many-%s", s->name)
> + threads(fd, 1, 128, 0, 0, s->flags, true);
> +
> + igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
> + threads(fd, 1, 128, 0, 256, s->flags, true);
> +
> + igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
> + threads(fd, 16, 128, 0, 0, s->flags, true);
> +
> + igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
> + threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
> +
> + igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
> + threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
> +
> + igt_subtest_f("process-many-%s", s->name)
> + processes(fd, 1, 128, 0, 0, s->flags);
> +
> + igt_subtest_f("process-many-stride-%s", s->name)
> + processes(fd, 1, 128, 0, 256, s->flags);
> +
> + igt_subtest_f("process-many-execqueues-%s", s->name)
> + processes(fd, 16, 128, 0, 0, s->flags);
> +
> + igt_subtest_f("process-many-large-%s", s->name)
> + processes(fd, 1, 128, SZ_2M, 0, s->flags);
> +
> + igt_subtest_f("process-many-large-execqueues-%s", s->name)
> + processes(fd, 16, 128, SZ_2M, 0, s->flags);
> + }
> +
> + igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
> + threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
> +
> + igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
> + threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
> +
> + igt_subtest("threads-shared-alloc-many-stride-malloc")
> + threads(fd, 1, 128, 0, 256, SHARED_ALLOC, false);
> +
> + igt_subtest("threads-shared-alloc-many-stride-malloc-sync")
> + threads(fd, 1, 128, 0, 256, SHARED_ALLOC | SYNC_EXEC, false);
> +
> + igt_subtest("threads-shared-alloc-many-stride-malloc-race")
> + threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, false);
> +
> + igt_subtest_f("fault")
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, NULL,
> + FAULT);
> +
> + for (const struct section *s = psections; s->name; s++) {
> + igt_subtest_f("partial-%s", s->name)
> + xe_for_each_engine(fd, hwe)
> + partial(fd, hwe, s->flags);
> + }
> +
> + igt_subtest_f("unaligned-alloc")
> + xe_for_each_engine(fd, hwe) {
> + many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
> + SZ_1M + SZ_512K, SZ_4K, NULL, 0);
> + break;
> + }
> +
> + igt_subtest_f("fault-benchmark")
> + xe_for_each_engine(fd, hwe)
> + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> + BENCHMARK);
> +
> + igt_subtest_f("fault-threads-benchmark")
> + xe_for_each_engine(fd, hwe)
> + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> + BENCHMARK | CPU_FAULT_THREADS);
> +
> + igt_subtest_f("fault-threads-same-page-benchmark")
> + xe_for_each_engine(fd, hwe)
> + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> + BENCHMARK | CPU_FAULT_THREADS |
> + CPU_FAULT_SAME_PAGE);
> +
> + igt_subtest_f("fault-process-benchmark")
> + xe_for_each_engine(fd, hwe)
> + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> + BENCHMARK | CPU_FAULT_PROCESS);
> +
> + igt_subtest_f("fault-process-same-page-benchmark")
> + xe_for_each_engine(fd, hwe)
> + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> + BENCHMARK | CPU_FAULT_PROCESS |
> + CPU_FAULT_SAME_PAGE);
> +
> + for (const struct section *s = esections; s->name; s++) {
> + igt_subtest_f("evict-%s", s->name)
> + xe_for_each_engine(fd, hwe) {
> + many_allocs(fd, hwe,
> + xe_visible_vram_size(fd, hwe->gt_id),
> + SZ_8M, SZ_1M, NULL, s->flags);
> + break;
> + }
> + }
> +
> + for (const struct section *s = esections; s->name; s++) {
> + igt_subtest_f("processes-evict-%s", s->name)
> + processes_evict(fd, SZ_8M, SZ_1M, s->flags);
> + }
> +
> + igt_fixture {
> + xe_device_put(fd);
> + drm_close_driver(fd);
> + }
> +}
> diff --git a/tests/meson.build b/tests/meson.build
> index 9224145cf4..8c7b756716 100644
> --- a/tests/meson.build
> +++ b/tests/meson.build
> @@ -295,6 +295,7 @@ intel_xe_progs = [
> 'xe_exec_reset',
> 'xe_exec_sip',
> 'xe_exec_store',
> + 'xe_exec_system_allocator',
> 'xe_exec_threads',
> 'xe_exercise_blt',
> 'xe_fault_injection',
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH] tests/xe: Add system_allocator test
2025-04-18 15:47 ` Francois Dugast
@ 2025-04-18 19:44 ` Matthew Brost
2025-04-24 19:28 ` Francois Dugast
0 siblings, 1 reply; 16+ messages in thread
From: Matthew Brost @ 2025-04-18 19:44 UTC (permalink / raw)
To: Francois Dugast; +Cc: igt-dev
On Fri, Apr 18, 2025 at 05:47:55PM +0200, Francois Dugast wrote:
> Hi Matt,
>
> I am still going through your patch but sharing some comments already.
>
> The sequence is neither complex nor too different from existing tests
> but as it is a lot of multi-thread / multi-process code, I am trying
> to come up with a suggestion to break it down. Might not be possible
> though.
>
Good to hear you don't find it too complex, open spliting if it makes
sense.
> On Tue, Apr 15, 2025 at 07:20:40PM -0700, Matthew Brost wrote:
> > Test various uses of system allocator in single thread, multiple
> > threads, and multiple processes.
> >
> > Features tested:
> > - Malloc with various size
> > - Mmap with various sizes and flags including file backed mappings
> > - Mixing BO allocations with system allocator
> > - Various page sizes
> > - Dynamically freeing / unmapping memory
> > - Sharing VM across threads
> > - Faults racing on different hardware engines / GTs / Tiles
> > - GPU faults and CPU faults racing
> > - CPU faults on multiple threads racing
> > - CPU faults on multiple process racing
> > - GPU faults of memory not faulted in by CPU
> > - Partial unmap of allocations
> > - Attempting to unmap system allocations when GPU has mappings
> > - Eviction of both system allocations and BOs
> > - Forking child processes and reading data from VRAM
> > - mremap data in VRAM
> > - Protection changes
> > - Multiple faults per execbuf
> >
> > Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
> >
> > v2:
> > - Rebase
> > - Fix memory allocation to not interfear with malloc (Thomas)
> >
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > include/drm-uapi/xe_drm.h | 57 +-
> > lib/xe/xe_ioctl.c | 12 +
> > lib/xe/xe_ioctl.h | 1 +
> > tests/intel/xe_exec_system_allocator.c | 1832 ++++++++++++++++++++++++
> > tests/meson.build | 1 +
> > 5 files changed, 1896 insertions(+), 7 deletions(-)
> > create mode 100644 tests/intel/xe_exec_system_allocator.c
> >
> > diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
> > index 154f947ef0..9c08738c3b 100644
> > --- a/include/drm-uapi/xe_drm.h
> > +++ b/include/drm-uapi/xe_drm.h
> > @@ -3,8 +3,8 @@
> > * Copyright © 2023 Intel Corporation
> > */
> >
> > -#ifndef _XE_DRM_H_
> > -#define _XE_DRM_H_
> > +#ifndef _UAPI_XE_DRM_H_
> > +#define _UAPI_XE_DRM_H_
>
> Nit: The header seems to have been copied directly from the kernel tree, instead
> it should be generated with:
>
> make headers_install
>
> https://docs.kernel.org/kbuild/headers_install.html
>
Let me split out the uAPI update into its own patch + use the proper
flow.
> >
> > #include "drm.h"
> >
> > @@ -134,7 +134,7 @@ extern "C" {
> > * redefine the interface more easily than an ever growing struct of
> > * increasing complexity, and for large parts of that interface to be
> > * entirely optional. The downside is more pointer chasing; chasing across
> > - * the boundary with pointers encapsulated inside u64.
> > + * the __user boundary with pointers encapsulated inside u64.
>
> See above comment on make headers_install.
>
+1
> > *
> > * Example chaining:
> > *
> > @@ -393,6 +393,10 @@ struct drm_xe_query_mem_regions {
> > *
> > * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
> > * has usable VRAM
> > + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
> > + * has low latency hint support
> > + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
> > + * device has CPU address mirroring support
> > * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
> > * required by this device, typically SZ_4K or SZ_64K
> > * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
> > @@ -409,6 +413,8 @@ struct drm_xe_query_config {
> > #define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
> > #define DRM_XE_QUERY_CONFIG_FLAGS 1
> > #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
> > + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
> > + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
> > #define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
> > #define DRM_XE_QUERY_CONFIG_VA_BITS 3
> > #define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
> > @@ -911,7 +917,11 @@ struct drm_xe_gem_mmap_offset {
> > * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
> > *
> > * The @flags can be:
> > - * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
> > + * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
> > + * space of the VM to scratch page. A vm_bind would overwrite the scratch
> > + * page mapping. This flag is mutually exclusive with the
> > + * %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception of on x2 and
> > + * xe3 platform.
> > * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
> > * exec submissions to its exec_queues that don't have an upper time
> > * limit on the job execution time. But exec submissions to these
> > @@ -987,6 +997,12 @@ struct drm_xe_vm_destroy {
> > * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
> > * reject the binding if the encryption key is no longer valid. This
> > * flag has no effect on BOs that are not marked as using PXP.
> > + * - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
> > + * set, no mappings are created rather the range is reserved for CPU address
> > + * mirroring which will be populated on GPU page faults or prefetches. Only
> > + * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
> > + * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
> > + * handle MBZ, and the BO offset MBZ.
> > */
> > struct drm_xe_vm_bind_op {
> > /** @extensions: Pointer to the first extension struct, if any */
> > @@ -1039,7 +1055,9 @@ struct drm_xe_vm_bind_op {
> > * on the @pat_index. For such mappings there is no actual memory being
> > * mapped (the address in the PTE is invalid), so the various PAT memory
> > * attributes likely do not apply. Simply leaving as zero is one
> > - * option (still a valid pat_index).
> > + * option (still a valid pat_index). Same applies to
> > + * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mapping
> > + * there is no actual memory being mapped.
> > */
> > __u16 pat_index;
> >
> > @@ -1055,6 +1073,14 @@ struct drm_xe_vm_bind_op {
> >
> > /** @userptr: user pointer to bind on */
> > __u64 userptr;
> > +
> > + /**
> > + * @cpu_addr_mirror_offset: Offset from GPU @addr to create
> > + * CPU address mirror mappings. MBZ with current level of
> > + * support (e.g. 1 to 1 mapping between GPU and CPU mappings
> > + * only supported).
> > + */
> > + __s64 cpu_addr_mirror_offset;
> > };
> >
> > /**
> > @@ -1078,6 +1104,7 @@ struct drm_xe_vm_bind_op {
> > #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
> > #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
> > #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
> > +#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
> > /** @flags: Bind flags */
> > __u32 flags;
> >
> > @@ -1205,6 +1232,21 @@ struct drm_xe_vm_bind {
> > * };
> > * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> > *
> > + * Allow users to provide a hint to kernel for cases demanding low latency
> > + * profile. Please note it will have impact on power consumption. User can
> > + * indicate low latency hint with flag while creating exec queue as
> > + * mentioned below,
> > + *
> > + * struct drm_xe_exec_queue_create exec_queue_create = {
> > + * .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
> > + * .extensions = 0,
> > + * .vm_id = vm,
> > + * .num_bb_per_exec = 1,
> > + * .num_eng_per_bb = 1,
> > + * .instances = to_user_pointer(&instance),
> > + * };
> > + * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> > + *
> > */
> > struct drm_xe_exec_queue_create {
> > #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
> > @@ -1223,7 +1265,8 @@ struct drm_xe_exec_queue_create {
> > /** @vm_id: VM to use for this exec queue */
> > __u32 vm_id;
> >
> > - /** @flags: MBZ */
> > +#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
> > + /** @flags: flags to use for this exec queue */
> > __u32 flags;
> >
> > /** @exec_queue_id: Returned exec queue ID */
> > @@ -1926,4 +1969,4 @@ struct drm_xe_query_eu_stall {
> > }
> > #endif
> >
> > -#endif /* _XE_DRM_H_ */
> > +#endif /* _UAPI_XE_DRM_H_ */
> > diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> > index fb8c4aef13..785fc9184c 100644
> > --- a/lib/xe/xe_ioctl.c
> > +++ b/lib/xe/xe_ioctl.c
> > @@ -440,6 +440,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
> > return __xe_bo_map(fd, bo, size, PROT_WRITE);
> > }
> >
> > +void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
> > +{
> > + uint64_t mmo;
> > + void *map;
> > +
> > + mmo = xe_bo_mmap_offset(fd, bo);
> > + map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
> > + igt_assert(map != MAP_FAILED);
> > +
> > + return map;
> > +}
> > +
> > void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
> > {
> > return __xe_bo_map(fd, bo, size, prot);
> > diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
> > index 9bdf73b2bd..554a33c9cd 100644
> > --- a/lib/xe/xe_ioctl.h
> > +++ b/lib/xe/xe_ioctl.h
> > @@ -86,6 +86,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
> > void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
> > uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
> > void *xe_bo_map(int fd, uint32_t bo, size_t size);
> > +void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
> > void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
> > int __xe_exec(int fd, struct drm_xe_exec *exec);
> > void xe_exec(int fd, struct drm_xe_exec *exec);
> > diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
> > new file mode 100644
> > index 0000000000..14fa59353e
> > --- /dev/null
> > +++ b/tests/intel/xe_exec_system_allocator.c
> > @@ -0,0 +1,1832 @@
> > +// SPDX-License-Identifier: MIT
> > +/*
> > + * Copyright © 2024 Intel Corporation
> > + */
> > +
> > +/**
> > + * TEST: Basic tests for execbuf functionality using system allocator
> > + * Category: Hardware building block
> > + * Mega feature: Shared virtual memory
> > + * Sub-category: execbuf
> > + * Functionality: fault mode, system allocator
> > + * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
> > + */
> > +
> > +#include <fcntl.h>
> > +#include <linux/mman.h>
> > +#include <time.h>
> > +
> > +#include "igt.h"
> > +#include "lib/igt_syncobj.h"
> > +#include "lib/intel_reg.h"
> > +#include "xe_drm.h"
> > +
> > +#include "xe/xe_ioctl.h"
> > +#include "xe/xe_query.h"
> > +#include <string.h>
> > +
> > +#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> > +#define QUARTER_SEC (NSEC_PER_SEC / 4)
> > +#define FIVE_SEC (5LL * NSEC_PER_SEC)
> > +
> > +struct batch_data {
> > + uint32_t batch[16];
> > + uint64_t pad;
> > + uint32_t data;
> > + uint32_t expected_data;
> > +};
> > +
> > +#define WRITE_VALUE(data__, i__) ({ \
> > + if (!(data__)->expected_data) \
> > + (data__)->expected_data = rand() << 12 | (i__); \
> > + (data__)->expected_data; \
> > +})
> > +#define READ_VALUE(data__, i__) ((data__)->expected_data)
> > +
> > +static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> > + int *idx)
> > +{
> > + batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
> > + batch[(*idx)++] = sdi_addr;
> > + batch[(*idx)++] = sdi_addr >> 32;
> > + batch[(*idx)++] = wdata;
> > +}
> > +
> > +static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> > + int *idx)
> > +{
> > + __write_dword(batch, sdi_addr, wdata, idx);
> > + batch[(*idx)++] = MI_BATCH_BUFFER_END;
> > +}
>
> Slightly out of scope for this review but the 2 functions above might be
> helpful under lib/ to prevent adding more duplications of the dword write
> batch sequence.
>
Yea we could split out generic batch writing functions into a library at
some point but agree is probably out of scope for this series.
> > +
> > +static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride,
> > + pthread_barrier_t *barrier)
> > +{
> > + int i, n_writes = alloc_size / stride;
> > +
> > + for (i = 0; i < n_writes; ++i) {
> > + struct batch_data *data = ptr + i * stride;
> > +
> > + igt_assert_eq(data->data, READ_VALUE(data, i));
> > +
> > + if (barrier)
> > + pthread_barrier_wait(barrier);
> > + }
> > +}
> > +
> > +#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
>
> Might be worth creating and propagating a unique file name at runtime, for
> example with mkstemp(), in order to avoid potential concurrent accesses from
> multiple instances of the test.
>
Let me look into that.
> > +
> > +struct process_data {
> > + pthread_mutex_t mutex;
> > + pthread_cond_t cond;
> > + pthread_barrier_t barrier;
> > + bool go;
> > +};
> > +
> > +static void wait_pdata(struct process_data *pdata)
> > +{
> > + pthread_mutex_lock(&pdata->mutex);
> > + while (!pdata->go)
> > + pthread_cond_wait(&pdata->cond, &pdata->mutex);
> > + pthread_mutex_unlock(&pdata->mutex);
> > +}
> > +
> > +static void init_pdata(struct process_data *pdata, int n_engine)
> > +{
> > + pthread_mutexattr_t mutex_attr;
> > + pthread_condattr_t cond_attr;
> > + pthread_barrierattr_t barrier_attr;
> > +
> > + pthread_mutexattr_init(&mutex_attr);
> > + pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
> > + pthread_mutex_init(&pdata->mutex, &mutex_attr);
> > +
> > + pthread_condattr_init(&cond_attr);
> > + pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
> > + pthread_cond_init(&pdata->cond, &cond_attr);
> > +
> > + pthread_barrierattr_init(&barrier_attr);
> > + pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
> > + pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
> > +
> > + pdata->go = false;
> > +}
> > +
> > +static void signal_pdata(struct process_data *pdata)
> > +{
> > + pthread_mutex_lock(&pdata->mutex);
> > + pdata->go = true;
> > + pthread_cond_broadcast(&pdata->cond);
> > + pthread_mutex_unlock(&pdata->mutex);
> > +}
> > +
> > +/* many_alloc flags */
> > +#define MIX_BO_ALLOC (0x1 << 0)
> > +#define BENCHMARK (0x1 << 1)
> > +#define CPU_FAULT_THREADS (0x1 << 2)
> > +#define CPU_FAULT_PROCESS (0x1 << 3)
> > +#define CPU_FAULT_SAME_PAGE (0x1 << 4)
> > +
> > +static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
> > + unsigned int flags)
> > +{
> > + struct process_data *pdata;
> > + int map_fd;
> > +
> > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > + wait_pdata(pdata);
> > +
> > + if (flags & CPU_FAULT_SAME_PAGE)
> > + check_all_pages(ptr, alloc_size, stride, &pdata->barrier);
> > + else
> > + check_all_pages(ptr, alloc_size, stride, NULL);
> > +
> > + close(map_fd);
> > + munmap(pdata, sizeof(*pdata));
> > +}
> > +
> > +static void
> > +check_all_pages_process(void *ptr, uint64_t alloc_size, uint64_t stride,
> > + int n_process, unsigned int flags)
> > +{
> > + struct process_data *pdata;
> > + int map_fd, i;
> > +
> > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > +
> > + init_pdata(pdata, n_process);
> > +
> > + for (i = 0; i < n_process; ++i) {
> > + igt_fork(child, 1)
> > + if (flags & CPU_FAULT_SAME_PAGE)
> > + process_check(ptr, alloc_size, stride, flags);
> > + else
> > + process_check(ptr + stride * i, alloc_size,
> > + stride * n_process, flags);
> > + }
> > +
> > + signal_pdata(pdata);
> > + igt_waitchildren();
> > +
> > + close(map_fd);
> > + munmap(pdata, sizeof(*pdata));
> > +}
> > +
> > +struct thread_check_data {
> > + pthread_t thread;
> > + pthread_mutex_t *mutex;
> > + pthread_cond_t *cond;
> > + pthread_barrier_t *barrier;
> > + void *ptr;
> > + uint64_t alloc_size;
> > + uint64_t stride;
> > + bool *go;
> > +};
> > +
> > +static void *thread_check(void *data)
> > +{
> > + struct thread_check_data *t = data;
> > +
> > + pthread_mutex_lock(t->mutex);
> > + while (!*t->go)
> > + pthread_cond_wait(t->cond, t->mutex);
> > + pthread_mutex_unlock(t->mutex);
> > +
> > + check_all_pages(t->ptr, t->alloc_size, t->stride, t->barrier);
> > +
> > + return NULL;
> > +}
> > +
> > +/*
> > + * Partition checking of results in chunks which causes multiple threads to
> > + * fault same VRAM allocation in parallel.
> > + */
> > +static void
> > +check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
> > + int n_threads, unsigned int flags)
> > +{
> > + struct thread_check_data *threads_check_data;
> > + pthread_mutex_t mutex;
> > + pthread_cond_t cond;
> > + pthread_barrier_t barrier;
> > + int i;
> > + bool go = false;
> > +
> > + threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
> > + igt_assert(threads_check_data);
> > +
> > + pthread_mutex_init(&mutex, 0);
> > + pthread_cond_init(&cond, 0);
> > + pthread_barrier_init(&barrier, 0, n_threads);
> > +
> > + for (i = 0; i < n_threads; ++i) {
> > + threads_check_data[i].mutex = &mutex;
> > + threads_check_data[i].cond = &cond;
> > + if (flags & CPU_FAULT_SAME_PAGE) {
> > + threads_check_data[i].barrier = &barrier;
> > + threads_check_data[i].ptr = ptr;
> > + threads_check_data[i].alloc_size = alloc_size;
> > + threads_check_data[i].stride = stride;
> > + } else {
> > + threads_check_data[i].barrier = NULL;
> > + threads_check_data[i].ptr = ptr + stride * i;
> > + threads_check_data[i].alloc_size = alloc_size;
> > + threads_check_data[i].stride = n_threads * stride;
> > + }
> > + threads_check_data[i].go = &go;
> > +
> > + pthread_create(&threads_check_data[i].thread, 0, thread_check,
> > + &threads_check_data[i]);
> > + }
> > +
> > + pthread_mutex_lock(&mutex);
> > + go = true;
> > + pthread_cond_broadcast(&cond);
> > + pthread_mutex_unlock(&mutex);
> > +
> > + for (i = 0; i < n_threads; ++i)
> > + pthread_join(threads_check_data[i].thread, NULL);
> > + free(threads_check_data);
> > +}
> > +
> > +static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> > + uint64_t alloc_size, uint64_t stride,
> > + struct timespec *tv, uint64_t *submit)
> > +{
> > + struct drm_xe_sync sync[1] = {
> > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
> > + .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > + .timeline_value = USER_FENCE_VALUE },
> > + };
> > + struct drm_xe_exec exec = {
> > + .num_batch_buffer = 1,
> > + .num_syncs = 0,
> > + .exec_queue_id = exec_queue,
> > + .syncs = to_user_pointer(&sync),
> > + };
> > + uint64_t addr = to_user_pointer(ptr);
> > + int i, ret, n_writes = alloc_size / stride;
> > + u64 *exec_ufence = NULL;
> > + int64_t timeout = FIVE_SEC;
> > +
> > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > + PROT_WRITE, MAP_SHARED |
> > + MAP_ANONYMOUS, -1, 0);
> > + igt_assert(exec_ufence != MAP_FAILED);
> > + memset(exec_ufence, 0, SZ_4K);
> > + sync[0].addr = to_user_pointer(exec_ufence);
> > +
> > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > + struct batch_data *data = ptr + i * stride;
> > + uint64_t sdi_offset = (char *)&data->data - (char *)data;
> > + uint64_t sdi_addr = addr + sdi_offset;
> > + int b = 0;
> > +
> > + write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
> > + igt_assert(b <= ARRAY_SIZE(data->batch));
> > + }
> > +
> > + igt_nsec_elapsed(tv);
> > + *submit = igt_nsec_elapsed(tv);
> > +
> > + addr = to_user_pointer(ptr);
> > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > + struct batch_data *data = ptr + i * stride;
> > + uint64_t batch_offset = (char *)&data->batch - (char *)data;
> > + uint64_t batch_addr = addr + batch_offset;
> > +
> > + exec.address = batch_addr;
> > + if (i + 1 == n_writes)
> > + exec.num_syncs = 1;
> > + xe_exec(fd, &exec);
> > + }
> > +
> > + ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
> > + &timeout);
> > + if (ret) {
> > + printf("FAIL EXEC_UFENCE_ADDR: 0x%016llx\n", sync[0].addr);
> > + printf("FAIL EXEC_UFENCE: EXPEXCTED=0x%016llx, ACTUAL=0x%016lx\n",
> > + USER_FENCE_VALUE, exec_ufence[0]);
> > +
> > + addr = to_user_pointer(ptr);
> > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > + struct batch_data *data = ptr + i * stride;
> > + uint64_t batch_offset = (char *)&data->batch - (char *)data;
> > + uint64_t batch_addr = addr + batch_offset;
> > + uint64_t sdi_offset = (char *)&data->data - (char *)data;
> > + uint64_t sdi_addr = addr + sdi_offset;
> > +
> > + printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
> > + printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
> > + printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
> > + (((u64)data->batch[2]) << 32) | data->batch[1]);
> > + printf("FAIL DARA: EXPEXCTED=0x%08x, ACTUAL=0x%08x\n",
> > + data->expected_data, data->data);
> > + }
> > + igt_assert_eq(ret, 0);
> > + }
> > + munmap(exec_ufence, SZ_4K);
> > +}
> > +
> > +static int va_bits;
> > +
> > +#define bind_system_allocator(__sync, __num_sync) \
> > + __xe_vm_bind_assert(fd, vm, 0, \
> > + 0, 0, 0, 0x1ull << va_bits, \
> > + DRM_XE_VM_BIND_OP_MAP, \
> > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, \
> > + (__sync), (__num_sync), 0, 0)
> > +
> > +#define unbind_system_allocator() \
> > + __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits, \
> > + DRM_XE_VM_BIND_OP_UNMAP, 0, \
> > + NULL, 0, 0, 0, 0)
>
> Is there a reason here to favor static variable + macros over helper function
> with parameters?
>
va_bits is static as it looked up exactly once when the test loads.
I could change these helpers to static functions rather than macros if
that is preferred.
> > +
> > +#define odd(__i) (__i & 1)
> > +
> > +struct aligned_alloc_type {
> > + void *__ptr;
> > + void *ptr;
> > + size_t __size;
> > + size_t size;
> > +};
> > +
> > +static struct aligned_alloc_type __aligned_alloc(size_t alignment, size_t size)
> > +{
> > + struct aligned_alloc_type aligned_alloc_type;
> > +
> > + aligned_alloc_type.__ptr = mmap(NULL, alignment + size, PROT_NONE, MAP_PRIVATE |
> > + MAP_ANONYMOUS, -1, 0);
> > + igt_assert(aligned_alloc_type.__ptr != MAP_FAILED);
> > +
> > + aligned_alloc_type.ptr = (void *)ALIGN((uint64_t)aligned_alloc_type.__ptr, alignment);
> > + aligned_alloc_type.size = size;
> > + aligned_alloc_type.__size = size + alignment;
> > +
> > + return aligned_alloc_type;
> > +}
> > +
> > +static void __aligned_free(struct aligned_alloc_type *aligned_alloc_type)
> > +{
> > + munmap(aligned_alloc_type->__ptr, aligned_alloc_type->__size);
> > +}
> > +
> > +static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_type)
> > +{
> > + size_t begin_size = (size_t)(aligned_alloc_type->ptr - aligned_alloc_type->__ptr);
> > +
> > + if (begin_size)
> > + munmap(aligned_alloc_type->__ptr, begin_size);
> > + if (aligned_alloc_type->__size - aligned_alloc_type->size - begin_size)
> > + munmap(aligned_alloc_type->ptr + aligned_alloc_type->size,
> > + aligned_alloc_type->__size - aligned_alloc_type->size - begin_size);
> > +}
> > +
> > +/**
> > + * SUBTEST: unaligned-alloc
> > + * Description: allocate unaligned sizes of memory
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: fault-benchmark
> > + * Description: Benchmark how long GPU / CPU take
> > + * Test category: performance test
> > + *
> > + * SUBTEST: fault-threads-benchmark
> > + * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
> > + * Test category: performance and functionality test
> > + *
> > + * SUBTEST: fault-threads-same-page-benchmark
> > + * Description: Benchmark how long GPU / CPU take, reading results with multiple threads, hammer same page
> > + * Test category: performance and functionality test
> > + *
> > + * SUBTEST: fault-process-benchmark
> > + * Description: Benchmark how long GPU / CPU take, reading results with multiple process
> > + * Test category: performance and functionality test
> > + *
> > + * SUBTEST: fault-process-same-page-benchmark
> > + * Description: Benchmark how long GPU / CPU take, reading results with multiple process, hammer same page
> > + * Test category: performance and functionality test
> > + *
> > + * SUBTEST: evict-malloc
> > + * Description: trigger eviction of VRAM allocated via malloc
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: evict-malloc-mix-bo
> > + * Description: trigger eviction of VRAM allocated via malloc and BO create
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: processes-evict-malloc
> > + * Description: multi-process trigger eviction of VRAM allocated via malloc
> > + * Test category: stress test
> > + *
> > + * SUBTEST: processes-evict-malloc-mix-bo
> > + * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
> > + * Test category: stress test
> > + */
> > +
> > +static void
> > +many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> > + uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
> > + pthread_barrier_t *barrier, unsigned int flags)
> > +{
> > + uint32_t vm, exec_queue;
> > + int num_allocs = flags & BENCHMARK ? 1 :
> > + (9 * (total_alloc / alloc_size)) / 8;
> > + struct aligned_alloc_type *allocs;
> > + uint32_t *bos = NULL;
> > + struct timespec tv = {};
> > + uint64_t submit, read, elapsed;
> > + int i;
> > +
> > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> > +
> > + bind_system_allocator(NULL, 0);
> > +
> > + allocs = malloc(sizeof(*allocs) * num_allocs);
> > + igt_assert(allocs);
> > + memset(allocs, 0, sizeof(*allocs) * num_allocs);
> > +
> > + if (flags & MIX_BO_ALLOC) {
> > + bos = malloc(sizeof(*bos) * num_allocs);
> > + igt_assert(bos);
> > + memset(bos, 0, sizeof(*bos) * num_allocs);
> > + }
> > +
> > + for (i = 0; i < num_allocs; ++i) {
> > + struct aligned_alloc_type alloc;
> > +
> > + if (flags & MIX_BO_ALLOC && odd(i)) {
> > + uint32_t bo_flags =
> > + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > +
> > + alloc = __aligned_alloc(SZ_2M, alloc_size);
> > + igt_assert(alloc.ptr);
> > +
> > + bos[i] = xe_bo_create(fd, vm, alloc_size,
> > + vram_if_possible(fd, eci->gt_id),
> > + bo_flags);
> > + alloc.ptr = xe_bo_map_fixed(fd, bos[i], alloc_size,
> > + to_user_pointer(alloc.ptr));
> > + xe_vm_bind_async(fd, vm, 0, bos[i], 0,
> > + to_user_pointer(alloc.ptr),
> > + alloc_size, 0, 0);
> > + } else {
> > + alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
> > + igt_assert(alloc.ptr);
> > + }
> > + allocs[i] = alloc;
> > +
> > + touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> > + &tv, &submit);
> > + }
> > +
> > + if (barrier)
> > + pthread_barrier_wait(barrier);
> > +
> > + for (i = 0; i < num_allocs; ++i) {
> > + if (flags & BENCHMARK)
> > + read = igt_nsec_elapsed(&tv);
> > +#define NUM_CHECK_THREADS 8
> > + if (flags & CPU_FAULT_PROCESS)
> > + check_all_pages_process(allocs[i].ptr, alloc_size, stride,
> > + NUM_CHECK_THREADS, flags);
> > + else if (flags & CPU_FAULT_THREADS)
> > + check_all_pages_threads(allocs[i].ptr, alloc_size, stride,
> > + NUM_CHECK_THREADS, flags);
> > + else
> > + check_all_pages(allocs[i].ptr, alloc_size, stride, NULL);
> > + if (flags & BENCHMARK) {
> > + elapsed = igt_nsec_elapsed(&tv);
> > + printf("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
> > + 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
> > + 1e-3 * (elapsed - submit),
> > + 1e-3 * (elapsed - read));
> > + }
> > + if (bos && bos[i]) {
> > + __aligned_free(allocs + i);
> > + gem_close(fd, bos[i]);
> > + } else {
> > + free(allocs[i].ptr);
> > + }
> > + }
> > + if (bos)
> > + free(bos);
> > + free(allocs);
> > + xe_exec_queue_destroy(fd, exec_queue);
> > + xe_vm_destroy(fd, vm);
> > +}
> > +
> > +static void process_evict(struct drm_xe_engine_class_instance *hwe,
> > + uint64_t total_alloc, uint64_t alloc_size,
> > + uint64_t stride, unsigned int flags)
> > +{
> > + struct process_data *pdata;
> > + int map_fd;
> > + int fd;
> > +
> > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > + wait_pdata(pdata);
> > +
> > + fd = drm_open_driver(DRIVER_XE);
> > + many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
> > + flags);
> > + drm_close_driver(fd);
> > +
> > + close(map_fd);
> > + munmap(pdata, sizeof(*pdata));
> > +}
> > +
> > +static void
> > +processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
> > + unsigned int flags)
> > +{
> > + struct drm_xe_engine_class_instance *hwe;
> > + struct process_data *pdata;
> > + int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
> > + int map_fd;
> > +
> > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > +
> > + xe_for_each_engine(fd, hwe) {
> > + igt_assert(hwe->gt_id < 2);
> > + n_engine_gt[hwe->gt_id]++;
> > + n_engine++;
> > + }
> > +
> > + init_pdata(pdata, n_engine);
> > +
> > + xe_for_each_engine(fd, hwe) {
> > + igt_fork(child, 1)
> > + process_evict(hwe,
> > + xe_visible_vram_size(fd, hwe->gt_id) /
> > + n_engine_gt[hwe->gt_id], alloc_size,
> > + stride, flags);
> > + }
> > +
> > + signal_pdata(pdata);
> > + igt_waitchildren();
> > +
> > + close(map_fd);
> > + munmap(pdata, sizeof(*pdata));
> > +}
> > +
> > +#define CPU_FAULT (0x1 << 0)
> > +#define REMAP (0x1 << 1)
> > +#define MIDDLE (0x1 << 2)
> > +
> > +/**
> > + * SUBTEST: partial-munmap-cpu-fault
> > + * Description: munmap partially with cpu access in between
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: partial-munmap-no-cpu-fault
> > + * Description: munmap partially with no cpu access in between
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: partial-remap-cpu-fault
> > + * Description: remap partially with cpu access in between
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: partial-remap-no-cpu-fault
> > + * Description: remap partially with no cpu access in between
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: partial-middle-munmap-cpu-fault
> > + * Description: munmap middle with cpu access in between
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: partial-middle-munmap-no-cpu-fault
> > + * Description: munmap middle with no cpu access in between
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: partial-middle-remap-cpu-fault
> > + * Description: remap middle with cpu access in between
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: partial-middle-remap-no-cpu-fault
> > + * Description: remap middle with no cpu access in between
> > + * Test category: functionality test
> > + */
> > +
> > +static void
> > +partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> > +{
> > + struct drm_xe_sync sync[1] = {
> > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > + .timeline_value = USER_FENCE_VALUE },
> > + };
> > + struct drm_xe_exec exec = {
> > + .num_batch_buffer = 1,
> > + .num_syncs = 1,
> > + .syncs = to_user_pointer(sync),
> > + };
> > + struct {
> > + uint32_t batch[16];
> > + uint64_t pad;
> > + uint64_t vm_sync;
> > + uint64_t exec_sync;
> > + uint32_t data;
> > + uint32_t expected_data;
> > + } *data;
> > + size_t bo_size = SZ_2M, unmap_offset = 0;
> > + uint32_t vm, exec_queue;
> > + u64 *exec_ufence = NULL;
> > + int i;
> > + void *old, *new = NULL;
> > + struct aligned_alloc_type alloc;
> > +
> > + if (flags & MIDDLE)
> > + unmap_offset = bo_size / 4;
> > +
> > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > +
> > + alloc = __aligned_alloc(bo_size, bo_size);
> > + igt_assert(alloc.ptr);
> > +
> > + data = mmap(alloc.ptr, bo_size, PROT_READ | PROT_WRITE,
> > + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
> > + igt_assert(data != MAP_FAILED);
> > + memset(data, 0, bo_size);
> > + old = data;
> > +
> > + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> > +
> > + sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > + bind_system_allocator(sync, 1);
> > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> > + data[0].vm_sync = 0;
> > +
> > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > + PROT_WRITE, MAP_SHARED |
> > + MAP_ANONYMOUS, -1, 0);
> > + igt_assert(exec_ufence != MAP_FAILED);
> > + memset(exec_ufence, 0, SZ_4K);
> > +
> > + for (i = 0; i < 2; i++) {
> > + uint64_t addr = to_user_pointer(data);
> > + uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> > + uint64_t sdi_addr = addr + sdi_offset;
> > + int b = 0;
> > +
> > + write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
> > + igt_assert(b <= ARRAY_SIZE(data[i].batch));
> > +
> > + if (!i)
> > + data = old + unmap_offset + bo_size / 2;
> > + }
> > +
> > + data = old;
> > + exec.exec_queue_id = exec_queue;
> > +
> > + for (i = 0; i < 2; i++) {
> > + uint64_t addr = to_user_pointer(data);
> > + uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> > + uint64_t batch_addr = addr + batch_offset;
> > +
> > + sync[0].addr = new ? to_user_pointer(new) :
> > + to_user_pointer(exec_ufence);
> > + exec.address = batch_addr;
> > + xe_exec(fd, &exec);
> > +
> > + xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
> > + exec_queue, FIVE_SEC);
> > + if (i || (flags & CPU_FAULT))
> > + igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
> > + exec_ufence[0] = 0;
> > +
> > + if (!i) {
> > + data = old + unmap_offset + bo_size / 2;
> > + munmap(old + unmap_offset, bo_size / 2);
> > + if (flags & REMAP) {
> > + new = mmap(old + unmap_offset, bo_size / 2,
> > + PROT_READ | PROT_WRITE,
> > + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
> > + MAP_LOCKED, -1, 0);
> > + igt_assert(new != MAP_FAILED);
> > + }
> > + }
> > + }
> > +
> > + xe_exec_queue_destroy(fd, exec_queue);
> > + munmap(exec_ufence, SZ_4K);
> > + __aligned_free(&alloc);
> > + if (new)
> > + munmap(new, bo_size / 2);
> > + xe_vm_destroy(fd, vm);
> > +}
> > +
> > +#define MAX_N_EXEC_QUEUES 16
> > +
> > +#define MMAP (0x1 << 0)
> > +#define NEW (0x1 << 1)
> > +#define BO_UNMAP (0x1 << 2)
> > +#define FREE (0x1 << 3)
> > +#define BUSY (0x1 << 4)
> > +#define BO_MAP (0x1 << 5)
> > +#define RACE (0x1 << 6)
> > +#define SKIP_MEMSET (0x1 << 7)
> > +#define FAULT (0x1 << 8)
> > +#define FILE_BACKED (0x1 << 9)
> > +#define LOCK (0x1 << 10)
> > +#define MMAP_SHARED (0x1 << 11)
> > +#define HUGE_PAGE (0x1 << 12)
> > +#define SHARED_ALLOC (0x1 << 13)
> > +#define FORK_READ (0x1 << 14)
> > +#define FORK_READ_AFTER (0x1 << 15)
> > +#define MREMAP (0x1 << 16)
> > +#define DONTUNMAP (0x1 << 17)
> > +#define READ_ONLY_REMAP (0x1 << 18)
> > +#define SYNC_EXEC (0x1 << 19)
> > +#define EVERY_OTHER_CHECK (0x1 << 20)
> > +#define MULTI_FAULT (0x1 << 21)
> > +
> > +#define N_MULTI_FAULT 4
> > +
> > +/**
> > + * SUBTEST: once-%s
> > + * Description: Run %arg[1] system allocator test only once
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: once-large-%s
> > + * Description: Run %arg[1] system allocator test only once with large allocation
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: twice-%s
> > + * Description: Run %arg[1] system allocator test twice
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: twice-large-%s
> > + * Description: Run %arg[1] system allocator test twice with large allocation
> > + * Test category: functionality test
> > + *
> > + * SUBTEST: many-%s
> > + * Description: Run %arg[1] system allocator test many times
> > + * Test category: stress test
> > + *
> > + * SUBTEST: many-stride-%s
> > + * Description: Run %arg[1] system allocator test many times with a stride on each exec
> > + * Test category: stress test
> > + *
> > + * SUBTEST: many-execqueues-%s
> > + * Description: Run %arg[1] system allocator test on many exec_queues
> > + * Test category: stress test
> > + *
> > + * SUBTEST: many-large-%s
> > + * Description: Run %arg[1] system allocator test many times with large allocations
> > + * Test category: stress test
> > + *
> > + * SUBTEST: many-large-execqueues-%s
> > + * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
> > + *
> > + * SUBTEST: threads-many-%s
> > + * Description: Run %arg[1] system allocator threaded test many times
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-many-stride-%s
> > + * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-many-execqueues-%s
> > + * Description: Run %arg[1] system allocator threaded test on many exec_queues
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-many-large-%s
> > + * Description: Run %arg[1] system allocator threaded test many times with large allocations
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-many-large-execqueues-%s
> > + * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
> > + *
> > + * SUBTEST: threads-shared-vm-many-%s
> > + * Description: Run %arg[1] system allocator threaded, shared vm test many times
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-shared-vm-many-stride-%s
> > + * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-shared-vm-many-execqueues-%s
> > + * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-shared-vm-many-large-%s
> > + * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-shared-vm-many-large-execqueues-%s
> > + * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
> > + * Test category: stress test
> > + *
> > + * SUBTEST: process-many-%s
> > + * Description: Run %arg[1] system allocator multi-process test many times
> > + * Test category: stress test
> > + *
> > + * SUBTEST: process-many-stride-%s
> > + * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
> > + * Test category: stress test
> > + *
> > + * SUBTEST: process-many-execqueues-%s
> > + * Description: Run %arg[1] system allocator multi-process test on many exec_queues
> > + * Test category: stress test
> > + *
> > + * SUBTEST: process-many-large-%s
> > + * Description: Run %arg[1] system allocator multi-process test many times with large allocations
> > + * Test category: stress test
> > + *
> > + * SUBTEST: process-many-large-execqueues-%s
> > + * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
> > + *
> > + * SUBTEST: fault
> > + * Description: use a bad system allocator address resulting in a fault
> > + * Test category: bad input
> > + *
> > + * arg[1]:
> > + *
> > + * @malloc: malloc single buffer for all execs, issue a command which will trigger multiple faults
> > + * @malloc-multi-fault: malloc single buffer for all execs
> > + * @malloc-fork-read: malloc single buffer for all execs, fork a process to read test output
> > + * @malloc-fork-read-after: malloc single buffer for all execs, fork a process to read test output, check again after fork returns in parent
> > + * @malloc-mlock: malloc and mlock single buffer for all execs
> > + * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
> > + * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
> > + * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
> > + * @mmap: mmap single buffer for all execs
> > + * @mmap-remap: mmap and mremap a buffer for all execs
> > + * @mmap-remap-dontunmap: mmap and mremap a buffer with dontunmap flag for all execs
> > + * @mmap-remap-ro: mmap and mremap a read-only buffer for all execs
> > + * @mmap-remap-ro-dontunmap: mmap and mremap a read-only buffer with dontunmap flag for all execs
> > + * @mmap-remap-eocheck: mmap and mremap a buffer for all execs, check data every other loop iteration
> > + * @mmap-remap-dontunmap-eocheck: mmap and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
> > + * @mmap-remap-ro-eocheck: mmap and mremap a read-only buffer for all execs, check data every other loop iteration
> > + * @mmap-remap-ro-dontunmap-eocheck: mmap and mremap a read-only buffer with dontunmap flag for all execs, check data every other loop iteration
> > + * @mmap-huge: mmap huge page single buffer for all execs
> > + * @mmap-shared: mmap shared single buffer for all execs
> > + * @mmap-shared-remap: mmap shared and mremap a buffer for all execs
> > + * @mmap-shared-remap-dontunmap: mmap shared and mremap a buffer with dontunmap flag for all execs
> > + * @mmap-shared-remap-eocheck: mmap shared and mremap a buffer for all execs, check data every other loop iteration
> > + * @mmap-shared-remap-dontunmap-eocheck: mmap shared and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
> > + * @mmap-mlock: mmap and mlock single buffer for all execs
> > + * @mmap-file: mmap single buffer, with file backing, for all execs
> > + * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
> > + * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
> > + * @free: malloc and free buffer for each exec
> > + * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
> > + * @new: malloc a new buffer for each exec
> > + * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
> > + * @new-bo-map: malloc a new buffer or map BO for each exec
> > + * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
> > + * @mmap-free: mmap and free buffer for each exec
> > + * @mmap-free-huge: mmap huge page and free buffer for each exec
> > + * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
> > + * @mmap-new: mmap a new buffer for each exec
> > + * @mmap-new-huge: mmap huge page a new buffer for each exec
> > + * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
> > + * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
> > + * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
> > + * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
> > + * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
> > + * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
> > + * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
> > + * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
> > + * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
> > + * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
> > + * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
> > + * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
> > + * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
> > + * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
> > + * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > + * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
> > + * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > + * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
> > + * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
> > + * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
> > + * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
> > + * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > + * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
> > + * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
> > + * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > + *
> > + * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
> > + * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
> > + * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-shared-alloc-many-stride-malloc
> > + * Description: Create multiple threads with a faults on different hardware engines to same addresses
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-shared-alloc-many-stride-malloc-sync
> > + * Description: Create multiple threads with a faults on different hardware engines to same addresses, syncing on each exec
> > + * Test category: stress test
> > + *
> > + * SUBTEST: threads-shared-alloc-many-stride-malloc-race
> > + * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
> > + * Test category: stress test
> > + */
> > +
> > +struct test_exec_data {
> > + uint32_t batch[32];
> > + uint64_t pad;
> > + uint64_t vm_sync;
> > + uint64_t exec_sync;
> > + uint32_t data;
> > + uint32_t expected_data;
> > +};
> > +
> > +static void
> > +test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > + int n_exec_queues, int n_execs, size_t bo_size,
> > + size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
> > + unsigned int flags)
> > +{
> > + uint64_t addr;
> > + struct drm_xe_sync sync[1] = {
> > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > + .timeline_value = USER_FENCE_VALUE },
> > + };
> > + struct drm_xe_exec exec = {
> > + .num_batch_buffer = 1,
> > + .num_syncs = 1,
> > + .syncs = to_user_pointer(sync),
> > + };
> > + uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > + struct test_exec_data *data, *next_data = NULL;
> > + uint32_t bo_flags;
> > + uint32_t bo = 0;
> > + void **pending_free;
> > + u64 *exec_ufence = NULL;
> > + int i, j, b, file_fd = -1, prev_idx;
> > + bool free_vm = false;
> > + size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
> > + size_t orig_size = bo_size;
> > + struct aligned_alloc_type aligned_alloc_type;
> > +
> > + if (flags & MULTI_FAULT) {
> > + if (!bo_size)
> > + return;
> > +
> > + bo_size *= N_MULTI_FAULT;
> > + }
> > +
> > + if (flags & SHARED_ALLOC)
> > + return;
> > +
> > + if (flags & EVERY_OTHER_CHECK && odd(n_execs))
> > + return;
> > +
> > + if (flags & EVERY_OTHER_CHECK)
> > + igt_assert(flags & MREMAP);
> > +
> > + igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> > +
> > + if (flags & NEW && !(flags & FREE)) {
> > + pending_free = malloc(sizeof(*pending_free) * n_execs);
> > + igt_assert(pending_free);
> > + memset(pending_free, 0, sizeof(*pending_free) * n_execs);
> > + }
> > +
> > + if (!vm) {
> > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > + free_vm = true;
> > + }
> > + if (!bo_size) {
> > + if (!stride) {
> > + bo_size = sizeof(*data) * n_execs;
> > + bo_size = xe_bb_size(fd, bo_size);
> > + } else {
> > + bo_size = stride * n_execs * sizeof(*data);
> > + bo_size = xe_bb_size(fd, bo_size);
> > + }
> > + }
> > + if (flags & HUGE_PAGE) {
> > + aligned_size = ALIGN(aligned_size, SZ_2M);
> > + bo_size = ALIGN(bo_size, SZ_2M);
> > + }
> > +
> > + if (alloc) {
> > + data = alloc;
> > + } else {
> > + if (flags & MMAP) {
> > + int mmap_flags = MAP_FIXED;
> > +
> > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > + data = aligned_alloc_type.ptr;
> > + igt_assert(data);
> > + __aligned_partial_free(&aligned_alloc_type);
> > +
> > + if (flags & MMAP_SHARED)
> > + mmap_flags |= MAP_SHARED;
> > + else
> > + mmap_flags |= MAP_PRIVATE;
> > +
> > + if (flags & HUGE_PAGE)
> > + mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
> > +
> > + if (flags & FILE_BACKED) {
> > + char name[1024];
> > +
> > + igt_assert(!(flags & NEW));
> > +
> > + sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
> > + getpid());
>
> Might be another candidate to use mkstemp() or similar.
>
Will look into that.
> > + file_fd = open(name, O_RDWR | O_CREAT, 0x666);
> > + posix_fallocate(file_fd, 0, bo_size);
> > + } else {
> > + mmap_flags |= MAP_ANONYMOUS;
> > + }
> > +
> > + data = mmap(data, bo_size, PROT_READ |
> > + PROT_WRITE, mmap_flags, file_fd, 0);
> > + igt_assert(data != MAP_FAILED);
> > + } else {
> > + data = aligned_alloc(aligned_size, bo_size);
> > + igt_assert(data);
> > + }
> > + if (!(flags & SKIP_MEMSET))
> > + memset(data, 0, bo_size);
> > + if (flags & LOCK) {
> > + igt_assert(!(flags & NEW));
> > + mlock(data, bo_size);
> > + }
> > + }
> > +
> > + for (i = 0; i < n_exec_queues; i++)
> > + exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> > +
> > + sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > + if (free_vm) {
> > + bind_system_allocator(sync, 1);
> > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> > + }
> > + data[0].vm_sync = 0;
> > +
> > + addr = to_user_pointer(data);
> > +
> > + if (flags & BO_UNMAP) {
> > + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > + bo = xe_bo_create(fd, vm, bo_size,
> > + vram_if_possible(fd, eci->gt_id), bo_flags);
> > + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
> > +
> > + __xe_vm_bind_assert(fd, vm, 0,
> > + 0, 0, addr, bo_size,
> > + DRM_XE_VM_BIND_OP_MAP,
> > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> > + 1, 0, 0);
> > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
> > + FIVE_SEC);
> > + data[0].vm_sync = 0;
> > + gem_close(fd, bo);
> > + bo = 0;
> > + }
> > +
> > + if (!(flags & RACE)) {
> > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > + PROT_WRITE, MAP_SHARED |
> > + MAP_ANONYMOUS, -1, 0);
> > + igt_assert(exec_ufence != MAP_FAILED);
> > + memset(exec_ufence, 0, SZ_4K);
> > + }
> > +
> > + for (i = 0; i < n_execs; i++) {
> > + int idx = !stride ? i : i * stride, next_idx = !stride
> > + ? (i + 1) : (i + 1) * stride;
> > + uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
> > + uint64_t batch_addr = addr + batch_offset;
> > + uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
> > + uint64_t sdi_addr = addr + sdi_offset;
> > + int e = i % n_exec_queues, err;
> > + bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> > + bool fault_injected = (FAULT & flags) && i > n_execs;
> > +
> > + if (barrier)
> > + pthread_barrier_wait(barrier);
> > +
> > + if (flags & MULTI_FAULT) {
> > + b = 0;
> > + for (j = 0; j < N_MULTI_FAULT - 1; ++j)
> > + __write_dword(data[idx].batch,
> > + sdi_addr + j * orig_size,
> > + WRITE_VALUE(&data[idx], idx), &b);
> > + write_dword(data[idx].batch, sdi_addr + j * orig_size,
> > + WRITE_VALUE(&data[idx], idx), &b);
> > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > + } else if (!(flags & EVERY_OTHER_CHECK)) {
> > + b = 0;
> > + write_dword(data[idx].batch, sdi_addr,
> > + WRITE_VALUE(&data[idx], idx), &b);
> > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > + } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
> > + b = 0;
> > + write_dword(data[idx].batch, sdi_addr,
> > + WRITE_VALUE(&data[idx], idx), &b);
> > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > +
> > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > + next_data = aligned_alloc_type.ptr;
> > + igt_assert(next_data);
> > + __aligned_partial_free(&aligned_alloc_type);
> > +
> > + b = 0;
> > + write_dword(data[next_idx].batch,
> > + to_user_pointer(next_data) +
> > + (char *)&data[next_idx].data - (char *)data,
> > + WRITE_VALUE(&data[next_idx], next_idx), &b);
> > + igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
> > + }
> > +
> > + if (!exec_ufence)
> > + data[idx].exec_sync = 0;
> > +
> > + sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
> > + addr + (char *)&data[idx].exec_sync - (char *)data;
> > +
> > + exec.exec_queue_id = exec_queues[e];
> > + if (fault_inject)
> > + exec.address = batch_addr * 2;
> > + else
> > + exec.address = batch_addr;
> > +
> > + if (fault_injected) {
> > + err = __xe_exec(fd, &exec);
> > + igt_assert(err == -ENOENT);
> > + } else {
> > + xe_exec(fd, &exec);
> > + }
> > +
> > + if (barrier)
> > + pthread_barrier_wait(barrier);
> > +
> > + if (fault_inject || fault_injected) {
> > + int64_t timeout = QUARTER_SEC;
> > +
> > + err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> > + &data[idx].exec_sync,
> > + USER_FENCE_VALUE,
> > + exec_queues[e], &timeout);
> > + igt_assert(err == -ETIME || err == -EIO);
> > + } else {
> > + xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> > + &data[idx].exec_sync, USER_FENCE_VALUE,
> > + exec_queues[e], FIVE_SEC);
> > + if (flags & LOCK && !i)
> > + munlock(data, bo_size);
> > +
> > + if (flags & MREMAP) {
> > + void *old = data;
> > + int remap_flags = MREMAP_MAYMOVE | MREMAP_FIXED;
> > +
> > + if (flags & DONTUNMAP)
> > + remap_flags |= MREMAP_DONTUNMAP;
> > +
> > + if (flags & READ_ONLY_REMAP)
> > + igt_assert(!mprotect(old, bo_size,
> > + PROT_READ));
> > +
> > + if (!next_data) {
> > + aligned_alloc_type = __aligned_alloc(aligned_size,
> > + bo_size);
> > + data = aligned_alloc_type.ptr;
> > + __aligned_partial_free(&aligned_alloc_type);
> > + } else {
> > + data = next_data;
> > + }
> > + next_data = NULL;
> > + igt_assert(data);
> > +
> > + data = mremap(old, bo_size, bo_size,
> > + remap_flags, data);
> > + igt_assert(data != MAP_FAILED);
> > +
> > + if (flags & READ_ONLY_REMAP)
> > + igt_assert(!mprotect(data, bo_size,
> > + PROT_READ |
> > + PROT_WRITE));
> > +
> > + addr = to_user_pointer(data);
> > + if (flags & DONTUNMAP)
> > + munmap(old, bo_size);
> > + }
> > +
> > + if (!(flags & EVERY_OTHER_CHECK) || odd(i)) {
> > + if (flags & FORK_READ) {
> > + igt_fork(child, 1)
> > + igt_assert_eq(data[idx].data,
> > + READ_VALUE(&data[idx], idx));
> > + if (!(flags & FORK_READ_AFTER))
> > + igt_assert_eq(data[idx].data,
> > + READ_VALUE(&data[idx], idx));
> > + igt_waitchildren();
> > + if (flags & FORK_READ_AFTER)
> > + igt_assert_eq(data[idx].data,
> > + READ_VALUE(&data[idx], idx));
> > + } else {
> > + igt_assert_eq(data[idx].data,
> > + READ_VALUE(&data[idx], idx));
> > +
> > + if (flags & MULTI_FAULT) {
> > + for (j = 1; j < N_MULTI_FAULT; ++j) {
> > + struct test_exec_data *__data =
> > + ((void *)data) + j * orig_size;
> > +
> > + igt_assert_eq(__data[idx].data,
> > + READ_VALUE(&data[idx], idx));
> > + }
> > + }
> > + }
> > + if (flags & EVERY_OTHER_CHECK)
> > + igt_assert_eq(data[prev_idx].data,
> > + READ_VALUE(&data[prev_idx], idx));
> > + }
> > + }
> > +
> > + if (exec_ufence)
> > + exec_ufence[0] = 0;
> > +
> > + if (bo) {
> > + __xe_vm_bind_assert(fd, vm, 0,
> > + 0, 0, addr, bo_size,
> > + DRM_XE_VM_BIND_OP_MAP,
> > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> > + NULL, 0, 0, 0);
> > + munmap(data, bo_size);
> > + gem_close(fd, bo);
> > + }
> > +
> > + if (flags & NEW) {
> > + if (flags & MMAP) {
> > + if (flags & FREE)
> > + munmap(data, bo_size);
> > + else
> > + pending_free[i] = data;
> > + data = mmap(NULL, bo_size, PROT_READ |
> > + PROT_WRITE, MAP_SHARED |
> > + MAP_ANONYMOUS, -1, 0);
> > + igt_assert(data != MAP_FAILED);
> > + } else if (flags & BO_MAP && (i % 2)) {
> > + if (!bo) {
> > + if (flags & FREE)
> > + free(data);
> > + else
> > + pending_free[i] = data;
> > + }
> > +
> > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > + data = aligned_alloc_type.ptr;
> > + igt_assert(data);
> > + __aligned_partial_free(&aligned_alloc_type);
> > +
> > + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > + bo = xe_bo_create(fd, vm, bo_size,
> > + vram_if_possible(fd, eci->gt_id),
> > + bo_flags);
> > + data = xe_bo_map_fixed(fd, bo, bo_size,
> > + to_user_pointer(data));
> > +
> > + xe_vm_bind_async(fd, vm, 0, bo, 0,
> > + to_user_pointer(data),
> > + bo_size, 0, 0);
> > + } else {
> > + if (!bo) {
> > + if (flags & FREE)
> > + free(data);
> > + else
> > + pending_free[i] = data;
> > + }
> > + bo = 0;
> > + data = aligned_alloc(aligned_size, bo_size);
>
> Large memory leaks come from this ^ aligned_alloc(), see below.
>
Yea this will leak, thanks for pointing this out. Thomas also raised
memory leaks as a concern.
Inline below with the changes to fix this...
> > + igt_assert(data);
> > + }
> > + addr = to_user_pointer(data);
> > + if (!(flags & SKIP_MEMSET))
> > + memset(data, 0, bo_size);
> > + }
> > +
> > + prev_idx = idx;
> > + }
> > +
> > + if (bo) {
> > + __xe_vm_bind_assert(fd, vm, 0,
> > + 0, 0, addr, bo_size,
> > + DRM_XE_VM_BIND_OP_MAP,
> > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> > + NULL, 0, 0, 0);
> > + munmap(data, bo_size);
data = NULL;
> > + gem_close(fd, bo);
> > + }
> > +
> > + if (flags & BUSY)
> > + igt_assert_eq(unbind_system_allocator(), -EBUSY);
> > +
> > + for (i = 0; i < n_exec_queues; i++)
> > + xe_exec_queue_destroy(fd, exec_queues[i]);
> > +
> > + if (exec_ufence)
> > + munmap(exec_ufence, SZ_4K);
> > +
> > + if (flags & LOCK)
> > + munlock(data, bo_size);
> > +
> > + if (file_fd != -1)
> > + close(file_fd);
> > +
> > + if (flags & NEW && !(flags & FREE)) {
> > + for (i = 0; i < n_execs; i++) {
> > + if (!pending_free[i])
> > + continue;
> > +
> > + if (flags & MMAP)
> > + munmap(pending_free[i], bo_size);
> > + else
> > + free(pending_free[i]);
> > + }
> > + free(pending_free);
> > + } else {
s/ else {/\nif (data) {
> > + if (flags & MMAP)
> > + munmap(data, bo_size);
> > + else if (!alloc)
> > + free(data);
>
> Something seems wrong with the flags logic when skipping this ^ free() for the
> allocation pointed above.
>
Yep, see above,
Matt
> Francois
>
> > + }
> > + if (free_vm)
> > + xe_vm_destroy(fd, vm);
> > +}
> > +
> > +struct thread_data {
> > + pthread_t thread;
> > + pthread_mutex_t *mutex;
> > + pthread_cond_t *cond;
> > + pthread_barrier_t *barrier;
> > + int fd;
> > + struct drm_xe_engine_class_instance *eci;
> > + int n_exec_queues;
> > + int n_execs;
> > + size_t bo_size;
> > + size_t stride;
> > + uint32_t vm;
> > + unsigned int flags;
> > + void *alloc;
> > + bool *go;
> > +};
> > +
> > +static void *thread(void *data)
> > +{
> > + struct thread_data *t = data;
> > +
> > + pthread_mutex_lock(t->mutex);
> > + while (!*t->go)
> > + pthread_cond_wait(t->cond, t->mutex);
> > + pthread_mutex_unlock(t->mutex);
> > +
> > + test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
> > + t->bo_size, t->stride, t->vm, t->alloc, t->barrier,
> > + t->flags);
> > +
> > + return NULL;
> > +}
> > +
> > +static void
> > +threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> > + size_t stride, unsigned int flags, bool shared_vm)
> > +{
> > + struct drm_xe_engine_class_instance *hwe;
> > + struct thread_data *threads_data;
> > + int n_engines = 0, i = 0;
> > + pthread_mutex_t mutex;
> > + pthread_cond_t cond;
> > + pthread_barrier_t barrier;
> > + uint32_t vm = 0;
> > + bool go = false;
> > + void *alloc = NULL;
> > +
> > + if ((FILE_BACKED | FORK_READ) & flags)
> > + return;
> > +
> > + xe_for_each_engine(fd, hwe)
> > + ++n_engines;
> > +
> > + if (shared_vm) {
> > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > + bind_system_allocator(NULL, 0);
> > + }
> > +
> > + if (flags & SHARED_ALLOC) {
> > + uint64_t alloc_size;
> > +
> > + igt_assert(stride);
> > +
> > + alloc_size = sizeof(struct test_exec_data) * stride *
> > + n_execs * n_engines;
> > + alloc_size = xe_bb_size(fd, alloc_size);
> > + alloc = aligned_alloc(SZ_2M, alloc_size);
> > + igt_assert(alloc);
> > +
> > + memset(alloc, 0, alloc_size);
> > + flags &= ~SHARED_ALLOC;
> > + }
> > +
> > + threads_data = calloc(n_engines, sizeof(*threads_data));
> > + igt_assert(threads_data);
> > +
> > + pthread_mutex_init(&mutex, 0);
> > + pthread_cond_init(&cond, 0);
> > + pthread_barrier_init(&barrier, 0, n_engines);
> > +
> > + xe_for_each_engine(fd, hwe) {
> > + threads_data[i].mutex = &mutex;
> > + threads_data[i].cond = &cond;
> > + threads_data[i].barrier = (flags & SYNC_EXEC) ? &barrier : NULL;
> > + threads_data[i].fd = fd;
> > + threads_data[i].eci = hwe;
> > + threads_data[i].n_exec_queues = n_exec_queues;
> > + threads_data[i].n_execs = n_execs;
> > + threads_data[i].bo_size = bo_size;
> > + threads_data[i].stride = stride;
> > + threads_data[i].vm = vm;
> > + threads_data[i].flags = flags;
> > + threads_data[i].alloc = alloc ? alloc + i *
> > + sizeof(struct test_exec_data) : NULL;
> > + threads_data[i].go = &go;
> > + pthread_create(&threads_data[i].thread, 0, thread,
> > + &threads_data[i]);
> > + ++i;
> > + }
> > +
> > + pthread_mutex_lock(&mutex);
> > + go = true;
> > + pthread_cond_broadcast(&cond);
> > + pthread_mutex_unlock(&mutex);
> > +
> > + for (i = 0; i < n_engines; ++i)
> > + pthread_join(threads_data[i].thread, NULL);
> > +
> > + if (shared_vm) {
> > + int ret;
> > +
> > + if (flags & MMAP) {
> > + int tries = 300;
> > +
> > + while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
> > + sleep(.01);
> > + --tries;
> > + }
> > + igt_assert_eq(ret, 0);
> > + }
> > + xe_vm_destroy(fd, vm);
> > + if (alloc)
> > + free(alloc);
> > + }
> > + free(threads_data);
> > +}
> > +
> > +static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
> > + int n_execs, size_t bo_size, size_t stride,
> > + unsigned int flags)
> > +{
> > + struct process_data *pdata;
> > + int map_fd;
> > + int fd;
> > +
> > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > + wait_pdata(pdata);
> > +
> > + fd = drm_open_driver(DRIVER_XE);
> > + test_exec(fd, hwe, n_exec_queues, n_execs,
> > + bo_size, stride, 0, NULL, NULL, flags);
> > + drm_close_driver(fd);
> > +
> > + close(map_fd);
> > + munmap(pdata, sizeof(*pdata));
> > +}
> > +
> > +static void
> > +processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> > + size_t stride, unsigned int flags)
> > +{
> > + struct drm_xe_engine_class_instance *hwe;
> > + struct process_data *pdata;
> > + int map_fd;
> > +
> > + if (flags & FORK_READ)
> > + return;
> > +
> > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > +
> > + init_pdata(pdata, 0);
> > +
> > + xe_for_each_engine(fd, hwe) {
> > + igt_fork(child, 1)
> > + process(hwe, n_exec_queues, n_execs, bo_size,
> > + stride, flags);
> > + }
> > +
> > + signal_pdata(pdata);
> > + igt_waitchildren();
> > +
> > + close(map_fd);
> > + munmap(pdata, sizeof(*pdata));
> > +}
> > +
> > +struct section {
> > + const char *name;
> > + unsigned int flags;
> > +};
> > +
> > +igt_main
> > +{
> > + struct drm_xe_engine_class_instance *hwe;
> > + const struct section sections[] = {
> > + { "malloc", 0 },
> > + { "malloc-multi-fault", MULTI_FAULT },
> > + { "malloc-fork-read", FORK_READ },
> > + { "malloc-fork-read-after", FORK_READ | FORK_READ_AFTER },
> > + { "malloc-mlock", LOCK },
> > + { "malloc-race", RACE },
> > + { "malloc-busy", BUSY },
> > + { "malloc-bo-unmap", BO_UNMAP },
> > + { "mmap", MMAP },
> > + { "mmap-remap", MMAP | MREMAP },
> > + { "mmap-remap-dontunmap", MMAP | MREMAP | DONTUNMAP },
> > + { "mmap-remap-ro", MMAP | MREMAP | READ_ONLY_REMAP },
> > + { "mmap-remap-ro-dontunmap", MMAP | MREMAP | DONTUNMAP |
> > + READ_ONLY_REMAP },
> > + { "mmap-remap-eocheck", MMAP | MREMAP | EVERY_OTHER_CHECK },
> > + { "mmap-remap-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
> > + EVERY_OTHER_CHECK },
> > + { "mmap-remap-ro-eocheck", MMAP | MREMAP | READ_ONLY_REMAP |
> > + EVERY_OTHER_CHECK },
> > + { "mmap-remap-ro-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
> > + READ_ONLY_REMAP | EVERY_OTHER_CHECK },
> > + { "mmap-huge", MMAP | HUGE_PAGE },
> > + { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
> > + { "mmap-shared-remap", MMAP | LOCK | MMAP_SHARED | MREMAP },
> > + { "mmap-shared-remap-dontunmap", MMAP | LOCK | MMAP_SHARED |
> > + MREMAP | DONTUNMAP },
> > + { "mmap-shared-remap-eocheck", MMAP | LOCK | MMAP_SHARED |
> > + MREMAP | EVERY_OTHER_CHECK },
> > + { "mmap-shared-remap-dontunmap-eocheck", MMAP | LOCK |
> > + MMAP_SHARED | MREMAP | DONTUNMAP | EVERY_OTHER_CHECK },
> > + { "mmap-mlock", MMAP | LOCK },
> > + { "mmap-file", MMAP | FILE_BACKED },
> > + { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
> > + { "mmap-race", MMAP | RACE },
> > + { "free", NEW | FREE },
> > + { "free-race", NEW | FREE | RACE },
> > + { "new", NEW },
> > + { "new-race", NEW | RACE },
> > + { "new-bo-map", NEW | BO_MAP },
> > + { "new-busy", NEW | BUSY },
> > + { "mmap-free", MMAP | NEW | FREE },
> > + { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
> > + { "mmap-free-race", MMAP | NEW | FREE | RACE },
> > + { "mmap-new", MMAP | NEW },
> > + { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
> > + { "mmap-new-race", MMAP | NEW | RACE },
> > + { "malloc-nomemset", SKIP_MEMSET },
> > + { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
> > + { "malloc-race-nomemset", SKIP_MEMSET | RACE },
> > + { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
> > + { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
> > + { "mmap-nomemset", SKIP_MEMSET | MMAP },
> > + { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
> > + { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
> > + { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
> > + { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
> > + { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
> > + { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
> > + { "free-nomemset", SKIP_MEMSET | NEW | FREE },
> > + { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
> > + { "new-nomemset", SKIP_MEMSET | NEW },
> > + { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
> > + { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
> > + { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
> > + { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
> > + { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
> > + { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
> > + { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
> > + { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
> > + { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
> > + { NULL },
> > + };
> > + const struct section psections[] = {
> > + { "munmap-cpu-fault", CPU_FAULT },
> > + { "munmap-no-cpu-fault", 0 },
> > + { "remap-cpu-fault", CPU_FAULT | REMAP },
> > + { "remap-no-cpu-fault", REMAP },
> > + { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
> > + { "middle-munmap-no-cpu-fault", MIDDLE },
> > + { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
> > + { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
> > + { NULL },
> > + };
> > + const struct section esections[] = {
> > + { "malloc", 0 },
> > + { "malloc-mix-bo", MIX_BO_ALLOC },
> > + { NULL },
> > + };
> > + int fd;
> > +
> > + igt_fixture {
> > + struct xe_device *xe;
> > +
> > + fd = drm_open_driver(DRIVER_XE);
> > + igt_require(!xe_supports_faults(fd));
> > +
> > + xe = xe_device_get(fd);
> > + va_bits = xe->va_bits;
> > + }
> > +
> > + for (const struct section *s = sections; s->name; s++) {
> > + igt_subtest_f("once-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("once-large-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 1, 1, SZ_2M, 0, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("twice-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("twice-large-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 1, 2, SZ_2M, 0, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("many-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("many-stride-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("many-execqueues-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("many-large-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("many-large-execqueues-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL,
> > + NULL, s->flags);
> > +
> > + igt_subtest_f("threads-many-%s", s->name)
> > + threads(fd, 1, 128, 0, 0, s->flags, false);
> > +
> > + igt_subtest_f("threads-many-stride-%s", s->name)
> > + threads(fd, 1, 128, 0, 256, s->flags, false);
> > +
> > + igt_subtest_f("threads-many-execqueues-%s", s->name)
> > + threads(fd, 16, 128, 0, 0, s->flags, false);
> > +
> > + igt_subtest_f("threads-many-large-%s", s->name)
> > + threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
> > +
> > + igt_subtest_f("threads-many-large-execqueues-%s", s->name)
> > + threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
> > +
> > + igt_subtest_f("threads-shared-vm-many-%s", s->name)
> > + threads(fd, 1, 128, 0, 0, s->flags, true);
> > +
> > + igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
> > + threads(fd, 1, 128, 0, 256, s->flags, true);
> > +
> > + igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
> > + threads(fd, 16, 128, 0, 0, s->flags, true);
> > +
> > + igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
> > + threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
> > +
> > + igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
> > + threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
> > +
> > + igt_subtest_f("process-many-%s", s->name)
> > + processes(fd, 1, 128, 0, 0, s->flags);
> > +
> > + igt_subtest_f("process-many-stride-%s", s->name)
> > + processes(fd, 1, 128, 0, 256, s->flags);
> > +
> > + igt_subtest_f("process-many-execqueues-%s", s->name)
> > + processes(fd, 16, 128, 0, 0, s->flags);
> > +
> > + igt_subtest_f("process-many-large-%s", s->name)
> > + processes(fd, 1, 128, SZ_2M, 0, s->flags);
> > +
> > + igt_subtest_f("process-many-large-execqueues-%s", s->name)
> > + processes(fd, 16, 128, SZ_2M, 0, s->flags);
> > + }
> > +
> > + igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
> > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
> > +
> > + igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
> > + threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
> > +
> > + igt_subtest("threads-shared-alloc-many-stride-malloc")
> > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC, false);
> > +
> > + igt_subtest("threads-shared-alloc-many-stride-malloc-sync")
> > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC | SYNC_EXEC, false);
> > +
> > + igt_subtest("threads-shared-alloc-many-stride-malloc-race")
> > + threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, false);
> > +
> > + igt_subtest_f("fault")
> > + xe_for_each_engine(fd, hwe)
> > + test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, NULL,
> > + FAULT);
> > +
> > + for (const struct section *s = psections; s->name; s++) {
> > + igt_subtest_f("partial-%s", s->name)
> > + xe_for_each_engine(fd, hwe)
> > + partial(fd, hwe, s->flags);
> > + }
> > +
> > + igt_subtest_f("unaligned-alloc")
> > + xe_for_each_engine(fd, hwe) {
> > + many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
> > + SZ_1M + SZ_512K, SZ_4K, NULL, 0);
> > + break;
> > + }
> > +
> > + igt_subtest_f("fault-benchmark")
> > + xe_for_each_engine(fd, hwe)
> > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > + BENCHMARK);
> > +
> > + igt_subtest_f("fault-threads-benchmark")
> > + xe_for_each_engine(fd, hwe)
> > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > + BENCHMARK | CPU_FAULT_THREADS);
> > +
> > + igt_subtest_f("fault-threads-same-page-benchmark")
> > + xe_for_each_engine(fd, hwe)
> > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > + BENCHMARK | CPU_FAULT_THREADS |
> > + CPU_FAULT_SAME_PAGE);
> > +
> > + igt_subtest_f("fault-process-benchmark")
> > + xe_for_each_engine(fd, hwe)
> > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > + BENCHMARK | CPU_FAULT_PROCESS);
> > +
> > + igt_subtest_f("fault-process-same-page-benchmark")
> > + xe_for_each_engine(fd, hwe)
> > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > + BENCHMARK | CPU_FAULT_PROCESS |
> > + CPU_FAULT_SAME_PAGE);
> > +
> > + for (const struct section *s = esections; s->name; s++) {
> > + igt_subtest_f("evict-%s", s->name)
> > + xe_for_each_engine(fd, hwe) {
> > + many_allocs(fd, hwe,
> > + xe_visible_vram_size(fd, hwe->gt_id),
> > + SZ_8M, SZ_1M, NULL, s->flags);
> > + break;
> > + }
> > + }
> > +
> > + for (const struct section *s = esections; s->name; s++) {
> > + igt_subtest_f("processes-evict-%s", s->name)
> > + processes_evict(fd, SZ_8M, SZ_1M, s->flags);
> > + }
> > +
> > + igt_fixture {
> > + xe_device_put(fd);
> > + drm_close_driver(fd);
> > + }
> > +}
> > diff --git a/tests/meson.build b/tests/meson.build
> > index 9224145cf4..8c7b756716 100644
> > --- a/tests/meson.build
> > +++ b/tests/meson.build
> > @@ -295,6 +295,7 @@ intel_xe_progs = [
> > 'xe_exec_reset',
> > 'xe_exec_sip',
> > 'xe_exec_store',
> > + 'xe_exec_system_allocator',
> > 'xe_exec_threads',
> > 'xe_exercise_blt',
> > 'xe_fault_injection',
> > --
> > 2.34.1
> >
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH] tests/xe: Add system_allocator test
2025-04-18 19:44 ` Matthew Brost
@ 2025-04-24 19:28 ` Francois Dugast
2025-04-24 19:46 ` Matthew Brost
0 siblings, 1 reply; 16+ messages in thread
From: Francois Dugast @ 2025-04-24 19:28 UTC (permalink / raw)
To: Matthew Brost; +Cc: igt-dev
Hi,
On Fri, Apr 18, 2025 at 12:44:51PM -0700, Matthew Brost wrote:
> On Fri, Apr 18, 2025 at 05:47:55PM +0200, Francois Dugast wrote:
> > Hi Matt,
> >
> > I am still going through your patch but sharing some comments already.
> >
> > The sequence is neither complex nor too different from existing tests
> > but as it is a lot of multi-thread / multi-process code, I am trying
> > to come up with a suggestion to break it down. Might not be possible
> > though.
> >
>
> Good to hear you don't find it too complex, open spliting if it makes
> sense.
>
> > On Tue, Apr 15, 2025 at 07:20:40PM -0700, Matthew Brost wrote:
> > > Test various uses of system allocator in single thread, multiple
> > > threads, and multiple processes.
> > >
> > > Features tested:
> > > - Malloc with various size
> > > - Mmap with various sizes and flags including file backed mappings
> > > - Mixing BO allocations with system allocator
> > > - Various page sizes
> > > - Dynamically freeing / unmapping memory
> > > - Sharing VM across threads
> > > - Faults racing on different hardware engines / GTs / Tiles
> > > - GPU faults and CPU faults racing
> > > - CPU faults on multiple threads racing
> > > - CPU faults on multiple process racing
> > > - GPU faults of memory not faulted in by CPU
> > > - Partial unmap of allocations
> > > - Attempting to unmap system allocations when GPU has mappings
> > > - Eviction of both system allocations and BOs
> > > - Forking child processes and reading data from VRAM
> > > - mremap data in VRAM
> > > - Protection changes
> > > - Multiple faults per execbuf
> > >
> > > Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
> > >
> > > v2:
> > > - Rebase
> > > - Fix memory allocation to not interfear with malloc (Thomas)
> > >
> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > > ---
> > > include/drm-uapi/xe_drm.h | 57 +-
> > > lib/xe/xe_ioctl.c | 12 +
> > > lib/xe/xe_ioctl.h | 1 +
> > > tests/intel/xe_exec_system_allocator.c | 1832 ++++++++++++++++++++++++
> > > tests/meson.build | 1 +
> > > 5 files changed, 1896 insertions(+), 7 deletions(-)
> > > create mode 100644 tests/intel/xe_exec_system_allocator.c
> > >
> > > diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
> > > index 154f947ef0..9c08738c3b 100644
> > > --- a/include/drm-uapi/xe_drm.h
> > > +++ b/include/drm-uapi/xe_drm.h
> > > @@ -3,8 +3,8 @@
> > > * Copyright © 2023 Intel Corporation
> > > */
> > >
> > > -#ifndef _XE_DRM_H_
> > > -#define _XE_DRM_H_
> > > +#ifndef _UAPI_XE_DRM_H_
> > > +#define _UAPI_XE_DRM_H_
> >
> > Nit: The header seems to have been copied directly from the kernel tree, instead
> > it should be generated with:
> >
> > make headers_install
> >
> > https://docs.kernel.org/kbuild/headers_install.html
> >
>
> Let me split out the uAPI update into its own patch + use the proper
> flow.
Sounds good.
>
> > >
> > > #include "drm.h"
> > >
> > > @@ -134,7 +134,7 @@ extern "C" {
> > > * redefine the interface more easily than an ever growing struct of
> > > * increasing complexity, and for large parts of that interface to be
> > > * entirely optional. The downside is more pointer chasing; chasing across
> > > - * the boundary with pointers encapsulated inside u64.
> > > + * the __user boundary with pointers encapsulated inside u64.
> >
> > See above comment on make headers_install.
> >
>
> +1
>
> > > *
> > > * Example chaining:
> > > *
> > > @@ -393,6 +393,10 @@ struct drm_xe_query_mem_regions {
> > > *
> > > * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
> > > * has usable VRAM
> > > + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
> > > + * has low latency hint support
> > > + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
> > > + * device has CPU address mirroring support
> > > * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
> > > * required by this device, typically SZ_4K or SZ_64K
> > > * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
> > > @@ -409,6 +413,8 @@ struct drm_xe_query_config {
> > > #define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
> > > #define DRM_XE_QUERY_CONFIG_FLAGS 1
> > > #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
> > > + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
> > > + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
> > > #define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
> > > #define DRM_XE_QUERY_CONFIG_VA_BITS 3
> > > #define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
> > > @@ -911,7 +917,11 @@ struct drm_xe_gem_mmap_offset {
> > > * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
> > > *
> > > * The @flags can be:
> > > - * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
> > > + * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
> > > + * space of the VM to scratch page. A vm_bind would overwrite the scratch
> > > + * page mapping. This flag is mutually exclusive with the
> > > + * %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception of on x2 and
> > > + * xe3 platform.
> > > * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
> > > * exec submissions to its exec_queues that don't have an upper time
> > > * limit on the job execution time. But exec submissions to these
> > > @@ -987,6 +997,12 @@ struct drm_xe_vm_destroy {
> > > * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
> > > * reject the binding if the encryption key is no longer valid. This
> > > * flag has no effect on BOs that are not marked as using PXP.
> > > + * - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
> > > + * set, no mappings are created rather the range is reserved for CPU address
> > > + * mirroring which will be populated on GPU page faults or prefetches. Only
> > > + * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
> > > + * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
> > > + * handle MBZ, and the BO offset MBZ.
> > > */
> > > struct drm_xe_vm_bind_op {
> > > /** @extensions: Pointer to the first extension struct, if any */
> > > @@ -1039,7 +1055,9 @@ struct drm_xe_vm_bind_op {
> > > * on the @pat_index. For such mappings there is no actual memory being
> > > * mapped (the address in the PTE is invalid), so the various PAT memory
> > > * attributes likely do not apply. Simply leaving as zero is one
> > > - * option (still a valid pat_index).
> > > + * option (still a valid pat_index). Same applies to
> > > + * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mapping
> > > + * there is no actual memory being mapped.
> > > */
> > > __u16 pat_index;
> > >
> > > @@ -1055,6 +1073,14 @@ struct drm_xe_vm_bind_op {
> > >
> > > /** @userptr: user pointer to bind on */
> > > __u64 userptr;
> > > +
> > > + /**
> > > + * @cpu_addr_mirror_offset: Offset from GPU @addr to create
> > > + * CPU address mirror mappings. MBZ with current level of
> > > + * support (e.g. 1 to 1 mapping between GPU and CPU mappings
> > > + * only supported).
> > > + */
> > > + __s64 cpu_addr_mirror_offset;
> > > };
> > >
> > > /**
> > > @@ -1078,6 +1104,7 @@ struct drm_xe_vm_bind_op {
> > > #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
> > > #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
> > > #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
> > > +#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
> > > /** @flags: Bind flags */
> > > __u32 flags;
> > >
> > > @@ -1205,6 +1232,21 @@ struct drm_xe_vm_bind {
> > > * };
> > > * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> > > *
> > > + * Allow users to provide a hint to kernel for cases demanding low latency
> > > + * profile. Please note it will have impact on power consumption. User can
> > > + * indicate low latency hint with flag while creating exec queue as
> > > + * mentioned below,
> > > + *
> > > + * struct drm_xe_exec_queue_create exec_queue_create = {
> > > + * .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
> > > + * .extensions = 0,
> > > + * .vm_id = vm,
> > > + * .num_bb_per_exec = 1,
> > > + * .num_eng_per_bb = 1,
> > > + * .instances = to_user_pointer(&instance),
> > > + * };
> > > + * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> > > + *
> > > */
> > > struct drm_xe_exec_queue_create {
> > > #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
> > > @@ -1223,7 +1265,8 @@ struct drm_xe_exec_queue_create {
> > > /** @vm_id: VM to use for this exec queue */
> > > __u32 vm_id;
> > >
> > > - /** @flags: MBZ */
> > > +#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
> > > + /** @flags: flags to use for this exec queue */
> > > __u32 flags;
> > >
> > > /** @exec_queue_id: Returned exec queue ID */
> > > @@ -1926,4 +1969,4 @@ struct drm_xe_query_eu_stall {
> > > }
> > > #endif
> > >
> > > -#endif /* _XE_DRM_H_ */
> > > +#endif /* _UAPI_XE_DRM_H_ */
> > > diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> > > index fb8c4aef13..785fc9184c 100644
> > > --- a/lib/xe/xe_ioctl.c
> > > +++ b/lib/xe/xe_ioctl.c
> > > @@ -440,6 +440,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
> > > return __xe_bo_map(fd, bo, size, PROT_WRITE);
> > > }
> > >
> > > +void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
> > > +{
> > > + uint64_t mmo;
> > > + void *map;
> > > +
> > > + mmo = xe_bo_mmap_offset(fd, bo);
> > > + map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
> > > + igt_assert(map != MAP_FAILED);
> > > +
> > > + return map;
> > > +}
> > > +
> > > void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
> > > {
> > > return __xe_bo_map(fd, bo, size, prot);
> > > diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
> > > index 9bdf73b2bd..554a33c9cd 100644
> > > --- a/lib/xe/xe_ioctl.h
> > > +++ b/lib/xe/xe_ioctl.h
> > > @@ -86,6 +86,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
> > > void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
> > > uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
> > > void *xe_bo_map(int fd, uint32_t bo, size_t size);
> > > +void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
> > > void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
> > > int __xe_exec(int fd, struct drm_xe_exec *exec);
> > > void xe_exec(int fd, struct drm_xe_exec *exec);
> > > diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
> > > new file mode 100644
> > > index 0000000000..14fa59353e
> > > --- /dev/null
> > > +++ b/tests/intel/xe_exec_system_allocator.c
> > > @@ -0,0 +1,1832 @@
> > > +// SPDX-License-Identifier: MIT
> > > +/*
> > > + * Copyright © 2024 Intel Corporation
> > > + */
> > > +
> > > +/**
> > > + * TEST: Basic tests for execbuf functionality using system allocator
> > > + * Category: Hardware building block
> > > + * Mega feature: Shared virtual memory
> > > + * Sub-category: execbuf
> > > + * Functionality: fault mode, system allocator
> > > + * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
Using this below will help test automation and reporting:
* TEST: Basic tests for execbuf functionality using system allocator
* Category: Core
* Mega feature: USM
* Sub-category: System allocator
* Functionality: fault mode, system allocator
* GPU: LNL, BMG, PVC
> > > + */
> > > +
> > > +#include <fcntl.h>
> > > +#include <linux/mman.h>
> > > +#include <time.h>
> > > +
> > > +#include "igt.h"
> > > +#include "lib/igt_syncobj.h"
> > > +#include "lib/intel_reg.h"
> > > +#include "xe_drm.h"
> > > +
> > > +#include "xe/xe_ioctl.h"
> > > +#include "xe/xe_query.h"
> > > +#include <string.h>
> > > +
> > > +#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> > > +#define QUARTER_SEC (NSEC_PER_SEC / 4)
> > > +#define FIVE_SEC (5LL * NSEC_PER_SEC)
> > > +
> > > +struct batch_data {
> > > + uint32_t batch[16];
> > > + uint64_t pad;
> > > + uint32_t data;
> > > + uint32_t expected_data;
> > > +};
> > > +
> > > +#define WRITE_VALUE(data__, i__) ({ \
> > > + if (!(data__)->expected_data) \
> > > + (data__)->expected_data = rand() << 12 | (i__); \
> > > + (data__)->expected_data; \
> > > +})
> > > +#define READ_VALUE(data__, i__) ((data__)->expected_data)
Argument i__ is not used.
> > > +
> > > +static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> > > + int *idx)
> > > +{
> > > + batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
> > > + batch[(*idx)++] = sdi_addr;
> > > + batch[(*idx)++] = sdi_addr >> 32;
> > > + batch[(*idx)++] = wdata;
> > > +}
> > > +
> > > +static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> > > + int *idx)
> > > +{
> > > + __write_dword(batch, sdi_addr, wdata, idx);
> > > + batch[(*idx)++] = MI_BATCH_BUFFER_END;
> > > +}
> >
> > Slightly out of scope for this review but the 2 functions above might be
> > helpful under lib/ to prevent adding more duplications of the dword write
> > batch sequence.
> >
>
> Yea we could split out generic batch writing functions into a library at
> some point but agree is probably out of scope for this series.
>
> > > +
> > > +static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride,
> > > + pthread_barrier_t *barrier)
> > > +{
> > > + int i, n_writes = alloc_size / stride;
> > > +
> > > + for (i = 0; i < n_writes; ++i) {
> > > + struct batch_data *data = ptr + i * stride;
> > > +
> > > + igt_assert_eq(data->data, READ_VALUE(data, i));
> > > +
> > > + if (barrier)
> > > + pthread_barrier_wait(barrier);
> > > + }
> > > +}
> > > +
> > > +#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
> >
> > Might be worth creating and propagating a unique file name at runtime, for
> > example with mkstemp(), in order to avoid potential concurrent accesses from
> > multiple instances of the test.
> >
>
> Let me look into that.
>
> > > +
> > > +struct process_data {
> > > + pthread_mutex_t mutex;
> > > + pthread_cond_t cond;
> > > + pthread_barrier_t barrier;
> > > + bool go;
> > > +};
> > > +
> > > +static void wait_pdata(struct process_data *pdata)
> > > +{
> > > + pthread_mutex_lock(&pdata->mutex);
> > > + while (!pdata->go)
> > > + pthread_cond_wait(&pdata->cond, &pdata->mutex);
> > > + pthread_mutex_unlock(&pdata->mutex);
> > > +}
> > > +
> > > +static void init_pdata(struct process_data *pdata, int n_engine)
> > > +{
> > > + pthread_mutexattr_t mutex_attr;
> > > + pthread_condattr_t cond_attr;
> > > + pthread_barrierattr_t barrier_attr;
> > > +
> > > + pthread_mutexattr_init(&mutex_attr);
> > > + pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
> > > + pthread_mutex_init(&pdata->mutex, &mutex_attr);
> > > +
> > > + pthread_condattr_init(&cond_attr);
> > > + pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
> > > + pthread_cond_init(&pdata->cond, &cond_attr);
> > > +
> > > + pthread_barrierattr_init(&barrier_attr);
> > > + pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
> > > + pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
> > > +
> > > + pdata->go = false;
> > > +}
> > > +
> > > +static void signal_pdata(struct process_data *pdata)
> > > +{
> > > + pthread_mutex_lock(&pdata->mutex);
> > > + pdata->go = true;
> > > + pthread_cond_broadcast(&pdata->cond);
> > > + pthread_mutex_unlock(&pdata->mutex);
> > > +}
> > > +
> > > +/* many_alloc flags */
> > > +#define MIX_BO_ALLOC (0x1 << 0)
> > > +#define BENCHMARK (0x1 << 1)
> > > +#define CPU_FAULT_THREADS (0x1 << 2)
> > > +#define CPU_FAULT_PROCESS (0x1 << 3)
> > > +#define CPU_FAULT_SAME_PAGE (0x1 << 4)
> > > +
> > > +static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
> > > + unsigned int flags)
> > > +{
> > > + struct process_data *pdata;
> > > + int map_fd;
> > > +
> > > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > + wait_pdata(pdata);
> > > +
> > > + if (flags & CPU_FAULT_SAME_PAGE)
> > > + check_all_pages(ptr, alloc_size, stride, &pdata->barrier);
> > > + else
> > > + check_all_pages(ptr, alloc_size, stride, NULL);
> > > +
> > > + close(map_fd);
> > > + munmap(pdata, sizeof(*pdata));
> > > +}
> > > +
> > > +static void
> > > +check_all_pages_process(void *ptr, uint64_t alloc_size, uint64_t stride,
> > > + int n_process, unsigned int flags)
It would be helpful to add here a comment similar to the one below for
check_all_pages_threads().
> > > +{
> > > + struct process_data *pdata;
> > > + int map_fd, i;
> > > +
> > > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > +
> > > + init_pdata(pdata, n_process);
> > > +
> > > + for (i = 0; i < n_process; ++i) {
> > > + igt_fork(child, 1)
> > > + if (flags & CPU_FAULT_SAME_PAGE)
> > > + process_check(ptr, alloc_size, stride, flags);
> > > + else
> > > + process_check(ptr + stride * i, alloc_size,
> > > + stride * n_process, flags);
> > > + }
> > > +
> > > + signal_pdata(pdata);
> > > + igt_waitchildren();
> > > +
> > > + close(map_fd);
> > > + munmap(pdata, sizeof(*pdata));
> > > +}
> > > +
> > > +struct thread_check_data {
> > > + pthread_t thread;
> > > + pthread_mutex_t *mutex;
> > > + pthread_cond_t *cond;
> > > + pthread_barrier_t *barrier;
> > > + void *ptr;
> > > + uint64_t alloc_size;
> > > + uint64_t stride;
> > > + bool *go;
> > > +};
> > > +
> > > +static void *thread_check(void *data)
> > > +{
> > > + struct thread_check_data *t = data;
> > > +
> > > + pthread_mutex_lock(t->mutex);
> > > + while (!*t->go)
> > > + pthread_cond_wait(t->cond, t->mutex);
> > > + pthread_mutex_unlock(t->mutex);
> > > +
> > > + check_all_pages(t->ptr, t->alloc_size, t->stride, t->barrier);
> > > +
> > > + return NULL;
> > > +}
> > > +
> > > +/*
> > > + * Partition checking of results in chunks which causes multiple threads to
> > > + * fault same VRAM allocation in parallel.
> > > + */
> > > +static void
> > > +check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
> > > + int n_threads, unsigned int flags)
> > > +{
> > > + struct thread_check_data *threads_check_data;
> > > + pthread_mutex_t mutex;
> > > + pthread_cond_t cond;
> > > + pthread_barrier_t barrier;
> > > + int i;
> > > + bool go = false;
> > > +
> > > + threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
> > > + igt_assert(threads_check_data);
> > > +
> > > + pthread_mutex_init(&mutex, 0);
> > > + pthread_cond_init(&cond, 0);
> > > + pthread_barrier_init(&barrier, 0, n_threads);
> > > +
> > > + for (i = 0; i < n_threads; ++i) {
> > > + threads_check_data[i].mutex = &mutex;
> > > + threads_check_data[i].cond = &cond;
> > > + if (flags & CPU_FAULT_SAME_PAGE) {
> > > + threads_check_data[i].barrier = &barrier;
> > > + threads_check_data[i].ptr = ptr;
> > > + threads_check_data[i].alloc_size = alloc_size;
> > > + threads_check_data[i].stride = stride;
> > > + } else {
> > > + threads_check_data[i].barrier = NULL;
> > > + threads_check_data[i].ptr = ptr + stride * i;
> > > + threads_check_data[i].alloc_size = alloc_size;
> > > + threads_check_data[i].stride = n_threads * stride;
> > > + }
> > > + threads_check_data[i].go = &go;
> > > +
> > > + pthread_create(&threads_check_data[i].thread, 0, thread_check,
> > > + &threads_check_data[i]);
> > > + }
> > > +
> > > + pthread_mutex_lock(&mutex);
> > > + go = true;
> > > + pthread_cond_broadcast(&cond);
> > > + pthread_mutex_unlock(&mutex);
> > > +
> > > + for (i = 0; i < n_threads; ++i)
> > > + pthread_join(threads_check_data[i].thread, NULL);
> > > + free(threads_check_data);
> > > +}
> > > +
> > > +static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> > > + uint64_t alloc_size, uint64_t stride,
> > > + struct timespec *tv, uint64_t *submit)
> > > +{
> > > + struct drm_xe_sync sync[1] = {
> > > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
> > > + .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > > + .timeline_value = USER_FENCE_VALUE },
> > > + };
> > > + struct drm_xe_exec exec = {
> > > + .num_batch_buffer = 1,
> > > + .num_syncs = 0,
> > > + .exec_queue_id = exec_queue,
> > > + .syncs = to_user_pointer(&sync),
> > > + };
> > > + uint64_t addr = to_user_pointer(ptr);
> > > + int i, ret, n_writes = alloc_size / stride;
> > > + u64 *exec_ufence = NULL;
> > > + int64_t timeout = FIVE_SEC;
> > > +
> > > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > > + PROT_WRITE, MAP_SHARED |
> > > + MAP_ANONYMOUS, -1, 0);
> > > + igt_assert(exec_ufence != MAP_FAILED);
> > > + memset(exec_ufence, 0, SZ_4K);
> > > + sync[0].addr = to_user_pointer(exec_ufence);
> > > +
> > > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > > + struct batch_data *data = ptr + i * stride;
> > > + uint64_t sdi_offset = (char *)&data->data - (char *)data;
> > > + uint64_t sdi_addr = addr + sdi_offset;
> > > + int b = 0;
> > > +
> > > + write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
> > > + igt_assert(b <= ARRAY_SIZE(data->batch));
> > > + }
> > > +
> > > + igt_nsec_elapsed(tv);
> > > + *submit = igt_nsec_elapsed(tv);
> > > +
> > > + addr = to_user_pointer(ptr);
> > > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > > + struct batch_data *data = ptr + i * stride;
> > > + uint64_t batch_offset = (char *)&data->batch - (char *)data;
> > > + uint64_t batch_addr = addr + batch_offset;
> > > +
> > > + exec.address = batch_addr;
> > > + if (i + 1 == n_writes)
> > > + exec.num_syncs = 1;
> > > + xe_exec(fd, &exec);
> > > + }
> > > +
> > > + ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
> > > + &timeout);
> > > + if (ret) {
> > > + printf("FAIL EXEC_UFENCE_ADDR: 0x%016llx\n", sync[0].addr);
> > > + printf("FAIL EXEC_UFENCE: EXPEXCTED=0x%016llx, ACTUAL=0x%016lx\n",
> > > + USER_FENCE_VALUE, exec_ufence[0]);
s/EXPEXCTED/EXPECTED/
Also, we should probably use IGT's log functions such as igt_info() instead
of printf().
> > > +
> > > + addr = to_user_pointer(ptr);
> > > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > > + struct batch_data *data = ptr + i * stride;
> > > + uint64_t batch_offset = (char *)&data->batch - (char *)data;
> > > + uint64_t batch_addr = addr + batch_offset;
> > > + uint64_t sdi_offset = (char *)&data->data - (char *)data;
> > > + uint64_t sdi_addr = addr + sdi_offset;
> > > +
> > > + printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
> > > + printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
> > > + printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
> > > + (((u64)data->batch[2]) << 32) | data->batch[1]);
> > > + printf("FAIL DARA: EXPEXCTED=0x%08x, ACTUAL=0x%08x\n",
> > > + data->expected_data, data->data);
s/DARA/DATA/ and s/EXPEXCTED/EXPECTED/ and same as above for printf().
> > > + }
> > > + igt_assert_eq(ret, 0);
> > > + }
> > > + munmap(exec_ufence, SZ_4K);
> > > +}
> > > +
> > > +static int va_bits;
> > > +
> > > +#define bind_system_allocator(__sync, __num_sync) \
> > > + __xe_vm_bind_assert(fd, vm, 0, \
> > > + 0, 0, 0, 0x1ull << va_bits, \
> > > + DRM_XE_VM_BIND_OP_MAP, \
> > > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, \
> > > + (__sync), (__num_sync), 0, 0)
> > > +
> > > +#define unbind_system_allocator() \
> > > + __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits, \
> > > + DRM_XE_VM_BIND_OP_UNMAP, 0, \
> > > + NULL, 0, 0, 0, 0)
> >
> > Is there a reason here to favor static variable + macros over helper function
> > with parameters?
> >
>
> va_bits is static as it looked up exactly once when the test loads.
>
> I could change these helpers to static functions rather than macros if
> that is preferred.
It forces variable names in the caller but that is just a minor issue.
>
> > > +
> > > +#define odd(__i) (__i & 1)
> > > +
> > > +struct aligned_alloc_type {
> > > + void *__ptr;
> > > + void *ptr;
> > > + size_t __size;
> > > + size_t size;
> > > +};
> > > +
> > > +static struct aligned_alloc_type __aligned_alloc(size_t alignment, size_t size)
> > > +{
> > > + struct aligned_alloc_type aligned_alloc_type;
> > > +
> > > + aligned_alloc_type.__ptr = mmap(NULL, alignment + size, PROT_NONE, MAP_PRIVATE |
> > > + MAP_ANONYMOUS, -1, 0);
> > > + igt_assert(aligned_alloc_type.__ptr != MAP_FAILED);
> > > +
> > > + aligned_alloc_type.ptr = (void *)ALIGN((uint64_t)aligned_alloc_type.__ptr, alignment);
> > > + aligned_alloc_type.size = size;
> > > + aligned_alloc_type.__size = size + alignment;
> > > +
> > > + return aligned_alloc_type;
> > > +}
> > > +
> > > +static void __aligned_free(struct aligned_alloc_type *aligned_alloc_type)
> > > +{
> > > + munmap(aligned_alloc_type->__ptr, aligned_alloc_type->__size);
> > > +}
> > > +
> > > +static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_type)
> > > +{
> > > + size_t begin_size = (size_t)(aligned_alloc_type->ptr - aligned_alloc_type->__ptr);
> > > +
> > > + if (begin_size)
> > > + munmap(aligned_alloc_type->__ptr, begin_size);
> > > + if (aligned_alloc_type->__size - aligned_alloc_type->size - begin_size)
> > > + munmap(aligned_alloc_type->ptr + aligned_alloc_type->size,
> > > + aligned_alloc_type->__size - aligned_alloc_type->size - begin_size);
> > > +}
> > > +
> > > +/**
> > > + * SUBTEST: unaligned-alloc
> > > + * Description: allocate unaligned sizes of memory
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: fault-benchmark
> > > + * Description: Benchmark how long GPU / CPU take
> > > + * Test category: performance test
> > > + *
> > > + * SUBTEST: fault-threads-benchmark
> > > + * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
> > > + * Test category: performance and functionality test
> > > + *
> > > + * SUBTEST: fault-threads-same-page-benchmark
> > > + * Description: Benchmark how long GPU / CPU take, reading results with multiple threads, hammer same page
> > > + * Test category: performance and functionality test
> > > + *
> > > + * SUBTEST: fault-process-benchmark
> > > + * Description: Benchmark how long GPU / CPU take, reading results with multiple process
> > > + * Test category: performance and functionality test
> > > + *
> > > + * SUBTEST: fault-process-same-page-benchmark
> > > + * Description: Benchmark how long GPU / CPU take, reading results with multiple process, hammer same page
> > > + * Test category: performance and functionality test
> > > + *
> > > + * SUBTEST: evict-malloc
> > > + * Description: trigger eviction of VRAM allocated via malloc
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: evict-malloc-mix-bo
> > > + * Description: trigger eviction of VRAM allocated via malloc and BO create
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: processes-evict-malloc
> > > + * Description: multi-process trigger eviction of VRAM allocated via malloc
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: processes-evict-malloc-mix-bo
> > > + * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
> > > + * Test category: stress test
> > > + */
> > > +
> > > +static void
> > > +many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> > > + uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
> > > + pthread_barrier_t *barrier, unsigned int flags)
> > > +{
> > > + uint32_t vm, exec_queue;
> > > + int num_allocs = flags & BENCHMARK ? 1 :
> > > + (9 * (total_alloc / alloc_size)) / 8;
> > > + struct aligned_alloc_type *allocs;
> > > + uint32_t *bos = NULL;
> > > + struct timespec tv = {};
> > > + uint64_t submit, read, elapsed;
> > > + int i;
> > > +
> > > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> > > +
> > > + bind_system_allocator(NULL, 0);
> > > +
> > > + allocs = malloc(sizeof(*allocs) * num_allocs);
> > > + igt_assert(allocs);
> > > + memset(allocs, 0, sizeof(*allocs) * num_allocs);
> > > +
> > > + if (flags & MIX_BO_ALLOC) {
> > > + bos = malloc(sizeof(*bos) * num_allocs);
> > > + igt_assert(bos);
> > > + memset(bos, 0, sizeof(*bos) * num_allocs);
> > > + }
> > > +
> > > + for (i = 0; i < num_allocs; ++i) {
> > > + struct aligned_alloc_type alloc;
> > > +
> > > + if (flags & MIX_BO_ALLOC && odd(i)) {
> > > + uint32_t bo_flags =
> > > + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > > +
> > > + alloc = __aligned_alloc(SZ_2M, alloc_size);
> > > + igt_assert(alloc.ptr);
> > > +
> > > + bos[i] = xe_bo_create(fd, vm, alloc_size,
> > > + vram_if_possible(fd, eci->gt_id),
> > > + bo_flags);
> > > + alloc.ptr = xe_bo_map_fixed(fd, bos[i], alloc_size,
> > > + to_user_pointer(alloc.ptr));
> > > + xe_vm_bind_async(fd, vm, 0, bos[i], 0,
> > > + to_user_pointer(alloc.ptr),
> > > + alloc_size, 0, 0);
> > > + } else {
> > > + alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
> > > + igt_assert(alloc.ptr);
> > > + }
> > > + allocs[i] = alloc;
> > > +
> > > + touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> > > + &tv, &submit);
> > > + }
> > > +
> > > + if (barrier)
> > > + pthread_barrier_wait(barrier);
> > > +
> > > + for (i = 0; i < num_allocs; ++i) {
> > > + if (flags & BENCHMARK)
> > > + read = igt_nsec_elapsed(&tv);
> > > +#define NUM_CHECK_THREADS 8
> > > + if (flags & CPU_FAULT_PROCESS)
> > > + check_all_pages_process(allocs[i].ptr, alloc_size, stride,
> > > + NUM_CHECK_THREADS, flags);
> > > + else if (flags & CPU_FAULT_THREADS)
> > > + check_all_pages_threads(allocs[i].ptr, alloc_size, stride,
> > > + NUM_CHECK_THREADS, flags);
> > > + else
> > > + check_all_pages(allocs[i].ptr, alloc_size, stride, NULL);
> > > + if (flags & BENCHMARK) {
> > > + elapsed = igt_nsec_elapsed(&tv);
> > > + printf("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
Also here, prefer IGT's log functions.
> > > + 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
> > > + 1e-3 * (elapsed - submit),
> > > + 1e-3 * (elapsed - read));
> > > + }
> > > + if (bos && bos[i]) {
> > > + __aligned_free(allocs + i);
> > > + gem_close(fd, bos[i]);
> > > + } else {
> > > + free(allocs[i].ptr);
> > > + }
> > > + }
> > > + if (bos)
> > > + free(bos);
> > > + free(allocs);
> > > + xe_exec_queue_destroy(fd, exec_queue);
> > > + xe_vm_destroy(fd, vm);
> > > +}
> > > +
> > > +static void process_evict(struct drm_xe_engine_class_instance *hwe,
> > > + uint64_t total_alloc, uint64_t alloc_size,
> > > + uint64_t stride, unsigned int flags)
> > > +{
> > > + struct process_data *pdata;
> > > + int map_fd;
> > > + int fd;
> > > +
> > > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > + wait_pdata(pdata);
> > > +
> > > + fd = drm_open_driver(DRIVER_XE);
> > > + many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
> > > + flags);
> > > + drm_close_driver(fd);
> > > +
> > > + close(map_fd);
> > > + munmap(pdata, sizeof(*pdata));
> > > +}
> > > +
> > > +static void
> > > +processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
> > > + unsigned int flags)
> > > +{
> > > + struct drm_xe_engine_class_instance *hwe;
> > > + struct process_data *pdata;
> > > + int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
> > > + int map_fd;
> > > +
> > > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > +
> > > + xe_for_each_engine(fd, hwe) {
> > > + igt_assert(hwe->gt_id < 2);
> > > + n_engine_gt[hwe->gt_id]++;
> > > + n_engine++;
> > > + }
> > > +
> > > + init_pdata(pdata, n_engine);
> > > +
> > > + xe_for_each_engine(fd, hwe) {
> > > + igt_fork(child, 1)
> > > + process_evict(hwe,
> > > + xe_visible_vram_size(fd, hwe->gt_id) /
> > > + n_engine_gt[hwe->gt_id], alloc_size,
> > > + stride, flags);
> > > + }
> > > +
> > > + signal_pdata(pdata);
> > > + igt_waitchildren();
> > > +
> > > + close(map_fd);
> > > + munmap(pdata, sizeof(*pdata));
> > > +}
> > > +
> > > +#define CPU_FAULT (0x1 << 0)
> > > +#define REMAP (0x1 << 1)
> > > +#define MIDDLE (0x1 << 2)
> > > +
> > > +/**
> > > + * SUBTEST: partial-munmap-cpu-fault
> > > + * Description: munmap partially with cpu access in between
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: partial-munmap-no-cpu-fault
> > > + * Description: munmap partially with no cpu access in between
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: partial-remap-cpu-fault
> > > + * Description: remap partially with cpu access in between
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: partial-remap-no-cpu-fault
> > > + * Description: remap partially with no cpu access in between
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: partial-middle-munmap-cpu-fault
> > > + * Description: munmap middle with cpu access in between
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: partial-middle-munmap-no-cpu-fault
> > > + * Description: munmap middle with no cpu access in between
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: partial-middle-remap-cpu-fault
> > > + * Description: remap middle with cpu access in between
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: partial-middle-remap-no-cpu-fault
> > > + * Description: remap middle with no cpu access in between
> > > + * Test category: functionality test
> > > + */
> > > +
> > > +static void
> > > +partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> > > +{
> > > + struct drm_xe_sync sync[1] = {
> > > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > > + .timeline_value = USER_FENCE_VALUE },
> > > + };
> > > + struct drm_xe_exec exec = {
> > > + .num_batch_buffer = 1,
> > > + .num_syncs = 1,
> > > + .syncs = to_user_pointer(sync),
> > > + };
> > > + struct {
> > > + uint32_t batch[16];
> > > + uint64_t pad;
> > > + uint64_t vm_sync;
> > > + uint64_t exec_sync;
> > > + uint32_t data;
> > > + uint32_t expected_data;
> > > + } *data;
> > > + size_t bo_size = SZ_2M, unmap_offset = 0;
> > > + uint32_t vm, exec_queue;
> > > + u64 *exec_ufence = NULL;
> > > + int i;
> > > + void *old, *new = NULL;
> > > + struct aligned_alloc_type alloc;
> > > +
> > > + if (flags & MIDDLE)
> > > + unmap_offset = bo_size / 4;
> > > +
> > > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > +
> > > + alloc = __aligned_alloc(bo_size, bo_size);
> > > + igt_assert(alloc.ptr);
> > > +
> > > + data = mmap(alloc.ptr, bo_size, PROT_READ | PROT_WRITE,
> > > + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
> > > + igt_assert(data != MAP_FAILED);
> > > + memset(data, 0, bo_size);
> > > + old = data;
> > > +
> > > + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> > > +
> > > + sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > > + bind_system_allocator(sync, 1);
> > > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> > > + data[0].vm_sync = 0;
> > > +
> > > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > > + PROT_WRITE, MAP_SHARED |
> > > + MAP_ANONYMOUS, -1, 0);
> > > + igt_assert(exec_ufence != MAP_FAILED);
> > > + memset(exec_ufence, 0, SZ_4K);
> > > +
> > > + for (i = 0; i < 2; i++) {
> > > + uint64_t addr = to_user_pointer(data);
> > > + uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> > > + uint64_t sdi_addr = addr + sdi_offset;
> > > + int b = 0;
> > > +
> > > + write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
> > > + igt_assert(b <= ARRAY_SIZE(data[i].batch));
> > > +
> > > + if (!i)
> > > + data = old + unmap_offset + bo_size / 2;
> > > + }
> > > +
> > > + data = old;
> > > + exec.exec_queue_id = exec_queue;
> > > +
> > > + for (i = 0; i < 2; i++) {
> > > + uint64_t addr = to_user_pointer(data);
> > > + uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> > > + uint64_t batch_addr = addr + batch_offset;
> > > +
> > > + sync[0].addr = new ? to_user_pointer(new) :
> > > + to_user_pointer(exec_ufence);
> > > + exec.address = batch_addr;
> > > + xe_exec(fd, &exec);
> > > +
> > > + xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
> > > + exec_queue, FIVE_SEC);
> > > + if (i || (flags & CPU_FAULT))
> > > + igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
> > > + exec_ufence[0] = 0;
> > > +
> > > + if (!i) {
> > > + data = old + unmap_offset + bo_size / 2;
> > > + munmap(old + unmap_offset, bo_size / 2);
> > > + if (flags & REMAP) {
> > > + new = mmap(old + unmap_offset, bo_size / 2,
> > > + PROT_READ | PROT_WRITE,
> > > + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
> > > + MAP_LOCKED, -1, 0);
> > > + igt_assert(new != MAP_FAILED);
> > > + }
> > > + }
> > > + }
> > > +
> > > + xe_exec_queue_destroy(fd, exec_queue);
> > > + munmap(exec_ufence, SZ_4K);
> > > + __aligned_free(&alloc);
> > > + if (new)
> > > + munmap(new, bo_size / 2);
> > > + xe_vm_destroy(fd, vm);
> > > +}
> > > +
> > > +#define MAX_N_EXEC_QUEUES 16
> > > +
> > > +#define MMAP (0x1 << 0)
> > > +#define NEW (0x1 << 1)
> > > +#define BO_UNMAP (0x1 << 2)
> > > +#define FREE (0x1 << 3)
> > > +#define BUSY (0x1 << 4)
> > > +#define BO_MAP (0x1 << 5)
> > > +#define RACE (0x1 << 6)
> > > +#define SKIP_MEMSET (0x1 << 7)
> > > +#define FAULT (0x1 << 8)
> > > +#define FILE_BACKED (0x1 << 9)
> > > +#define LOCK (0x1 << 10)
> > > +#define MMAP_SHARED (0x1 << 11)
> > > +#define HUGE_PAGE (0x1 << 12)
> > > +#define SHARED_ALLOC (0x1 << 13)
> > > +#define FORK_READ (0x1 << 14)
> > > +#define FORK_READ_AFTER (0x1 << 15)
> > > +#define MREMAP (0x1 << 16)
> > > +#define DONTUNMAP (0x1 << 17)
> > > +#define READ_ONLY_REMAP (0x1 << 18)
> > > +#define SYNC_EXEC (0x1 << 19)
> > > +#define EVERY_OTHER_CHECK (0x1 << 20)
> > > +#define MULTI_FAULT (0x1 << 21)
> > > +
> > > +#define N_MULTI_FAULT 4
> > > +
> > > +/**
> > > + * SUBTEST: once-%s
> > > + * Description: Run %arg[1] system allocator test only once
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: once-large-%s
> > > + * Description: Run %arg[1] system allocator test only once with large allocation
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: twice-%s
> > > + * Description: Run %arg[1] system allocator test twice
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: twice-large-%s
> > > + * Description: Run %arg[1] system allocator test twice with large allocation
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: many-%s
> > > + * Description: Run %arg[1] system allocator test many times
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: many-stride-%s
> > > + * Description: Run %arg[1] system allocator test many times with a stride on each exec
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: many-execqueues-%s
> > > + * Description: Run %arg[1] system allocator test on many exec_queues
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: many-large-%s
> > > + * Description: Run %arg[1] system allocator test many times with large allocations
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: many-large-execqueues-%s
> > > + * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
> > > + *
> > > + * SUBTEST: threads-many-%s
> > > + * Description: Run %arg[1] system allocator threaded test many times
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-many-stride-%s
> > > + * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-many-execqueues-%s
> > > + * Description: Run %arg[1] system allocator threaded test on many exec_queues
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-many-large-%s
> > > + * Description: Run %arg[1] system allocator threaded test many times with large allocations
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-many-large-execqueues-%s
> > > + * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
> > > + *
> > > + * SUBTEST: threads-shared-vm-many-%s
> > > + * Description: Run %arg[1] system allocator threaded, shared vm test many times
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-shared-vm-many-stride-%s
> > > + * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-shared-vm-many-execqueues-%s
> > > + * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-shared-vm-many-large-%s
> > > + * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-shared-vm-many-large-execqueues-%s
> > > + * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: process-many-%s
> > > + * Description: Run %arg[1] system allocator multi-process test many times
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: process-many-stride-%s
> > > + * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: process-many-execqueues-%s
> > > + * Description: Run %arg[1] system allocator multi-process test on many exec_queues
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: process-many-large-%s
> > > + * Description: Run %arg[1] system allocator multi-process test many times with large allocations
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: process-many-large-execqueues-%s
> > > + * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
> > > + *
> > > + * SUBTEST: fault
> > > + * Description: use a bad system allocator address resulting in a fault
> > > + * Test category: bad input
> > > + *
> > > + * arg[1]:
> > > + *
> > > + * @malloc: malloc single buffer for all execs, issue a command which will trigger multiple faults
> > > + * @malloc-multi-fault: malloc single buffer for all execs
> > > + * @malloc-fork-read: malloc single buffer for all execs, fork a process to read test output
> > > + * @malloc-fork-read-after: malloc single buffer for all execs, fork a process to read test output, check again after fork returns in parent
> > > + * @malloc-mlock: malloc and mlock single buffer for all execs
> > > + * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
> > > + * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
> > > + * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
> > > + * @mmap: mmap single buffer for all execs
> > > + * @mmap-remap: mmap and mremap a buffer for all execs
> > > + * @mmap-remap-dontunmap: mmap and mremap a buffer with dontunmap flag for all execs
> > > + * @mmap-remap-ro: mmap and mremap a read-only buffer for all execs
> > > + * @mmap-remap-ro-dontunmap: mmap and mremap a read-only buffer with dontunmap flag for all execs
> > > + * @mmap-remap-eocheck: mmap and mremap a buffer for all execs, check data every other loop iteration
> > > + * @mmap-remap-dontunmap-eocheck: mmap and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
> > > + * @mmap-remap-ro-eocheck: mmap and mremap a read-only buffer for all execs, check data every other loop iteration
> > > + * @mmap-remap-ro-dontunmap-eocheck: mmap and mremap a read-only buffer with dontunmap flag for all execs, check data every other loop iteration
> > > + * @mmap-huge: mmap huge page single buffer for all execs
> > > + * @mmap-shared: mmap shared single buffer for all execs
> > > + * @mmap-shared-remap: mmap shared and mremap a buffer for all execs
> > > + * @mmap-shared-remap-dontunmap: mmap shared and mremap a buffer with dontunmap flag for all execs
> > > + * @mmap-shared-remap-eocheck: mmap shared and mremap a buffer for all execs, check data every other loop iteration
> > > + * @mmap-shared-remap-dontunmap-eocheck: mmap shared and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
> > > + * @mmap-mlock: mmap and mlock single buffer for all execs
> > > + * @mmap-file: mmap single buffer, with file backing, for all execs
> > > + * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
> > > + * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
> > > + * @free: malloc and free buffer for each exec
> > > + * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
> > > + * @new: malloc a new buffer for each exec
> > > + * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
> > > + * @new-bo-map: malloc a new buffer or map BO for each exec
> > > + * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
> > > + * @mmap-free: mmap and free buffer for each exec
> > > + * @mmap-free-huge: mmap huge page and free buffer for each exec
> > > + * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
> > > + * @mmap-new: mmap a new buffer for each exec
> > > + * @mmap-new-huge: mmap huge page a new buffer for each exec
> > > + * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
> > > + * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
> > > + * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
> > > + * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
> > > + * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
> > > + * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
> > > + * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
> > > + * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
> > > + * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
> > > + * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
> > > + * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
> > > + * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
> > > + * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
> > > + * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
> > > + * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > > + * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
> > > + * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > > + * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
> > > + * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
> > > + * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
> > > + * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
> > > + * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > > + * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
> > > + * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
> > > + * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > > + *
> > > + * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
> > > + * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
> > > + * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-shared-alloc-many-stride-malloc
> > > + * Description: Create multiple threads with a faults on different hardware engines to same addresses
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-shared-alloc-many-stride-malloc-sync
> > > + * Description: Create multiple threads with a faults on different hardware engines to same addresses, syncing on each exec
> > > + * Test category: stress test
> > > + *
> > > + * SUBTEST: threads-shared-alloc-many-stride-malloc-race
> > > + * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
> > > + * Test category: stress test
> > > + */
> > > +
> > > +struct test_exec_data {
> > > + uint32_t batch[32];
> > > + uint64_t pad;
> > > + uint64_t vm_sync;
> > > + uint64_t exec_sync;
> > > + uint32_t data;
> > > + uint32_t expected_data;
> > > +};
> > > +
> > > +static void
> > > +test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > > + int n_exec_queues, int n_execs, size_t bo_size,
> > > + size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
> > > + unsigned int flags)
> > > +{
> > > + uint64_t addr;
> > > + struct drm_xe_sync sync[1] = {
> > > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > > + .timeline_value = USER_FENCE_VALUE },
> > > + };
> > > + struct drm_xe_exec exec = {
> > > + .num_batch_buffer = 1,
> > > + .num_syncs = 1,
> > > + .syncs = to_user_pointer(sync),
> > > + };
> > > + uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > > + struct test_exec_data *data, *next_data = NULL;
> > > + uint32_t bo_flags;
> > > + uint32_t bo = 0;
> > > + void **pending_free;
> > > + u64 *exec_ufence = NULL;
> > > + int i, j, b, file_fd = -1, prev_idx;
> > > + bool free_vm = false;
> > > + size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
> > > + size_t orig_size = bo_size;
> > > + struct aligned_alloc_type aligned_alloc_type;
> > > +
> > > + if (flags & MULTI_FAULT) {
> > > + if (!bo_size)
> > > + return;
> > > +
> > > + bo_size *= N_MULTI_FAULT;
> > > + }
> > > +
> > > + if (flags & SHARED_ALLOC)
> > > + return;
> > > +
> > > + if (flags & EVERY_OTHER_CHECK && odd(n_execs))
> > > + return;
> > > +
> > > + if (flags & EVERY_OTHER_CHECK)
> > > + igt_assert(flags & MREMAP);
> > > +
> > > + igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> > > +
> > > + if (flags & NEW && !(flags & FREE)) {
> > > + pending_free = malloc(sizeof(*pending_free) * n_execs);
> > > + igt_assert(pending_free);
> > > + memset(pending_free, 0, sizeof(*pending_free) * n_execs);
> > > + }
> > > +
> > > + if (!vm) {
> > > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > + free_vm = true;
> > > + }
> > > + if (!bo_size) {
> > > + if (!stride) {
> > > + bo_size = sizeof(*data) * n_execs;
> > > + bo_size = xe_bb_size(fd, bo_size);
> > > + } else {
> > > + bo_size = stride * n_execs * sizeof(*data);
> > > + bo_size = xe_bb_size(fd, bo_size);
> > > + }
> > > + }
> > > + if (flags & HUGE_PAGE) {
> > > + aligned_size = ALIGN(aligned_size, SZ_2M);
> > > + bo_size = ALIGN(bo_size, SZ_2M);
> > > + }
> > > +
> > > + if (alloc) {
> > > + data = alloc;
> > > + } else {
> > > + if (flags & MMAP) {
> > > + int mmap_flags = MAP_FIXED;
> > > +
> > > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > > + data = aligned_alloc_type.ptr;
> > > + igt_assert(data);
> > > + __aligned_partial_free(&aligned_alloc_type);
> > > +
> > > + if (flags & MMAP_SHARED)
> > > + mmap_flags |= MAP_SHARED;
> > > + else
> > > + mmap_flags |= MAP_PRIVATE;
> > > +
> > > + if (flags & HUGE_PAGE)
> > > + mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
> > > +
> > > + if (flags & FILE_BACKED) {
> > > + char name[1024];
> > > +
> > > + igt_assert(!(flags & NEW));
> > > +
> > > + sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
> > > + getpid());
> >
> > Might be another candidate to use mkstemp() or similar.
> >
>
> Will look into that.
>
> > > + file_fd = open(name, O_RDWR | O_CREAT, 0x666);
> > > + posix_fallocate(file_fd, 0, bo_size);
> > > + } else {
> > > + mmap_flags |= MAP_ANONYMOUS;
> > > + }
> > > +
> > > + data = mmap(data, bo_size, PROT_READ |
> > > + PROT_WRITE, mmap_flags, file_fd, 0);
> > > + igt_assert(data != MAP_FAILED);
> > > + } else {
> > > + data = aligned_alloc(aligned_size, bo_size);
> > > + igt_assert(data);
> > > + }
> > > + if (!(flags & SKIP_MEMSET))
> > > + memset(data, 0, bo_size);
> > > + if (flags & LOCK) {
> > > + igt_assert(!(flags & NEW));
> > > + mlock(data, bo_size);
> > > + }
> > > + }
> > > +
> > > + for (i = 0; i < n_exec_queues; i++)
> > > + exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> > > +
> > > + sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > > + if (free_vm) {
> > > + bind_system_allocator(sync, 1);
> > > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> > > + }
> > > + data[0].vm_sync = 0;
> > > +
> > > + addr = to_user_pointer(data);
> > > +
> > > + if (flags & BO_UNMAP) {
> > > + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > > + bo = xe_bo_create(fd, vm, bo_size,
> > > + vram_if_possible(fd, eci->gt_id), bo_flags);
> > > + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
> > > +
> > > + __xe_vm_bind_assert(fd, vm, 0,
> > > + 0, 0, addr, bo_size,
> > > + DRM_XE_VM_BIND_OP_MAP,
> > > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> > > + 1, 0, 0);
> > > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
> > > + FIVE_SEC);
> > > + data[0].vm_sync = 0;
> > > + gem_close(fd, bo);
> > > + bo = 0;
> > > + }
> > > +
> > > + if (!(flags & RACE)) {
> > > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > > + PROT_WRITE, MAP_SHARED |
> > > + MAP_ANONYMOUS, -1, 0);
> > > + igt_assert(exec_ufence != MAP_FAILED);
> > > + memset(exec_ufence, 0, SZ_4K);
> > > + }
> > > +
> > > + for (i = 0; i < n_execs; i++) {
> > > + int idx = !stride ? i : i * stride, next_idx = !stride
> > > + ? (i + 1) : (i + 1) * stride;
> > > + uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
> > > + uint64_t batch_addr = addr + batch_offset;
> > > + uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
> > > + uint64_t sdi_addr = addr + sdi_offset;
> > > + int e = i % n_exec_queues, err;
> > > + bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> > > + bool fault_injected = (FAULT & flags) && i > n_execs;
> > > +
> > > + if (barrier)
> > > + pthread_barrier_wait(barrier);
> > > +
> > > + if (flags & MULTI_FAULT) {
> > > + b = 0;
> > > + for (j = 0; j < N_MULTI_FAULT - 1; ++j)
> > > + __write_dword(data[idx].batch,
> > > + sdi_addr + j * orig_size,
> > > + WRITE_VALUE(&data[idx], idx), &b);
> > > + write_dword(data[idx].batch, sdi_addr + j * orig_size,
> > > + WRITE_VALUE(&data[idx], idx), &b);
> > > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > > + } else if (!(flags & EVERY_OTHER_CHECK)) {
> > > + b = 0;
> > > + write_dword(data[idx].batch, sdi_addr,
> > > + WRITE_VALUE(&data[idx], idx), &b);
> > > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > > + } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
> > > + b = 0;
> > > + write_dword(data[idx].batch, sdi_addr,
> > > + WRITE_VALUE(&data[idx], idx), &b);
> > > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > > +
> > > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > > + next_data = aligned_alloc_type.ptr;
> > > + igt_assert(next_data);
> > > + __aligned_partial_free(&aligned_alloc_type);
> > > +
> > > + b = 0;
> > > + write_dword(data[next_idx].batch,
> > > + to_user_pointer(next_data) +
> > > + (char *)&data[next_idx].data - (char *)data,
> > > + WRITE_VALUE(&data[next_idx], next_idx), &b);
> > > + igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
> > > + }
> > > +
> > > + if (!exec_ufence)
> > > + data[idx].exec_sync = 0;
> > > +
> > > + sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
> > > + addr + (char *)&data[idx].exec_sync - (char *)data;
> > > +
> > > + exec.exec_queue_id = exec_queues[e];
> > > + if (fault_inject)
> > > + exec.address = batch_addr * 2;
> > > + else
> > > + exec.address = batch_addr;
> > > +
> > > + if (fault_injected) {
> > > + err = __xe_exec(fd, &exec);
> > > + igt_assert(err == -ENOENT);
> > > + } else {
> > > + xe_exec(fd, &exec);
> > > + }
> > > +
> > > + if (barrier)
> > > + pthread_barrier_wait(barrier);
> > > +
> > > + if (fault_inject || fault_injected) {
> > > + int64_t timeout = QUARTER_SEC;
> > > +
> > > + err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> > > + &data[idx].exec_sync,
> > > + USER_FENCE_VALUE,
> > > + exec_queues[e], &timeout);
> > > + igt_assert(err == -ETIME || err == -EIO);
> > > + } else {
> > > + xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> > > + &data[idx].exec_sync, USER_FENCE_VALUE,
> > > + exec_queues[e], FIVE_SEC);
> > > + if (flags & LOCK && !i)
> > > + munlock(data, bo_size);
> > > +
> > > + if (flags & MREMAP) {
> > > + void *old = data;
> > > + int remap_flags = MREMAP_MAYMOVE | MREMAP_FIXED;
> > > +
> > > + if (flags & DONTUNMAP)
> > > + remap_flags |= MREMAP_DONTUNMAP;
> > > +
> > > + if (flags & READ_ONLY_REMAP)
> > > + igt_assert(!mprotect(old, bo_size,
> > > + PROT_READ));
> > > +
> > > + if (!next_data) {
> > > + aligned_alloc_type = __aligned_alloc(aligned_size,
> > > + bo_size);
> > > + data = aligned_alloc_type.ptr;
> > > + __aligned_partial_free(&aligned_alloc_type);
> > > + } else {
> > > + data = next_data;
> > > + }
> > > + next_data = NULL;
> > > + igt_assert(data);
> > > +
> > > + data = mremap(old, bo_size, bo_size,
> > > + remap_flags, data);
> > > + igt_assert(data != MAP_FAILED);
> > > +
> > > + if (flags & READ_ONLY_REMAP)
> > > + igt_assert(!mprotect(data, bo_size,
> > > + PROT_READ |
> > > + PROT_WRITE));
> > > +
> > > + addr = to_user_pointer(data);
> > > + if (flags & DONTUNMAP)
> > > + munmap(old, bo_size);
> > > + }
> > > +
> > > + if (!(flags & EVERY_OTHER_CHECK) || odd(i)) {
> > > + if (flags & FORK_READ) {
> > > + igt_fork(child, 1)
> > > + igt_assert_eq(data[idx].data,
> > > + READ_VALUE(&data[idx], idx));
> > > + if (!(flags & FORK_READ_AFTER))
> > > + igt_assert_eq(data[idx].data,
> > > + READ_VALUE(&data[idx], idx));
> > > + igt_waitchildren();
> > > + if (flags & FORK_READ_AFTER)
> > > + igt_assert_eq(data[idx].data,
> > > + READ_VALUE(&data[idx], idx));
> > > + } else {
> > > + igt_assert_eq(data[idx].data,
> > > + READ_VALUE(&data[idx], idx));
> > > +
> > > + if (flags & MULTI_FAULT) {
> > > + for (j = 1; j < N_MULTI_FAULT; ++j) {
> > > + struct test_exec_data *__data =
> > > + ((void *)data) + j * orig_size;
> > > +
> > > + igt_assert_eq(__data[idx].data,
> > > + READ_VALUE(&data[idx], idx));
> > > + }
> > > + }
> > > + }
> > > + if (flags & EVERY_OTHER_CHECK)
> > > + igt_assert_eq(data[prev_idx].data,
> > > + READ_VALUE(&data[prev_idx], idx));
> > > + }
> > > + }
> > > +
> > > + if (exec_ufence)
> > > + exec_ufence[0] = 0;
> > > +
> > > + if (bo) {
> > > + __xe_vm_bind_assert(fd, vm, 0,
> > > + 0, 0, addr, bo_size,
> > > + DRM_XE_VM_BIND_OP_MAP,
> > > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> > > + NULL, 0, 0, 0);
> > > + munmap(data, bo_size);
> > > + gem_close(fd, bo);
> > > + }
> > > +
> > > + if (flags & NEW) {
> > > + if (flags & MMAP) {
> > > + if (flags & FREE)
> > > + munmap(data, bo_size);
> > > + else
> > > + pending_free[i] = data;
> > > + data = mmap(NULL, bo_size, PROT_READ |
> > > + PROT_WRITE, MAP_SHARED |
> > > + MAP_ANONYMOUS, -1, 0);
> > > + igt_assert(data != MAP_FAILED);
> > > + } else if (flags & BO_MAP && (i % 2)) {
The odd() macro defined above can be used here.
> > > + if (!bo) {
> > > + if (flags & FREE)
> > > + free(data);
> > > + else
> > > + pending_free[i] = data;
> > > + }
> > > +
> > > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > > + data = aligned_alloc_type.ptr;
> > > + igt_assert(data);
> > > + __aligned_partial_free(&aligned_alloc_type);
> > > +
> > > + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > > + bo = xe_bo_create(fd, vm, bo_size,
> > > + vram_if_possible(fd, eci->gt_id),
> > > + bo_flags);
> > > + data = xe_bo_map_fixed(fd, bo, bo_size,
> > > + to_user_pointer(data));
> > > +
> > > + xe_vm_bind_async(fd, vm, 0, bo, 0,
> > > + to_user_pointer(data),
> > > + bo_size, 0, 0);
> > > + } else {
> > > + if (!bo) {
> > > + if (flags & FREE)
> > > + free(data);
> > > + else
> > > + pending_free[i] = data;
> > > + }
> > > + bo = 0;
> > > + data = aligned_alloc(aligned_size, bo_size);
> >
> > Large memory leaks come from this ^ aligned_alloc(), see below.
> >
>
> Yea this will leak, thanks for pointing this out. Thomas also raised
> memory leaks as a concern.
>
> Inline below with the changes to fix this...
>
> > > + igt_assert(data);
> > > + }
> > > + addr = to_user_pointer(data);
> > > + if (!(flags & SKIP_MEMSET))
> > > + memset(data, 0, bo_size);
> > > + }
> > > +
> > > + prev_idx = idx;
> > > + }
> > > +
> > > + if (bo) {
> > > + __xe_vm_bind_assert(fd, vm, 0,
> > > + 0, 0, addr, bo_size,
> > > + DRM_XE_VM_BIND_OP_MAP,
> > > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> > > + NULL, 0, 0, 0);
> > > + munmap(data, bo_size);
>
> data = NULL;
>
> > > + gem_close(fd, bo);
> > > + }
> > > +
> > > + if (flags & BUSY)
> > > + igt_assert_eq(unbind_system_allocator(), -EBUSY);
> > > +
> > > + for (i = 0; i < n_exec_queues; i++)
> > > + xe_exec_queue_destroy(fd, exec_queues[i]);
> > > +
> > > + if (exec_ufence)
> > > + munmap(exec_ufence, SZ_4K);
> > > +
> > > + if (flags & LOCK)
> > > + munlock(data, bo_size);
> > > +
> > > + if (file_fd != -1)
> > > + close(file_fd);
> > > +
> > > + if (flags & NEW && !(flags & FREE)) {
> > > + for (i = 0; i < n_execs; i++) {
> > > + if (!pending_free[i])
> > > + continue;
> > > +
> > > + if (flags & MMAP)
> > > + munmap(pending_free[i], bo_size);
> > > + else
> > > + free(pending_free[i]);
> > > + }
> > > + free(pending_free);
> > > + } else {
>
> s/ else {/\nif (data) {
>
> > > + if (flags & MMAP)
> > > + munmap(data, bo_size);
> > > + else if (!alloc)
> > > + free(data);
> >
> > Something seems wrong with the flags logic when skipping this ^ free() for the
> > allocation pointed above.
> >
>
> Yep, see above,
>
> Matt
>
> > Francois
> >
> > > + }
> > > + if (free_vm)
> > > + xe_vm_destroy(fd, vm);
> > > +}
> > > +
> > > +struct thread_data {
> > > + pthread_t thread;
> > > + pthread_mutex_t *mutex;
> > > + pthread_cond_t *cond;
> > > + pthread_barrier_t *barrier;
> > > + int fd;
> > > + struct drm_xe_engine_class_instance *eci;
> > > + int n_exec_queues;
> > > + int n_execs;
> > > + size_t bo_size;
> > > + size_t stride;
> > > + uint32_t vm;
> > > + unsigned int flags;
> > > + void *alloc;
> > > + bool *go;
> > > +};
> > > +
> > > +static void *thread(void *data)
> > > +{
> > > + struct thread_data *t = data;
> > > +
> > > + pthread_mutex_lock(t->mutex);
> > > + while (!*t->go)
> > > + pthread_cond_wait(t->cond, t->mutex);
> > > + pthread_mutex_unlock(t->mutex);
> > > +
> > > + test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
> > > + t->bo_size, t->stride, t->vm, t->alloc, t->barrier,
> > > + t->flags);
> > > +
> > > + return NULL;
> > > +}
> > > +
> > > +static void
> > > +threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> > > + size_t stride, unsigned int flags, bool shared_vm)
> > > +{
> > > + struct drm_xe_engine_class_instance *hwe;
> > > + struct thread_data *threads_data;
> > > + int n_engines = 0, i = 0;
> > > + pthread_mutex_t mutex;
> > > + pthread_cond_t cond;
> > > + pthread_barrier_t barrier;
> > > + uint32_t vm = 0;
> > > + bool go = false;
> > > + void *alloc = NULL;
> > > +
> > > + if ((FILE_BACKED | FORK_READ) & flags)
> > > + return;
> > > +
> > > + xe_for_each_engine(fd, hwe)
> > > + ++n_engines;
> > > +
> > > + if (shared_vm) {
> > > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > + bind_system_allocator(NULL, 0);
> > > + }
> > > +
> > > + if (flags & SHARED_ALLOC) {
> > > + uint64_t alloc_size;
> > > +
> > > + igt_assert(stride);
> > > +
> > > + alloc_size = sizeof(struct test_exec_data) * stride *
> > > + n_execs * n_engines;
> > > + alloc_size = xe_bb_size(fd, alloc_size);
> > > + alloc = aligned_alloc(SZ_2M, alloc_size);
> > > + igt_assert(alloc);
> > > +
> > > + memset(alloc, 0, alloc_size);
> > > + flags &= ~SHARED_ALLOC;
> > > + }
> > > +
> > > + threads_data = calloc(n_engines, sizeof(*threads_data));
> > > + igt_assert(threads_data);
> > > +
> > > + pthread_mutex_init(&mutex, 0);
> > > + pthread_cond_init(&cond, 0);
> > > + pthread_barrier_init(&barrier, 0, n_engines);
> > > +
> > > + xe_for_each_engine(fd, hwe) {
> > > + threads_data[i].mutex = &mutex;
> > > + threads_data[i].cond = &cond;
> > > + threads_data[i].barrier = (flags & SYNC_EXEC) ? &barrier : NULL;
> > > + threads_data[i].fd = fd;
> > > + threads_data[i].eci = hwe;
> > > + threads_data[i].n_exec_queues = n_exec_queues;
> > > + threads_data[i].n_execs = n_execs;
> > > + threads_data[i].bo_size = bo_size;
> > > + threads_data[i].stride = stride;
> > > + threads_data[i].vm = vm;
> > > + threads_data[i].flags = flags;
> > > + threads_data[i].alloc = alloc ? alloc + i *
> > > + sizeof(struct test_exec_data) : NULL;
> > > + threads_data[i].go = &go;
> > > + pthread_create(&threads_data[i].thread, 0, thread,
> > > + &threads_data[i]);
> > > + ++i;
> > > + }
> > > +
> > > + pthread_mutex_lock(&mutex);
> > > + go = true;
> > > + pthread_cond_broadcast(&cond);
> > > + pthread_mutex_unlock(&mutex);
> > > +
> > > + for (i = 0; i < n_engines; ++i)
> > > + pthread_join(threads_data[i].thread, NULL);
Just a thought, the SVM page fault count is provided in GT stats:
cat /sys/kernel/debug/dri/*/gt*/stats
svm_pagefault_count: 134785
Wondering if it would make sense to read the value before and after test
execution then compare the delta with the expected number of page faults.
Francois
> > > +
> > > + if (shared_vm) {
> > > + int ret;
> > > +
> > > + if (flags & MMAP) {
> > > + int tries = 300;
> > > +
> > > + while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
> > > + sleep(.01);
> > > + --tries;
> > > + }
> > > + igt_assert_eq(ret, 0);
> > > + }
> > > + xe_vm_destroy(fd, vm);
> > > + if (alloc)
> > > + free(alloc);
> > > + }
> > > + free(threads_data);
> > > +}
> > > +
> > > +static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
> > > + int n_execs, size_t bo_size, size_t stride,
> > > + unsigned int flags)
> > > +{
> > > + struct process_data *pdata;
> > > + int map_fd;
> > > + int fd;
> > > +
> > > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > + wait_pdata(pdata);
> > > +
> > > + fd = drm_open_driver(DRIVER_XE);
> > > + test_exec(fd, hwe, n_exec_queues, n_execs,
> > > + bo_size, stride, 0, NULL, NULL, flags);
> > > + drm_close_driver(fd);
> > > +
> > > + close(map_fd);
> > > + munmap(pdata, sizeof(*pdata));
> > > +}
> > > +
> > > +static void
> > > +processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> > > + size_t stride, unsigned int flags)
> > > +{
> > > + struct drm_xe_engine_class_instance *hwe;
> > > + struct process_data *pdata;
> > > + int map_fd;
> > > +
> > > + if (flags & FORK_READ)
> > > + return;
> > > +
> > > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > +
> > > + init_pdata(pdata, 0);
> > > +
> > > + xe_for_each_engine(fd, hwe) {
> > > + igt_fork(child, 1)
> > > + process(hwe, n_exec_queues, n_execs, bo_size,
> > > + stride, flags);
> > > + }
> > > +
> > > + signal_pdata(pdata);
> > > + igt_waitchildren();
> > > +
> > > + close(map_fd);
> > > + munmap(pdata, sizeof(*pdata));
> > > +}
> > > +
> > > +struct section {
> > > + const char *name;
> > > + unsigned int flags;
> > > +};
> > > +
> > > +igt_main
> > > +{
> > > + struct drm_xe_engine_class_instance *hwe;
> > > + const struct section sections[] = {
> > > + { "malloc", 0 },
> > > + { "malloc-multi-fault", MULTI_FAULT },
> > > + { "malloc-fork-read", FORK_READ },
> > > + { "malloc-fork-read-after", FORK_READ | FORK_READ_AFTER },
> > > + { "malloc-mlock", LOCK },
> > > + { "malloc-race", RACE },
> > > + { "malloc-busy", BUSY },
> > > + { "malloc-bo-unmap", BO_UNMAP },
> > > + { "mmap", MMAP },
> > > + { "mmap-remap", MMAP | MREMAP },
> > > + { "mmap-remap-dontunmap", MMAP | MREMAP | DONTUNMAP },
> > > + { "mmap-remap-ro", MMAP | MREMAP | READ_ONLY_REMAP },
> > > + { "mmap-remap-ro-dontunmap", MMAP | MREMAP | DONTUNMAP |
> > > + READ_ONLY_REMAP },
> > > + { "mmap-remap-eocheck", MMAP | MREMAP | EVERY_OTHER_CHECK },
> > > + { "mmap-remap-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
> > > + EVERY_OTHER_CHECK },
> > > + { "mmap-remap-ro-eocheck", MMAP | MREMAP | READ_ONLY_REMAP |
> > > + EVERY_OTHER_CHECK },
> > > + { "mmap-remap-ro-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
> > > + READ_ONLY_REMAP | EVERY_OTHER_CHECK },
> > > + { "mmap-huge", MMAP | HUGE_PAGE },
> > > + { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
> > > + { "mmap-shared-remap", MMAP | LOCK | MMAP_SHARED | MREMAP },
> > > + { "mmap-shared-remap-dontunmap", MMAP | LOCK | MMAP_SHARED |
> > > + MREMAP | DONTUNMAP },
> > > + { "mmap-shared-remap-eocheck", MMAP | LOCK | MMAP_SHARED |
> > > + MREMAP | EVERY_OTHER_CHECK },
> > > + { "mmap-shared-remap-dontunmap-eocheck", MMAP | LOCK |
> > > + MMAP_SHARED | MREMAP | DONTUNMAP | EVERY_OTHER_CHECK },
> > > + { "mmap-mlock", MMAP | LOCK },
> > > + { "mmap-file", MMAP | FILE_BACKED },
> > > + { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
> > > + { "mmap-race", MMAP | RACE },
> > > + { "free", NEW | FREE },
> > > + { "free-race", NEW | FREE | RACE },
> > > + { "new", NEW },
> > > + { "new-race", NEW | RACE },
> > > + { "new-bo-map", NEW | BO_MAP },
> > > + { "new-busy", NEW | BUSY },
> > > + { "mmap-free", MMAP | NEW | FREE },
> > > + { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
> > > + { "mmap-free-race", MMAP | NEW | FREE | RACE },
> > > + { "mmap-new", MMAP | NEW },
> > > + { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
> > > + { "mmap-new-race", MMAP | NEW | RACE },
> > > + { "malloc-nomemset", SKIP_MEMSET },
> > > + { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
> > > + { "malloc-race-nomemset", SKIP_MEMSET | RACE },
> > > + { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
> > > + { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
> > > + { "mmap-nomemset", SKIP_MEMSET | MMAP },
> > > + { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
> > > + { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
> > > + { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
> > > + { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
> > > + { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
> > > + { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
> > > + { "free-nomemset", SKIP_MEMSET | NEW | FREE },
> > > + { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
> > > + { "new-nomemset", SKIP_MEMSET | NEW },
> > > + { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
> > > + { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
> > > + { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
> > > + { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
> > > + { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
> > > + { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
> > > + { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
> > > + { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
> > > + { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
> > > + { NULL },
> > > + };
> > > + const struct section psections[] = {
> > > + { "munmap-cpu-fault", CPU_FAULT },
> > > + { "munmap-no-cpu-fault", 0 },
> > > + { "remap-cpu-fault", CPU_FAULT | REMAP },
> > > + { "remap-no-cpu-fault", REMAP },
> > > + { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
> > > + { "middle-munmap-no-cpu-fault", MIDDLE },
> > > + { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
> > > + { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
> > > + { NULL },
> > > + };
> > > + const struct section esections[] = {
> > > + { "malloc", 0 },
> > > + { "malloc-mix-bo", MIX_BO_ALLOC },
> > > + { NULL },
> > > + };
> > > + int fd;
> > > +
> > > + igt_fixture {
> > > + struct xe_device *xe;
> > > +
> > > + fd = drm_open_driver(DRIVER_XE);
> > > + igt_require(!xe_supports_faults(fd));
> > > +
> > > + xe = xe_device_get(fd);
> > > + va_bits = xe->va_bits;
> > > + }
> > > +
> > > + for (const struct section *s = sections; s->name; s++) {
> > > + igt_subtest_f("once-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("once-large-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 1, 1, SZ_2M, 0, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("twice-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("twice-large-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 1, 2, SZ_2M, 0, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("many-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("many-stride-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("many-execqueues-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("many-large-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("many-large-execqueues-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL,
> > > + NULL, s->flags);
> > > +
> > > + igt_subtest_f("threads-many-%s", s->name)
> > > + threads(fd, 1, 128, 0, 0, s->flags, false);
> > > +
> > > + igt_subtest_f("threads-many-stride-%s", s->name)
> > > + threads(fd, 1, 128, 0, 256, s->flags, false);
> > > +
> > > + igt_subtest_f("threads-many-execqueues-%s", s->name)
> > > + threads(fd, 16, 128, 0, 0, s->flags, false);
> > > +
> > > + igt_subtest_f("threads-many-large-%s", s->name)
> > > + threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
> > > +
> > > + igt_subtest_f("threads-many-large-execqueues-%s", s->name)
> > > + threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
> > > +
> > > + igt_subtest_f("threads-shared-vm-many-%s", s->name)
> > > + threads(fd, 1, 128, 0, 0, s->flags, true);
> > > +
> > > + igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
> > > + threads(fd, 1, 128, 0, 256, s->flags, true);
> > > +
> > > + igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
> > > + threads(fd, 16, 128, 0, 0, s->flags, true);
> > > +
> > > + igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
> > > + threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
> > > +
> > > + igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
> > > + threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
> > > +
> > > + igt_subtest_f("process-many-%s", s->name)
> > > + processes(fd, 1, 128, 0, 0, s->flags);
> > > +
> > > + igt_subtest_f("process-many-stride-%s", s->name)
> > > + processes(fd, 1, 128, 0, 256, s->flags);
> > > +
> > > + igt_subtest_f("process-many-execqueues-%s", s->name)
> > > + processes(fd, 16, 128, 0, 0, s->flags);
> > > +
> > > + igt_subtest_f("process-many-large-%s", s->name)
> > > + processes(fd, 1, 128, SZ_2M, 0, s->flags);
> > > +
> > > + igt_subtest_f("process-many-large-execqueues-%s", s->name)
> > > + processes(fd, 16, 128, SZ_2M, 0, s->flags);
> > > + }
> > > +
> > > + igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
> > > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
> > > +
> > > + igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
> > > + threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
> > > +
> > > + igt_subtest("threads-shared-alloc-many-stride-malloc")
> > > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC, false);
> > > +
> > > + igt_subtest("threads-shared-alloc-many-stride-malloc-sync")
> > > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC | SYNC_EXEC, false);
> > > +
> > > + igt_subtest("threads-shared-alloc-many-stride-malloc-race")
> > > + threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, false);
> > > +
> > > + igt_subtest_f("fault")
> > > + xe_for_each_engine(fd, hwe)
> > > + test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, NULL,
> > > + FAULT);
> > > +
> > > + for (const struct section *s = psections; s->name; s++) {
> > > + igt_subtest_f("partial-%s", s->name)
> > > + xe_for_each_engine(fd, hwe)
> > > + partial(fd, hwe, s->flags);
> > > + }
> > > +
> > > + igt_subtest_f("unaligned-alloc")
> > > + xe_for_each_engine(fd, hwe) {
> > > + many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
> > > + SZ_1M + SZ_512K, SZ_4K, NULL, 0);
> > > + break;
> > > + }
> > > +
> > > + igt_subtest_f("fault-benchmark")
> > > + xe_for_each_engine(fd, hwe)
> > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > + BENCHMARK);
> > > +
> > > + igt_subtest_f("fault-threads-benchmark")
> > > + xe_for_each_engine(fd, hwe)
> > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > + BENCHMARK | CPU_FAULT_THREADS);
> > > +
> > > + igt_subtest_f("fault-threads-same-page-benchmark")
> > > + xe_for_each_engine(fd, hwe)
> > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > + BENCHMARK | CPU_FAULT_THREADS |
> > > + CPU_FAULT_SAME_PAGE);
> > > +
> > > + igt_subtest_f("fault-process-benchmark")
> > > + xe_for_each_engine(fd, hwe)
> > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > + BENCHMARK | CPU_FAULT_PROCESS);
> > > +
> > > + igt_subtest_f("fault-process-same-page-benchmark")
> > > + xe_for_each_engine(fd, hwe)
> > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > + BENCHMARK | CPU_FAULT_PROCESS |
> > > + CPU_FAULT_SAME_PAGE);
> > > +
> > > + for (const struct section *s = esections; s->name; s++) {
> > > + igt_subtest_f("evict-%s", s->name)
> > > + xe_for_each_engine(fd, hwe) {
> > > + many_allocs(fd, hwe,
> > > + xe_visible_vram_size(fd, hwe->gt_id),
> > > + SZ_8M, SZ_1M, NULL, s->flags);
> > > + break;
> > > + }
> > > + }
> > > +
> > > + for (const struct section *s = esections; s->name; s++) {
> > > + igt_subtest_f("processes-evict-%s", s->name)
> > > + processes_evict(fd, SZ_8M, SZ_1M, s->flags);
> > > + }
> > > +
> > > + igt_fixture {
> > > + xe_device_put(fd);
> > > + drm_close_driver(fd);
> > > + }
> > > +}
> > > diff --git a/tests/meson.build b/tests/meson.build
> > > index 9224145cf4..8c7b756716 100644
> > > --- a/tests/meson.build
> > > +++ b/tests/meson.build
> > > @@ -295,6 +295,7 @@ intel_xe_progs = [
> > > 'xe_exec_reset',
> > > 'xe_exec_sip',
> > > 'xe_exec_store',
> > > + 'xe_exec_system_allocator',
> > > 'xe_exec_threads',
> > > 'xe_exercise_blt',
> > > 'xe_fault_injection',
> > > --
> > > 2.34.1
> > >
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH] tests/xe: Add system_allocator test
2025-04-24 19:28 ` Francois Dugast
@ 2025-04-24 19:46 ` Matthew Brost
0 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2025-04-24 19:46 UTC (permalink / raw)
To: Francois Dugast; +Cc: igt-dev
On Thu, Apr 24, 2025 at 09:28:57PM +0200, Francois Dugast wrote:
> Hi,
>
> On Fri, Apr 18, 2025 at 12:44:51PM -0700, Matthew Brost wrote:
> > On Fri, Apr 18, 2025 at 05:47:55PM +0200, Francois Dugast wrote:
> > > Hi Matt,
> > >
> > > I am still going through your patch but sharing some comments already.
> > >
> > > The sequence is neither complex nor too different from existing tests
> > > but as it is a lot of multi-thread / multi-process code, I am trying
> > > to come up with a suggestion to break it down. Might not be possible
> > > though.
> > >
> >
> > Good to hear you don't find it too complex, open spliting if it makes
> > sense.
> >
> > > On Tue, Apr 15, 2025 at 07:20:40PM -0700, Matthew Brost wrote:
> > > > Test various uses of system allocator in single thread, multiple
> > > > threads, and multiple processes.
> > > >
> > > > Features tested:
> > > > - Malloc with various size
> > > > - Mmap with various sizes and flags including file backed mappings
> > > > - Mixing BO allocations with system allocator
> > > > - Various page sizes
> > > > - Dynamically freeing / unmapping memory
> > > > - Sharing VM across threads
> > > > - Faults racing on different hardware engines / GTs / Tiles
> > > > - GPU faults and CPU faults racing
> > > > - CPU faults on multiple threads racing
> > > > - CPU faults on multiple process racing
> > > > - GPU faults of memory not faulted in by CPU
> > > > - Partial unmap of allocations
> > > > - Attempting to unmap system allocations when GPU has mappings
> > > > - Eviction of both system allocations and BOs
> > > > - Forking child processes and reading data from VRAM
> > > > - mremap data in VRAM
> > > > - Protection changes
> > > > - Multiple faults per execbuf
> > > >
> > > > Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
> > > >
> > > > v2:
> > > > - Rebase
> > > > - Fix memory allocation to not interfear with malloc (Thomas)
> > > >
> > > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > > > ---
> > > > include/drm-uapi/xe_drm.h | 57 +-
> > > > lib/xe/xe_ioctl.c | 12 +
> > > > lib/xe/xe_ioctl.h | 1 +
> > > > tests/intel/xe_exec_system_allocator.c | 1832 ++++++++++++++++++++++++
> > > > tests/meson.build | 1 +
> > > > 5 files changed, 1896 insertions(+), 7 deletions(-)
> > > > create mode 100644 tests/intel/xe_exec_system_allocator.c
> > > >
> > > > diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
> > > > index 154f947ef0..9c08738c3b 100644
> > > > --- a/include/drm-uapi/xe_drm.h
> > > > +++ b/include/drm-uapi/xe_drm.h
> > > > @@ -3,8 +3,8 @@
> > > > * Copyright © 2023 Intel Corporation
> > > > */
> > > >
> > > > -#ifndef _XE_DRM_H_
> > > > -#define _XE_DRM_H_
> > > > +#ifndef _UAPI_XE_DRM_H_
> > > > +#define _UAPI_XE_DRM_H_
> > >
> > > Nit: The header seems to have been copied directly from the kernel tree, instead
> > > it should be generated with:
> > >
> > > make headers_install
> > >
> > > https://docs.kernel.org/kbuild/headers_install.html
> > >
> >
> > Let me split out the uAPI update into its own patch + use the proper
> > flow.
>
> Sounds good.
>
> >
> > > >
> > > > #include "drm.h"
> > > >
> > > > @@ -134,7 +134,7 @@ extern "C" {
> > > > * redefine the interface more easily than an ever growing struct of
> > > > * increasing complexity, and for large parts of that interface to be
> > > > * entirely optional. The downside is more pointer chasing; chasing across
> > > > - * the boundary with pointers encapsulated inside u64.
> > > > + * the __user boundary with pointers encapsulated inside u64.
> > >
> > > See above comment on make headers_install.
> > >
> >
> > +1
> >
> > > > *
> > > > * Example chaining:
> > > > *
> > > > @@ -393,6 +393,10 @@ struct drm_xe_query_mem_regions {
> > > > *
> > > > * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
> > > > * has usable VRAM
> > > > + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
> > > > + * has low latency hint support
> > > > + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
> > > > + * device has CPU address mirroring support
> > > > * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
> > > > * required by this device, typically SZ_4K or SZ_64K
> > > > * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
> > > > @@ -409,6 +413,8 @@ struct drm_xe_query_config {
> > > > #define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
> > > > #define DRM_XE_QUERY_CONFIG_FLAGS 1
> > > > #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
> > > > + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
> > > > + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
> > > > #define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
> > > > #define DRM_XE_QUERY_CONFIG_VA_BITS 3
> > > > #define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
> > > > @@ -911,7 +917,11 @@ struct drm_xe_gem_mmap_offset {
> > > > * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
> > > > *
> > > > * The @flags can be:
> > > > - * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
> > > > + * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
> > > > + * space of the VM to scratch page. A vm_bind would overwrite the scratch
> > > > + * page mapping. This flag is mutually exclusive with the
> > > > + * %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception of on x2 and
> > > > + * xe3 platform.
> > > > * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
> > > > * exec submissions to its exec_queues that don't have an upper time
> > > > * limit on the job execution time. But exec submissions to these
> > > > @@ -987,6 +997,12 @@ struct drm_xe_vm_destroy {
> > > > * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
> > > > * reject the binding if the encryption key is no longer valid. This
> > > > * flag has no effect on BOs that are not marked as using PXP.
> > > > + * - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
> > > > + * set, no mappings are created rather the range is reserved for CPU address
> > > > + * mirroring which will be populated on GPU page faults or prefetches. Only
> > > > + * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
> > > > + * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
> > > > + * handle MBZ, and the BO offset MBZ.
> > > > */
> > > > struct drm_xe_vm_bind_op {
> > > > /** @extensions: Pointer to the first extension struct, if any */
> > > > @@ -1039,7 +1055,9 @@ struct drm_xe_vm_bind_op {
> > > > * on the @pat_index. For such mappings there is no actual memory being
> > > > * mapped (the address in the PTE is invalid), so the various PAT memory
> > > > * attributes likely do not apply. Simply leaving as zero is one
> > > > - * option (still a valid pat_index).
> > > > + * option (still a valid pat_index). Same applies to
> > > > + * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mapping
> > > > + * there is no actual memory being mapped.
> > > > */
> > > > __u16 pat_index;
> > > >
> > > > @@ -1055,6 +1073,14 @@ struct drm_xe_vm_bind_op {
> > > >
> > > > /** @userptr: user pointer to bind on */
> > > > __u64 userptr;
> > > > +
> > > > + /**
> > > > + * @cpu_addr_mirror_offset: Offset from GPU @addr to create
> > > > + * CPU address mirror mappings. MBZ with current level of
> > > > + * support (e.g. 1 to 1 mapping between GPU and CPU mappings
> > > > + * only supported).
> > > > + */
> > > > + __s64 cpu_addr_mirror_offset;
> > > > };
> > > >
> > > > /**
> > > > @@ -1078,6 +1104,7 @@ struct drm_xe_vm_bind_op {
> > > > #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
> > > > #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
> > > > #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
> > > > +#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
> > > > /** @flags: Bind flags */
> > > > __u32 flags;
> > > >
> > > > @@ -1205,6 +1232,21 @@ struct drm_xe_vm_bind {
> > > > * };
> > > > * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> > > > *
> > > > + * Allow users to provide a hint to kernel for cases demanding low latency
> > > > + * profile. Please note it will have impact on power consumption. User can
> > > > + * indicate low latency hint with flag while creating exec queue as
> > > > + * mentioned below,
> > > > + *
> > > > + * struct drm_xe_exec_queue_create exec_queue_create = {
> > > > + * .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
> > > > + * .extensions = 0,
> > > > + * .vm_id = vm,
> > > > + * .num_bb_per_exec = 1,
> > > > + * .num_eng_per_bb = 1,
> > > > + * .instances = to_user_pointer(&instance),
> > > > + * };
> > > > + * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> > > > + *
> > > > */
> > > > struct drm_xe_exec_queue_create {
> > > > #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
> > > > @@ -1223,7 +1265,8 @@ struct drm_xe_exec_queue_create {
> > > > /** @vm_id: VM to use for this exec queue */
> > > > __u32 vm_id;
> > > >
> > > > - /** @flags: MBZ */
> > > > +#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
> > > > + /** @flags: flags to use for this exec queue */
> > > > __u32 flags;
> > > >
> > > > /** @exec_queue_id: Returned exec queue ID */
> > > > @@ -1926,4 +1969,4 @@ struct drm_xe_query_eu_stall {
> > > > }
> > > > #endif
> > > >
> > > > -#endif /* _XE_DRM_H_ */
> > > > +#endif /* _UAPI_XE_DRM_H_ */
> > > > diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> > > > index fb8c4aef13..785fc9184c 100644
> > > > --- a/lib/xe/xe_ioctl.c
> > > > +++ b/lib/xe/xe_ioctl.c
> > > > @@ -440,6 +440,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
> > > > return __xe_bo_map(fd, bo, size, PROT_WRITE);
> > > > }
> > > >
> > > > +void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
> > > > +{
> > > > + uint64_t mmo;
> > > > + void *map;
> > > > +
> > > > + mmo = xe_bo_mmap_offset(fd, bo);
> > > > + map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
> > > > + igt_assert(map != MAP_FAILED);
> > > > +
> > > > + return map;
> > > > +}
> > > > +
> > > > void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
> > > > {
> > > > return __xe_bo_map(fd, bo, size, prot);
> > > > diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
> > > > index 9bdf73b2bd..554a33c9cd 100644
> > > > --- a/lib/xe/xe_ioctl.h
> > > > +++ b/lib/xe/xe_ioctl.h
> > > > @@ -86,6 +86,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
> > > > void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
> > > > uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
> > > > void *xe_bo_map(int fd, uint32_t bo, size_t size);
> > > > +void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
> > > > void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
> > > > int __xe_exec(int fd, struct drm_xe_exec *exec);
> > > > void xe_exec(int fd, struct drm_xe_exec *exec);
> > > > diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
> > > > new file mode 100644
> > > > index 0000000000..14fa59353e
> > > > --- /dev/null
> > > > +++ b/tests/intel/xe_exec_system_allocator.c
> > > > @@ -0,0 +1,1832 @@
> > > > +// SPDX-License-Identifier: MIT
> > > > +/*
> > > > + * Copyright © 2024 Intel Corporation
> > > > + */
> > > > +
> > > > +/**
> > > > + * TEST: Basic tests for execbuf functionality using system allocator
> > > > + * Category: Hardware building block
> > > > + * Mega feature: Shared virtual memory
> > > > + * Sub-category: execbuf
> > > > + * Functionality: fault mode, system allocator
> > > > + * GPU requirements: GPU needs support for DRM_XE_VM_CREATE_FLAG_FAULT_MODE
>
> Using this below will help test automation and reporting:
>
> * TEST: Basic tests for execbuf functionality using system allocator
> * Category: Core
> * Mega feature: USM
> * Sub-category: System allocator
> * Functionality: fault mode, system allocator
> * GPU: LNL, BMG, PVC
>
+1
> > > > + */
> > > > +
> > > > +#include <fcntl.h>
> > > > +#include <linux/mman.h>
> > > > +#include <time.h>
> > > > +
> > > > +#include "igt.h"
> > > > +#include "lib/igt_syncobj.h"
> > > > +#include "lib/intel_reg.h"
> > > > +#include "xe_drm.h"
> > > > +
> > > > +#include "xe/xe_ioctl.h"
> > > > +#include "xe/xe_query.h"
> > > > +#include <string.h>
> > > > +
> > > > +#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> > > > +#define QUARTER_SEC (NSEC_PER_SEC / 4)
> > > > +#define FIVE_SEC (5LL * NSEC_PER_SEC)
> > > > +
> > > > +struct batch_data {
> > > > + uint32_t batch[16];
> > > > + uint64_t pad;
> > > > + uint32_t data;
> > > > + uint32_t expected_data;
> > > > +};
> > > > +
> > > > +#define WRITE_VALUE(data__, i__) ({ \
> > > > + if (!(data__)->expected_data) \
> > > > + (data__)->expected_data = rand() << 12 | (i__); \
> > > > + (data__)->expected_data; \
> > > > +})
> > > > +#define READ_VALUE(data__, i__) ((data__)->expected_data)
>
> Argument i__ is not used.
>
Indeed, will remove.
> > > > +
> > > > +static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> > > > + int *idx)
> > > > +{
> > > > + batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
> > > > + batch[(*idx)++] = sdi_addr;
> > > > + batch[(*idx)++] = sdi_addr >> 32;
> > > > + batch[(*idx)++] = wdata;
> > > > +}
> > > > +
> > > > +static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> > > > + int *idx)
> > > > +{
> > > > + __write_dword(batch, sdi_addr, wdata, idx);
> > > > + batch[(*idx)++] = MI_BATCH_BUFFER_END;
> > > > +}
> > >
> > > Slightly out of scope for this review but the 2 functions above might be
> > > helpful under lib/ to prevent adding more duplications of the dword write
> > > batch sequence.
> > >
> >
> > Yea we could split out generic batch writing functions into a library at
> > some point but agree is probably out of scope for this series.
> >
> > > > +
> > > > +static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride,
> > > > + pthread_barrier_t *barrier)
> > > > +{
> > > > + int i, n_writes = alloc_size / stride;
> > > > +
> > > > + for (i = 0; i < n_writes; ++i) {
> > > > + struct batch_data *data = ptr + i * stride;
> > > > +
> > > > + igt_assert_eq(data->data, READ_VALUE(data, i));
> > > > +
> > > > + if (barrier)
> > > > + pthread_barrier_wait(barrier);
> > > > + }
> > > > +}
> > > > +
> > > > +#define SYNC_FILE "/tmp/xe_exec_system_allocator_sync"
> > >
> > > Might be worth creating and propagating a unique file name at runtime, for
> > > example with mkstemp(), in order to avoid potential concurrent accesses from
> > > multiple instances of the test.
> > >
> >
> > Let me look into that.
> >
> > > > +
> > > > +struct process_data {
> > > > + pthread_mutex_t mutex;
> > > > + pthread_cond_t cond;
> > > > + pthread_barrier_t barrier;
> > > > + bool go;
> > > > +};
> > > > +
> > > > +static void wait_pdata(struct process_data *pdata)
> > > > +{
> > > > + pthread_mutex_lock(&pdata->mutex);
> > > > + while (!pdata->go)
> > > > + pthread_cond_wait(&pdata->cond, &pdata->mutex);
> > > > + pthread_mutex_unlock(&pdata->mutex);
> > > > +}
> > > > +
> > > > +static void init_pdata(struct process_data *pdata, int n_engine)
> > > > +{
> > > > + pthread_mutexattr_t mutex_attr;
> > > > + pthread_condattr_t cond_attr;
> > > > + pthread_barrierattr_t barrier_attr;
> > > > +
> > > > + pthread_mutexattr_init(&mutex_attr);
> > > > + pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
> > > > + pthread_mutex_init(&pdata->mutex, &mutex_attr);
> > > > +
> > > > + pthread_condattr_init(&cond_attr);
> > > > + pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
> > > > + pthread_cond_init(&pdata->cond, &cond_attr);
> > > > +
> > > > + pthread_barrierattr_init(&barrier_attr);
> > > > + pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
> > > > + pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
> > > > +
> > > > + pdata->go = false;
> > > > +}
> > > > +
> > > > +static void signal_pdata(struct process_data *pdata)
> > > > +{
> > > > + pthread_mutex_lock(&pdata->mutex);
> > > > + pdata->go = true;
> > > > + pthread_cond_broadcast(&pdata->cond);
> > > > + pthread_mutex_unlock(&pdata->mutex);
> > > > +}
> > > > +
> > > > +/* many_alloc flags */
> > > > +#define MIX_BO_ALLOC (0x1 << 0)
> > > > +#define BENCHMARK (0x1 << 1)
> > > > +#define CPU_FAULT_THREADS (0x1 << 2)
> > > > +#define CPU_FAULT_PROCESS (0x1 << 3)
> > > > +#define CPU_FAULT_SAME_PAGE (0x1 << 4)
> > > > +
> > > > +static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
> > > > + unsigned int flags)
> > > > +{
> > > > + struct process_data *pdata;
> > > > + int map_fd;
> > > > +
> > > > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > > + wait_pdata(pdata);
> > > > +
> > > > + if (flags & CPU_FAULT_SAME_PAGE)
> > > > + check_all_pages(ptr, alloc_size, stride, &pdata->barrier);
> > > > + else
> > > > + check_all_pages(ptr, alloc_size, stride, NULL);
> > > > +
> > > > + close(map_fd);
> > > > + munmap(pdata, sizeof(*pdata));
> > > > +}
> > > > +
> > > > +static void
> > > > +check_all_pages_process(void *ptr, uint64_t alloc_size, uint64_t stride,
> > > > + int n_process, unsigned int flags)
>
> It would be helpful to add here a comment similar to the one below for
> check_all_pages_threads().
>
Sure.
> > > > +{
> > > > + struct process_data *pdata;
> > > > + int map_fd, i;
> > > > +
> > > > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > > > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > > +
> > > > + init_pdata(pdata, n_process);
> > > > +
> > > > + for (i = 0; i < n_process; ++i) {
> > > > + igt_fork(child, 1)
> > > > + if (flags & CPU_FAULT_SAME_PAGE)
> > > > + process_check(ptr, alloc_size, stride, flags);
> > > > + else
> > > > + process_check(ptr + stride * i, alloc_size,
> > > > + stride * n_process, flags);
> > > > + }
> > > > +
> > > > + signal_pdata(pdata);
> > > > + igt_waitchildren();
> > > > +
> > > > + close(map_fd);
> > > > + munmap(pdata, sizeof(*pdata));
> > > > +}
> > > > +
> > > > +struct thread_check_data {
> > > > + pthread_t thread;
> > > > + pthread_mutex_t *mutex;
> > > > + pthread_cond_t *cond;
> > > > + pthread_barrier_t *barrier;
> > > > + void *ptr;
> > > > + uint64_t alloc_size;
> > > > + uint64_t stride;
> > > > + bool *go;
> > > > +};
> > > > +
> > > > +static void *thread_check(void *data)
> > > > +{
> > > > + struct thread_check_data *t = data;
> > > > +
> > > > + pthread_mutex_lock(t->mutex);
> > > > + while (!*t->go)
> > > > + pthread_cond_wait(t->cond, t->mutex);
> > > > + pthread_mutex_unlock(t->mutex);
> > > > +
> > > > + check_all_pages(t->ptr, t->alloc_size, t->stride, t->barrier);
> > > > +
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +/*
> > > > + * Partition checking of results in chunks which causes multiple threads to
> > > > + * fault same VRAM allocation in parallel.
> > > > + */
> > > > +static void
> > > > +check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
> > > > + int n_threads, unsigned int flags)
> > > > +{
> > > > + struct thread_check_data *threads_check_data;
> > > > + pthread_mutex_t mutex;
> > > > + pthread_cond_t cond;
> > > > + pthread_barrier_t barrier;
> > > > + int i;
> > > > + bool go = false;
> > > > +
> > > > + threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
> > > > + igt_assert(threads_check_data);
> > > > +
> > > > + pthread_mutex_init(&mutex, 0);
> > > > + pthread_cond_init(&cond, 0);
> > > > + pthread_barrier_init(&barrier, 0, n_threads);
> > > > +
> > > > + for (i = 0; i < n_threads; ++i) {
> > > > + threads_check_data[i].mutex = &mutex;
> > > > + threads_check_data[i].cond = &cond;
> > > > + if (flags & CPU_FAULT_SAME_PAGE) {
> > > > + threads_check_data[i].barrier = &barrier;
> > > > + threads_check_data[i].ptr = ptr;
> > > > + threads_check_data[i].alloc_size = alloc_size;
> > > > + threads_check_data[i].stride = stride;
> > > > + } else {
> > > > + threads_check_data[i].barrier = NULL;
> > > > + threads_check_data[i].ptr = ptr + stride * i;
> > > > + threads_check_data[i].alloc_size = alloc_size;
> > > > + threads_check_data[i].stride = n_threads * stride;
> > > > + }
> > > > + threads_check_data[i].go = &go;
> > > > +
> > > > + pthread_create(&threads_check_data[i].thread, 0, thread_check,
> > > > + &threads_check_data[i]);
> > > > + }
> > > > +
> > > > + pthread_mutex_lock(&mutex);
> > > > + go = true;
> > > > + pthread_cond_broadcast(&cond);
> > > > + pthread_mutex_unlock(&mutex);
> > > > +
> > > > + for (i = 0; i < n_threads; ++i)
> > > > + pthread_join(threads_check_data[i].thread, NULL);
> > > > + free(threads_check_data);
> > > > +}
> > > > +
> > > > +static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> > > > + uint64_t alloc_size, uint64_t stride,
> > > > + struct timespec *tv, uint64_t *submit)
> > > > +{
> > > > + struct drm_xe_sync sync[1] = {
> > > > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
> > > > + .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > > > + .timeline_value = USER_FENCE_VALUE },
> > > > + };
> > > > + struct drm_xe_exec exec = {
> > > > + .num_batch_buffer = 1,
> > > > + .num_syncs = 0,
> > > > + .exec_queue_id = exec_queue,
> > > > + .syncs = to_user_pointer(&sync),
> > > > + };
> > > > + uint64_t addr = to_user_pointer(ptr);
> > > > + int i, ret, n_writes = alloc_size / stride;
> > > > + u64 *exec_ufence = NULL;
> > > > + int64_t timeout = FIVE_SEC;
> > > > +
> > > > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED |
> > > > + MAP_ANONYMOUS, -1, 0);
> > > > + igt_assert(exec_ufence != MAP_FAILED);
> > > > + memset(exec_ufence, 0, SZ_4K);
> > > > + sync[0].addr = to_user_pointer(exec_ufence);
> > > > +
> > > > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > > > + struct batch_data *data = ptr + i * stride;
> > > > + uint64_t sdi_offset = (char *)&data->data - (char *)data;
> > > > + uint64_t sdi_addr = addr + sdi_offset;
> > > > + int b = 0;
> > > > +
> > > > + write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
> > > > + igt_assert(b <= ARRAY_SIZE(data->batch));
> > > > + }
> > > > +
> > > > + igt_nsec_elapsed(tv);
> > > > + *submit = igt_nsec_elapsed(tv);
> > > > +
> > > > + addr = to_user_pointer(ptr);
> > > > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > > > + struct batch_data *data = ptr + i * stride;
> > > > + uint64_t batch_offset = (char *)&data->batch - (char *)data;
> > > > + uint64_t batch_addr = addr + batch_offset;
> > > > +
> > > > + exec.address = batch_addr;
> > > > + if (i + 1 == n_writes)
> > > > + exec.num_syncs = 1;
> > > > + xe_exec(fd, &exec);
> > > > + }
> > > > +
> > > > + ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
> > > > + &timeout);
> > > > + if (ret) {
> > > > + printf("FAIL EXEC_UFENCE_ADDR: 0x%016llx\n", sync[0].addr);
> > > > + printf("FAIL EXEC_UFENCE: EXPEXCTED=0x%016llx, ACTUAL=0x%016lx\n",
> > > > + USER_FENCE_VALUE, exec_ufence[0]);
>
> s/EXPEXCTED/EXPECTED/
>
> Also, we should probably use IGT's log functions such as igt_info() instead
> of printf().
>
Will fix.
> > > > +
> > > > + addr = to_user_pointer(ptr);
> > > > + for (i = 0; i < n_writes; ++i, addr += stride) {
> > > > + struct batch_data *data = ptr + i * stride;
> > > > + uint64_t batch_offset = (char *)&data->batch - (char *)data;
> > > > + uint64_t batch_addr = addr + batch_offset;
> > > > + uint64_t sdi_offset = (char *)&data->data - (char *)data;
> > > > + uint64_t sdi_addr = addr + sdi_offset;
> > > > +
> > > > + printf("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
> > > > + printf("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
> > > > + printf("FAIL SDI_ADDR (in batch): 0x%016lx\n",
> > > > + (((u64)data->batch[2]) << 32) | data->batch[1]);
> > > > + printf("FAIL DARA: EXPEXCTED=0x%08x, ACTUAL=0x%08x\n",
> > > > + data->expected_data, data->data);
>
> s/DARA/DATA/ and s/EXPEXCTED/EXPECTED/ and same as above for printf().
>
Will fix.
> > > > + }
> > > > + igt_assert_eq(ret, 0);
> > > > + }
> > > > + munmap(exec_ufence, SZ_4K);
> > > > +}
> > > > +
> > > > +static int va_bits;
> > > > +
> > > > +#define bind_system_allocator(__sync, __num_sync) \
> > > > + __xe_vm_bind_assert(fd, vm, 0, \
> > > > + 0, 0, 0, 0x1ull << va_bits, \
> > > > + DRM_XE_VM_BIND_OP_MAP, \
> > > > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, \
> > > > + (__sync), (__num_sync), 0, 0)
> > > > +
> > > > +#define unbind_system_allocator() \
> > > > + __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits, \
> > > > + DRM_XE_VM_BIND_OP_UNMAP, 0, \
> > > > + NULL, 0, 0, 0, 0)
> > >
> > > Is there a reason here to favor static variable + macros over helper function
> > > with parameters?
> > >
> >
> > va_bits is static as it looked up exactly once when the test loads.
> >
> > I could change these helpers to static functions rather than macros if
> > that is preferred.
>
> It forces variable names in the caller but that is just a minor issue.
>
> >
> > > > +
> > > > +#define odd(__i) (__i & 1)
> > > > +
> > > > +struct aligned_alloc_type {
> > > > + void *__ptr;
> > > > + void *ptr;
> > > > + size_t __size;
> > > > + size_t size;
> > > > +};
> > > > +
> > > > +static struct aligned_alloc_type __aligned_alloc(size_t alignment, size_t size)
> > > > +{
> > > > + struct aligned_alloc_type aligned_alloc_type;
> > > > +
> > > > + aligned_alloc_type.__ptr = mmap(NULL, alignment + size, PROT_NONE, MAP_PRIVATE |
> > > > + MAP_ANONYMOUS, -1, 0);
> > > > + igt_assert(aligned_alloc_type.__ptr != MAP_FAILED);
> > > > +
> > > > + aligned_alloc_type.ptr = (void *)ALIGN((uint64_t)aligned_alloc_type.__ptr, alignment);
> > > > + aligned_alloc_type.size = size;
> > > > + aligned_alloc_type.__size = size + alignment;
> > > > +
> > > > + return aligned_alloc_type;
> > > > +}
> > > > +
> > > > +static void __aligned_free(struct aligned_alloc_type *aligned_alloc_type)
> > > > +{
> > > > + munmap(aligned_alloc_type->__ptr, aligned_alloc_type->__size);
> > > > +}
> > > > +
> > > > +static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_type)
> > > > +{
> > > > + size_t begin_size = (size_t)(aligned_alloc_type->ptr - aligned_alloc_type->__ptr);
> > > > +
> > > > + if (begin_size)
> > > > + munmap(aligned_alloc_type->__ptr, begin_size);
> > > > + if (aligned_alloc_type->__size - aligned_alloc_type->size - begin_size)
> > > > + munmap(aligned_alloc_type->ptr + aligned_alloc_type->size,
> > > > + aligned_alloc_type->__size - aligned_alloc_type->size - begin_size);
> > > > +}
> > > > +
> > > > +/**
> > > > + * SUBTEST: unaligned-alloc
> > > > + * Description: allocate unaligned sizes of memory
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: fault-benchmark
> > > > + * Description: Benchmark how long GPU / CPU take
> > > > + * Test category: performance test
> > > > + *
> > > > + * SUBTEST: fault-threads-benchmark
> > > > + * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
> > > > + * Test category: performance and functionality test
> > > > + *
> > > > + * SUBTEST: fault-threads-same-page-benchmark
> > > > + * Description: Benchmark how long GPU / CPU take, reading results with multiple threads, hammer same page
> > > > + * Test category: performance and functionality test
> > > > + *
> > > > + * SUBTEST: fault-process-benchmark
> > > > + * Description: Benchmark how long GPU / CPU take, reading results with multiple process
> > > > + * Test category: performance and functionality test
> > > > + *
> > > > + * SUBTEST: fault-process-same-page-benchmark
> > > > + * Description: Benchmark how long GPU / CPU take, reading results with multiple process, hammer same page
> > > > + * Test category: performance and functionality test
> > > > + *
> > > > + * SUBTEST: evict-malloc
> > > > + * Description: trigger eviction of VRAM allocated via malloc
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: evict-malloc-mix-bo
> > > > + * Description: trigger eviction of VRAM allocated via malloc and BO create
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: processes-evict-malloc
> > > > + * Description: multi-process trigger eviction of VRAM allocated via malloc
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: processes-evict-malloc-mix-bo
> > > > + * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
> > > > + * Test category: stress test
> > > > + */
> > > > +
> > > > +static void
> > > > +many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> > > > + uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
> > > > + pthread_barrier_t *barrier, unsigned int flags)
> > > > +{
> > > > + uint32_t vm, exec_queue;
> > > > + int num_allocs = flags & BENCHMARK ? 1 :
> > > > + (9 * (total_alloc / alloc_size)) / 8;
> > > > + struct aligned_alloc_type *allocs;
> > > > + uint32_t *bos = NULL;
> > > > + struct timespec tv = {};
> > > > + uint64_t submit, read, elapsed;
> > > > + int i;
> > > > +
> > > > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > > > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > > + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> > > > +
> > > > + bind_system_allocator(NULL, 0);
> > > > +
> > > > + allocs = malloc(sizeof(*allocs) * num_allocs);
> > > > + igt_assert(allocs);
> > > > + memset(allocs, 0, sizeof(*allocs) * num_allocs);
> > > > +
> > > > + if (flags & MIX_BO_ALLOC) {
> > > > + bos = malloc(sizeof(*bos) * num_allocs);
> > > > + igt_assert(bos);
> > > > + memset(bos, 0, sizeof(*bos) * num_allocs);
> > > > + }
> > > > +
> > > > + for (i = 0; i < num_allocs; ++i) {
> > > > + struct aligned_alloc_type alloc;
> > > > +
> > > > + if (flags & MIX_BO_ALLOC && odd(i)) {
> > > > + uint32_t bo_flags =
> > > > + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > > > +
> > > > + alloc = __aligned_alloc(SZ_2M, alloc_size);
> > > > + igt_assert(alloc.ptr);
> > > > +
> > > > + bos[i] = xe_bo_create(fd, vm, alloc_size,
> > > > + vram_if_possible(fd, eci->gt_id),
> > > > + bo_flags);
> > > > + alloc.ptr = xe_bo_map_fixed(fd, bos[i], alloc_size,
> > > > + to_user_pointer(alloc.ptr));
> > > > + xe_vm_bind_async(fd, vm, 0, bos[i], 0,
> > > > + to_user_pointer(alloc.ptr),
> > > > + alloc_size, 0, 0);
> > > > + } else {
> > > > + alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
> > > > + igt_assert(alloc.ptr);
> > > > + }
> > > > + allocs[i] = alloc;
> > > > +
> > > > + touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> > > > + &tv, &submit);
> > > > + }
> > > > +
> > > > + if (barrier)
> > > > + pthread_barrier_wait(barrier);
> > > > +
> > > > + for (i = 0; i < num_allocs; ++i) {
> > > > + if (flags & BENCHMARK)
> > > > + read = igt_nsec_elapsed(&tv);
> > > > +#define NUM_CHECK_THREADS 8
> > > > + if (flags & CPU_FAULT_PROCESS)
> > > > + check_all_pages_process(allocs[i].ptr, alloc_size, stride,
> > > > + NUM_CHECK_THREADS, flags);
> > > > + else if (flags & CPU_FAULT_THREADS)
> > > > + check_all_pages_threads(allocs[i].ptr, alloc_size, stride,
> > > > + NUM_CHECK_THREADS, flags);
> > > > + else
> > > > + check_all_pages(allocs[i].ptr, alloc_size, stride, NULL);
> > > > + if (flags & BENCHMARK) {
> > > > + elapsed = igt_nsec_elapsed(&tv);
> > > > + printf("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
>
> Also here, prefer IGT's log functions.
>
Will fix.
> > > > + 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
> > > > + 1e-3 * (elapsed - submit),
> > > > + 1e-3 * (elapsed - read));
> > > > + }
> > > > + if (bos && bos[i]) {
> > > > + __aligned_free(allocs + i);
> > > > + gem_close(fd, bos[i]);
> > > > + } else {
> > > > + free(allocs[i].ptr);
> > > > + }
> > > > + }
> > > > + if (bos)
> > > > + free(bos);
> > > > + free(allocs);
> > > > + xe_exec_queue_destroy(fd, exec_queue);
> > > > + xe_vm_destroy(fd, vm);
> > > > +}
> > > > +
> > > > +static void process_evict(struct drm_xe_engine_class_instance *hwe,
> > > > + uint64_t total_alloc, uint64_t alloc_size,
> > > > + uint64_t stride, unsigned int flags)
> > > > +{
> > > > + struct process_data *pdata;
> > > > + int map_fd;
> > > > + int fd;
> > > > +
> > > > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > > + wait_pdata(pdata);
> > > > +
> > > > + fd = drm_open_driver(DRIVER_XE);
> > > > + many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
> > > > + flags);
> > > > + drm_close_driver(fd);
> > > > +
> > > > + close(map_fd);
> > > > + munmap(pdata, sizeof(*pdata));
> > > > +}
> > > > +
> > > > +static void
> > > > +processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
> > > > + unsigned int flags)
> > > > +{
> > > > + struct drm_xe_engine_class_instance *hwe;
> > > > + struct process_data *pdata;
> > > > + int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
> > > > + int map_fd;
> > > > +
> > > > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > > > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > > +
> > > > + xe_for_each_engine(fd, hwe) {
> > > > + igt_assert(hwe->gt_id < 2);
> > > > + n_engine_gt[hwe->gt_id]++;
> > > > + n_engine++;
> > > > + }
> > > > +
> > > > + init_pdata(pdata, n_engine);
> > > > +
> > > > + xe_for_each_engine(fd, hwe) {
> > > > + igt_fork(child, 1)
> > > > + process_evict(hwe,
> > > > + xe_visible_vram_size(fd, hwe->gt_id) /
> > > > + n_engine_gt[hwe->gt_id], alloc_size,
> > > > + stride, flags);
> > > > + }
> > > > +
> > > > + signal_pdata(pdata);
> > > > + igt_waitchildren();
> > > > +
> > > > + close(map_fd);
> > > > + munmap(pdata, sizeof(*pdata));
> > > > +}
> > > > +
> > > > +#define CPU_FAULT (0x1 << 0)
> > > > +#define REMAP (0x1 << 1)
> > > > +#define MIDDLE (0x1 << 2)
> > > > +
> > > > +/**
> > > > + * SUBTEST: partial-munmap-cpu-fault
> > > > + * Description: munmap partially with cpu access in between
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: partial-munmap-no-cpu-fault
> > > > + * Description: munmap partially with no cpu access in between
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: partial-remap-cpu-fault
> > > > + * Description: remap partially with cpu access in between
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: partial-remap-no-cpu-fault
> > > > + * Description: remap partially with no cpu access in between
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: partial-middle-munmap-cpu-fault
> > > > + * Description: munmap middle with cpu access in between
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: partial-middle-munmap-no-cpu-fault
> > > > + * Description: munmap middle with no cpu access in between
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: partial-middle-remap-cpu-fault
> > > > + * Description: remap middle with cpu access in between
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: partial-middle-remap-no-cpu-fault
> > > > + * Description: remap middle with no cpu access in between
> > > > + * Test category: functionality test
> > > > + */
> > > > +
> > > > +static void
> > > > +partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> > > > +{
> > > > + struct drm_xe_sync sync[1] = {
> > > > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > > > + .timeline_value = USER_FENCE_VALUE },
> > > > + };
> > > > + struct drm_xe_exec exec = {
> > > > + .num_batch_buffer = 1,
> > > > + .num_syncs = 1,
> > > > + .syncs = to_user_pointer(sync),
> > > > + };
> > > > + struct {
> > > > + uint32_t batch[16];
> > > > + uint64_t pad;
> > > > + uint64_t vm_sync;
> > > > + uint64_t exec_sync;
> > > > + uint32_t data;
> > > > + uint32_t expected_data;
> > > > + } *data;
> > > > + size_t bo_size = SZ_2M, unmap_offset = 0;
> > > > + uint32_t vm, exec_queue;
> > > > + u64 *exec_ufence = NULL;
> > > > + int i;
> > > > + void *old, *new = NULL;
> > > > + struct aligned_alloc_type alloc;
> > > > +
> > > > + if (flags & MIDDLE)
> > > > + unmap_offset = bo_size / 4;
> > > > +
> > > > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > > > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > > +
> > > > + alloc = __aligned_alloc(bo_size, bo_size);
> > > > + igt_assert(alloc.ptr);
> > > > +
> > > > + data = mmap(alloc.ptr, bo_size, PROT_READ | PROT_WRITE,
> > > > + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
> > > > + igt_assert(data != MAP_FAILED);
> > > > + memset(data, 0, bo_size);
> > > > + old = data;
> > > > +
> > > > + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> > > > +
> > > > + sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > > > + bind_system_allocator(sync, 1);
> > > > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> > > > + data[0].vm_sync = 0;
> > > > +
> > > > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED |
> > > > + MAP_ANONYMOUS, -1, 0);
> > > > + igt_assert(exec_ufence != MAP_FAILED);
> > > > + memset(exec_ufence, 0, SZ_4K);
> > > > +
> > > > + for (i = 0; i < 2; i++) {
> > > > + uint64_t addr = to_user_pointer(data);
> > > > + uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> > > > + uint64_t sdi_addr = addr + sdi_offset;
> > > > + int b = 0;
> > > > +
> > > > + write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
> > > > + igt_assert(b <= ARRAY_SIZE(data[i].batch));
> > > > +
> > > > + if (!i)
> > > > + data = old + unmap_offset + bo_size / 2;
> > > > + }
> > > > +
> > > > + data = old;
> > > > + exec.exec_queue_id = exec_queue;
> > > > +
> > > > + for (i = 0; i < 2; i++) {
> > > > + uint64_t addr = to_user_pointer(data);
> > > > + uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> > > > + uint64_t batch_addr = addr + batch_offset;
> > > > +
> > > > + sync[0].addr = new ? to_user_pointer(new) :
> > > > + to_user_pointer(exec_ufence);
> > > > + exec.address = batch_addr;
> > > > + xe_exec(fd, &exec);
> > > > +
> > > > + xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
> > > > + exec_queue, FIVE_SEC);
> > > > + if (i || (flags & CPU_FAULT))
> > > > + igt_assert_eq(data[i].data, READ_VALUE(&data[i], i));
> > > > + exec_ufence[0] = 0;
> > > > +
> > > > + if (!i) {
> > > > + data = old + unmap_offset + bo_size / 2;
> > > > + munmap(old + unmap_offset, bo_size / 2);
> > > > + if (flags & REMAP) {
> > > > + new = mmap(old + unmap_offset, bo_size / 2,
> > > > + PROT_READ | PROT_WRITE,
> > > > + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
> > > > + MAP_LOCKED, -1, 0);
> > > > + igt_assert(new != MAP_FAILED);
> > > > + }
> > > > + }
> > > > + }
> > > > +
> > > > + xe_exec_queue_destroy(fd, exec_queue);
> > > > + munmap(exec_ufence, SZ_4K);
> > > > + __aligned_free(&alloc);
> > > > + if (new)
> > > > + munmap(new, bo_size / 2);
> > > > + xe_vm_destroy(fd, vm);
> > > > +}
> > > > +
> > > > +#define MAX_N_EXEC_QUEUES 16
> > > > +
> > > > +#define MMAP (0x1 << 0)
> > > > +#define NEW (0x1 << 1)
> > > > +#define BO_UNMAP (0x1 << 2)
> > > > +#define FREE (0x1 << 3)
> > > > +#define BUSY (0x1 << 4)
> > > > +#define BO_MAP (0x1 << 5)
> > > > +#define RACE (0x1 << 6)
> > > > +#define SKIP_MEMSET (0x1 << 7)
> > > > +#define FAULT (0x1 << 8)
> > > > +#define FILE_BACKED (0x1 << 9)
> > > > +#define LOCK (0x1 << 10)
> > > > +#define MMAP_SHARED (0x1 << 11)
> > > > +#define HUGE_PAGE (0x1 << 12)
> > > > +#define SHARED_ALLOC (0x1 << 13)
> > > > +#define FORK_READ (0x1 << 14)
> > > > +#define FORK_READ_AFTER (0x1 << 15)
> > > > +#define MREMAP (0x1 << 16)
> > > > +#define DONTUNMAP (0x1 << 17)
> > > > +#define READ_ONLY_REMAP (0x1 << 18)
> > > > +#define SYNC_EXEC (0x1 << 19)
> > > > +#define EVERY_OTHER_CHECK (0x1 << 20)
> > > > +#define MULTI_FAULT (0x1 << 21)
> > > > +
> > > > +#define N_MULTI_FAULT 4
> > > > +
> > > > +/**
> > > > + * SUBTEST: once-%s
> > > > + * Description: Run %arg[1] system allocator test only once
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: once-large-%s
> > > > + * Description: Run %arg[1] system allocator test only once with large allocation
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: twice-%s
> > > > + * Description: Run %arg[1] system allocator test twice
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: twice-large-%s
> > > > + * Description: Run %arg[1] system allocator test twice with large allocation
> > > > + * Test category: functionality test
> > > > + *
> > > > + * SUBTEST: many-%s
> > > > + * Description: Run %arg[1] system allocator test many times
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: many-stride-%s
> > > > + * Description: Run %arg[1] system allocator test many times with a stride on each exec
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: many-execqueues-%s
> > > > + * Description: Run %arg[1] system allocator test on many exec_queues
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: many-large-%s
> > > > + * Description: Run %arg[1] system allocator test many times with large allocations
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: many-large-execqueues-%s
> > > > + * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
> > > > + *
> > > > + * SUBTEST: threads-many-%s
> > > > + * Description: Run %arg[1] system allocator threaded test many times
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-many-stride-%s
> > > > + * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-many-execqueues-%s
> > > > + * Description: Run %arg[1] system allocator threaded test on many exec_queues
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-many-large-%s
> > > > + * Description: Run %arg[1] system allocator threaded test many times with large allocations
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-many-large-execqueues-%s
> > > > + * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
> > > > + *
> > > > + * SUBTEST: threads-shared-vm-many-%s
> > > > + * Description: Run %arg[1] system allocator threaded, shared vm test many times
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-shared-vm-many-stride-%s
> > > > + * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-shared-vm-many-execqueues-%s
> > > > + * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-shared-vm-many-large-%s
> > > > + * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-shared-vm-many-large-execqueues-%s
> > > > + * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: process-many-%s
> > > > + * Description: Run %arg[1] system allocator multi-process test many times
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: process-many-stride-%s
> > > > + * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: process-many-execqueues-%s
> > > > + * Description: Run %arg[1] system allocator multi-process test on many exec_queues
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: process-many-large-%s
> > > > + * Description: Run %arg[1] system allocator multi-process test many times with large allocations
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: process-many-large-execqueues-%s
> > > > + * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
> > > > + *
> > > > + * SUBTEST: fault
> > > > + * Description: use a bad system allocator address resulting in a fault
> > > > + * Test category: bad input
> > > > + *
> > > > + * arg[1]:
> > > > + *
> > > > + * @malloc: malloc single buffer for all execs, issue a command which will trigger multiple faults
> > > > + * @malloc-multi-fault: malloc single buffer for all execs
> > > > + * @malloc-fork-read: malloc single buffer for all execs, fork a process to read test output
> > > > + * @malloc-fork-read-after: malloc single buffer for all execs, fork a process to read test output, check again after fork returns in parent
> > > > + * @malloc-mlock: malloc and mlock single buffer for all execs
> > > > + * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
> > > > + * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
> > > > + * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
> > > > + * @mmap: mmap single buffer for all execs
> > > > + * @mmap-remap: mmap and mremap a buffer for all execs
> > > > + * @mmap-remap-dontunmap: mmap and mremap a buffer with dontunmap flag for all execs
> > > > + * @mmap-remap-ro: mmap and mremap a read-only buffer for all execs
> > > > + * @mmap-remap-ro-dontunmap: mmap and mremap a read-only buffer with dontunmap flag for all execs
> > > > + * @mmap-remap-eocheck: mmap and mremap a buffer for all execs, check data every other loop iteration
> > > > + * @mmap-remap-dontunmap-eocheck: mmap and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
> > > > + * @mmap-remap-ro-eocheck: mmap and mremap a read-only buffer for all execs, check data every other loop iteration
> > > > + * @mmap-remap-ro-dontunmap-eocheck: mmap and mremap a read-only buffer with dontunmap flag for all execs, check data every other loop iteration
> > > > + * @mmap-huge: mmap huge page single buffer for all execs
> > > > + * @mmap-shared: mmap shared single buffer for all execs
> > > > + * @mmap-shared-remap: mmap shared and mremap a buffer for all execs
> > > > + * @mmap-shared-remap-dontunmap: mmap shared and mremap a buffer with dontunmap flag for all execs
> > > > + * @mmap-shared-remap-eocheck: mmap shared and mremap a buffer for all execs, check data every other loop iteration
> > > > + * @mmap-shared-remap-dontunmap-eocheck: mmap shared and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
> > > > + * @mmap-mlock: mmap and mlock single buffer for all execs
> > > > + * @mmap-file: mmap single buffer, with file backing, for all execs
> > > > + * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
> > > > + * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
> > > > + * @free: malloc and free buffer for each exec
> > > > + * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
> > > > + * @new: malloc a new buffer for each exec
> > > > + * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
> > > > + * @new-bo-map: malloc a new buffer or map BO for each exec
> > > > + * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
> > > > + * @mmap-free: mmap and free buffer for each exec
> > > > + * @mmap-free-huge: mmap huge page and free buffer for each exec
> > > > + * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
> > > > + * @mmap-new: mmap a new buffer for each exec
> > > > + * @mmap-new-huge: mmap huge page a new buffer for each exec
> > > > + * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
> > > > + * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
> > > > + * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
> > > > + * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
> > > > + * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
> > > > + * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
> > > > + * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
> > > > + * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
> > > > + * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
> > > > + * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
> > > > + * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
> > > > + * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
> > > > + * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
> > > > + * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
> > > > + * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > > > + * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
> > > > + * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > > > + * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
> > > > + * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
> > > > + * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
> > > > + * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
> > > > + * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > > > + * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
> > > > + * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
> > > > + * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> > > > + *
> > > > + * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
> > > > + * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
> > > > + * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-shared-alloc-many-stride-malloc
> > > > + * Description: Create multiple threads with a faults on different hardware engines to same addresses
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-shared-alloc-many-stride-malloc-sync
> > > > + * Description: Create multiple threads with a faults on different hardware engines to same addresses, syncing on each exec
> > > > + * Test category: stress test
> > > > + *
> > > > + * SUBTEST: threads-shared-alloc-many-stride-malloc-race
> > > > + * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
> > > > + * Test category: stress test
> > > > + */
> > > > +
> > > > +struct test_exec_data {
> > > > + uint32_t batch[32];
> > > > + uint64_t pad;
> > > > + uint64_t vm_sync;
> > > > + uint64_t exec_sync;
> > > > + uint32_t data;
> > > > + uint32_t expected_data;
> > > > +};
> > > > +
> > > > +static void
> > > > +test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > > > + int n_exec_queues, int n_execs, size_t bo_size,
> > > > + size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
> > > > + unsigned int flags)
> > > > +{
> > > > + uint64_t addr;
> > > > + struct drm_xe_sync sync[1] = {
> > > > + { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > > > + .timeline_value = USER_FENCE_VALUE },
> > > > + };
> > > > + struct drm_xe_exec exec = {
> > > > + .num_batch_buffer = 1,
> > > > + .num_syncs = 1,
> > > > + .syncs = to_user_pointer(sync),
> > > > + };
> > > > + uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > > > + struct test_exec_data *data, *next_data = NULL;
> > > > + uint32_t bo_flags;
> > > > + uint32_t bo = 0;
> > > > + void **pending_free;
> > > > + u64 *exec_ufence = NULL;
> > > > + int i, j, b, file_fd = -1, prev_idx;
> > > > + bool free_vm = false;
> > > > + size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
> > > > + size_t orig_size = bo_size;
> > > > + struct aligned_alloc_type aligned_alloc_type;
> > > > +
> > > > + if (flags & MULTI_FAULT) {
> > > > + if (!bo_size)
> > > > + return;
> > > > +
> > > > + bo_size *= N_MULTI_FAULT;
> > > > + }
> > > > +
> > > > + if (flags & SHARED_ALLOC)
> > > > + return;
> > > > +
> > > > + if (flags & EVERY_OTHER_CHECK && odd(n_execs))
> > > > + return;
> > > > +
> > > > + if (flags & EVERY_OTHER_CHECK)
> > > > + igt_assert(flags & MREMAP);
> > > > +
> > > > + igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> > > > +
> > > > + if (flags & NEW && !(flags & FREE)) {
> > > > + pending_free = malloc(sizeof(*pending_free) * n_execs);
> > > > + igt_assert(pending_free);
> > > > + memset(pending_free, 0, sizeof(*pending_free) * n_execs);
> > > > + }
> > > > +
> > > > + if (!vm) {
> > > > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > > > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > > + free_vm = true;
> > > > + }
> > > > + if (!bo_size) {
> > > > + if (!stride) {
> > > > + bo_size = sizeof(*data) * n_execs;
> > > > + bo_size = xe_bb_size(fd, bo_size);
> > > > + } else {
> > > > + bo_size = stride * n_execs * sizeof(*data);
> > > > + bo_size = xe_bb_size(fd, bo_size);
> > > > + }
> > > > + }
> > > > + if (flags & HUGE_PAGE) {
> > > > + aligned_size = ALIGN(aligned_size, SZ_2M);
> > > > + bo_size = ALIGN(bo_size, SZ_2M);
> > > > + }
> > > > +
> > > > + if (alloc) {
> > > > + data = alloc;
> > > > + } else {
> > > > + if (flags & MMAP) {
> > > > + int mmap_flags = MAP_FIXED;
> > > > +
> > > > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > > > + data = aligned_alloc_type.ptr;
> > > > + igt_assert(data);
> > > > + __aligned_partial_free(&aligned_alloc_type);
> > > > +
> > > > + if (flags & MMAP_SHARED)
> > > > + mmap_flags |= MAP_SHARED;
> > > > + else
> > > > + mmap_flags |= MAP_PRIVATE;
> > > > +
> > > > + if (flags & HUGE_PAGE)
> > > > + mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
> > > > +
> > > > + if (flags & FILE_BACKED) {
> > > > + char name[1024];
> > > > +
> > > > + igt_assert(!(flags & NEW));
> > > > +
> > > > + sprintf(name, "/tmp/xe_exec_system_allocator_dat%d\n",
> > > > + getpid());
> > >
> > > Might be another candidate to use mkstemp() or similar.
> > >
> >
> > Will look into that.
> >
> > > > + file_fd = open(name, O_RDWR | O_CREAT, 0x666);
> > > > + posix_fallocate(file_fd, 0, bo_size);
> > > > + } else {
> > > > + mmap_flags |= MAP_ANONYMOUS;
> > > > + }
> > > > +
> > > > + data = mmap(data, bo_size, PROT_READ |
> > > > + PROT_WRITE, mmap_flags, file_fd, 0);
> > > > + igt_assert(data != MAP_FAILED);
> > > > + } else {
> > > > + data = aligned_alloc(aligned_size, bo_size);
> > > > + igt_assert(data);
> > > > + }
> > > > + if (!(flags & SKIP_MEMSET))
> > > > + memset(data, 0, bo_size);
> > > > + if (flags & LOCK) {
> > > > + igt_assert(!(flags & NEW));
> > > > + mlock(data, bo_size);
> > > > + }
> > > > + }
> > > > +
> > > > + for (i = 0; i < n_exec_queues; i++)
> > > > + exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> > > > +
> > > > + sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > > > + if (free_vm) {
> > > > + bind_system_allocator(sync, 1);
> > > > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> > > > + }
> > > > + data[0].vm_sync = 0;
> > > > +
> > > > + addr = to_user_pointer(data);
> > > > +
> > > > + if (flags & BO_UNMAP) {
> > > > + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > > > + bo = xe_bo_create(fd, vm, bo_size,
> > > > + vram_if_possible(fd, eci->gt_id), bo_flags);
> > > > + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
> > > > +
> > > > + __xe_vm_bind_assert(fd, vm, 0,
> > > > + 0, 0, addr, bo_size,
> > > > + DRM_XE_VM_BIND_OP_MAP,
> > > > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> > > > + 1, 0, 0);
> > > > + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
> > > > + FIVE_SEC);
> > > > + data[0].vm_sync = 0;
> > > > + gem_close(fd, bo);
> > > > + bo = 0;
> > > > + }
> > > > +
> > > > + if (!(flags & RACE)) {
> > > > + exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED |
> > > > + MAP_ANONYMOUS, -1, 0);
> > > > + igt_assert(exec_ufence != MAP_FAILED);
> > > > + memset(exec_ufence, 0, SZ_4K);
> > > > + }
> > > > +
> > > > + for (i = 0; i < n_execs; i++) {
> > > > + int idx = !stride ? i : i * stride, next_idx = !stride
> > > > + ? (i + 1) : (i + 1) * stride;
> > > > + uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
> > > > + uint64_t batch_addr = addr + batch_offset;
> > > > + uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
> > > > + uint64_t sdi_addr = addr + sdi_offset;
> > > > + int e = i % n_exec_queues, err;
> > > > + bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> > > > + bool fault_injected = (FAULT & flags) && i > n_execs;
> > > > +
> > > > + if (barrier)
> > > > + pthread_barrier_wait(barrier);
> > > > +
> > > > + if (flags & MULTI_FAULT) {
> > > > + b = 0;
> > > > + for (j = 0; j < N_MULTI_FAULT - 1; ++j)
> > > > + __write_dword(data[idx].batch,
> > > > + sdi_addr + j * orig_size,
> > > > + WRITE_VALUE(&data[idx], idx), &b);
> > > > + write_dword(data[idx].batch, sdi_addr + j * orig_size,
> > > > + WRITE_VALUE(&data[idx], idx), &b);
> > > > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > > > + } else if (!(flags & EVERY_OTHER_CHECK)) {
> > > > + b = 0;
> > > > + write_dword(data[idx].batch, sdi_addr,
> > > > + WRITE_VALUE(&data[idx], idx), &b);
> > > > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > > > + } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
> > > > + b = 0;
> > > > + write_dword(data[idx].batch, sdi_addr,
> > > > + WRITE_VALUE(&data[idx], idx), &b);
> > > > + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> > > > +
> > > > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > > > + next_data = aligned_alloc_type.ptr;
> > > > + igt_assert(next_data);
> > > > + __aligned_partial_free(&aligned_alloc_type);
> > > > +
> > > > + b = 0;
> > > > + write_dword(data[next_idx].batch,
> > > > + to_user_pointer(next_data) +
> > > > + (char *)&data[next_idx].data - (char *)data,
> > > > + WRITE_VALUE(&data[next_idx], next_idx), &b);
> > > > + igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
> > > > + }
> > > > +
> > > > + if (!exec_ufence)
> > > > + data[idx].exec_sync = 0;
> > > > +
> > > > + sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
> > > > + addr + (char *)&data[idx].exec_sync - (char *)data;
> > > > +
> > > > + exec.exec_queue_id = exec_queues[e];
> > > > + if (fault_inject)
> > > > + exec.address = batch_addr * 2;
> > > > + else
> > > > + exec.address = batch_addr;
> > > > +
> > > > + if (fault_injected) {
> > > > + err = __xe_exec(fd, &exec);
> > > > + igt_assert(err == -ENOENT);
> > > > + } else {
> > > > + xe_exec(fd, &exec);
> > > > + }
> > > > +
> > > > + if (barrier)
> > > > + pthread_barrier_wait(barrier);
> > > > +
> > > > + if (fault_inject || fault_injected) {
> > > > + int64_t timeout = QUARTER_SEC;
> > > > +
> > > > + err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> > > > + &data[idx].exec_sync,
> > > > + USER_FENCE_VALUE,
> > > > + exec_queues[e], &timeout);
> > > > + igt_assert(err == -ETIME || err == -EIO);
> > > > + } else {
> > > > + xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> > > > + &data[idx].exec_sync, USER_FENCE_VALUE,
> > > > + exec_queues[e], FIVE_SEC);
> > > > + if (flags & LOCK && !i)
> > > > + munlock(data, bo_size);
> > > > +
> > > > + if (flags & MREMAP) {
> > > > + void *old = data;
> > > > + int remap_flags = MREMAP_MAYMOVE | MREMAP_FIXED;
> > > > +
> > > > + if (flags & DONTUNMAP)
> > > > + remap_flags |= MREMAP_DONTUNMAP;
> > > > +
> > > > + if (flags & READ_ONLY_REMAP)
> > > > + igt_assert(!mprotect(old, bo_size,
> > > > + PROT_READ));
> > > > +
> > > > + if (!next_data) {
> > > > + aligned_alloc_type = __aligned_alloc(aligned_size,
> > > > + bo_size);
> > > > + data = aligned_alloc_type.ptr;
> > > > + __aligned_partial_free(&aligned_alloc_type);
> > > > + } else {
> > > > + data = next_data;
> > > > + }
> > > > + next_data = NULL;
> > > > + igt_assert(data);
> > > > +
> > > > + data = mremap(old, bo_size, bo_size,
> > > > + remap_flags, data);
> > > > + igt_assert(data != MAP_FAILED);
> > > > +
> > > > + if (flags & READ_ONLY_REMAP)
> > > > + igt_assert(!mprotect(data, bo_size,
> > > > + PROT_READ |
> > > > + PROT_WRITE));
> > > > +
> > > > + addr = to_user_pointer(data);
> > > > + if (flags & DONTUNMAP)
> > > > + munmap(old, bo_size);
> > > > + }
> > > > +
> > > > + if (!(flags & EVERY_OTHER_CHECK) || odd(i)) {
> > > > + if (flags & FORK_READ) {
> > > > + igt_fork(child, 1)
> > > > + igt_assert_eq(data[idx].data,
> > > > + READ_VALUE(&data[idx], idx));
> > > > + if (!(flags & FORK_READ_AFTER))
> > > > + igt_assert_eq(data[idx].data,
> > > > + READ_VALUE(&data[idx], idx));
> > > > + igt_waitchildren();
> > > > + if (flags & FORK_READ_AFTER)
> > > > + igt_assert_eq(data[idx].data,
> > > > + READ_VALUE(&data[idx], idx));
> > > > + } else {
> > > > + igt_assert_eq(data[idx].data,
> > > > + READ_VALUE(&data[idx], idx));
> > > > +
> > > > + if (flags & MULTI_FAULT) {
> > > > + for (j = 1; j < N_MULTI_FAULT; ++j) {
> > > > + struct test_exec_data *__data =
> > > > + ((void *)data) + j * orig_size;
> > > > +
> > > > + igt_assert_eq(__data[idx].data,
> > > > + READ_VALUE(&data[idx], idx));
> > > > + }
> > > > + }
> > > > + }
> > > > + if (flags & EVERY_OTHER_CHECK)
> > > > + igt_assert_eq(data[prev_idx].data,
> > > > + READ_VALUE(&data[prev_idx], idx));
> > > > + }
> > > > + }
> > > > +
> > > > + if (exec_ufence)
> > > > + exec_ufence[0] = 0;
> > > > +
> > > > + if (bo) {
> > > > + __xe_vm_bind_assert(fd, vm, 0,
> > > > + 0, 0, addr, bo_size,
> > > > + DRM_XE_VM_BIND_OP_MAP,
> > > > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> > > > + NULL, 0, 0, 0);
> > > > + munmap(data, bo_size);
> > > > + gem_close(fd, bo);
> > > > + }
> > > > +
> > > > + if (flags & NEW) {
> > > > + if (flags & MMAP) {
> > > > + if (flags & FREE)
> > > > + munmap(data, bo_size);
> > > > + else
> > > > + pending_free[i] = data;
> > > > + data = mmap(NULL, bo_size, PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED |
> > > > + MAP_ANONYMOUS, -1, 0);
> > > > + igt_assert(data != MAP_FAILED);
> > > > + } else if (flags & BO_MAP && (i % 2)) {
>
> The odd() macro defined above can be used here.
>
Yep.
> > > > + if (!bo) {
> > > > + if (flags & FREE)
> > > > + free(data);
> > > > + else
> > > > + pending_free[i] = data;
> > > > + }
> > > > +
> > > > + aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> > > > + data = aligned_alloc_type.ptr;
> > > > + igt_assert(data);
> > > > + __aligned_partial_free(&aligned_alloc_type);
> > > > +
> > > > + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> > > > + bo = xe_bo_create(fd, vm, bo_size,
> > > > + vram_if_possible(fd, eci->gt_id),
> > > > + bo_flags);
> > > > + data = xe_bo_map_fixed(fd, bo, bo_size,
> > > > + to_user_pointer(data));
> > > > +
> > > > + xe_vm_bind_async(fd, vm, 0, bo, 0,
> > > > + to_user_pointer(data),
> > > > + bo_size, 0, 0);
> > > > + } else {
> > > > + if (!bo) {
> > > > + if (flags & FREE)
> > > > + free(data);
> > > > + else
> > > > + pending_free[i] = data;
> > > > + }
> > > > + bo = 0;
> > > > + data = aligned_alloc(aligned_size, bo_size);
> > >
> > > Large memory leaks come from this ^ aligned_alloc(), see below.
> > >
> >
> > Yea this will leak, thanks for pointing this out. Thomas also raised
> > memory leaks as a concern.
> >
> > Inline below with the changes to fix this...
> >
> > > > + igt_assert(data);
> > > > + }
> > > > + addr = to_user_pointer(data);
> > > > + if (!(flags & SKIP_MEMSET))
> > > > + memset(data, 0, bo_size);
> > > > + }
> > > > +
> > > > + prev_idx = idx;
> > > > + }
> > > > +
> > > > + if (bo) {
> > > > + __xe_vm_bind_assert(fd, vm, 0,
> > > > + 0, 0, addr, bo_size,
> > > > + DRM_XE_VM_BIND_OP_MAP,
> > > > + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> > > > + NULL, 0, 0, 0);
> > > > + munmap(data, bo_size);
> >
> > data = NULL;
> >
> > > > + gem_close(fd, bo);
> > > > + }
> > > > +
> > > > + if (flags & BUSY)
> > > > + igt_assert_eq(unbind_system_allocator(), -EBUSY);
> > > > +
> > > > + for (i = 0; i < n_exec_queues; i++)
> > > > + xe_exec_queue_destroy(fd, exec_queues[i]);
> > > > +
> > > > + if (exec_ufence)
> > > > + munmap(exec_ufence, SZ_4K);
> > > > +
> > > > + if (flags & LOCK)
> > > > + munlock(data, bo_size);
> > > > +
> > > > + if (file_fd != -1)
> > > > + close(file_fd);
> > > > +
> > > > + if (flags & NEW && !(flags & FREE)) {
> > > > + for (i = 0; i < n_execs; i++) {
> > > > + if (!pending_free[i])
> > > > + continue;
> > > > +
> > > > + if (flags & MMAP)
> > > > + munmap(pending_free[i], bo_size);
> > > > + else
> > > > + free(pending_free[i]);
> > > > + }
> > > > + free(pending_free);
> > > > + } else {
> >
> > s/ else {/\nif (data) {
> >
> > > > + if (flags & MMAP)
> > > > + munmap(data, bo_size);
> > > > + else if (!alloc)
> > > > + free(data);
> > >
> > > Something seems wrong with the flags logic when skipping this ^ free() for the
> > > allocation pointed above.
> > >
> >
> > Yep, see above,
> >
> > Matt
> >
> > > Francois
> > >
> > > > + }
> > > > + if (free_vm)
> > > > + xe_vm_destroy(fd, vm);
> > > > +}
> > > > +
> > > > +struct thread_data {
> > > > + pthread_t thread;
> > > > + pthread_mutex_t *mutex;
> > > > + pthread_cond_t *cond;
> > > > + pthread_barrier_t *barrier;
> > > > + int fd;
> > > > + struct drm_xe_engine_class_instance *eci;
> > > > + int n_exec_queues;
> > > > + int n_execs;
> > > > + size_t bo_size;
> > > > + size_t stride;
> > > > + uint32_t vm;
> > > > + unsigned int flags;
> > > > + void *alloc;
> > > > + bool *go;
> > > > +};
> > > > +
> > > > +static void *thread(void *data)
> > > > +{
> > > > + struct thread_data *t = data;
> > > > +
> > > > + pthread_mutex_lock(t->mutex);
> > > > + while (!*t->go)
> > > > + pthread_cond_wait(t->cond, t->mutex);
> > > > + pthread_mutex_unlock(t->mutex);
> > > > +
> > > > + test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
> > > > + t->bo_size, t->stride, t->vm, t->alloc, t->barrier,
> > > > + t->flags);
> > > > +
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +static void
> > > > +threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> > > > + size_t stride, unsigned int flags, bool shared_vm)
> > > > +{
> > > > + struct drm_xe_engine_class_instance *hwe;
> > > > + struct thread_data *threads_data;
> > > > + int n_engines = 0, i = 0;
> > > > + pthread_mutex_t mutex;
> > > > + pthread_cond_t cond;
> > > > + pthread_barrier_t barrier;
> > > > + uint32_t vm = 0;
> > > > + bool go = false;
> > > > + void *alloc = NULL;
> > > > +
> > > > + if ((FILE_BACKED | FORK_READ) & flags)
> > > > + return;
> > > > +
> > > > + xe_for_each_engine(fd, hwe)
> > > > + ++n_engines;
> > > > +
> > > > + if (shared_vm) {
> > > > + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > > > + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > > + bind_system_allocator(NULL, 0);
> > > > + }
> > > > +
> > > > + if (flags & SHARED_ALLOC) {
> > > > + uint64_t alloc_size;
> > > > +
> > > > + igt_assert(stride);
> > > > +
> > > > + alloc_size = sizeof(struct test_exec_data) * stride *
> > > > + n_execs * n_engines;
> > > > + alloc_size = xe_bb_size(fd, alloc_size);
> > > > + alloc = aligned_alloc(SZ_2M, alloc_size);
> > > > + igt_assert(alloc);
> > > > +
> > > > + memset(alloc, 0, alloc_size);
> > > > + flags &= ~SHARED_ALLOC;
> > > > + }
> > > > +
> > > > + threads_data = calloc(n_engines, sizeof(*threads_data));
> > > > + igt_assert(threads_data);
> > > > +
> > > > + pthread_mutex_init(&mutex, 0);
> > > > + pthread_cond_init(&cond, 0);
> > > > + pthread_barrier_init(&barrier, 0, n_engines);
> > > > +
> > > > + xe_for_each_engine(fd, hwe) {
> > > > + threads_data[i].mutex = &mutex;
> > > > + threads_data[i].cond = &cond;
> > > > + threads_data[i].barrier = (flags & SYNC_EXEC) ? &barrier : NULL;
> > > > + threads_data[i].fd = fd;
> > > > + threads_data[i].eci = hwe;
> > > > + threads_data[i].n_exec_queues = n_exec_queues;
> > > > + threads_data[i].n_execs = n_execs;
> > > > + threads_data[i].bo_size = bo_size;
> > > > + threads_data[i].stride = stride;
> > > > + threads_data[i].vm = vm;
> > > > + threads_data[i].flags = flags;
> > > > + threads_data[i].alloc = alloc ? alloc + i *
> > > > + sizeof(struct test_exec_data) : NULL;
> > > > + threads_data[i].go = &go;
> > > > + pthread_create(&threads_data[i].thread, 0, thread,
> > > > + &threads_data[i]);
> > > > + ++i;
> > > > + }
> > > > +
> > > > + pthread_mutex_lock(&mutex);
> > > > + go = true;
> > > > + pthread_cond_broadcast(&cond);
> > > > + pthread_mutex_unlock(&mutex);
> > > > +
> > > > + for (i = 0; i < n_engines; ++i)
> > > > + pthread_join(threads_data[i].thread, NULL);
>
> Just a thought, the SVM page fault count is provided in GT stats:
>
> cat /sys/kernel/debug/dri/*/gt*/stats
> svm_pagefault_count: 134785
>
> Wondering if it would make sense to read the value before and after test
> execution then compare the delta with the expected number of page faults.
>
I think that is a bit out of scope here and number of faults isn't 100%
predictable because of intentional races, but I was thinking for
prefetch tests we could hook into page fault stats to ensure no page
faults occur if prefetches are done.
Matt
> Francois
>
> > > > +
> > > > + if (shared_vm) {
> > > > + int ret;
> > > > +
> > > > + if (flags & MMAP) {
> > > > + int tries = 300;
> > > > +
> > > > + while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
> > > > + sleep(.01);
> > > > + --tries;
> > > > + }
> > > > + igt_assert_eq(ret, 0);
> > > > + }
> > > > + xe_vm_destroy(fd, vm);
> > > > + if (alloc)
> > > > + free(alloc);
> > > > + }
> > > > + free(threads_data);
> > > > +}
> > > > +
> > > > +static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
> > > > + int n_execs, size_t bo_size, size_t stride,
> > > > + unsigned int flags)
> > > > +{
> > > > + struct process_data *pdata;
> > > > + int map_fd;
> > > > + int fd;
> > > > +
> > > > + map_fd = open(SYNC_FILE, O_RDWR, 0x666);
> > > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > > + wait_pdata(pdata);
> > > > +
> > > > + fd = drm_open_driver(DRIVER_XE);
> > > > + test_exec(fd, hwe, n_exec_queues, n_execs,
> > > > + bo_size, stride, 0, NULL, NULL, flags);
> > > > + drm_close_driver(fd);
> > > > +
> > > > + close(map_fd);
> > > > + munmap(pdata, sizeof(*pdata));
> > > > +}
> > > > +
> > > > +static void
> > > > +processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> > > > + size_t stride, unsigned int flags)
> > > > +{
> > > > + struct drm_xe_engine_class_instance *hwe;
> > > > + struct process_data *pdata;
> > > > + int map_fd;
> > > > +
> > > > + if (flags & FORK_READ)
> > > > + return;
> > > > +
> > > > + map_fd = open(SYNC_FILE, O_RDWR | O_CREAT, 0x666);
> > > > + posix_fallocate(map_fd, 0, sizeof(*pdata));
> > > > + pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
> > > > + PROT_WRITE, MAP_SHARED, map_fd, 0);
> > > > +
> > > > + init_pdata(pdata, 0);
> > > > +
> > > > + xe_for_each_engine(fd, hwe) {
> > > > + igt_fork(child, 1)
> > > > + process(hwe, n_exec_queues, n_execs, bo_size,
> > > > + stride, flags);
> > > > + }
> > > > +
> > > > + signal_pdata(pdata);
> > > > + igt_waitchildren();
> > > > +
> > > > + close(map_fd);
> > > > + munmap(pdata, sizeof(*pdata));
> > > > +}
> > > > +
> > > > +struct section {
> > > > + const char *name;
> > > > + unsigned int flags;
> > > > +};
> > > > +
> > > > +igt_main
> > > > +{
> > > > + struct drm_xe_engine_class_instance *hwe;
> > > > + const struct section sections[] = {
> > > > + { "malloc", 0 },
> > > > + { "malloc-multi-fault", MULTI_FAULT },
> > > > + { "malloc-fork-read", FORK_READ },
> > > > + { "malloc-fork-read-after", FORK_READ | FORK_READ_AFTER },
> > > > + { "malloc-mlock", LOCK },
> > > > + { "malloc-race", RACE },
> > > > + { "malloc-busy", BUSY },
> > > > + { "malloc-bo-unmap", BO_UNMAP },
> > > > + { "mmap", MMAP },
> > > > + { "mmap-remap", MMAP | MREMAP },
> > > > + { "mmap-remap-dontunmap", MMAP | MREMAP | DONTUNMAP },
> > > > + { "mmap-remap-ro", MMAP | MREMAP | READ_ONLY_REMAP },
> > > > + { "mmap-remap-ro-dontunmap", MMAP | MREMAP | DONTUNMAP |
> > > > + READ_ONLY_REMAP },
> > > > + { "mmap-remap-eocheck", MMAP | MREMAP | EVERY_OTHER_CHECK },
> > > > + { "mmap-remap-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
> > > > + EVERY_OTHER_CHECK },
> > > > + { "mmap-remap-ro-eocheck", MMAP | MREMAP | READ_ONLY_REMAP |
> > > > + EVERY_OTHER_CHECK },
> > > > + { "mmap-remap-ro-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
> > > > + READ_ONLY_REMAP | EVERY_OTHER_CHECK },
> > > > + { "mmap-huge", MMAP | HUGE_PAGE },
> > > > + { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
> > > > + { "mmap-shared-remap", MMAP | LOCK | MMAP_SHARED | MREMAP },
> > > > + { "mmap-shared-remap-dontunmap", MMAP | LOCK | MMAP_SHARED |
> > > > + MREMAP | DONTUNMAP },
> > > > + { "mmap-shared-remap-eocheck", MMAP | LOCK | MMAP_SHARED |
> > > > + MREMAP | EVERY_OTHER_CHECK },
> > > > + { "mmap-shared-remap-dontunmap-eocheck", MMAP | LOCK |
> > > > + MMAP_SHARED | MREMAP | DONTUNMAP | EVERY_OTHER_CHECK },
> > > > + { "mmap-mlock", MMAP | LOCK },
> > > > + { "mmap-file", MMAP | FILE_BACKED },
> > > > + { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
> > > > + { "mmap-race", MMAP | RACE },
> > > > + { "free", NEW | FREE },
> > > > + { "free-race", NEW | FREE | RACE },
> > > > + { "new", NEW },
> > > > + { "new-race", NEW | RACE },
> > > > + { "new-bo-map", NEW | BO_MAP },
> > > > + { "new-busy", NEW | BUSY },
> > > > + { "mmap-free", MMAP | NEW | FREE },
> > > > + { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
> > > > + { "mmap-free-race", MMAP | NEW | FREE | RACE },
> > > > + { "mmap-new", MMAP | NEW },
> > > > + { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
> > > > + { "mmap-new-race", MMAP | NEW | RACE },
> > > > + { "malloc-nomemset", SKIP_MEMSET },
> > > > + { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
> > > > + { "malloc-race-nomemset", SKIP_MEMSET | RACE },
> > > > + { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
> > > > + { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
> > > > + { "mmap-nomemset", SKIP_MEMSET | MMAP },
> > > > + { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
> > > > + { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
> > > > + { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
> > > > + { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
> > > > + { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
> > > > + { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
> > > > + { "free-nomemset", SKIP_MEMSET | NEW | FREE },
> > > > + { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
> > > > + { "new-nomemset", SKIP_MEMSET | NEW },
> > > > + { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
> > > > + { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
> > > > + { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
> > > > + { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
> > > > + { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
> > > > + { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
> > > > + { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
> > > > + { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
> > > > + { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
> > > > + { NULL },
> > > > + };
> > > > + const struct section psections[] = {
> > > > + { "munmap-cpu-fault", CPU_FAULT },
> > > > + { "munmap-no-cpu-fault", 0 },
> > > > + { "remap-cpu-fault", CPU_FAULT | REMAP },
> > > > + { "remap-no-cpu-fault", REMAP },
> > > > + { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
> > > > + { "middle-munmap-no-cpu-fault", MIDDLE },
> > > > + { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
> > > > + { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
> > > > + { NULL },
> > > > + };
> > > > + const struct section esections[] = {
> > > > + { "malloc", 0 },
> > > > + { "malloc-mix-bo", MIX_BO_ALLOC },
> > > > + { NULL },
> > > > + };
> > > > + int fd;
> > > > +
> > > > + igt_fixture {
> > > > + struct xe_device *xe;
> > > > +
> > > > + fd = drm_open_driver(DRIVER_XE);
> > > > + igt_require(!xe_supports_faults(fd));
> > > > +
> > > > + xe = xe_device_get(fd);
> > > > + va_bits = xe->va_bits;
> > > > + }
> > > > +
> > > > + for (const struct section *s = sections; s->name; s++) {
> > > > + igt_subtest_f("once-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("once-large-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 1, 1, SZ_2M, 0, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("twice-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("twice-large-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 1, 2, SZ_2M, 0, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("many-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("many-stride-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("many-execqueues-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("many-large-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("many-large-execqueues-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL,
> > > > + NULL, s->flags);
> > > > +
> > > > + igt_subtest_f("threads-many-%s", s->name)
> > > > + threads(fd, 1, 128, 0, 0, s->flags, false);
> > > > +
> > > > + igt_subtest_f("threads-many-stride-%s", s->name)
> > > > + threads(fd, 1, 128, 0, 256, s->flags, false);
> > > > +
> > > > + igt_subtest_f("threads-many-execqueues-%s", s->name)
> > > > + threads(fd, 16, 128, 0, 0, s->flags, false);
> > > > +
> > > > + igt_subtest_f("threads-many-large-%s", s->name)
> > > > + threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
> > > > +
> > > > + igt_subtest_f("threads-many-large-execqueues-%s", s->name)
> > > > + threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
> > > > +
> > > > + igt_subtest_f("threads-shared-vm-many-%s", s->name)
> > > > + threads(fd, 1, 128, 0, 0, s->flags, true);
> > > > +
> > > > + igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
> > > > + threads(fd, 1, 128, 0, 256, s->flags, true);
> > > > +
> > > > + igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
> > > > + threads(fd, 16, 128, 0, 0, s->flags, true);
> > > > +
> > > > + igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
> > > > + threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
> > > > +
> > > > + igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
> > > > + threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
> > > > +
> > > > + igt_subtest_f("process-many-%s", s->name)
> > > > + processes(fd, 1, 128, 0, 0, s->flags);
> > > > +
> > > > + igt_subtest_f("process-many-stride-%s", s->name)
> > > > + processes(fd, 1, 128, 0, 256, s->flags);
> > > > +
> > > > + igt_subtest_f("process-many-execqueues-%s", s->name)
> > > > + processes(fd, 16, 128, 0, 0, s->flags);
> > > > +
> > > > + igt_subtest_f("process-many-large-%s", s->name)
> > > > + processes(fd, 1, 128, SZ_2M, 0, s->flags);
> > > > +
> > > > + igt_subtest_f("process-many-large-execqueues-%s", s->name)
> > > > + processes(fd, 16, 128, SZ_2M, 0, s->flags);
> > > > + }
> > > > +
> > > > + igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
> > > > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
> > > > +
> > > > + igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
> > > > + threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
> > > > +
> > > > + igt_subtest("threads-shared-alloc-many-stride-malloc")
> > > > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC, false);
> > > > +
> > > > + igt_subtest("threads-shared-alloc-many-stride-malloc-sync")
> > > > + threads(fd, 1, 128, 0, 256, SHARED_ALLOC | SYNC_EXEC, false);
> > > > +
> > > > + igt_subtest("threads-shared-alloc-many-stride-malloc-race")
> > > > + threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, false);
> > > > +
> > > > + igt_subtest_f("fault")
> > > > + xe_for_each_engine(fd, hwe)
> > > > + test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, NULL,
> > > > + FAULT);
> > > > +
> > > > + for (const struct section *s = psections; s->name; s++) {
> > > > + igt_subtest_f("partial-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe)
> > > > + partial(fd, hwe, s->flags);
> > > > + }
> > > > +
> > > > + igt_subtest_f("unaligned-alloc")
> > > > + xe_for_each_engine(fd, hwe) {
> > > > + many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
> > > > + SZ_1M + SZ_512K, SZ_4K, NULL, 0);
> > > > + break;
> > > > + }
> > > > +
> > > > + igt_subtest_f("fault-benchmark")
> > > > + xe_for_each_engine(fd, hwe)
> > > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > > + BENCHMARK);
> > > > +
> > > > + igt_subtest_f("fault-threads-benchmark")
> > > > + xe_for_each_engine(fd, hwe)
> > > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > > + BENCHMARK | CPU_FAULT_THREADS);
> > > > +
> > > > + igt_subtest_f("fault-threads-same-page-benchmark")
> > > > + xe_for_each_engine(fd, hwe)
> > > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > > + BENCHMARK | CPU_FAULT_THREADS |
> > > > + CPU_FAULT_SAME_PAGE);
> > > > +
> > > > + igt_subtest_f("fault-process-benchmark")
> > > > + xe_for_each_engine(fd, hwe)
> > > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > > + BENCHMARK | CPU_FAULT_PROCESS);
> > > > +
> > > > + igt_subtest_f("fault-process-same-page-benchmark")
> > > > + xe_for_each_engine(fd, hwe)
> > > > + many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
> > > > + BENCHMARK | CPU_FAULT_PROCESS |
> > > > + CPU_FAULT_SAME_PAGE);
> > > > +
> > > > + for (const struct section *s = esections; s->name; s++) {
> > > > + igt_subtest_f("evict-%s", s->name)
> > > > + xe_for_each_engine(fd, hwe) {
> > > > + many_allocs(fd, hwe,
> > > > + xe_visible_vram_size(fd, hwe->gt_id),
> > > > + SZ_8M, SZ_1M, NULL, s->flags);
> > > > + break;
> > > > + }
> > > > + }
> > > > +
> > > > + for (const struct section *s = esections; s->name; s++) {
> > > > + igt_subtest_f("processes-evict-%s", s->name)
> > > > + processes_evict(fd, SZ_8M, SZ_1M, s->flags);
> > > > + }
> > > > +
> > > > + igt_fixture {
> > > > + xe_device_put(fd);
> > > > + drm_close_driver(fd);
> > > > + }
> > > > +}
> > > > diff --git a/tests/meson.build b/tests/meson.build
> > > > index 9224145cf4..8c7b756716 100644
> > > > --- a/tests/meson.build
> > > > +++ b/tests/meson.build
> > > > @@ -295,6 +295,7 @@ intel_xe_progs = [
> > > > 'xe_exec_reset',
> > > > 'xe_exec_sip',
> > > > 'xe_exec_store',
> > > > + 'xe_exec_system_allocator',
> > > > 'xe_exec_threads',
> > > > 'xe_exercise_blt',
> > > > 'xe_fault_injection',
> > > > --
> > > > 2.34.1
> > > >
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH] tests/xe: Add system_allocator test
@ 2025-04-25 18:20 Matthew Brost
2025-04-25 21:03 ` ✓ Xe.CI.BAT: success for tests/xe: Add system_allocator test (rev5) Patchwork
` (3 more replies)
0 siblings, 4 replies; 16+ messages in thread
From: Matthew Brost @ 2025-04-25 18:20 UTC (permalink / raw)
To: igt-dev; +Cc: francois.dugast
Test various uses of system allocator in single thread, multiple
threads, and multiple processes.
Features tested:
- Malloc with various size
- Mmap with various sizes and flags including file backed mappings
- Mixing BO allocations with system allocator
- Various page sizes
- Dynamically freeing / unmapping memory
- Sharing VM across threads
- Faults racing on different hardware engines / GTs / Tiles
- GPU faults and CPU faults racing
- CPU faults on multiple threads racing
- CPU faults on multiple process racing
- GPU faults of memory not faulted in by CPU
- Partial unmap of allocations
- Attempting to unmap system allocations when GPU has mappings
- Eviction of both system allocations and BOs
- Forking child processes and reading data from VRAM
- mremap data in VRAM
- Protection changes
- Multiple faults per execbuf
Running on LNL, BMG, PVC 1 tile, and PVC 2 tile.
v2:
- Rebase
- Fix memory allocation to not interfear with malloc (Thomas)
v3:
- Fix memory leak (Francois)
- Break out uAPI into own patch (Francois)
- Use mkstemp for sync file (Francois)
- Use mkstemp for file backed data (Francois)
- Drop i argument from READ_VALUE (Francois)
- Fix test description (Francois)
- Add comment to check_all_pages_process (Francois)
- Prefer igt_info over printf (Francois)
- Fix types in messages (Francois)
- Prefer odd macro (Francois)
v4:
- Fix alignment (Johnathan)
v5:
- Add ifdef for MREMAP_DONTUNMAP (build error)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
---
lib/xe/xe_ioctl.c | 12 +
lib/xe/xe_ioctl.h | 1 +
tests/intel/xe_exec_system_allocator.c | 1855 ++++++++++++++++++++++++
tests/meson.build | 1 +
4 files changed, 1869 insertions(+)
create mode 100644 tests/intel/xe_exec_system_allocator.c
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index fb8c4aef13..785fc9184c 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -440,6 +440,18 @@ void *xe_bo_map(int fd, uint32_t bo, size_t size)
return __xe_bo_map(fd, bo, size, PROT_WRITE);
}
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, uint64_t addr)
+{
+ uint64_t mmo;
+ void *map;
+
+ mmo = xe_bo_mmap_offset(fd, bo);
+ map = mmap((void *)addr, size, PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, mmo);
+ igt_assert(map != MAP_FAILED);
+
+ return map;
+}
+
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot)
{
return __xe_bo_map(fd, bo, size, prot);
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index 9bdf73b2bd..554a33c9cd 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -86,6 +86,7 @@ uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
void *xe_bo_map(int fd, uint32_t bo, size_t size);
+void *xe_bo_map_fixed(int fd, uint32_t bo, size_t size, long unsigned int addr);
void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
int __xe_exec(int fd, struct drm_xe_exec *exec);
void xe_exec(int fd, struct drm_xe_exec *exec);
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
new file mode 100644
index 0000000000..b8c636d275
--- /dev/null
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -0,0 +1,1855 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024-2025 Intel Corporation
+ */
+
+/**
+ * TEST: Basic tests for execbuf functionality using system allocator
+ * Category: Core
+ * Mega feature: USM
+ * Sub-category: System allocator
+ * Functionality: fault mode, system allocator
+ * GPU: LNL, BMG, PVC
+ */
+
+#include <fcntl.h>
+#include <linux/mman.h>
+#include <time.h>
+
+#include "igt.h"
+#include "lib/igt_syncobj.h"
+#include "lib/intel_reg.h"
+#include "xe_drm.h"
+
+#include "xe/xe_ioctl.h"
+#include "xe/xe_query.h"
+#include <string.h>
+
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define QUARTER_SEC (NSEC_PER_SEC / 4)
+#define FIVE_SEC (5LL * NSEC_PER_SEC)
+
+struct batch_data {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+#define WRITE_VALUE(data__, i__) ({ \
+ if (!(data__)->expected_data) \
+ (data__)->expected_data = rand() << 12 | (i__); \
+ (data__)->expected_data; \
+})
+#define READ_VALUE(data__) ((data__)->expected_data)
+
+static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ batch[(*idx)++] = MI_STORE_DWORD_IMM_GEN4;
+ batch[(*idx)++] = sdi_addr;
+ batch[(*idx)++] = sdi_addr >> 32;
+ batch[(*idx)++] = wdata;
+}
+
+static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
+ int *idx)
+{
+ __write_dword(batch, sdi_addr, wdata, idx);
+ batch[(*idx)++] = MI_BATCH_BUFFER_END;
+}
+
+static void check_all_pages(void *ptr, uint64_t alloc_size, uint64_t stride,
+ pthread_barrier_t *barrier)
+{
+ int i, n_writes = alloc_size / stride;
+
+ for (i = 0; i < n_writes; ++i) {
+ struct batch_data *data = ptr + i * stride;
+
+ igt_assert_eq(data->data, READ_VALUE(data));
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+ }
+}
+
+static char sync_file[] = "/tmp/xe_exec_system_allocator_syncXXXXXX";
+static int sync_fd;
+
+static void open_sync_file(void)
+{
+ sync_fd = mkstemp(sync_file);
+}
+
+static void close_sync_file(void)
+{
+ close(sync_fd);
+}
+
+struct process_data {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ bool go;
+};
+
+static void wait_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ while (!pdata->go)
+ pthread_cond_wait(&pdata->cond, &pdata->mutex);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+static void init_pdata(struct process_data *pdata, int n_engine)
+{
+ pthread_mutexattr_t mutex_attr;
+ pthread_condattr_t cond_attr;
+ pthread_barrierattr_t barrier_attr;
+
+ pthread_mutexattr_init(&mutex_attr);
+ pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&pdata->mutex, &mutex_attr);
+
+ pthread_condattr_init(&cond_attr);
+ pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
+ pthread_cond_init(&pdata->cond, &cond_attr);
+
+ pthread_barrierattr_init(&barrier_attr);
+ pthread_barrierattr_setpshared(&barrier_attr, PTHREAD_PROCESS_SHARED);
+ pthread_barrier_init(&pdata->barrier, &barrier_attr, n_engine);
+
+ pdata->go = false;
+}
+
+static void signal_pdata(struct process_data *pdata)
+{
+ pthread_mutex_lock(&pdata->mutex);
+ pdata->go = true;
+ pthread_cond_broadcast(&pdata->cond);
+ pthread_mutex_unlock(&pdata->mutex);
+}
+
+/* many_alloc flags */
+#define MIX_BO_ALLOC (0x1 << 0)
+#define BENCHMARK (0x1 << 1)
+#define CPU_FAULT_THREADS (0x1 << 2)
+#define CPU_FAULT_PROCESS (0x1 << 3)
+#define CPU_FAULT_SAME_PAGE (0x1 << 4)
+
+static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+
+ map_fd = open(sync_file, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ if (flags & CPU_FAULT_SAME_PAGE)
+ check_all_pages(ptr, alloc_size, stride, &pdata->barrier);
+ else
+ check_all_pages(ptr, alloc_size, stride, NULL);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+/*
+ * Partition checking of results in chunks which causes multiple processes to
+ * fault same VRAM allocation in parallel.
+ */
+static void
+check_all_pages_process(void *ptr, uint64_t alloc_size, uint64_t stride,
+ int n_process, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd, i;
+
+ map_fd = open(sync_file, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, n_process);
+
+ for (i = 0; i < n_process; ++i) {
+ igt_fork(child, 1)
+ if (flags & CPU_FAULT_SAME_PAGE)
+ process_check(ptr, alloc_size, stride, flags);
+ else
+ process_check(ptr + stride * i, alloc_size,
+ stride * n_process, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct thread_check_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ pthread_barrier_t *barrier;
+ void *ptr;
+ uint64_t alloc_size;
+ uint64_t stride;
+ bool *go;
+};
+
+static void *thread_check(void *data)
+{
+ struct thread_check_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ check_all_pages(t->ptr, t->alloc_size, t->stride, t->barrier);
+
+ return NULL;
+}
+
+/*
+ * Partition checking of results in chunks which causes multiple threads to
+ * fault same VRAM allocation in parallel.
+ */
+static void
+check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
+ int n_threads, unsigned int flags)
+{
+ struct thread_check_data *threads_check_data;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ int i;
+ bool go = false;
+
+ threads_check_data = calloc(n_threads, sizeof(*threads_check_data));
+ igt_assert(threads_check_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+ pthread_barrier_init(&barrier, 0, n_threads);
+
+ for (i = 0; i < n_threads; ++i) {
+ threads_check_data[i].mutex = &mutex;
+ threads_check_data[i].cond = &cond;
+ if (flags & CPU_FAULT_SAME_PAGE) {
+ threads_check_data[i].barrier = &barrier;
+ threads_check_data[i].ptr = ptr;
+ threads_check_data[i].alloc_size = alloc_size;
+ threads_check_data[i].stride = stride;
+ } else {
+ threads_check_data[i].barrier = NULL;
+ threads_check_data[i].ptr = ptr + stride * i;
+ threads_check_data[i].alloc_size = alloc_size;
+ threads_check_data[i].stride = n_threads * stride;
+ }
+ threads_check_data[i].go = &go;
+
+ pthread_create(&threads_check_data[i].thread, 0, thread_check,
+ &threads_check_data[i]);
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_threads; ++i)
+ pthread_join(threads_check_data[i].thread, NULL);
+ free(threads_check_data);
+}
+
+static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
+ uint64_t alloc_size, uint64_t stride,
+ struct timespec *tv, uint64_t *submit)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 0,
+ .exec_queue_id = exec_queue,
+ .syncs = to_user_pointer(&sync),
+ };
+ uint64_t addr = to_user_pointer(ptr);
+ int i, ret, n_writes = alloc_size / stride;
+ u64 *exec_ufence = NULL;
+ int64_t timeout = FIVE_SEC;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ sync[0].addr = to_user_pointer(exec_ufence);
+
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
+ igt_assert(b <= ARRAY_SIZE(data->batch));
+ }
+
+ igt_nsec_elapsed(tv);
+ *submit = igt_nsec_elapsed(tv);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ exec.address = batch_addr;
+ if (i + 1 == n_writes)
+ exec.num_syncs = 1;
+ xe_exec(fd, &exec);
+ }
+
+ ret = __xe_wait_ufence(fd, exec_ufence, USER_FENCE_VALUE, exec_queue,
+ &timeout);
+ if (ret) {
+ igt_info("FAIL EXEC_UFENCE_ADDR: 0x%016llx\n", sync[0].addr);
+ igt_info("FAIL EXEC_UFENCE: EXPECTED=0x%016llx, ACTUAL=0x%016lx\n",
+ USER_FENCE_VALUE, exec_ufence[0]);
+
+ addr = to_user_pointer(ptr);
+ for (i = 0; i < n_writes; ++i, addr += stride) {
+ struct batch_data *data = ptr + i * stride;
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+
+ igt_info("FAIL BATCH_ADDR: 0x%016lx\n", batch_addr);
+ igt_info("FAIL SDI_ADDR: 0x%016lx\n", sdi_addr);
+ igt_info("FAIL SDI_ADDR (in batch): 0x%016lx\n",
+ (((u64)data->batch[2]) << 32) | data->batch[1]);
+ igt_info("FAIL DATA: EXPECTED=0x%08x, ACTUAL=0x%08x\n",
+ data->expected_data, data->data);
+ }
+ igt_assert_eq(ret, 0);
+ }
+ munmap(exec_ufence, SZ_4K);
+}
+
+static int va_bits;
+
+#define bind_system_allocator(__sync, __num_sync) \
+ __xe_vm_bind_assert(fd, vm, 0, \
+ 0, 0, 0, 0x1ull << va_bits, \
+ DRM_XE_VM_BIND_OP_MAP, \
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, \
+ (__sync), (__num_sync), 0, 0)
+
+#define unbind_system_allocator() \
+ __xe_vm_bind(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits, \
+ DRM_XE_VM_BIND_OP_UNMAP, 0, \
+ NULL, 0, 0, 0, 0)
+
+#define odd(__i) (__i & 1)
+
+struct aligned_alloc_type {
+ void *__ptr;
+ void *ptr;
+ size_t __size;
+ size_t size;
+};
+
+static struct aligned_alloc_type __aligned_alloc(size_t alignment, size_t size)
+{
+ struct aligned_alloc_type aligned_alloc_type;
+
+ aligned_alloc_type.__ptr = mmap(NULL, alignment + size, PROT_NONE, MAP_PRIVATE |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(aligned_alloc_type.__ptr != MAP_FAILED);
+
+ aligned_alloc_type.ptr = (void *)ALIGN((uint64_t)aligned_alloc_type.__ptr, alignment);
+ aligned_alloc_type.size = size;
+ aligned_alloc_type.__size = size + alignment;
+
+ return aligned_alloc_type;
+}
+
+static void __aligned_free(struct aligned_alloc_type *aligned_alloc_type)
+{
+ munmap(aligned_alloc_type->__ptr, aligned_alloc_type->__size);
+}
+
+static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_type)
+{
+ size_t begin_size = (size_t)(aligned_alloc_type->ptr - aligned_alloc_type->__ptr);
+
+ if (begin_size)
+ munmap(aligned_alloc_type->__ptr, begin_size);
+ if (aligned_alloc_type->__size - aligned_alloc_type->size - begin_size)
+ munmap(aligned_alloc_type->ptr + aligned_alloc_type->size,
+ aligned_alloc_type->__size - aligned_alloc_type->size - begin_size);
+}
+
+/**
+ * SUBTEST: unaligned-alloc
+ * Description: allocate unaligned sizes of memory
+ * Test category: functionality test
+ *
+ * SUBTEST: fault-benchmark
+ * Description: Benchmark how long GPU / CPU take
+ * Test category: performance test
+ *
+ * SUBTEST: fault-threads-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple threads
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-threads-same-page-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple threads, hammer same page
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-process-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple process
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: fault-process-same-page-benchmark
+ * Description: Benchmark how long GPU / CPU take, reading results with multiple process, hammer same page
+ * Test category: performance and functionality test
+ *
+ * SUBTEST: evict-malloc
+ * Description: trigger eviction of VRAM allocated via malloc
+ * Test category: functionality test
+ *
+ * SUBTEST: evict-malloc-mix-bo
+ * Description: trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: functionality test
+ *
+ * SUBTEST: processes-evict-malloc
+ * Description: multi-process trigger eviction of VRAM allocated via malloc
+ * Test category: stress test
+ *
+ * SUBTEST: processes-evict-malloc-mix-bo
+ * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
+ * Test category: stress test
+ */
+
+static void
+many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
+ uint64_t total_alloc, uint64_t alloc_size, uint64_t stride,
+ pthread_barrier_t *barrier, unsigned int flags)
+{
+ uint32_t vm, exec_queue;
+ int num_allocs = flags & BENCHMARK ? 1 :
+ (9 * (total_alloc / alloc_size)) / 8;
+ struct aligned_alloc_type *allocs;
+ uint32_t *bos = NULL;
+ struct timespec tv = {};
+ uint64_t submit, read, elapsed;
+ int i;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ bind_system_allocator(NULL, 0);
+
+ allocs = malloc(sizeof(*allocs) * num_allocs);
+ igt_assert(allocs);
+ memset(allocs, 0, sizeof(*allocs) * num_allocs);
+
+ if (flags & MIX_BO_ALLOC) {
+ bos = malloc(sizeof(*bos) * num_allocs);
+ igt_assert(bos);
+ memset(bos, 0, sizeof(*bos) * num_allocs);
+ }
+
+ for (i = 0; i < num_allocs; ++i) {
+ struct aligned_alloc_type alloc;
+
+ if (flags & MIX_BO_ALLOC && odd(i)) {
+ uint32_t bo_flags =
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+
+ alloc = __aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc.ptr);
+
+ bos[i] = xe_bo_create(fd, vm, alloc_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ alloc.ptr = xe_bo_map_fixed(fd, bos[i], alloc_size,
+ to_user_pointer(alloc.ptr));
+ xe_vm_bind_async(fd, vm, 0, bos[i], 0,
+ to_user_pointer(alloc.ptr),
+ alloc_size, 0, 0);
+ } else {
+ alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc.ptr);
+ }
+ allocs[i] = alloc;
+
+ touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
+ &tv, &submit);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ for (i = 0; i < num_allocs; ++i) {
+ if (flags & BENCHMARK)
+ read = igt_nsec_elapsed(&tv);
+#define NUM_CHECK_THREADS 8
+ if (flags & CPU_FAULT_PROCESS)
+ check_all_pages_process(allocs[i].ptr, alloc_size, stride,
+ NUM_CHECK_THREADS, flags);
+ else if (flags & CPU_FAULT_THREADS)
+ check_all_pages_threads(allocs[i].ptr, alloc_size, stride,
+ NUM_CHECK_THREADS, flags);
+ else
+ check_all_pages(allocs[i].ptr, alloc_size, stride, NULL);
+ if (flags & BENCHMARK) {
+ elapsed = igt_nsec_elapsed(&tv);
+ igt_info("Execution took %.3fms (submit %.1fus, read %.1fus, total %.1fus, read_total %.1fus)\n",
+ 1e-6 * elapsed, 1e-3 * submit, 1e-3 * read,
+ 1e-3 * (elapsed - submit),
+ 1e-3 * (elapsed - read));
+ }
+ if (bos && bos[i]) {
+ __aligned_free(allocs + i);
+ gem_close(fd, bos[i]);
+ } else {
+ free(allocs[i].ptr);
+ }
+ }
+ if (bos)
+ free(bos);
+ free(allocs);
+ xe_exec_queue_destroy(fd, exec_queue);
+ xe_vm_destroy(fd, vm);
+}
+
+static void process_evict(struct drm_xe_engine_class_instance *hwe,
+ uint64_t total_alloc, uint64_t alloc_size,
+ uint64_t stride, unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(sync_file, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ many_allocs(fd, hwe, total_alloc, alloc_size, stride, &pdata->barrier,
+ flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes_evict(int fd, uint64_t alloc_size, uint64_t stride,
+ unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int n_engine_gt[2] = { 0, 0 }, n_engine = 0;
+ int map_fd;
+
+ map_fd = open(sync_file, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_assert(hwe->gt_id < 2);
+ n_engine_gt[hwe->gt_id]++;
+ n_engine++;
+ }
+
+ init_pdata(pdata, n_engine);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process_evict(hwe,
+ xe_visible_vram_size(fd, hwe->gt_id) /
+ n_engine_gt[hwe->gt_id], alloc_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+#define CPU_FAULT (0x1 << 0)
+#define REMAP (0x1 << 1)
+#define MIDDLE (0x1 << 2)
+
+/**
+ * SUBTEST: partial-munmap-cpu-fault
+ * Description: munmap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-munmap-no-cpu-fault
+ * Description: munmap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-cpu-fault
+ * Description: remap partially with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-remap-no-cpu-fault
+ * Description: remap partially with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-cpu-fault
+ * Description: munmap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-munmap-no-cpu-fault
+ * Description: munmap middle with no cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-cpu-fault
+ * Description: remap middle with cpu access in between
+ * Test category: functionality test
+ *
+ * SUBTEST: partial-middle-remap-no-cpu-fault
+ * Description: remap middle with no cpu access in between
+ * Test category: functionality test
+ */
+
+static void
+partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
+{
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+ } *data;
+ size_t bo_size = SZ_2M, unmap_offset = 0;
+ uint32_t vm, exec_queue;
+ u64 *exec_ufence = NULL;
+ int i;
+ void *old, *new = NULL;
+ struct aligned_alloc_type alloc;
+
+ if (flags & MIDDLE)
+ unmap_offset = bo_size / 4;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+
+ alloc = __aligned_alloc(bo_size, bo_size);
+ igt_assert(alloc.ptr);
+
+ data = mmap(alloc.ptr, bo_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ memset(data, 0, bo_size);
+ old = data;
+
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ data[0].vm_sync = 0;
+
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int b = 0;
+
+ write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ if (!i)
+ data = old + unmap_offset + bo_size / 2;
+ }
+
+ data = old;
+ exec.exec_queue_id = exec_queue;
+
+ for (i = 0; i < 2; i++) {
+ uint64_t addr = to_user_pointer(data);
+ uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+
+ sync[0].addr = new ? to_user_pointer(new) :
+ to_user_pointer(exec_ufence);
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+
+ xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
+ exec_queue, FIVE_SEC);
+ if (i || (flags & CPU_FAULT))
+ igt_assert_eq(data[i].data, READ_VALUE(&data[i]));
+ exec_ufence[0] = 0;
+
+ if (!i) {
+ data = old + unmap_offset + bo_size / 2;
+ munmap(old + unmap_offset, bo_size / 2);
+ if (flags & REMAP) {
+ new = mmap(old + unmap_offset, bo_size / 2,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
+ MAP_LOCKED, -1, 0);
+ igt_assert(new != MAP_FAILED);
+ }
+ }
+ }
+
+ xe_exec_queue_destroy(fd, exec_queue);
+ munmap(exec_ufence, SZ_4K);
+ __aligned_free(&alloc);
+ if (new)
+ munmap(new, bo_size / 2);
+ xe_vm_destroy(fd, vm);
+}
+
+#define MAX_N_EXEC_QUEUES 16
+
+#define MMAP (0x1 << 0)
+#define NEW (0x1 << 1)
+#define BO_UNMAP (0x1 << 2)
+#define FREE (0x1 << 3)
+#define BUSY (0x1 << 4)
+#define BO_MAP (0x1 << 5)
+#define RACE (0x1 << 6)
+#define SKIP_MEMSET (0x1 << 7)
+#define FAULT (0x1 << 8)
+#define FILE_BACKED (0x1 << 9)
+#define LOCK (0x1 << 10)
+#define MMAP_SHARED (0x1 << 11)
+#define HUGE_PAGE (0x1 << 12)
+#define SHARED_ALLOC (0x1 << 13)
+#define FORK_READ (0x1 << 14)
+#define FORK_READ_AFTER (0x1 << 15)
+#define MREMAP (0x1 << 16)
+#define DONTUNMAP (0x1 << 17)
+#define READ_ONLY_REMAP (0x1 << 18)
+#define SYNC_EXEC (0x1 << 19)
+#define EVERY_OTHER_CHECK (0x1 << 20)
+#define MULTI_FAULT (0x1 << 21)
+
+#define N_MULTI_FAULT 4
+
+/**
+ * SUBTEST: once-%s
+ * Description: Run %arg[1] system allocator test only once
+ * Test category: functionality test
+ *
+ * SUBTEST: once-large-%s
+ * Description: Run %arg[1] system allocator test only once with large allocation
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-%s
+ * Description: Run %arg[1] system allocator test twice
+ * Test category: functionality test
+ *
+ * SUBTEST: twice-large-%s
+ * Description: Run %arg[1] system allocator test twice with large allocation
+ * Test category: functionality test
+ *
+ * SUBTEST: many-%s
+ * Description: Run %arg[1] system allocator test many times
+ * Test category: stress test
+ *
+ * SUBTEST: many-stride-%s
+ * Description: Run %arg[1] system allocator test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: many-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-%s
+ * Description: Run %arg[1] system allocator test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-many-%s
+ * Description: Run %arg[1] system allocator threaded test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-%s
+ * Description: Run %arg[1] system allocator threaded test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded test on many exec_queues with large allocations
+ *
+ * SUBTEST: threads-shared-vm-many-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-stride-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator threaded, shared vm test on many exec_queues with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-%s
+ * Description: Run %arg[1] system allocator multi-process test many times
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-stride-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with a stride on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-%s
+ * Description: Run %arg[1] system allocator multi-process test many times with large allocations
+ * Test category: stress test
+ *
+ * SUBTEST: process-many-large-execqueues-%s
+ * Description: Run %arg[1] system allocator multi-process test on many exec_queues with large allocations
+ *
+ * SUBTEST: fault
+ * Description: use a bad system allocator address resulting in a fault
+ * Test category: bad input
+ *
+ * arg[1]:
+ *
+ * @malloc: malloc single buffer for all execs, issue a command which will trigger multiple faults
+ * @malloc-multi-fault: malloc single buffer for all execs
+ * @malloc-fork-read: malloc single buffer for all execs, fork a process to read test output
+ * @malloc-fork-read-after: malloc single buffer for all execs, fork a process to read test output, check again after fork returns in parent
+ * @malloc-mlock: malloc and mlock single buffer for all execs
+ * @malloc-race: malloc single buffer for all execs with race between cpu and gpu access
+ * @malloc-bo-unmap: malloc single buffer for all execs, bind and unbind a BO to same address before execs
+ * @malloc-busy: malloc single buffer for all execs, try to unbind while buffer valid
+ * @mmap: mmap single buffer for all execs
+ * @mmap-remap: mmap and mremap a buffer for all execs
+ * @mmap-remap-dontunmap: mmap and mremap a buffer with dontunmap flag for all execs
+ * @mmap-remap-ro: mmap and mremap a read-only buffer for all execs
+ * @mmap-remap-ro-dontunmap: mmap and mremap a read-only buffer with dontunmap flag for all execs
+ * @mmap-remap-eocheck: mmap and mremap a buffer for all execs, check data every other loop iteration
+ * @mmap-remap-dontunmap-eocheck: mmap and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-remap-ro-eocheck: mmap and mremap a read-only buffer for all execs, check data every other loop iteration
+ * @mmap-remap-ro-dontunmap-eocheck: mmap and mremap a read-only buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-huge: mmap huge page single buffer for all execs
+ * @mmap-shared: mmap shared single buffer for all execs
+ * @mmap-shared-remap: mmap shared and mremap a buffer for all execs
+ * @mmap-shared-remap-dontunmap: mmap shared and mremap a buffer with dontunmap flag for all execs
+ * @mmap-shared-remap-eocheck: mmap shared and mremap a buffer for all execs, check data every other loop iteration
+ * @mmap-shared-remap-dontunmap-eocheck: mmap shared and mremap a buffer with dontunmap flag for all execs, check data every other loop iteration
+ * @mmap-mlock: mmap and mlock single buffer for all execs
+ * @mmap-file: mmap single buffer, with file backing, for all execs
+ * @mmap-file-mlock: mmap and mlock single buffer, with file backing, for all execs
+ * @mmap-race: mmap single buffer for all execs with race between cpu and gpu access
+ * @free: malloc and free buffer for each exec
+ * @free-race: malloc and free buffer for each exec with race between cpu and gpu access
+ * @new: malloc a new buffer for each exec
+ * @new-race: malloc a new buffer for each exec with race between cpu and gpu access
+ * @new-bo-map: malloc a new buffer or map BO for each exec
+ * @new-busy: malloc a new buffer for each exec, try to unbind while buffers valid
+ * @mmap-free: mmap and free buffer for each exec
+ * @mmap-free-huge: mmap huge page and free buffer for each exec
+ * @mmap-free-race: mmap and free buffer for each exec with race between cpu and gpu access
+ * @mmap-new: mmap a new buffer for each exec
+ * @mmap-new-huge: mmap huge page a new buffer for each exec
+ * @mmap-new-race: mmap a new buffer for each exec with race between cpu and gpu access
+ * @malloc-nomemset: malloc single buffer for all execs, skip memset of buffers
+ * @malloc-mlock-nomemset: malloc and mlock single buffer for all execs, skip memset of buffers
+ * @malloc-race-nomemset: malloc single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @malloc-bo-unmap-nomemset: malloc single buffer for all execs, bind and unbind a BO to same address before execs, skip memset of buffers
+ * @malloc-busy-nomemset: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers
+ * @mmap-nomemset: mmap single buffer for all execs, skip memset of buffers
+ * @mmap-huge-nomemset: mmap huge page single buffer for all execs, skip memset of buffers
+ * @mmap-shared-nomemset: mmap shared single buffer for all execs, skip memset of buffers
+ * @mmap-mlock-nomemset: mmap and mlock single buffer for all execs, skip memset of buffers
+ * @mmap-file-nomemset: mmap single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-file-mlock-nomemset: mmap and mlock single buffer, with file backing, for all execs, skip memset of buffers
+ * @mmap-race-nomemset: mmap single buffer for all execs with race between cpu and gpu access, skip memset of buffers
+ * @free-nomemset: malloc and free buffer for each exec, skip memset of buffers
+ * @free-race-nomemset: malloc and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-nomemset: malloc a new buffer for each exec, skip memset of buffers
+ * @new-race-nomemset: malloc a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @new-bo-map-nomemset: malloc a new buffer or map BO for each exec, skip memset of buffers
+ * @new-busy-nomemset: malloc a new buffer for each exec, try to unbind while buffers valid, skip memset of buffers
+ * @mmap-free-nomemset: mmap and free buffer for each exec, skip memset of buffers
+ * @mmap-free-huge-nomemset: mmap huge page and free buffer for each exec, skip memset of buffers
+ * @mmap-free-race-nomemset: mmap and free buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
+ * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
+ * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-vm-shared-alloc-many-stride-malloc-race
+ * Description: Create multiple threads with a shared VM triggering faults on different hardware engines to same addresses, racing between CPU and GPU access
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc-sync
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses, syncing on each exec
+ * Test category: stress test
+ *
+ * SUBTEST: threads-shared-alloc-many-stride-malloc-race
+ * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
+ * Test category: stress test
+ */
+
+struct test_exec_data {
+ uint32_t batch[32];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+};
+
+static void
+test_exec(int fd, struct drm_xe_engine_class_instance *eci,
+ int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
+ unsigned int flags)
+{
+ uint64_t addr;
+ struct drm_xe_sync sync[1] = {
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ uint32_t exec_queues[MAX_N_EXEC_QUEUES];
+ struct test_exec_data *data, *next_data = NULL;
+ uint32_t bo_flags;
+ uint32_t bo = 0;
+ void **pending_free;
+ u64 *exec_ufence = NULL;
+ int i, j, b, file_fd = -1, prev_idx;
+ bool free_vm = false;
+ size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
+ size_t orig_size = bo_size;
+ struct aligned_alloc_type aligned_alloc_type;
+
+ if (flags & MULTI_FAULT) {
+ if (!bo_size)
+ return;
+
+ bo_size *= N_MULTI_FAULT;
+ }
+
+ if (flags & SHARED_ALLOC)
+ return;
+
+ if (flags & EVERY_OTHER_CHECK && odd(n_execs))
+ return;
+
+ if (flags & EVERY_OTHER_CHECK)
+ igt_assert(flags & MREMAP);
+
+ igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
+
+ if (flags & NEW && !(flags & FREE)) {
+ pending_free = malloc(sizeof(*pending_free) * n_execs);
+ igt_assert(pending_free);
+ memset(pending_free, 0, sizeof(*pending_free) * n_execs);
+ }
+
+ if (!vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ free_vm = true;
+ }
+ if (!bo_size) {
+ if (!stride) {
+ bo_size = sizeof(*data) * n_execs;
+ bo_size = xe_bb_size(fd, bo_size);
+ } else {
+ bo_size = stride * n_execs * sizeof(*data);
+ bo_size = xe_bb_size(fd, bo_size);
+ }
+ }
+ if (flags & HUGE_PAGE) {
+ aligned_size = ALIGN(aligned_size, SZ_2M);
+ bo_size = ALIGN(bo_size, SZ_2M);
+ }
+
+ if (alloc) {
+ data = alloc;
+ } else {
+ if (flags & MMAP) {
+ int mmap_flags = MAP_FIXED;
+
+ aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
+ data = aligned_alloc_type.ptr;
+ igt_assert(data);
+ __aligned_partial_free(&aligned_alloc_type);
+
+ if (flags & MMAP_SHARED)
+ mmap_flags |= MAP_SHARED;
+ else
+ mmap_flags |= MAP_PRIVATE;
+
+ if (flags & HUGE_PAGE)
+ mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
+
+ if (flags & FILE_BACKED) {
+ char name[] = "/tmp/xe_exec_system_allocator_datXXXXXX";
+
+ igt_assert(!(flags & NEW));
+
+ file_fd = mkstemp(name);
+ posix_fallocate(file_fd, 0, bo_size);
+ } else {
+ mmap_flags |= MAP_ANONYMOUS;
+ }
+
+ data = mmap(data, bo_size, PROT_READ |
+ PROT_WRITE, mmap_flags, file_fd, 0);
+ igt_assert(data != MAP_FAILED);
+ } else {
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ }
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ if (flags & LOCK) {
+ igt_assert(!(flags & NEW));
+ mlock(data, bo_size);
+ }
+ }
+
+ for (i = 0; i < n_exec_queues; i++)
+ exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
+
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (free_vm) {
+ bind_system_allocator(sync, 1);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ }
+ data[0].vm_sync = 0;
+
+ addr = to_user_pointer(data);
+
+ if (flags & BO_UNMAP) {
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id), bo_flags);
+ xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, 0, 0);
+
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
+ 1, 0, 0);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
+ FIVE_SEC);
+ data[0].vm_sync = 0;
+ gem_close(fd, bo);
+ bo = 0;
+ }
+
+ if (!(flags & RACE)) {
+ exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(exec_ufence != MAP_FAILED);
+ memset(exec_ufence, 0, SZ_4K);
+ }
+
+ for (i = 0; i < n_execs; i++) {
+ int idx = !stride ? i : i * stride, next_idx = !stride
+ ? (i + 1) : (i + 1) * stride;
+ uint64_t batch_offset = (char *)&data[idx].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ int e = i % n_exec_queues, err;
+ bool fault_inject = (FAULT & flags) && i == n_execs / 2;
+ bool fault_injected = (FAULT & flags) && i > n_execs;
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ if (flags & MULTI_FAULT) {
+ b = 0;
+ for (j = 0; j < N_MULTI_FAULT - 1; ++j)
+ __write_dword(data[idx].batch,
+ sdi_addr + j * orig_size,
+ WRITE_VALUE(&data[idx], idx), &b);
+ write_dword(data[idx].batch, sdi_addr + j * orig_size,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+ } else if (!(flags & EVERY_OTHER_CHECK)) {
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+ } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
+ b = 0;
+ write_dword(data[idx].batch, sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+
+ aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
+ next_data = aligned_alloc_type.ptr;
+ igt_assert(next_data);
+ __aligned_partial_free(&aligned_alloc_type);
+
+ b = 0;
+ write_dword(data[next_idx].batch,
+ to_user_pointer(next_data) +
+ (char *)&data[next_idx].data - (char *)data,
+ WRITE_VALUE(&data[next_idx], next_idx), &b);
+ igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
+ }
+
+ if (!exec_ufence)
+ data[idx].exec_sync = 0;
+
+ sync[0].addr = exec_ufence ? to_user_pointer(exec_ufence) :
+ addr + (char *)&data[idx].exec_sync - (char *)data;
+
+ exec.exec_queue_id = exec_queues[e];
+ if (fault_inject)
+ exec.address = batch_addr * 2;
+ else
+ exec.address = batch_addr;
+
+ if (fault_injected) {
+ err = __xe_exec(fd, &exec);
+ igt_assert(err == -ENOENT);
+ } else {
+ xe_exec(fd, &exec);
+ }
+
+ if (barrier)
+ pthread_barrier_wait(barrier);
+
+ if (fault_inject || fault_injected) {
+ int64_t timeout = QUARTER_SEC;
+
+ err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync,
+ USER_FENCE_VALUE,
+ exec_queues[e], &timeout);
+ igt_assert(err == -ETIME || err == -EIO);
+ } else {
+ xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync, USER_FENCE_VALUE,
+ exec_queues[e], FIVE_SEC);
+ if (flags & LOCK && !i)
+ munlock(data, bo_size);
+
+ if (flags & MREMAP) {
+ void *old = data;
+ int remap_flags = MREMAP_MAYMOVE | MREMAP_FIXED;
+
+ /* Only available on kernels 5.7+ */
+ #ifdef MREMAP_DONTUNMAP
+ if (flags & DONTUNMAP)
+ remap_flags |= MREMAP_DONTUNMAP;
+ #endif
+
+ if (flags & READ_ONLY_REMAP)
+ igt_assert(!mprotect(old, bo_size,
+ PROT_READ));
+
+ if (!next_data) {
+ aligned_alloc_type = __aligned_alloc(aligned_size,
+ bo_size);
+ data = aligned_alloc_type.ptr;
+ __aligned_partial_free(&aligned_alloc_type);
+ } else {
+ data = next_data;
+ }
+ next_data = NULL;
+ igt_assert(data);
+
+ data = mremap(old, bo_size, bo_size,
+ remap_flags, data);
+ igt_assert(data != MAP_FAILED);
+
+ if (flags & READ_ONLY_REMAP)
+ igt_assert(!mprotect(data, bo_size,
+ PROT_READ |
+ PROT_WRITE));
+
+ addr = to_user_pointer(data);
+
+ #ifdef MREMAP_DONTUNMAP
+ if (flags & DONTUNMAP)
+ munmap(old, bo_size);
+ #endif
+ }
+
+ if (!(flags & EVERY_OTHER_CHECK) || odd(i)) {
+ if (flags & FORK_READ) {
+ igt_fork(child, 1)
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx]));
+ if (!(flags & FORK_READ_AFTER))
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx]));
+ igt_waitchildren();
+ if (flags & FORK_READ_AFTER)
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx]));
+ } else {
+ igt_assert_eq(data[idx].data,
+ READ_VALUE(&data[idx]));
+
+ if (flags & MULTI_FAULT) {
+ for (j = 1; j < N_MULTI_FAULT; ++j) {
+ struct test_exec_data *__data =
+ ((void *)data) + j * orig_size;
+
+ igt_assert_eq(__data[idx].data,
+ READ_VALUE(&data[idx]));
+ }
+ }
+ }
+ if (flags & EVERY_OTHER_CHECK)
+ igt_assert_eq(data[prev_idx].data,
+ READ_VALUE(&data[prev_idx]));
+ }
+ }
+
+ if (exec_ufence)
+ exec_ufence[0] = 0;
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ gem_close(fd, bo);
+ }
+
+ if (flags & NEW) {
+ if (flags & MMAP) {
+ if (flags & FREE)
+ munmap(data, bo_size);
+ else
+ pending_free[i] = data;
+ data = mmap(NULL, bo_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED |
+ MAP_ANONYMOUS, -1, 0);
+ igt_assert(data != MAP_FAILED);
+ } else if (flags & BO_MAP && odd(i)) {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+
+ aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
+ data = aligned_alloc_type.ptr;
+ igt_assert(data);
+ __aligned_partial_free(&aligned_alloc_type);
+
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id),
+ bo_flags);
+ data = xe_bo_map_fixed(fd, bo, bo_size,
+ to_user_pointer(data));
+
+ xe_vm_bind_async(fd, vm, 0, bo, 0,
+ to_user_pointer(data),
+ bo_size, 0, 0);
+ } else {
+ if (!bo) {
+ if (flags & FREE)
+ free(data);
+ else
+ pending_free[i] = data;
+ }
+ bo = 0;
+ data = aligned_alloc(aligned_size, bo_size);
+ igt_assert(data);
+ }
+ addr = to_user_pointer(data);
+ if (!(flags & SKIP_MEMSET))
+ memset(data, 0, bo_size);
+ }
+
+ prev_idx = idx;
+ }
+
+ if (bo) {
+ __xe_vm_bind_assert(fd, vm, 0,
+ 0, 0, addr, bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
+ NULL, 0, 0, 0);
+ munmap(data, bo_size);
+ data = NULL;
+ gem_close(fd, bo);
+ }
+
+ if (flags & BUSY)
+ igt_assert_eq(unbind_system_allocator(), -EBUSY);
+
+ for (i = 0; i < n_exec_queues; i++)
+ xe_exec_queue_destroy(fd, exec_queues[i]);
+
+ if (exec_ufence)
+ munmap(exec_ufence, SZ_4K);
+
+ if (flags & LOCK)
+ munlock(data, bo_size);
+
+ if (file_fd != -1)
+ close(file_fd);
+
+ if (flags & NEW && !(flags & FREE)) {
+ for (i = 0; i < n_execs; i++) {
+ if (!pending_free[i])
+ continue;
+
+ if (flags & MMAP)
+ munmap(pending_free[i], bo_size);
+ else
+ free(pending_free[i]);
+ }
+ free(pending_free);
+ }
+ if (data) {
+ if (flags & MMAP)
+ munmap(data, bo_size);
+ else if (!alloc)
+ free(data);
+ }
+ if (free_vm)
+ xe_vm_destroy(fd, vm);
+}
+
+struct thread_data {
+ pthread_t thread;
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ pthread_barrier_t *barrier;
+ int fd;
+ struct drm_xe_engine_class_instance *eci;
+ int n_exec_queues;
+ int n_execs;
+ size_t bo_size;
+ size_t stride;
+ uint32_t vm;
+ unsigned int flags;
+ void *alloc;
+ bool *go;
+};
+
+static void *thread(void *data)
+{
+ struct thread_data *t = data;
+
+ pthread_mutex_lock(t->mutex);
+ while (!*t->go)
+ pthread_cond_wait(t->cond, t->mutex);
+ pthread_mutex_unlock(t->mutex);
+
+ test_exec(t->fd, t->eci, t->n_exec_queues, t->n_execs,
+ t->bo_size, t->stride, t->vm, t->alloc, t->barrier,
+ t->flags);
+
+ return NULL;
+}
+
+static void
+threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags, bool shared_vm)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct thread_data *threads_data;
+ int n_engines = 0, i = 0;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_barrier_t barrier;
+ uint32_t vm = 0;
+ bool go = false;
+ void *alloc = NULL;
+
+ if ((FILE_BACKED | FORK_READ) & flags)
+ return;
+
+ xe_for_each_engine(fd, hwe)
+ ++n_engines;
+
+ if (shared_vm) {
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ bind_system_allocator(NULL, 0);
+ }
+
+ if (flags & SHARED_ALLOC) {
+ uint64_t alloc_size;
+
+ igt_assert(stride);
+
+ alloc_size = sizeof(struct test_exec_data) * stride *
+ n_execs * n_engines;
+ alloc_size = xe_bb_size(fd, alloc_size);
+ alloc = aligned_alloc(SZ_2M, alloc_size);
+ igt_assert(alloc);
+
+ memset(alloc, 0, alloc_size);
+ flags &= ~SHARED_ALLOC;
+ }
+
+ threads_data = calloc(n_engines, sizeof(*threads_data));
+ igt_assert(threads_data);
+
+ pthread_mutex_init(&mutex, 0);
+ pthread_cond_init(&cond, 0);
+ pthread_barrier_init(&barrier, 0, n_engines);
+
+ xe_for_each_engine(fd, hwe) {
+ threads_data[i].mutex = &mutex;
+ threads_data[i].cond = &cond;
+ threads_data[i].barrier = (flags & SYNC_EXEC) ? &barrier : NULL;
+ threads_data[i].fd = fd;
+ threads_data[i].eci = hwe;
+ threads_data[i].n_exec_queues = n_exec_queues;
+ threads_data[i].n_execs = n_execs;
+ threads_data[i].bo_size = bo_size;
+ threads_data[i].stride = stride;
+ threads_data[i].vm = vm;
+ threads_data[i].flags = flags;
+ threads_data[i].alloc = alloc ? alloc + i *
+ sizeof(struct test_exec_data) : NULL;
+ threads_data[i].go = &go;
+ pthread_create(&threads_data[i].thread, 0, thread,
+ &threads_data[i]);
+ ++i;
+ }
+
+ pthread_mutex_lock(&mutex);
+ go = true;
+ pthread_cond_broadcast(&cond);
+ pthread_mutex_unlock(&mutex);
+
+ for (i = 0; i < n_engines; ++i)
+ pthread_join(threads_data[i].thread, NULL);
+
+ if (shared_vm) {
+ int ret;
+
+ if (flags & MMAP) {
+ int tries = 300;
+
+ while (tries && (ret = unbind_system_allocator()) == -EBUSY) {
+ sleep(.01);
+ --tries;
+ }
+ igt_assert_eq(ret, 0);
+ }
+ xe_vm_destroy(fd, vm);
+ if (alloc)
+ free(alloc);
+ }
+ free(threads_data);
+}
+
+static void process(struct drm_xe_engine_class_instance *hwe, int n_exec_queues,
+ int n_execs, size_t bo_size, size_t stride,
+ unsigned int flags)
+{
+ struct process_data *pdata;
+ int map_fd;
+ int fd;
+
+ map_fd = open(sync_file, O_RDWR, 0x666);
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+ wait_pdata(pdata);
+
+ fd = drm_open_driver(DRIVER_XE);
+ test_exec(fd, hwe, n_exec_queues, n_execs,
+ bo_size, stride, 0, NULL, NULL, flags);
+ drm_close_driver(fd);
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+static void
+processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
+ size_t stride, unsigned int flags)
+{
+ struct drm_xe_engine_class_instance *hwe;
+ struct process_data *pdata;
+ int map_fd;
+
+ if (flags & FORK_READ)
+ return;
+
+ map_fd = open(sync_file, O_RDWR | O_CREAT, 0x666);
+ posix_fallocate(map_fd, 0, sizeof(*pdata));
+ pdata = mmap(NULL, sizeof(*pdata), PROT_READ |
+ PROT_WRITE, MAP_SHARED, map_fd, 0);
+
+ init_pdata(pdata, 0);
+
+ xe_for_each_engine(fd, hwe) {
+ igt_fork(child, 1)
+ process(hwe, n_exec_queues, n_execs, bo_size,
+ stride, flags);
+ }
+
+ signal_pdata(pdata);
+ igt_waitchildren();
+
+ close(map_fd);
+ munmap(pdata, sizeof(*pdata));
+}
+
+struct section {
+ const char *name;
+ unsigned int flags;
+};
+
+igt_main
+{
+ struct drm_xe_engine_class_instance *hwe;
+ const struct section sections[] = {
+ { "malloc", 0 },
+ { "malloc-multi-fault", MULTI_FAULT },
+ { "malloc-fork-read", FORK_READ },
+ { "malloc-fork-read-after", FORK_READ | FORK_READ_AFTER },
+ { "malloc-mlock", LOCK },
+ { "malloc-race", RACE },
+ { "malloc-busy", BUSY },
+ { "malloc-bo-unmap", BO_UNMAP },
+ { "mmap", MMAP },
+ { "mmap-remap", MMAP | MREMAP },
+ { "mmap-remap-dontunmap", MMAP | MREMAP | DONTUNMAP },
+ { "mmap-remap-ro", MMAP | MREMAP | READ_ONLY_REMAP },
+ { "mmap-remap-ro-dontunmap", MMAP | MREMAP | DONTUNMAP |
+ READ_ONLY_REMAP },
+ { "mmap-remap-eocheck", MMAP | MREMAP | EVERY_OTHER_CHECK },
+ { "mmap-remap-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
+ EVERY_OTHER_CHECK },
+ { "mmap-remap-ro-eocheck", MMAP | MREMAP | READ_ONLY_REMAP |
+ EVERY_OTHER_CHECK },
+ { "mmap-remap-ro-dontunmap-eocheck", MMAP | MREMAP | DONTUNMAP |
+ READ_ONLY_REMAP | EVERY_OTHER_CHECK },
+ { "mmap-huge", MMAP | HUGE_PAGE },
+ { "mmap-shared", MMAP | LOCK | MMAP_SHARED },
+ { "mmap-shared-remap", MMAP | LOCK | MMAP_SHARED | MREMAP },
+ { "mmap-shared-remap-dontunmap", MMAP | LOCK | MMAP_SHARED |
+ MREMAP | DONTUNMAP },
+ { "mmap-shared-remap-eocheck", MMAP | LOCK | MMAP_SHARED |
+ MREMAP | EVERY_OTHER_CHECK },
+ { "mmap-shared-remap-dontunmap-eocheck", MMAP | LOCK |
+ MMAP_SHARED | MREMAP | DONTUNMAP | EVERY_OTHER_CHECK },
+ { "mmap-mlock", MMAP | LOCK },
+ { "mmap-file", MMAP | FILE_BACKED },
+ { "mmap-file-mlock", MMAP | LOCK | FILE_BACKED },
+ { "mmap-race", MMAP | RACE },
+ { "free", NEW | FREE },
+ { "free-race", NEW | FREE | RACE },
+ { "new", NEW },
+ { "new-race", NEW | RACE },
+ { "new-bo-map", NEW | BO_MAP },
+ { "new-busy", NEW | BUSY },
+ { "mmap-free", MMAP | NEW | FREE },
+ { "mmap-free-huge", MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race", MMAP | NEW | FREE | RACE },
+ { "mmap-new", MMAP | NEW },
+ { "mmap-new-huge", MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race", MMAP | NEW | RACE },
+ { "malloc-nomemset", SKIP_MEMSET },
+ { "malloc-mlock-nomemset", SKIP_MEMSET | LOCK },
+ { "malloc-race-nomemset", SKIP_MEMSET | RACE },
+ { "malloc-busy-nomemset", SKIP_MEMSET | BUSY },
+ { "malloc-bo-unmap-nomemset", SKIP_MEMSET | BO_UNMAP },
+ { "mmap-nomemset", SKIP_MEMSET | MMAP },
+ { "mmap-huge-nomemset", SKIP_MEMSET | MMAP | HUGE_PAGE },
+ { "mmap-shared-nomemset", SKIP_MEMSET | MMAP | MMAP_SHARED },
+ { "mmap-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK },
+ { "mmap-file-nomemset", SKIP_MEMSET | MMAP | FILE_BACKED },
+ { "mmap-file-mlock-nomemset", SKIP_MEMSET | MMAP | LOCK | FILE_BACKED },
+ { "mmap-race-nomemset", SKIP_MEMSET | MMAP | RACE },
+ { "free-nomemset", SKIP_MEMSET | NEW | FREE },
+ { "free-race-nomemset", SKIP_MEMSET | NEW | FREE | RACE },
+ { "new-nomemset", SKIP_MEMSET | NEW },
+ { "new-race-nomemset", SKIP_MEMSET | NEW | RACE },
+ { "new-bo-map-nomemset", SKIP_MEMSET | NEW | BO_MAP },
+ { "new-busy-nomemset", SKIP_MEMSET | NEW | BUSY },
+ { "mmap-free-nomemset", SKIP_MEMSET | MMAP | NEW | FREE },
+ { "mmap-free-huge-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | HUGE_PAGE },
+ { "mmap-free-race-nomemset", SKIP_MEMSET | MMAP | NEW | FREE | RACE },
+ { "mmap-new-nomemset", SKIP_MEMSET | MMAP | NEW },
+ { "mmap-new-huge-nomemset", SKIP_MEMSET | MMAP | NEW | HUGE_PAGE },
+ { "mmap-new-race-nomemset", SKIP_MEMSET | MMAP | NEW | RACE },
+ { NULL },
+ };
+ const struct section psections[] = {
+ { "munmap-cpu-fault", CPU_FAULT },
+ { "munmap-no-cpu-fault", 0 },
+ { "remap-cpu-fault", CPU_FAULT | REMAP },
+ { "remap-no-cpu-fault", REMAP },
+ { "middle-munmap-cpu-fault", MIDDLE | CPU_FAULT },
+ { "middle-munmap-no-cpu-fault", MIDDLE },
+ { "middle-remap-cpu-fault", MIDDLE | CPU_FAULT | REMAP },
+ { "middle-remap-no-cpu-fault", MIDDLE | REMAP },
+ { NULL },
+ };
+ const struct section esections[] = {
+ { "malloc", 0 },
+ { "malloc-mix-bo", MIX_BO_ALLOC },
+ { NULL },
+ };
+ int fd;
+
+ igt_fixture {
+ struct xe_device *xe;
+
+ fd = drm_open_driver(DRIVER_XE);
+ igt_require(!xe_supports_faults(fd));
+
+ xe = xe_device_get(fd);
+ va_bits = xe->va_bits;
+ open_sync_file();
+ }
+
+ for (const struct section *s = sections; s->name; s++) {
+ igt_subtest_f("once-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("once-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("twice-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("twice-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 2, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-stride-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, 0, 256, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, 0, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-large-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 128, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("many-large-execqueues-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 16, 128, SZ_2M, 0, 0, NULL,
+ NULL, s->flags);
+
+ igt_subtest_f("threads-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, false);
+
+ igt_subtest_f("threads-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, false);
+
+ igt_subtest_f("threads-shared-vm-many-%s", s->name)
+ threads(fd, 1, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-stride-%s", s->name)
+ threads(fd, 1, 128, 0, 256, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-execqueues-%s", s->name)
+ threads(fd, 16, 128, 0, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-%s", s->name)
+ threads(fd, 1, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("threads-shared-vm-many-large-execqueues-%s", s->name)
+ threads(fd, 16, 128, SZ_2M, 0, s->flags, true);
+
+ igt_subtest_f("process-many-%s", s->name)
+ processes(fd, 1, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-stride-%s", s->name)
+ processes(fd, 1, 128, 0, 256, s->flags);
+
+ igt_subtest_f("process-many-execqueues-%s", s->name)
+ processes(fd, 16, 128, 0, 0, s->flags);
+
+ igt_subtest_f("process-many-large-%s", s->name)
+ processes(fd, 1, 128, SZ_2M, 0, s->flags);
+
+ igt_subtest_f("process-many-large-execqueues-%s", s->name)
+ processes(fd, 16, 128, SZ_2M, 0, s->flags);
+ }
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC, true);
+
+ igt_subtest("threads-shared-vm-shared-alloc-many-stride-malloc-race")
+ threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, true);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC, false);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc-sync")
+ threads(fd, 1, 128, 0, 256, SHARED_ALLOC | SYNC_EXEC, false);
+
+ igt_subtest("threads-shared-alloc-many-stride-malloc-race")
+ threads(fd, 1, 128, 0, 256, RACE | SHARED_ALLOC, false);
+
+ igt_subtest_f("fault")
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 4, 1, SZ_2M, 0, 0, NULL, NULL,
+ FAULT);
+
+ for (const struct section *s = psections; s->name; s++) {
+ igt_subtest_f("partial-%s", s->name)
+ xe_for_each_engine(fd, hwe)
+ partial(fd, hwe, s->flags);
+ }
+
+ igt_subtest_f("unaligned-alloc")
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe, (SZ_1M + SZ_512K) * 8,
+ SZ_1M + SZ_512K, SZ_4K, NULL, 0);
+ break;
+ }
+
+ igt_subtest_f("fault-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK);
+
+ igt_subtest_f("fault-threads-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_THREADS);
+
+ igt_subtest_f("fault-threads-same-page-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_THREADS |
+ CPU_FAULT_SAME_PAGE);
+
+ igt_subtest_f("fault-process-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_PROCESS);
+
+ igt_subtest_f("fault-process-same-page-benchmark")
+ xe_for_each_engine(fd, hwe)
+ many_allocs(fd, hwe, SZ_64M, SZ_64M, SZ_4K, NULL,
+ BENCHMARK | CPU_FAULT_PROCESS |
+ CPU_FAULT_SAME_PAGE);
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("evict-%s", s->name)
+ xe_for_each_engine(fd, hwe) {
+ many_allocs(fd, hwe,
+ xe_visible_vram_size(fd, hwe->gt_id),
+ SZ_8M, SZ_1M, NULL, s->flags);
+ break;
+ }
+ }
+
+ for (const struct section *s = esections; s->name; s++) {
+ igt_subtest_f("processes-evict-%s", s->name)
+ processes_evict(fd, SZ_8M, SZ_1M, s->flags);
+ }
+
+ igt_fixture {
+ xe_device_put(fd);
+ drm_close_driver(fd);
+ close_sync_file();
+ }
+}
diff --git a/tests/meson.build b/tests/meson.build
index 6328792e3a..20ddddb89f 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -295,6 +295,7 @@ intel_xe_progs = [
'xe_exec_reset',
'xe_exec_sip',
'xe_exec_store',
+ 'xe_exec_system_allocator',
'xe_exec_threads',
'xe_exercise_blt',
'xe_fault_injection',
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* ✓ Xe.CI.BAT: success for tests/xe: Add system_allocator test (rev5)
2025-04-25 18:20 [PATCH] tests/xe: Add system_allocator test Matthew Brost
@ 2025-04-25 21:03 ` Patchwork
2025-04-25 21:21 ` ✓ i915.CI.BAT: " Patchwork
` (2 subsequent siblings)
3 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2025-04-25 21:03 UTC (permalink / raw)
To: Matthew Brost; +Cc: igt-dev
[-- Attachment #1: Type: text/plain, Size: 4704 bytes --]
== Series Details ==
Series: tests/xe: Add system_allocator test (rev5)
URL : https://patchwork.freedesktop.org/series/137545/
State : success
== Summary ==
CI Bug Log - changes from XEIGT_8339_BAT -> XEIGTPW_13046_BAT
====================================================
Summary
-------
**SUCCESS**
No regressions found.
Participating hosts (8 -> 8)
------------------------------
No changes in participating hosts
Known issues
------------
Here are the changes found in XEIGTPW_13046_BAT that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@kms_addfb_basic@addfb25-y-tiled-small-legacy:
- bat-dg2-oem2: NOTRUN -> [SKIP][1] ([Intel XE#623])
[1]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@kms_addfb_basic@addfb25-y-tiled-small-legacy.html
* igt@kms_dsc@dsc-basic:
- bat-dg2-oem2: NOTRUN -> [SKIP][2] ([Intel XE#455])
[2]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@kms_dsc@dsc-basic.html
* igt@kms_psr@psr-cursor-plane-move:
- bat-dg2-oem2: NOTRUN -> [SKIP][3] ([Intel XE#2850] / [Intel XE#929]) +2 other tests skip
[3]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@kms_psr@psr-cursor-plane-move.html
* igt@sriov_basic@enable-vfs-autoprobe-off:
- bat-dg2-oem2: NOTRUN -> [SKIP][4] ([Intel XE#1091] / [Intel XE#2849]) +1 other test skip
[4]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@sriov_basic@enable-vfs-autoprobe-off.html
* igt@xe_exec_fault_mode@twice-bindexecqueue-userptr:
- bat-dg2-oem2: NOTRUN -> [SKIP][5] ([Intel XE#288]) +32 other tests skip
[5]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@xe_exec_fault_mode@twice-bindexecqueue-userptr.html
* igt@xe_huc_copy@huc_copy:
- bat-dg2-oem2: NOTRUN -> [SKIP][6] ([Intel XE#255])
[6]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@xe_huc_copy@huc_copy.html
* igt@xe_live_ktest@xe_migrate@xe_validate_ccs_kunit:
- bat-dg2-oem2: NOTRUN -> [SKIP][7] ([Intel XE#2229])
[7]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@xe_live_ktest@xe_migrate@xe_validate_ccs_kunit.html
* igt@xe_pat@pat-index-xe2:
- bat-dg2-oem2: NOTRUN -> [SKIP][8] ([Intel XE#977])
[8]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@xe_pat@pat-index-xe2.html
* igt@xe_pat@pat-index-xehpc:
- bat-dg2-oem2: NOTRUN -> [SKIP][9] ([Intel XE#2838] / [Intel XE#979])
[9]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@xe_pat@pat-index-xehpc.html
* igt@xe_pat@pat-index-xelpg:
- bat-dg2-oem2: NOTRUN -> [SKIP][10] ([Intel XE#979])
[10]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@xe_pat@pat-index-xelpg.html
* igt@xe_sriov_flr@flr-vf1-clear:
- bat-dg2-oem2: NOTRUN -> [SKIP][11] ([Intel XE#3342])
[11]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@xe_sriov_flr@flr-vf1-clear.html
#### Possible fixes ####
* igt@xe_module_load@load:
- bat-dg2-oem2: [ABORT][12] -> [PASS][13]
[12]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/bat-dg2-oem2/igt@xe_module_load@load.html
[13]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/bat-dg2-oem2/igt@xe_module_load@load.html
[Intel XE#1091]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1091
[Intel XE#2229]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2229
[Intel XE#255]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/255
[Intel XE#2838]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2838
[Intel XE#2849]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2849
[Intel XE#2850]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2850
[Intel XE#288]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/288
[Intel XE#3342]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3342
[Intel XE#455]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/455
[Intel XE#623]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/623
[Intel XE#929]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/929
[Intel XE#977]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/977
[Intel XE#979]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/979
Build changes
-------------
* IGT: IGT_8339 -> IGTPW_13046
IGTPW_13046: 13046
IGT_8339: 8339
xe-2999-6e67a7af43567bb9f23fe156fde7efa3d214fd20: 6e67a7af43567bb9f23fe156fde7efa3d214fd20
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/index.html
[-- Attachment #2: Type: text/html, Size: 5596 bytes --]
^ permalink raw reply [flat|nested] 16+ messages in thread
* ✓ i915.CI.BAT: success for tests/xe: Add system_allocator test (rev5)
2025-04-25 18:20 [PATCH] tests/xe: Add system_allocator test Matthew Brost
2025-04-25 21:03 ` ✓ Xe.CI.BAT: success for tests/xe: Add system_allocator test (rev5) Patchwork
@ 2025-04-25 21:21 ` Patchwork
2025-04-26 6:28 ` ✓ i915.CI.Full: " Patchwork
2025-04-26 10:21 ` ✗ Xe.CI.Full: failure " Patchwork
3 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2025-04-25 21:21 UTC (permalink / raw)
To: Matthew Brost; +Cc: igt-dev
[-- Attachment #1: Type: text/plain, Size: 3139 bytes --]
== Series Details ==
Series: tests/xe: Add system_allocator test (rev5)
URL : https://patchwork.freedesktop.org/series/137545/
State : success
== Summary ==
CI Bug Log - changes from IGT_8339 -> IGTPW_13046
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/index.html
Participating hosts (42 -> 42)
------------------------------
No changes in participating hosts
Known issues
------------
Here are the changes found in IGTPW_13046 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@i915_selftest@live@workarounds:
- bat-mtlp-6: [PASS][1] -> [DMESG-FAIL][2] ([i915#12061]) +1 other test dmesg-fail
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/bat-mtlp-6/igt@i915_selftest@live@workarounds.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/bat-mtlp-6/igt@i915_selftest@live@workarounds.html
#### Possible fixes ####
* igt@i915_module_load@load:
- bat-mtlp-9: [DMESG-WARN][3] ([i915#13494]) -> [PASS][4]
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/bat-mtlp-9/igt@i915_module_load@load.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/bat-mtlp-9/igt@i915_module_load@load.html
* igt@i915_selftest@live@workarounds:
- bat-arls-5: [DMESG-FAIL][5] ([i915#12061]) -> [PASS][6] +1 other test pass
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/bat-arls-5/igt@i915_selftest@live@workarounds.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/bat-arls-5/igt@i915_selftest@live@workarounds.html
- bat-dg2-9: [DMESG-FAIL][7] ([i915#12061]) -> [PASS][8] +1 other test pass
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/bat-dg2-9/igt@i915_selftest@live@workarounds.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/bat-dg2-9/igt@i915_selftest@live@workarounds.html
- bat-dg2-14: [DMESG-FAIL][9] ([i915#12061]) -> [PASS][10] +1 other test pass
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/bat-dg2-14/igt@i915_selftest@live@workarounds.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/bat-dg2-14/igt@i915_selftest@live@workarounds.html
- bat-mtlp-9: [DMESG-FAIL][11] ([i915#12061]) -> [PASS][12] +1 other test pass
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/bat-mtlp-9/igt@i915_selftest@live@workarounds.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/bat-mtlp-9/igt@i915_selftest@live@workarounds.html
[i915#12061]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12061
[i915#13494]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13494
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_8339 -> IGTPW_13046
CI-20190529: 20190529
CI_DRM_16465: 6e67a7af43567bb9f23fe156fde7efa3d214fd20 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_13046: 13046
IGT_8339: 8339
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/index.html
[-- Attachment #2: Type: text/html, Size: 4105 bytes --]
^ permalink raw reply [flat|nested] 16+ messages in thread
* ✓ i915.CI.Full: success for tests/xe: Add system_allocator test (rev5)
2025-04-25 18:20 [PATCH] tests/xe: Add system_allocator test Matthew Brost
2025-04-25 21:03 ` ✓ Xe.CI.BAT: success for tests/xe: Add system_allocator test (rev5) Patchwork
2025-04-25 21:21 ` ✓ i915.CI.BAT: " Patchwork
@ 2025-04-26 6:28 ` Patchwork
2025-04-26 10:21 ` ✗ Xe.CI.Full: failure " Patchwork
3 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2025-04-26 6:28 UTC (permalink / raw)
To: Matthew Brost; +Cc: igt-dev
[-- Attachment #1: Type: text/plain, Size: 94104 bytes --]
== Series Details ==
Series: tests/xe: Add system_allocator test (rev5)
URL : https://patchwork.freedesktop.org/series/137545/
State : success
== Summary ==
CI Bug Log - changes from IGT_8339_full -> IGTPW_13046_full
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/index.html
Participating hosts (10 -> 11)
------------------------------
Additional (1): shard-dg2-set2
Known issues
------------
Here are the changes found in IGTPW_13046_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@api_intel_bb@blit-reloc-keep-cache:
- shard-dg2: NOTRUN -> [SKIP][1] ([i915#8411])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-3/igt@api_intel_bb@blit-reloc-keep-cache.html
* igt@api_intel_bb@blit-reloc-purge-cache:
- shard-rkl: NOTRUN -> [SKIP][2] ([i915#8411])
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@api_intel_bb@blit-reloc-purge-cache.html
* igt@gem_close_race@multigpu-basic-threads:
- shard-dg2: NOTRUN -> [SKIP][3] ([i915#7697])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-7/igt@gem_close_race@multigpu-basic-threads.html
- shard-mtlp: NOTRUN -> [SKIP][4] ([i915#7697])
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-1/igt@gem_close_race@multigpu-basic-threads.html
* igt@gem_compute@compute-square:
- shard-dg2: NOTRUN -> [FAIL][5] ([i915#13665])
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@gem_compute@compute-square.html
* igt@gem_create@create-ext-set-pat:
- shard-rkl: NOTRUN -> [SKIP][6] ([i915#8562])
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@gem_create@create-ext-set-pat.html
* igt@gem_ctx_persistence@heartbeat-hostile:
- shard-dg2: NOTRUN -> [SKIP][7] ([i915#8555]) +1 other test skip
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-1/igt@gem_ctx_persistence@heartbeat-hostile.html
* igt@gem_ctx_persistence@legacy-engines-cleanup:
- shard-snb: NOTRUN -> [SKIP][8] ([i915#1099])
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-snb6/igt@gem_ctx_persistence@legacy-engines-cleanup.html
* igt@gem_ctx_persistence@saturated-hostile-nopreempt:
- shard-dg2: NOTRUN -> [SKIP][9] ([i915#5882]) +7 other tests skip
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-10/igt@gem_ctx_persistence@saturated-hostile-nopreempt.html
* igt@gem_ctx_sseu@invalid-args:
- shard-dg2: NOTRUN -> [SKIP][10] ([i915#280]) +1 other test skip
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-5/igt@gem_ctx_sseu@invalid-args.html
- shard-mtlp: NOTRUN -> [SKIP][11] ([i915#280])
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-7/igt@gem_ctx_sseu@invalid-args.html
* igt@gem_ctx_sseu@mmap-args:
- shard-dg1: NOTRUN -> [SKIP][12] ([i915#280]) +1 other test skip
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-14/igt@gem_ctx_sseu@mmap-args.html
* igt@gem_eio@banned:
- shard-mtlp: [PASS][13] -> [ABORT][14] ([i915#13193] / [i915#13723]) +1 other test abort
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-mtlp-6/igt@gem_eio@banned.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-7/igt@gem_eio@banned.html
* igt@gem_eio@unwedge-stress:
- shard-dg2-9: NOTRUN -> [FAIL][15] ([i915#12714] / [i915#5784])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_eio@unwedge-stress.html
- shard-dg1: [PASS][16] -> [FAIL][17] ([i915#12714] / [i915#5784])
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-19/igt@gem_eio@unwedge-stress.html
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-17/igt@gem_eio@unwedge-stress.html
* igt@gem_exec_balancer@bonded-false-hang:
- shard-dg2: NOTRUN -> [SKIP][18] ([i915#4812]) +2 other tests skip
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-7/igt@gem_exec_balancer@bonded-false-hang.html
* igt@gem_exec_balancer@bonded-pair:
- shard-dg1: NOTRUN -> [SKIP][19] ([i915#4771])
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-14/igt@gem_exec_balancer@bonded-pair.html
* igt@gem_exec_balancer@bonded-true-hang:
- shard-dg1: NOTRUN -> [SKIP][20] ([i915#4812])
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-18/igt@gem_exec_balancer@bonded-true-hang.html
- shard-mtlp: NOTRUN -> [SKIP][21] ([i915#4812])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-8/igt@gem_exec_balancer@bonded-true-hang.html
* igt@gem_exec_balancer@parallel-balancer:
- shard-rkl: NOTRUN -> [SKIP][22] ([i915#4525])
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@gem_exec_balancer@parallel-balancer.html
* igt@gem_exec_capture@capture-invisible:
- shard-dg2-9: NOTRUN -> [SKIP][23] ([i915#6334]) +2 other tests skip
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_exec_capture@capture-invisible.html
* igt@gem_exec_capture@capture@vecs0-lmem0:
- shard-dg1: NOTRUN -> [FAIL][24] ([i915#11965]) +2 other tests fail
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-16/igt@gem_exec_capture@capture@vecs0-lmem0.html
* igt@gem_exec_flush@basic-batch-kernel-default-cmd:
- shard-mtlp: NOTRUN -> [SKIP][25] ([i915#3711])
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-3/igt@gem_exec_flush@basic-batch-kernel-default-cmd.html
* igt@gem_exec_flush@basic-uc-prw-default:
- shard-dg2-9: NOTRUN -> [SKIP][26] ([i915#3539])
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_exec_flush@basic-uc-prw-default.html
* igt@gem_exec_flush@basic-wb-ro-default:
- shard-dg2: NOTRUN -> [SKIP][27] ([i915#3539] / [i915#4852]) +1 other test skip
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-8/igt@gem_exec_flush@basic-wb-ro-default.html
* igt@gem_exec_flush@basic-wb-rw-default:
- shard-dg1: NOTRUN -> [SKIP][28] ([i915#3539] / [i915#4852])
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-17/igt@gem_exec_flush@basic-wb-rw-default.html
* igt@gem_exec_flush@basic-wb-set-default:
- shard-dg2-9: NOTRUN -> [SKIP][29] ([i915#3539] / [i915#4852])
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_exec_flush@basic-wb-set-default.html
* igt@gem_exec_reloc@basic-active:
- shard-rkl: NOTRUN -> [SKIP][30] ([i915#3281]) +5 other tests skip
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@gem_exec_reloc@basic-active.html
* igt@gem_exec_reloc@basic-concurrent16:
- shard-dg2-9: NOTRUN -> [SKIP][31] ([i915#3281]) +1 other test skip
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_exec_reloc@basic-concurrent16.html
* igt@gem_exec_reloc@basic-wc-gtt:
- shard-mtlp: NOTRUN -> [SKIP][32] ([i915#3281]) +5 other tests skip
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@gem_exec_reloc@basic-wc-gtt.html
* igt@gem_exec_reloc@basic-write-cpu-active:
- shard-dg1: NOTRUN -> [SKIP][33] ([i915#3281]) +7 other tests skip
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-19/igt@gem_exec_reloc@basic-write-cpu-active.html
* igt@gem_exec_reloc@basic-write-read-active:
- shard-dg2: NOTRUN -> [SKIP][34] ([i915#3281]) +7 other tests skip
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-5/igt@gem_exec_reloc@basic-write-read-active.html
* igt@gem_exec_schedule@reorder-wide:
- shard-mtlp: NOTRUN -> [SKIP][35] ([i915#4537] / [i915#4812])
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-3/igt@gem_exec_schedule@reorder-wide.html
* igt@gem_exec_suspend@basic-s3:
- shard-rkl: NOTRUN -> [INCOMPLETE][36] ([i915#13304]) +1 other test incomplete
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@gem_exec_suspend@basic-s3.html
* igt@gem_exec_suspend@basic-s4-devices:
- shard-mtlp: [PASS][37] -> [ABORT][38] ([i915#13193] / [i915#13723] / [i915#7975]) +1 other test abort
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-mtlp-3/igt@gem_exec_suspend@basic-s4-devices.html
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-7/igt@gem_exec_suspend@basic-s4-devices.html
* igt@gem_fence_thrash@bo-write-verify-none:
- shard-dg1: NOTRUN -> [SKIP][39] ([i915#4860])
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@gem_fence_thrash@bo-write-verify-none.html
* igt@gem_fence_thrash@bo-write-verify-x:
- shard-dg2-9: NOTRUN -> [SKIP][40] ([i915#4860]) +1 other test skip
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_fence_thrash@bo-write-verify-x.html
* igt@gem_fenced_exec_thrash@no-spare-fences-interruptible:
- shard-dg2: NOTRUN -> [SKIP][41] ([i915#4860])
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-6/igt@gem_fenced_exec_thrash@no-spare-fences-interruptible.html
* igt@gem_lmem_evict@dontneed-evict-race:
- shard-rkl: NOTRUN -> [SKIP][42] ([i915#4613] / [i915#7582])
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@gem_lmem_evict@dontneed-evict-race.html
* igt@gem_lmem_swapping@heavy-verify-multi:
- shard-mtlp: NOTRUN -> [SKIP][43] ([i915#4613]) +1 other test skip
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-3/igt@gem_lmem_swapping@heavy-verify-multi.html
* igt@gem_lmem_swapping@heavy-verify-multi-ccs:
- shard-glk: NOTRUN -> [SKIP][44] ([i915#4613]) +3 other tests skip
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk2/igt@gem_lmem_swapping@heavy-verify-multi-ccs.html
* igt@gem_lmem_swapping@heavy-verify-random-ccs:
- shard-dg1: NOTRUN -> [SKIP][45] ([i915#12193])
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-14/igt@gem_lmem_swapping@heavy-verify-random-ccs.html
* igt@gem_lmem_swapping@heavy-verify-random-ccs@lmem0:
- shard-dg1: NOTRUN -> [SKIP][46] ([i915#4565])
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-14/igt@gem_lmem_swapping@heavy-verify-random-ccs@lmem0.html
* igt@gem_lmem_swapping@smem-oom@lmem0:
- shard-dg2: [PASS][47] -> [TIMEOUT][48] ([i915#5493]) +1 other test timeout
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-7/igt@gem_lmem_swapping@smem-oom@lmem0.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-7/igt@gem_lmem_swapping@smem-oom@lmem0.html
* igt@gem_lmem_swapping@verify:
- shard-rkl: NOTRUN -> [SKIP][49] ([i915#4613]) +1 other test skip
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@gem_lmem_swapping@verify.html
* igt@gem_mmap@big-bo:
- shard-dg2-9: NOTRUN -> [SKIP][50] ([i915#4083]) +2 other tests skip
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_mmap@big-bo.html
* igt@gem_mmap@pf-nonblock:
- shard-dg1: NOTRUN -> [SKIP][51] ([i915#4083]) +1 other test skip
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@gem_mmap@pf-nonblock.html
* igt@gem_mmap_gtt@basic-read:
- shard-dg2: NOTRUN -> [SKIP][52] ([i915#4077]) +12 other tests skip
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-7/igt@gem_mmap_gtt@basic-read.html
* igt@gem_mmap_gtt@big-copy-odd:
- shard-dg1: NOTRUN -> [SKIP][53] ([i915#4077]) +5 other tests skip
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@gem_mmap_gtt@big-copy-odd.html
* igt@gem_mmap_gtt@big-copy-xy:
- shard-dg2-9: NOTRUN -> [SKIP][54] ([i915#4077]) +3 other tests skip
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_mmap_gtt@big-copy-xy.html
* igt@gem_mmap_gtt@fault-concurrent-y:
- shard-mtlp: NOTRUN -> [SKIP][55] ([i915#4077]) +7 other tests skip
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@gem_mmap_gtt@fault-concurrent-y.html
* igt@gem_mmap_wc@write-wc-read-gtt:
- shard-dg2: NOTRUN -> [SKIP][56] ([i915#4083]) +3 other tests skip
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-3/igt@gem_mmap_wc@write-wc-read-gtt.html
* igt@gem_partial_pwrite_pread@writes-after-reads:
- shard-rkl: NOTRUN -> [SKIP][57] ([i915#3282]) +4 other tests skip
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@gem_partial_pwrite_pread@writes-after-reads.html
* igt@gem_partial_pwrite_pread@writes-after-reads-display:
- shard-dg2: NOTRUN -> [SKIP][58] ([i915#3282]) +1 other test skip
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@gem_partial_pwrite_pread@writes-after-reads-display.html
- shard-mtlp: NOTRUN -> [SKIP][59] ([i915#3282]) +1 other test skip
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-8/igt@gem_partial_pwrite_pread@writes-after-reads-display.html
* igt@gem_pread@exhaustion:
- shard-dg1: NOTRUN -> [SKIP][60] ([i915#3282]) +6 other tests skip
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@gem_pread@exhaustion.html
* igt@gem_pread@snoop:
- shard-dg2-9: NOTRUN -> [SKIP][61] ([i915#3282]) +4 other tests skip
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_pread@snoop.html
* igt@gem_pxp@create-regular-context-1:
- shard-dg2: NOTRUN -> [SKIP][62] ([i915#4270]) +2 other tests skip
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@gem_pxp@create-regular-context-1.html
* igt@gem_pxp@hw-rejects-pxp-buffer:
- shard-rkl: NOTRUN -> [SKIP][63] ([i915#13717])
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@gem_pxp@hw-rejects-pxp-buffer.html
* igt@gem_pxp@protected-encrypted-src-copy-not-readible:
- shard-rkl: NOTRUN -> [TIMEOUT][64] ([i915#12917] / [i915#12964]) +1 other test timeout
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@gem_pxp@protected-encrypted-src-copy-not-readible.html
- shard-dg1: NOTRUN -> [SKIP][65] ([i915#4270]) +2 other tests skip
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@gem_pxp@protected-encrypted-src-copy-not-readible.html
- shard-dg2-9: NOTRUN -> [SKIP][66] ([i915#4270])
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_pxp@protected-encrypted-src-copy-not-readible.html
* igt@gem_render_copy@y-tiled-ccs-to-y-tiled-mc-ccs:
- shard-glk: NOTRUN -> [SKIP][67] +297 other tests skip
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk1/igt@gem_render_copy@y-tiled-ccs-to-y-tiled-mc-ccs.html
- shard-dg2: NOTRUN -> [SKIP][68] ([i915#5190] / [i915#8428]) +3 other tests skip
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@gem_render_copy@y-tiled-ccs-to-y-tiled-mc-ccs.html
* igt@gem_render_copy@yf-tiled-ccs-to-yf-tiled:
- shard-dg2-9: NOTRUN -> [SKIP][69] ([i915#5190] / [i915#8428]) +2 other tests skip
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_render_copy@yf-tiled-ccs-to-yf-tiled.html
* igt@gem_render_copy@yf-tiled-to-vebox-x-tiled:
- shard-mtlp: NOTRUN -> [SKIP][70] ([i915#8428])
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-5/igt@gem_render_copy@yf-tiled-to-vebox-x-tiled.html
* igt@gem_set_tiling_vs_blt@tiled-to-tiled:
- shard-dg2-9: NOTRUN -> [SKIP][71] ([i915#4079])
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_set_tiling_vs_blt@tiled-to-tiled.html
* igt@gem_set_tiling_vs_gtt:
- shard-dg1: NOTRUN -> [SKIP][72] ([i915#4079]) +1 other test skip
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-17/igt@gem_set_tiling_vs_gtt.html
* igt@gem_softpin@evict-snoop:
- shard-mtlp: NOTRUN -> [SKIP][73] ([i915#4885])
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@gem_softpin@evict-snoop.html
* igt@gem_softpin@evict-snoop-interruptible:
- shard-dg1: NOTRUN -> [SKIP][74] ([i915#4885])
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-14/igt@gem_softpin@evict-snoop-interruptible.html
* igt@gem_tiled_pread_basic:
- shard-mtlp: NOTRUN -> [SKIP][75] ([i915#4079])
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-6/igt@gem_tiled_pread_basic.html
* igt@gem_userptr_blits@coherency-unsync:
- shard-rkl: NOTRUN -> [SKIP][76] ([i915#3297]) +1 other test skip
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@gem_userptr_blits@coherency-unsync.html
* igt@gem_userptr_blits@create-destroy-unsync:
- shard-dg2: NOTRUN -> [SKIP][77] ([i915#3297]) +2 other tests skip
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@gem_userptr_blits@create-destroy-unsync.html
* igt@gem_userptr_blits@forbidden-operations:
- shard-dg1: NOTRUN -> [SKIP][78] ([i915#3282] / [i915#3297])
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@gem_userptr_blits@forbidden-operations.html
* igt@gem_userptr_blits@invalid-mmap-offset-unsync:
- shard-mtlp: NOTRUN -> [SKIP][79] ([i915#3297])
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@gem_userptr_blits@invalid-mmap-offset-unsync.html
- shard-dg1: NOTRUN -> [SKIP][80] ([i915#3297])
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-14/igt@gem_userptr_blits@invalid-mmap-offset-unsync.html
* igt@gem_userptr_blits@map-fixed-invalidate-busy:
- shard-dg1: NOTRUN -> [SKIP][81] ([i915#3297] / [i915#4880])
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@gem_userptr_blits@map-fixed-invalidate-busy.html
* igt@gem_userptr_blits@sd-probe:
- shard-dg2-9: NOTRUN -> [SKIP][82] ([i915#3297] / [i915#4958])
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gem_userptr_blits@sd-probe.html
* igt@gen9_exec_parse@batch-without-end:
- shard-mtlp: NOTRUN -> [SKIP][83] ([i915#2856]) +1 other test skip
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-5/igt@gen9_exec_parse@batch-without-end.html
* igt@gen9_exec_parse@batch-zero-length:
- shard-dg2: NOTRUN -> [SKIP][84] ([i915#2856]) +2 other tests skip
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@gen9_exec_parse@batch-zero-length.html
* igt@gen9_exec_parse@bb-oversize:
- shard-rkl: NOTRUN -> [SKIP][85] ([i915#2527]) +4 other tests skip
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@gen9_exec_parse@bb-oversize.html
* igt@gen9_exec_parse@bb-secure:
- shard-dg1: NOTRUN -> [SKIP][86] ([i915#2527]) +3 other tests skip
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@gen9_exec_parse@bb-secure.html
* igt@gen9_exec_parse@cmd-crossing-page:
- shard-dg2-9: NOTRUN -> [SKIP][87] ([i915#2856]) +1 other test skip
[87]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@gen9_exec_parse@cmd-crossing-page.html
* igt@i915_drm_fdinfo@all-busy-check-all:
- shard-dg2: NOTRUN -> [SKIP][88] ([i915#14123])
[88]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@i915_drm_fdinfo@all-busy-check-all.html
* igt@i915_drm_fdinfo@busy@rcs0:
- shard-dg1: NOTRUN -> [SKIP][89] ([i915#14073]) +5 other tests skip
[89]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@i915_drm_fdinfo@busy@rcs0.html
* igt@i915_drm_fdinfo@virtual-busy:
- shard-mtlp: NOTRUN -> [SKIP][90] ([i915#14118])
[90]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-1/igt@i915_drm_fdinfo@virtual-busy.html
* igt@i915_drm_fdinfo@virtual-busy-idle:
- shard-dg1: NOTRUN -> [SKIP][91] ([i915#14118])
[91]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-18/igt@i915_drm_fdinfo@virtual-busy-idle.html
* igt@i915_fb_tiling@basic-x-tiling:
- shard-dg1: NOTRUN -> [SKIP][92] ([i915#13786])
[92]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@i915_fb_tiling@basic-x-tiling.html
* igt@i915_module_load@reload:
- shard-snb: [PASS][93] -> [ABORT][94] ([i915#11703])
[93]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-snb1/igt@i915_module_load@reload.html
[94]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-snb1/igt@i915_module_load@reload.html
* igt@i915_module_load@reload-with-fault-injection:
- shard-rkl: NOTRUN -> [DMESG-WARN][95] ([i915#12964])
[95]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@i915_module_load@reload-with-fault-injection.html
* igt@i915_pm_freq_api@freq-suspend:
- shard-rkl: NOTRUN -> [SKIP][96] ([i915#8399])
[96]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@i915_pm_freq_api@freq-suspend.html
* igt@i915_pm_freq_api@freq-suspend@gt0:
- shard-dg2: [PASS][97] -> [INCOMPLETE][98] ([i915#12455] / [i915#13820]) +1 other test incomplete
[97]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-4/igt@i915_pm_freq_api@freq-suspend@gt0.html
[98]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-1/igt@i915_pm_freq_api@freq-suspend@gt0.html
* igt@i915_pm_freq_mult@media-freq@gt0:
- shard-rkl: NOTRUN -> [SKIP][99] ([i915#6590]) +1 other test skip
[99]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@i915_pm_freq_mult@media-freq@gt0.html
* igt@i915_pm_rc6_residency@rc6-idle:
- shard-dg1: [PASS][100] -> [FAIL][101] ([i915#3591]) +1 other test fail
[100]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-14/igt@i915_pm_rc6_residency@rc6-idle.html
[101]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-16/igt@i915_pm_rc6_residency@rc6-idle.html
* igt@i915_pm_rpm@system-suspend:
- shard-glk: NOTRUN -> [INCOMPLETE][102] ([i915#12797])
[102]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk2/igt@i915_pm_rpm@system-suspend.html
* igt@i915_pm_rps@thresholds-idle:
- shard-dg2-9: NOTRUN -> [SKIP][103] ([i915#11681])
[103]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@i915_pm_rps@thresholds-idle.html
* igt@i915_pm_rps@thresholds-idle-park:
- shard-mtlp: NOTRUN -> [SKIP][104] ([i915#11681])
[104]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-8/igt@i915_pm_rps@thresholds-idle-park.html
* igt@i915_selftest@live@workarounds:
- shard-dg2-9: NOTRUN -> [DMESG-FAIL][105] ([i915#12061]) +1 other test dmesg-fail
[105]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@i915_selftest@live@workarounds.html
* igt@i915_selftest@mock:
- shard-glk: NOTRUN -> [DMESG-WARN][106] ([i915#9311]) +1 other test dmesg-warn
[106]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk1/igt@i915_selftest@mock.html
* igt@kms_addfb_basic@invalid-smem-bo-on-discrete:
- shard-rkl: NOTRUN -> [SKIP][107] ([i915#12454] / [i915#12712])
[107]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_addfb_basic@invalid-smem-bo-on-discrete.html
* igt@kms_async_flips@async-flip-with-page-flip-events@pipe-a-hdmi-a-3-y-rc-ccs-cc:
- shard-dg1: NOTRUN -> [SKIP][108] ([i915#8709]) +3 other tests skip
[108]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@kms_async_flips@async-flip-with-page-flip-events@pipe-a-hdmi-a-3-y-rc-ccs-cc.html
* igt@kms_async_flips@async-flip-with-page-flip-events@pipe-c-hdmi-a-1-4-mc-ccs:
- shard-dg2: NOTRUN -> [SKIP][109] ([i915#8709]) +15 other tests skip
[109]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@kms_async_flips@async-flip-with-page-flip-events@pipe-c-hdmi-a-1-4-mc-ccs.html
* igt@kms_async_flips@invalid-async-flip:
- shard-dg2: NOTRUN -> [SKIP][110] ([i915#12967] / [i915#6228])
[110]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-5/igt@kms_async_flips@invalid-async-flip.html
* igt@kms_atomic_transition@plane-all-modeset-transition-fencing:
- shard-dg2: NOTRUN -> [FAIL][111] ([i915#5956]) +1 other test fail
[111]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-6/igt@kms_atomic_transition@plane-all-modeset-transition-fencing.html
- shard-mtlp: NOTRUN -> [SKIP][112] ([i915#1769] / [i915#3555])
[112]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@kms_atomic_transition@plane-all-modeset-transition-fencing.html
* igt@kms_big_fb@4-tiled-16bpp-rotate-90:
- shard-mtlp: NOTRUN -> [SKIP][113] +17 other tests skip
[113]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-8/igt@kms_big_fb@4-tiled-16bpp-rotate-90.html
- shard-rkl: NOTRUN -> [SKIP][114] ([i915#5286]) +3 other tests skip
[114]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_big_fb@4-tiled-16bpp-rotate-90.html
* igt@kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-hflip:
- shard-dg1: NOTRUN -> [SKIP][115] ([i915#4538] / [i915#5286]) +3 other tests skip
[115]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-18/igt@kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-hflip.html
* igt@kms_big_fb@linear-64bpp-rotate-90:
- shard-dg1: NOTRUN -> [SKIP][116] ([i915#3638]) +1 other test skip
[116]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-16/igt@kms_big_fb@linear-64bpp-rotate-90.html
* igt@kms_big_fb@x-tiled-64bpp-rotate-270:
- shard-rkl: NOTRUN -> [SKIP][117] ([i915#3638]) +2 other tests skip
[117]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_big_fb@x-tiled-64bpp-rotate-270.html
* igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-async-flip:
- shard-dg2: NOTRUN -> [SKIP][118] ([i915#4538] / [i915#5190]) +11 other tests skip
[118]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-5/igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-async-flip.html
* igt@kms_big_fb@yf-tiled-16bpp-rotate-90:
- shard-dg1: NOTRUN -> [SKIP][119] ([i915#4538]) +3 other tests skip
[119]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-18/igt@kms_big_fb@yf-tiled-16bpp-rotate-90.html
* igt@kms_big_fb@yf-tiled-32bpp-rotate-90:
- shard-dg2-9: NOTRUN -> [SKIP][120] ([i915#4538] / [i915#5190]) +4 other tests skip
[120]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_big_fb@yf-tiled-32bpp-rotate-90.html
* igt@kms_big_fb@yf-tiled-addfb-size-offset-overflow:
- shard-dg2: NOTRUN -> [SKIP][121] ([i915#5190]) +1 other test skip
[121]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-6/igt@kms_big_fb@yf-tiled-addfb-size-offset-overflow.html
* igt@kms_ccs@bad-aux-stride-y-tiled-gen12-rc-ccs-cc@pipe-d-hdmi-a-1:
- shard-dg2: NOTRUN -> [SKIP][122] ([i915#10307] / [i915#10434] / [i915#6095]) +3 other tests skip
[122]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@kms_ccs@bad-aux-stride-y-tiled-gen12-rc-ccs-cc@pipe-d-hdmi-a-1.html
* igt@kms_ccs@bad-rotation-90-4-tiled-dg2-rc-ccs-cc@pipe-a-hdmi-a-2:
- shard-rkl: NOTRUN -> [SKIP][123] ([i915#6095]) +10 other tests skip
[123]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_ccs@bad-rotation-90-4-tiled-dg2-rc-ccs-cc@pipe-a-hdmi-a-2.html
* igt@kms_ccs@ccs-on-another-bo-y-tiled-gen12-mc-ccs@pipe-b-edp-1:
- shard-mtlp: NOTRUN -> [SKIP][124] ([i915#6095]) +39 other tests skip
[124]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@kms_ccs@ccs-on-another-bo-y-tiled-gen12-mc-ccs@pipe-b-edp-1.html
* igt@kms_ccs@crc-primary-basic-4-tiled-bmg-ccs:
- shard-dg2-9: NOTRUN -> [SKIP][125] ([i915#12313])
[125]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_ccs@crc-primary-basic-4-tiled-bmg-ccs.html
* igt@kms_ccs@crc-primary-basic-y-tiled-gen12-rc-ccs@pipe-a-dp-3:
- shard-dg2: NOTRUN -> [SKIP][126] ([i915#10307] / [i915#6095]) +156 other tests skip
[126]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@kms_ccs@crc-primary-basic-y-tiled-gen12-rc-ccs@pipe-a-dp-3.html
* igt@kms_ccs@crc-primary-basic-yf-tiled-ccs@pipe-d-hdmi-a-2:
- shard-dg2-9: NOTRUN -> [SKIP][127] ([i915#10307] / [i915#6095]) +19 other tests skip
[127]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_ccs@crc-primary-basic-yf-tiled-ccs@pipe-d-hdmi-a-2.html
* igt@kms_ccs@crc-primary-rotation-180-4-tiled-lnl-ccs:
- shard-dg2: NOTRUN -> [SKIP][128] ([i915#12313]) +1 other test skip
[128]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@kms_ccs@crc-primary-rotation-180-4-tiled-lnl-ccs.html
* igt@kms_ccs@crc-primary-suspend-4-tiled-lnl-ccs:
- shard-dg1: NOTRUN -> [SKIP][129] ([i915#12805])
[129]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@kms_ccs@crc-primary-suspend-4-tiled-lnl-ccs.html
* igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-rc-ccs-cc@pipe-b-hdmi-a-3:
- shard-dg2: NOTRUN -> [SKIP][130] ([i915#6095]) +17 other tests skip
[130]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-6/igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-rc-ccs-cc@pipe-b-hdmi-a-3.html
* igt@kms_ccs@crc-primary-suspend-yf-tiled-ccs:
- shard-rkl: NOTRUN -> [SKIP][131] ([i915#14098] / [i915#6095]) +17 other tests skip
[131]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_ccs@crc-primary-suspend-yf-tiled-ccs.html
* igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs:
- shard-dg1: NOTRUN -> [SKIP][132] ([i915#12313])
[132]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
- shard-mtlp: NOTRUN -> [SKIP][133] ([i915#12313])
[133]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
* igt@kms_ccs@random-ccs-data-4-tiled-mtl-mc-ccs@pipe-d-hdmi-a-3:
- shard-dg1: NOTRUN -> [SKIP][134] ([i915#6095]) +141 other tests skip
[134]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_ccs@random-ccs-data-4-tiled-mtl-mc-ccs@pipe-d-hdmi-a-3.html
* igt@kms_cdclk@mode-transition-all-outputs:
- shard-dg2-9: NOTRUN -> [SKIP][135] ([i915#13784])
[135]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_cdclk@mode-transition-all-outputs.html
* igt@kms_cdclk@plane-scaling@pipe-b-hdmi-a-3:
- shard-dg2: NOTRUN -> [SKIP][136] ([i915#13783]) +3 other tests skip
[136]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-3/igt@kms_cdclk@plane-scaling@pipe-b-hdmi-a-3.html
* igt@kms_chamelium_color@ctm-blue-to-red:
- shard-dg2-9: NOTRUN -> [SKIP][137] +2 other tests skip
[137]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_chamelium_color@ctm-blue-to-red.html
* igt@kms_chamelium_color@degamma:
- shard-dg2: NOTRUN -> [SKIP][138] +15 other tests skip
[138]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-10/igt@kms_chamelium_color@degamma.html
* igt@kms_chamelium_edid@dp-edid-stress-resolution-non-4k:
- shard-dg2: NOTRUN -> [SKIP][139] ([i915#11151] / [i915#7828]) +10 other tests skip
[139]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@kms_chamelium_edid@dp-edid-stress-resolution-non-4k.html
* igt@kms_chamelium_frames@dp-crc-multiple:
- shard-dg2-9: NOTRUN -> [SKIP][140] ([i915#11151] / [i915#7828]) +2 other tests skip
[140]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_chamelium_frames@dp-crc-multiple.html
* igt@kms_chamelium_hpd@hdmi-hpd-fast:
- shard-rkl: NOTRUN -> [SKIP][141] ([i915#11151] / [i915#7828]) +5 other tests skip
[141]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_chamelium_hpd@hdmi-hpd-fast.html
* igt@kms_chamelium_hpd@hdmi-hpd-storm-disable:
- shard-dg1: NOTRUN -> [SKIP][142] ([i915#11151] / [i915#7828]) +7 other tests skip
[142]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@kms_chamelium_hpd@hdmi-hpd-storm-disable.html
* igt@kms_chamelium_hpd@vga-hpd-with-enabled-mode:
- shard-mtlp: NOTRUN -> [SKIP][143] ([i915#11151] / [i915#7828]) +4 other tests skip
[143]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-1/igt@kms_chamelium_hpd@vga-hpd-with-enabled-mode.html
* igt@kms_content_protection@content-type-change:
- shard-dg1: NOTRUN -> [SKIP][144] ([i915#9424])
[144]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-19/igt@kms_content_protection@content-type-change.html
* igt@kms_content_protection@legacy:
- shard-rkl: NOTRUN -> [SKIP][145] ([i915#7118] / [i915#9424])
[145]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_content_protection@legacy.html
* igt@kms_content_protection@lic-type-0@pipe-a-dp-3:
- shard-dg2: NOTRUN -> [FAIL][146] ([i915#7173])
[146]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@kms_content_protection@lic-type-0@pipe-a-dp-3.html
* igt@kms_content_protection@srm:
- shard-dg2: NOTRUN -> [SKIP][147] ([i915#7118])
[147]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-6/igt@kms_content_protection@srm.html
* igt@kms_cursor_crc@cursor-offscreen-128x42:
- shard-mtlp: NOTRUN -> [SKIP][148] ([i915#8814]) +2 other tests skip
[148]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-3/igt@kms_cursor_crc@cursor-offscreen-128x42.html
* igt@kms_cursor_crc@cursor-offscreen-32x10:
- shard-mtlp: NOTRUN -> [SKIP][149] ([i915#3555] / [i915#8814]) +1 other test skip
[149]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-7/igt@kms_cursor_crc@cursor-offscreen-32x10.html
* igt@kms_cursor_crc@cursor-onscreen-512x512:
- shard-rkl: NOTRUN -> [SKIP][150] ([i915#13049]) +1 other test skip
[150]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_cursor_crc@cursor-onscreen-512x512.html
* igt@kms_cursor_crc@cursor-rapid-movement-512x170:
- shard-mtlp: NOTRUN -> [SKIP][151] ([i915#13049])
[151]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@kms_cursor_crc@cursor-rapid-movement-512x170.html
- shard-dg2: NOTRUN -> [SKIP][152] ([i915#13049]) +1 other test skip
[152]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-3/igt@kms_cursor_crc@cursor-rapid-movement-512x170.html
- shard-dg1: NOTRUN -> [SKIP][153] ([i915#13049])
[153]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_cursor_crc@cursor-rapid-movement-512x170.html
* igt@kms_cursor_crc@cursor-rapid-movement-max-size:
- shard-dg2: NOTRUN -> [SKIP][154] ([i915#3555]) +9 other tests skip
[154]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-8/igt@kms_cursor_crc@cursor-rapid-movement-max-size.html
* igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
- shard-glk: NOTRUN -> [FAIL][155] ([i915#13028])
[155]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk1/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
* igt@kms_cursor_legacy@2x-long-flip-vs-cursor-legacy:
- shard-rkl: NOTRUN -> [SKIP][156] +14 other tests skip
[156]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-legacy.html
* igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
- shard-dg2-9: NOTRUN -> [SKIP][157] ([i915#4103] / [i915#4213]) +1 other test skip
[157]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html
* igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy:
- shard-rkl: NOTRUN -> [SKIP][158] ([i915#4103])
[158]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html
* igt@kms_cursor_legacy@cursora-vs-flipb-toggle:
- shard-dg2: NOTRUN -> [SKIP][159] ([i915#13046] / [i915#5354])
[159]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-1/igt@kms_cursor_legacy@cursora-vs-flipb-toggle.html
* igt@kms_cursor_legacy@cursorb-vs-flipb-varying-size:
- shard-dg2-9: NOTRUN -> [SKIP][160] ([i915#13046] / [i915#5354]) +2 other tests skip
[160]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_cursor_legacy@cursorb-vs-flipb-varying-size.html
* igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions:
- shard-dg1: NOTRUN -> [SKIP][161] ([i915#4103] / [i915#4213])
[161]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions.html
* igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions-varying-size:
- shard-mtlp: NOTRUN -> [SKIP][162] ([i915#4213])
[162]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-8/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions-varying-size.html
* igt@kms_cursor_legacy@short-busy-flip-before-cursor-toggle:
- shard-dg2: NOTRUN -> [SKIP][163] ([i915#4103] / [i915#4213])
[163]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@kms_cursor_legacy@short-busy-flip-before-cursor-toggle.html
* igt@kms_dither@fb-8bpc-vs-panel-6bpc:
- shard-dg2: [PASS][164] -> [SKIP][165] ([i915#3555])
[164]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-10/igt@kms_dither@fb-8bpc-vs-panel-6bpc.html
[165]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-3/igt@kms_dither@fb-8bpc-vs-panel-6bpc.html
* igt@kms_dither@fb-8bpc-vs-panel-8bpc:
- shard-dg2-9: NOTRUN -> [SKIP][166] ([i915#3555]) +2 other tests skip
[166]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_dither@fb-8bpc-vs-panel-8bpc.html
* igt@kms_dp_link_training@non-uhbr-mst:
- shard-dg1: NOTRUN -> [SKIP][167] ([i915#13749])
[167]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-18/igt@kms_dp_link_training@non-uhbr-mst.html
* igt@kms_dp_link_training@non-uhbr-sst:
- shard-rkl: NOTRUN -> [SKIP][168] ([i915#13749])
[168]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_dp_link_training@non-uhbr-sst.html
* igt@kms_dp_link_training@uhbr-mst:
- shard-mtlp: NOTRUN -> [SKIP][169] ([i915#13749])
[169]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-5/igt@kms_dp_link_training@uhbr-mst.html
- shard-rkl: NOTRUN -> [SKIP][170] ([i915#13748])
[170]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_dp_link_training@uhbr-mst.html
* igt@kms_dp_linktrain_fallback@dsc-fallback:
- shard-dg2-9: NOTRUN -> [SKIP][171] ([i915#13707])
[171]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_dp_linktrain_fallback@dsc-fallback.html
* igt@kms_draw_crc@draw-method-mmap-gtt:
- shard-dg1: NOTRUN -> [SKIP][172] ([i915#8812]) +1 other test skip
[172]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_draw_crc@draw-method-mmap-gtt.html
* igt@kms_draw_crc@draw-method-mmap-wc:
- shard-dg2: NOTRUN -> [SKIP][173] ([i915#8812])
[173]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-7/igt@kms_draw_crc@draw-method-mmap-wc.html
* igt@kms_dsc@dsc-basic:
- shard-dg2: NOTRUN -> [SKIP][174] ([i915#3555] / [i915#3840]) +1 other test skip
[174]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-10/igt@kms_dsc@dsc-basic.html
* igt@kms_dsc@dsc-with-output-formats-with-bpc:
- shard-mtlp: NOTRUN -> [SKIP][175] ([i915#3555] / [i915#3840] / [i915#9053])
[175]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-1/igt@kms_dsc@dsc-with-output-formats-with-bpc.html
* igt@kms_fbc_dirty_rect@fbc-dirty-rectangle-different-formats:
- shard-dg2-9: NOTRUN -> [SKIP][176] ([i915#13798])
[176]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_fbc_dirty_rect@fbc-dirty-rectangle-different-formats.html
* igt@kms_feature_discovery@display-2x:
- shard-dg2-9: NOTRUN -> [SKIP][177] ([i915#1839])
[177]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_feature_discovery@display-2x.html
* igt@kms_feature_discovery@psr2:
- shard-dg2: NOTRUN -> [SKIP][178] ([i915#658])
[178]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-3/igt@kms_feature_discovery@psr2.html
* igt@kms_fence_pin_leak:
- shard-dg2-9: NOTRUN -> [SKIP][179] ([i915#4881])
[179]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_fence_pin_leak.html
* igt@kms_flip@2x-flip-vs-dpms:
- shard-rkl: NOTRUN -> [SKIP][180] ([i915#9934]) +4 other tests skip
[180]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_flip@2x-flip-vs-dpms.html
* igt@kms_flip@2x-flip-vs-dpms-off-vs-modeset-interruptible:
- shard-dg1: NOTRUN -> [SKIP][181] ([i915#9934]) +4 other tests skip
[181]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-16/igt@kms_flip@2x-flip-vs-dpms-off-vs-modeset-interruptible.html
* igt@kms_flip@2x-flip-vs-panning:
- shard-dg2-9: NOTRUN -> [SKIP][182] ([i915#9934]) +2 other tests skip
[182]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_flip@2x-flip-vs-panning.html
* igt@kms_flip@2x-plain-flip-ts-check-interruptible:
- shard-mtlp: NOTRUN -> [SKIP][183] ([i915#3637] / [i915#9934]) +3 other tests skip
[183]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-3/igt@kms_flip@2x-plain-flip-ts-check-interruptible.html
* igt@kms_flip@2x-single-buffer-flip-vs-dpms-off-vs-modeset:
- shard-dg2: NOTRUN -> [SKIP][184] ([i915#9934]) +8 other tests skip
[184]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@kms_flip@2x-single-buffer-flip-vs-dpms-off-vs-modeset.html
* igt@kms_flip@flip-vs-suspend-interruptible@c-hdmi-a4:
- shard-dg1: NOTRUN -> [DMESG-WARN][185] ([i915#4423])
[185]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-19/igt@kms_flip@flip-vs-suspend-interruptible@c-hdmi-a4.html
* igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-downscaling:
- shard-rkl: NOTRUN -> [SKIP][186] ([i915#2672] / [i915#3555]) +1 other test skip
[186]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-downscaling.html
* igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-upscaling@pipe-a-valid-mode:
- shard-rkl: NOTRUN -> [SKIP][187] ([i915#2672]) +1 other test skip
[187]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-upscaling@pipe-a-valid-mode.html
* igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-upscaling:
- shard-dg2: NOTRUN -> [SKIP][188] ([i915#2672] / [i915#3555])
[188]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-5/igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-upscaling.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling@pipe-a-valid-mode:
- shard-dg2-9: NOTRUN -> [SKIP][189] ([i915#2672]) +1 other test skip
[189]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling@pipe-a-valid-mode.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling:
- shard-dg1: NOTRUN -> [SKIP][190] ([i915#2587] / [i915#2672] / [i915#3555]) +1 other test skip
[190]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-16/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling.html
- shard-mtlp: NOTRUN -> [SKIP][191] ([i915#2672] / [i915#3555] / [i915#8813]) +3 other tests skip
[191]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling@pipe-a-default-mode:
- shard-mtlp: NOTRUN -> [SKIP][192] ([i915#2672] / [i915#8813]) +1 other test skip
[192]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling@pipe-a-default-mode.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling@pipe-a-valid-mode:
- shard-dg1: NOTRUN -> [SKIP][193] ([i915#2587] / [i915#2672]) +3 other tests skip
[193]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-16/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling@pipe-a-valid-mode.html
* igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling:
- shard-dg1: NOTRUN -> [SKIP][194] ([i915#2672] / [i915#3555]) +1 other test skip
[194]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-downscaling:
- shard-snb: NOTRUN -> [SKIP][195] +79 other tests skip
[195]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-snb4/igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-downscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling:
- shard-dg2-9: NOTRUN -> [SKIP][196] ([i915#2672] / [i915#3555] / [i915#5190]) +1 other test skip
[196]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling:
- shard-dg2: NOTRUN -> [SKIP][197] ([i915#2672] / [i915#3555] / [i915#5190]) +1 other test skip
[197]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling@pipe-a-valid-mode:
- shard-dg2: NOTRUN -> [SKIP][198] ([i915#2672]) +2 other tests skip
[198]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling@pipe-a-valid-mode.html
* igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-mmap-wc:
- shard-dg2: NOTRUN -> [SKIP][199] ([i915#8708]) +12 other tests skip
[199]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-6/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-gtt:
- shard-mtlp: NOTRUN -> [SKIP][200] ([i915#8708]) +7 other tests skip
[200]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-6/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-gtt.html
* igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-cpu:
- shard-dg1: NOTRUN -> [SKIP][201] +22 other tests skip
[201]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-17/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-cpu.html
* igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-cpu:
- shard-dg2-9: NOTRUN -> [SKIP][202] ([i915#3458]) +8 other tests skip
[202]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-cpu.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-wc:
- shard-dg2-9: NOTRUN -> [SKIP][203] ([i915#8708]) +7 other tests skip
[203]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-shrfb-plflip-blt:
- shard-dg2: NOTRUN -> [SKIP][204] ([i915#5354]) +29 other tests skip
[204]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-shrfb-plflip-blt.html
* igt@kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-wc:
- shard-rkl: NOTRUN -> [SKIP][205] ([i915#3023]) +17 other tests skip
[205]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-move:
- shard-dg2: NOTRUN -> [SKIP][206] ([i915#3458]) +17 other tests skip
[206]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-10/igt@kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-move.html
* igt@kms_frontbuffer_tracking@psr-2p-pri-indfb-multidraw:
- shard-dg2-9: NOTRUN -> [SKIP][207] ([i915#5354]) +9 other tests skip
[207]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_frontbuffer_tracking@psr-2p-pri-indfb-multidraw.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-msflip-blt:
- shard-mtlp: NOTRUN -> [SKIP][208] ([i915#1825]) +18 other tests skip
[208]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-msflip-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-mmap-wc:
- shard-rkl: NOTRUN -> [SKIP][209] ([i915#1825]) +22 other tests skip
[209]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-wc:
- shard-dg1: NOTRUN -> [SKIP][210] ([i915#8708]) +17 other tests skip
[210]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-cpu:
- shard-dg1: NOTRUN -> [SKIP][211] ([i915#3458]) +8 other tests skip
[211]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-cpu.html
* igt@kms_hdr@invalid-metadata-sizes:
- shard-dg2: [PASS][212] -> [SKIP][213] ([i915#3555] / [i915#8228])
[212]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-10/igt@kms_hdr@invalid-metadata-sizes.html
[213]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-8/igt@kms_hdr@invalid-metadata-sizes.html
* igt@kms_hdr@static-toggle-dpms:
- shard-mtlp: NOTRUN -> [SKIP][214] ([i915#3555] / [i915#8228])
[214]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-8/igt@kms_hdr@static-toggle-dpms.html
- shard-dg2-9: NOTRUN -> [SKIP][215] ([i915#3555] / [i915#8228]) +1 other test skip
[215]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_hdr@static-toggle-dpms.html
- shard-rkl: NOTRUN -> [SKIP][216] ([i915#3555] / [i915#8228])
[216]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_hdr@static-toggle-dpms.html
- shard-dg1: NOTRUN -> [SKIP][217] ([i915#3555] / [i915#8228])
[217]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@kms_hdr@static-toggle-dpms.html
* igt@kms_invalid_mode@clock-too-high:
- shard-mtlp: NOTRUN -> [SKIP][218] ([i915#3555] / [i915#6403] / [i915#8826])
[218]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@kms_invalid_mode@clock-too-high.html
* igt@kms_invalid_mode@clock-too-high@pipe-a-edp-1:
- shard-mtlp: NOTRUN -> [SKIP][219] ([i915#9457]) +2 other tests skip
[219]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@kms_invalid_mode@clock-too-high@pipe-a-edp-1.html
* igt@kms_invalid_mode@clock-too-high@pipe-d-edp-1:
- shard-mtlp: NOTRUN -> [SKIP][220] ([i915#8826] / [i915#9457])
[220]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@kms_invalid_mode@clock-too-high@pipe-d-edp-1.html
* igt@kms_joiner@basic-big-joiner:
- shard-dg1: NOTRUN -> [SKIP][221] ([i915#10656])
[221]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@kms_joiner@basic-big-joiner.html
* igt@kms_joiner@basic-max-non-joiner:
- shard-dg2-9: NOTRUN -> [SKIP][222] ([i915#13688])
[222]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_joiner@basic-max-non-joiner.html
* igt@kms_joiner@basic-ultra-joiner:
- shard-dg1: NOTRUN -> [SKIP][223] ([i915#12339])
[223]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@kms_joiner@basic-ultra-joiner.html
* igt@kms_plane_alpha_blend@alpha-opaque-fb:
- shard-glk: NOTRUN -> [FAIL][224] ([i915#10647] / [i915#12169])
[224]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk1/igt@kms_plane_alpha_blend@alpha-opaque-fb.html
* igt@kms_plane_alpha_blend@alpha-opaque-fb@pipe-a-hdmi-a-1:
- shard-glk: NOTRUN -> [FAIL][225] ([i915#10647]) +1 other test fail
[225]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk1/igt@kms_plane_alpha_blend@alpha-opaque-fb@pipe-a-hdmi-a-1.html
* igt@kms_plane_lowres@tiling-yf:
- shard-dg2-9: NOTRUN -> [SKIP][226] ([i915#3555] / [i915#8821])
[226]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_plane_lowres@tiling-yf.html
* igt@kms_plane_multiple@2x-tiling-4:
- shard-dg1: NOTRUN -> [SKIP][227] ([i915#13958]) +1 other test skip
[227]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-13/igt@kms_plane_multiple@2x-tiling-4.html
* igt@kms_plane_multiple@2x-tiling-x:
- shard-mtlp: NOTRUN -> [SKIP][228] ([i915#13958])
[228]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-5/igt@kms_plane_multiple@2x-tiling-x.html
* igt@kms_plane_multiple@tiling-yf:
- shard-rkl: NOTRUN -> [SKIP][229] ([i915#3555]) +1 other test skip
[229]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_plane_multiple@tiling-yf.html
- shard-dg1: NOTRUN -> [SKIP][230] ([i915#3555]) +2 other tests skip
[230]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@kms_plane_multiple@tiling-yf.html
- shard-mtlp: NOTRUN -> [SKIP][231] ([i915#3555] / [i915#8806])
[231]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-8/igt@kms_plane_multiple@tiling-yf.html
- shard-dg2-9: NOTRUN -> [SKIP][232] ([i915#3555] / [i915#8806])
[232]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_plane_multiple@tiling-yf.html
* igt@kms_plane_scaling@plane-downscale-factor-0-25-with-modifiers@pipe-a:
- shard-dg1: NOTRUN -> [SKIP][233] ([i915#12247]) +4 other tests skip
[233]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-19/igt@kms_plane_scaling@plane-downscale-factor-0-25-with-modifiers@pipe-a.html
* igt@kms_plane_scaling@planes-downscale-factor-0-25:
- shard-rkl: NOTRUN -> [SKIP][234] ([i915#12247] / [i915#6953])
[234]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_plane_scaling@planes-downscale-factor-0-25.html
* igt@kms_plane_scaling@planes-downscale-factor-0-25-upscale-20x20:
- shard-dg2: NOTRUN -> [SKIP][235] ([i915#12247] / [i915#9423])
[235]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@kms_plane_scaling@planes-downscale-factor-0-25-upscale-20x20.html
* igt@kms_plane_scaling@planes-downscale-factor-0-25@pipe-b:
- shard-rkl: NOTRUN -> [SKIP][236] ([i915#12247]) +2 other tests skip
[236]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_plane_scaling@planes-downscale-factor-0-25@pipe-b.html
* igt@kms_plane_scaling@planes-downscale-factor-0-75:
- shard-mtlp: NOTRUN -> [SKIP][237] ([i915#12247] / [i915#3555] / [i915#6953])
[237]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_plane_scaling@planes-downscale-factor-0-75.html
* igt@kms_plane_scaling@planes-downscale-factor-0-75@pipe-b:
- shard-mtlp: NOTRUN -> [SKIP][238] ([i915#12247]) +3 other tests skip
[238]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_plane_scaling@planes-downscale-factor-0-75@pipe-b.html
* igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25:
- shard-dg2: NOTRUN -> [SKIP][239] ([i915#12247] / [i915#6953] / [i915#9423])
[239]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25.html
* igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25@pipe-c:
- shard-dg2: NOTRUN -> [SKIP][240] ([i915#12247]) +7 other tests skip
[240]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25@pipe-c.html
* igt@kms_pm_backlight@basic-brightness:
- shard-rkl: NOTRUN -> [SKIP][241] ([i915#5354])
[241]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_pm_backlight@basic-brightness.html
* igt@kms_pm_backlight@brightness-with-dpms:
- shard-rkl: NOTRUN -> [SKIP][242] ([i915#12343])
[242]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_pm_backlight@brightness-with-dpms.html
- shard-dg2-9: NOTRUN -> [SKIP][243] ([i915#12343])
[243]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_pm_backlight@brightness-with-dpms.html
* igt@kms_pm_dc@dc5-retention-flops:
- shard-dg2-9: NOTRUN -> [SKIP][244] ([i915#3828])
[244]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_pm_dc@dc5-retention-flops.html
* igt@kms_pm_dc@dc9-dpms:
- shard-rkl: NOTRUN -> [SKIP][245] ([i915#4281])
[245]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_pm_dc@dc9-dpms.html
* igt@kms_pm_rpm@dpms-mode-unset-lpsp:
- shard-rkl: NOTRUN -> [SKIP][246] ([i915#9519])
[246]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_pm_rpm@dpms-mode-unset-lpsp.html
* igt@kms_pm_rpm@modeset-non-lpsp-stress:
- shard-dg2: [PASS][247] -> [SKIP][248] ([i915#9519]) +1 other test skip
[247]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-3/igt@kms_pm_rpm@modeset-non-lpsp-stress.html
[248]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@kms_pm_rpm@modeset-non-lpsp-stress.html
* igt@kms_psr2_sf@fbc-pr-cursor-plane-move-continuous-exceed-sf:
- shard-dg2: NOTRUN -> [SKIP][249] ([i915#11520]) +7 other tests skip
[249]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@kms_psr2_sf@fbc-pr-cursor-plane-move-continuous-exceed-sf.html
* igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf:
- shard-dg1: NOTRUN -> [SKIP][250] ([i915#11520]) +3 other tests skip
[250]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-16/igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf.html
* igt@kms_psr2_sf@fbc-psr2-overlay-plane-move-continuous-exceed-fully-sf:
- shard-dg2-9: NOTRUN -> [SKIP][251] ([i915#11520]) +3 other tests skip
[251]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_psr2_sf@fbc-psr2-overlay-plane-move-continuous-exceed-fully-sf.html
* igt@kms_psr2_sf@fbc-psr2-overlay-primary-update-sf-dmg-area:
- shard-snb: NOTRUN -> [SKIP][252] ([i915#11520])
[252]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-snb7/igt@kms_psr2_sf@fbc-psr2-overlay-primary-update-sf-dmg-area.html
* igt@kms_psr2_sf@fbc-psr2-overlay-primary-update-sf-dmg-area@pipe-a-edp-1:
- shard-mtlp: NOTRUN -> [SKIP][253] ([i915#9808])
[253]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_psr2_sf@fbc-psr2-overlay-primary-update-sf-dmg-area@pipe-a-edp-1.html
* igt@kms_psr2_sf@fbc-psr2-overlay-primary-update-sf-dmg-area@pipe-b-edp-1:
- shard-mtlp: NOTRUN -> [SKIP][254] ([i915#12316]) +2 other tests skip
[254]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_psr2_sf@fbc-psr2-overlay-primary-update-sf-dmg-area@pipe-b-edp-1.html
* igt@kms_psr2_sf@pr-overlay-plane-update-continuous-sf:
- shard-rkl: NOTRUN -> [SKIP][255] ([i915#11520]) +6 other tests skip
[255]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-3/igt@kms_psr2_sf@pr-overlay-plane-update-continuous-sf.html
* igt@kms_psr2_sf@psr2-overlay-primary-update-sf-dmg-area:
- shard-glk: NOTRUN -> [SKIP][256] ([i915#11520]) +7 other tests skip
[256]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk2/igt@kms_psr2_sf@psr2-overlay-primary-update-sf-dmg-area.html
* igt@kms_psr2_su@frontbuffer-xrgb8888:
- shard-dg2-9: NOTRUN -> [SKIP][257] ([i915#9683])
[257]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_psr2_su@frontbuffer-xrgb8888.html
* igt@kms_psr2_su@page_flip-p010:
- shard-dg1: NOTRUN -> [SKIP][258] ([i915#9683])
[258]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-19/igt@kms_psr2_su@page_flip-p010.html
* igt@kms_psr2_su@page_flip-xrgb8888:
- shard-dg2: NOTRUN -> [SKIP][259] ([i915#9683])
[259]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@kms_psr2_su@page_flip-xrgb8888.html
* igt@kms_psr@fbc-pr-sprite-plane-onoff:
- shard-dg1: NOTRUN -> [SKIP][260] ([i915#1072] / [i915#9732]) +12 other tests skip
[260]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-19/igt@kms_psr@fbc-pr-sprite-plane-onoff.html
* igt@kms_psr@fbc-psr-cursor-plane-move:
- shard-dg2-9: NOTRUN -> [SKIP][261] ([i915#1072] / [i915#9732]) +7 other tests skip
[261]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_psr@fbc-psr-cursor-plane-move.html
* igt@kms_psr@pr-sprite-blt:
- shard-mtlp: NOTRUN -> [SKIP][262] ([i915#9688]) +14 other tests skip
[262]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@kms_psr@pr-sprite-blt.html
* igt@kms_psr@psr-cursor-mmap-cpu:
- shard-dg2: NOTRUN -> [SKIP][263] ([i915#1072] / [i915#9732]) +18 other tests skip
[263]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-3/igt@kms_psr@psr-cursor-mmap-cpu.html
* igt@kms_psr@psr-sprite-plane-move:
- shard-rkl: NOTRUN -> [SKIP][264] ([i915#1072] / [i915#9732]) +11 other tests skip
[264]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_psr@psr-sprite-plane-move.html
* igt@kms_psr_stress_test@flip-primary-invalidate-overlay:
- shard-dg1: NOTRUN -> [SKIP][265] ([i915#9685])
[265]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@kms_psr_stress_test@flip-primary-invalidate-overlay.html
* igt@kms_rotation_crc@primary-y-tiled-reflect-x-0:
- shard-dg2-9: NOTRUN -> [SKIP][266] ([i915#5190])
[266]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_rotation_crc@primary-y-tiled-reflect-x-0.html
* igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0:
- shard-dg1: NOTRUN -> [SKIP][267] ([i915#5289])
[267]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html
* igt@kms_rotation_crc@sprite-rotation-270:
- shard-dg2: NOTRUN -> [SKIP][268] ([i915#12755])
[268]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-8/igt@kms_rotation_crc@sprite-rotation-270.html
* igt@kms_rotation_crc@sprite-rotation-90:
- shard-dg2-9: NOTRUN -> [SKIP][269] ([i915#12755])
[269]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_rotation_crc@sprite-rotation-90.html
* igt@kms_setmode@invalid-clone-exclusive-crtc:
- shard-mtlp: NOTRUN -> [SKIP][270] ([i915#3555] / [i915#8809] / [i915#8823])
[270]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@kms_setmode@invalid-clone-exclusive-crtc.html
* igt@kms_setmode@invalid-clone-single-crtc:
- shard-mtlp: NOTRUN -> [SKIP][271] ([i915#3555] / [i915#8809])
[271]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-1/igt@kms_setmode@invalid-clone-single-crtc.html
* igt@kms_vrr@flip-suspend:
- shard-mtlp: NOTRUN -> [SKIP][272] ([i915#3555] / [i915#8808])
[272]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@kms_vrr@flip-suspend.html
* igt@kms_vrr@lobf:
- shard-dg2: NOTRUN -> [SKIP][273] ([i915#11920])
[273]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-7/igt@kms_vrr@lobf.html
* igt@kms_vrr@seamless-rr-switch-vrr:
- shard-dg2: NOTRUN -> [SKIP][274] ([i915#9906]) +1 other test skip
[274]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-3/igt@kms_vrr@seamless-rr-switch-vrr.html
* igt@kms_writeback@writeback-fb-id:
- shard-glk: NOTRUN -> [SKIP][275] ([i915#2437])
[275]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-glk2/igt@kms_writeback@writeback-fb-id.html
- shard-dg2-9: NOTRUN -> [SKIP][276] ([i915#2437])
[276]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_writeback@writeback-fb-id.html
- shard-dg1: NOTRUN -> [SKIP][277] ([i915#2437])
[277]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@kms_writeback@writeback-fb-id.html
* igt@kms_writeback@writeback-pixel-formats:
- shard-dg2-9: NOTRUN -> [SKIP][278] ([i915#2437] / [i915#9412])
[278]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@kms_writeback@writeback-pixel-formats.html
* igt@perf@global-sseu-config-invalid:
- shard-dg2: NOTRUN -> [SKIP][279] ([i915#7387])
[279]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@perf@global-sseu-config-invalid.html
* igt@perf@non-zero-reason:
- shard-dg2: NOTRUN -> [FAIL][280] ([i915#9100]) +1 other test fail
[280]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-5/igt@perf@non-zero-reason.html
* igt@perf_pmu@frequency@gt0:
- shard-dg2-9: NOTRUN -> [FAIL][281] ([i915#12549] / [i915#6806]) +1 other test fail
[281]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@perf_pmu@frequency@gt0.html
* igt@perf_pmu@multi-client:
- shard-dg1: [PASS][282] -> [FAIL][283] ([i915#4349]) +1 other test fail
[282]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-15/igt@perf_pmu@multi-client.html
[283]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-19/igt@perf_pmu@multi-client.html
- shard-mtlp: [PASS][284] -> [FAIL][285] ([i915#4349]) +1 other test fail
[284]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-mtlp-8/igt@perf_pmu@multi-client.html
[285]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-6/igt@perf_pmu@multi-client.html
* igt@prime_mmap@test_aperture_limit:
- shard-dg2-9: NOTRUN -> [SKIP][286] ([i915#14121]) +1 other test skip
[286]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-9/igt@prime_mmap@test_aperture_limit.html
* igt@prime_vgem@basic-gtt:
- shard-mtlp: NOTRUN -> [SKIP][287] ([i915#3708] / [i915#4077])
[287]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-7/igt@prime_vgem@basic-gtt.html
- shard-dg2: NOTRUN -> [SKIP][288] ([i915#3708] / [i915#4077]) +1 other test skip
[288]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-8/igt@prime_vgem@basic-gtt.html
- shard-dg1: NOTRUN -> [SKIP][289] ([i915#3708] / [i915#4077])
[289]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-15/igt@prime_vgem@basic-gtt.html
* igt@prime_vgem@fence-flip-hang:
- shard-mtlp: NOTRUN -> [SKIP][290] ([i915#3708])
[290]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-3/igt@prime_vgem@fence-flip-hang.html
* igt@sriov_basic@bind-unbind-vf@vf-5:
- shard-mtlp: NOTRUN -> [FAIL][291] ([i915#12910]) +9 other tests fail
[291]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-2/igt@sriov_basic@bind-unbind-vf@vf-5.html
* igt@sriov_basic@enable-vfs-autoprobe-on:
- shard-dg1: NOTRUN -> [SKIP][292] ([i915#9917])
[292]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-17/igt@sriov_basic@enable-vfs-autoprobe-on.html
* igt@sriov_basic@enable-vfs-bind-unbind-each:
- shard-dg2: NOTRUN -> [SKIP][293] ([i915#9917])
[293]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-1/igt@sriov_basic@enable-vfs-bind-unbind-each.html
- shard-rkl: NOTRUN -> [SKIP][294] ([i915#9917])
[294]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@sriov_basic@enable-vfs-bind-unbind-each.html
#### Possible fixes ####
* igt@core_hotunplug@unbind-rebind:
- shard-snb: [ABORT][295] ([i915#11703]) -> [PASS][296]
[295]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-snb7/igt@core_hotunplug@unbind-rebind.html
[296]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-snb5/igt@core_hotunplug@unbind-rebind.html
* igt@gem_eio@reset-stress:
- shard-dg1: [FAIL][297] ([i915#12543] / [i915#5784]) -> [PASS][298]
[297]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-14/igt@gem_eio@reset-stress.html
[298]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-14/igt@gem_eio@reset-stress.html
* igt@gem_exec_schedule@fairslice:
- shard-rkl: [DMESG-WARN][299] ([i915#12964]) -> [PASS][300] +1 other test pass
[299]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-rkl-3/igt@gem_exec_schedule@fairslice.html
[300]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@gem_exec_schedule@fairslice.html
* igt@gem_workarounds@reset:
- shard-mtlp: [ABORT][301] ([i915#13193] / [i915#13723]) -> [PASS][302] +1 other test pass
[301]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-mtlp-7/igt@gem_workarounds@reset.html
[302]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-4/igt@gem_workarounds@reset.html
* igt@kms_flip@2x-blocking-wf_vblank@ab-vga1-hdmi-a1:
- shard-snb: [FAIL][303] ([i915#11832] / [i915#13734]) -> [PASS][304] +1 other test pass
[303]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-snb5/igt@kms_flip@2x-blocking-wf_vblank@ab-vga1-hdmi-a1.html
[304]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-snb1/igt@kms_flip@2x-blocking-wf_vblank@ab-vga1-hdmi-a1.html
* igt@kms_flip@flip-vs-blocking-wf-vblank:
- shard-snb: [FAIL][305] ([i915#13734]) -> [PASS][306] +2 other tests pass
[305]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-snb1/igt@kms_flip@flip-vs-blocking-wf-vblank.html
[306]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-snb6/igt@kms_flip@flip-vs-blocking-wf-vblank.html
* igt@kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling:
- shard-dg1: [DMESG-WARN][307] ([i915#4423]) -> [PASS][308]
[307]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-14/igt@kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling.html
[308]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-17/igt@kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling.html
* igt@kms_plane_scaling@intel-max-src-size:
- shard-dg2: [SKIP][309] ([i915#6953] / [i915#9423]) -> [PASS][310]
[309]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-3/igt@kms_plane_scaling@intel-max-src-size.html
[310]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@kms_plane_scaling@intel-max-src-size.html
* igt@kms_psr@psr2-cursor-mmap-cpu:
- shard-mtlp: [FAIL][311] -> [PASS][312] +1 other test pass
[311]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-mtlp-5/igt@kms_psr@psr2-cursor-mmap-cpu.html
[312]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-7/igt@kms_psr@psr2-cursor-mmap-cpu.html
* igt@kms_setmode@basic:
- shard-snb: [FAIL][313] ([i915#5465]) -> [PASS][314] +2 other tests pass
[313]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-snb6/igt@kms_setmode@basic.html
[314]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-snb6/igt@kms_setmode@basic.html
* igt@kms_vrr@negative-basic:
- shard-dg2: [SKIP][315] ([i915#3555] / [i915#9906]) -> [PASS][316]
[315]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-8/igt@kms_vrr@negative-basic.html
[316]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-10/igt@kms_vrr@negative-basic.html
* igt@perf_pmu@most-busy-check-all@vecs0:
- shard-dg2: [FAIL][317] ([i915#11943]) -> [PASS][318] +5 other tests pass
[317]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-6/igt@perf_pmu@most-busy-check-all@vecs0.html
[318]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@perf_pmu@most-busy-check-all@vecs0.html
* igt@perf_pmu@most-busy-check-all@vecs1:
- shard-dg2: [FAIL][319] -> [PASS][320]
[319]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-6/igt@perf_pmu@most-busy-check-all@vecs1.html
[320]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@perf_pmu@most-busy-check-all@vecs1.html
#### Warnings ####
* igt@kms_ccs@crc-sprite-planes-basic-y-tiled-ccs@pipe-b-hdmi-a-2:
- shard-rkl: [SKIP][321] ([i915#14098] / [i915#6095]) -> [SKIP][322] ([i915#6095]) +1 other test skip
[321]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-rkl-3/igt@kms_ccs@crc-sprite-planes-basic-y-tiled-ccs@pipe-b-hdmi-a-2.html
[322]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-rkl-8/igt@kms_ccs@crc-sprite-planes-basic-y-tiled-ccs@pipe-b-hdmi-a-2.html
* igt@kms_content_protection@lic-type-0:
- shard-dg2: [SKIP][323] ([i915#9424]) -> [FAIL][324] ([i915#7173])
[323]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-8/igt@kms_content_protection@lic-type-0.html
[324]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-11/igt@kms_content_protection@lic-type-0.html
* igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-wc:
- shard-dg1: [SKIP][325] ([i915#4423] / [i915#8708]) -> [SKIP][326] ([i915#8708])
[325]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-14/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-wc.html
[326]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-19/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-plflip-blt:
- shard-dg1: [SKIP][327] -> [SKIP][328] ([i915#4423])
[327]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-16/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-plflip-blt.html
[328]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-plflip-blt.html
* igt@kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-render:
- shard-dg1: [SKIP][329] ([i915#3458] / [i915#4423]) -> [SKIP][330] ([i915#3458])
[329]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-17/igt@kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-render.html
[330]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-18/igt@kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@psr-indfb-scaledprimary:
- shard-dg2: [SKIP][331] ([i915#10433] / [i915#3458]) -> [SKIP][332] ([i915#3458]) +1 other test skip
[331]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-4/igt@kms_frontbuffer_tracking@psr-indfb-scaledprimary.html
[332]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-2/igt@kms_frontbuffer_tracking@psr-indfb-scaledprimary.html
* igt@kms_frontbuffer_tracking@psr-rgb565-draw-mmap-cpu:
- shard-dg2: [SKIP][333] ([i915#3458]) -> [SKIP][334] ([i915#10433] / [i915#3458]) +2 other tests skip
[333]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg2-1/igt@kms_frontbuffer_tracking@psr-rgb565-draw-mmap-cpu.html
[334]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg2-4/igt@kms_frontbuffer_tracking@psr-rgb565-draw-mmap-cpu.html
* igt@kms_hdr@brightness-with-hdr:
- shard-mtlp: [SKIP][335] ([i915#1187] / [i915#12713]) -> [SKIP][336] ([i915#12713])
[335]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-mtlp-1/igt@kms_hdr@brightness-with-hdr.html
[336]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-mtlp-5/igt@kms_hdr@brightness-with-hdr.html
* igt@kms_psr@fbc-psr2-sprite-plane-onoff:
- shard-dg1: [SKIP][337] ([i915#1072] / [i915#4423] / [i915#9732]) -> [SKIP][338] ([i915#1072] / [i915#9732])
[337]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-14/igt@kms_psr@fbc-psr2-sprite-plane-onoff.html
[338]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_psr@fbc-psr2-sprite-plane-onoff.html
* igt@kms_tiled_display@basic-test-pattern-with-chamelium:
- shard-dg1: [SKIP][339] ([i915#8623]) -> [SKIP][340] ([i915#4423] / [i915#8623])
[339]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-19/igt@kms_tiled_display@basic-test-pattern-with-chamelium.html
[340]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-12/igt@kms_tiled_display@basic-test-pattern-with-chamelium.html
* igt@kms_vrr@negative-basic:
- shard-dg1: [SKIP][341] ([i915#3555] / [i915#9906]) -> [SKIP][342] ([i915#3555] / [i915#4423] / [i915#9906])
[341]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8339/shard-dg1-12/igt@kms_vrr@negative-basic.html
[342]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/shard-dg1-14/igt@kms_vrr@negative-basic.html
[i915#10307]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10307
[i915#10433]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10433
[i915#10434]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10434
[i915#10647]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10647
[i915#10656]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10656
[i915#1072]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1072
[i915#1099]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1099
[i915#11151]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11151
[i915#11520]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11520
[i915#11681]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11681
[i915#11703]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11703
[i915#11832]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11832
[i915#1187]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1187
[i915#11920]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11920
[i915#11943]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11943
[i915#11965]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11965
[i915#12061]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12061
[i915#12169]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12169
[i915#12193]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12193
[i915#12247]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12247
[i915#12313]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12313
[i915#12316]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12316
[i915#12339]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12339
[i915#12343]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12343
[i915#12454]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12454
[i915#12455]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12455
[i915#12543]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12543
[i915#12549]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12549
[i915#12712]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12712
[i915#12713]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12713
[i915#12714]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12714
[i915#12755]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12755
[i915#12797]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12797
[i915#12805]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12805
[i915#12910]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12910
[i915#12917]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12917
[i915#12964]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12964
[i915#12967]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12967
[i915#13028]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13028
[i915#13046]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13046
[i915#13049]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13049
[i915#13193]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13193
[i915#13304]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13304
[i915#13665]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13665
[i915#13688]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13688
[i915#13707]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13707
[i915#13717]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13717
[i915#13723]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13723
[i915#13734]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13734
[i915#13748]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13748
[i915#13749]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13749
[i915#13783]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13783
[i915#13784]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13784
[i915#13786]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13786
[i915#13798]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13798
[i915#13820]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13820
[i915#13958]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13958
[i915#14073]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14073
[i915#14098]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14098
[i915#14118]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14118
[i915#14121]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14121
[i915#14123]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14123
[i915#1769]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1769
[i915#1825]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1825
[i915#1839]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1839
[i915#2437]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2437
[i915#2527]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2527
[i915#2587]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2587
[i915#2672]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2672
[i915#280]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/280
[i915#2856]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2856
[i915#3023]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3023
[i915#3281]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3281
[i915#3282]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3282
[i915#3297]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3297
[i915#3458]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3458
[i915#3539]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3539
[i915#3555]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3555
[i915#3591]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3591
[i915#3637]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3637
[i915#3638]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3638
[i915#3708]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3708
[i915#3711]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3711
[i915#3828]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3828
[i915#3840]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3840
[i915#4077]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4077
[i915#4079]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4079
[i915#4083]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4083
[i915#4103]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4103
[i915#4213]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4213
[i915#4270]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4270
[i915#4281]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4281
[i915#4349]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4349
[i915#4423]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4423
[i915#4525]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4525
[i915#4537]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4537
[i915#4538]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4538
[i915#4565]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4565
[i915#4613]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4613
[i915#4771]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4771
[i915#4812]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4812
[i915#4852]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4852
[i915#4860]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4860
[i915#4880]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4880
[i915#4881]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4881
[i915#4885]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4885
[i915#4958]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4958
[i915#5190]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5190
[i915#5286]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5286
[i915#5289]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5289
[i915#5354]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5354
[i915#5465]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5465
[i915#5493]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5493
[i915#5784]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5784
[i915#5882]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5882
[i915#5956]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5956
[i915#6095]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6095
[i915#6228]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6228
[i915#6334]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6334
[i915#6403]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6403
[i915#658]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/658
[i915#6590]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6590
[i915#6806]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6806
[i915#6953]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6953
[i915#7118]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7118
[i915#7173]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7173
[i915#7387]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7387
[i915#7582]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7582
[i915#7697]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7697
[i915#7828]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7828
[i915#7975]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7975
[i915#8228]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8228
[i915#8399]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8399
[i915#8411]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8411
[i915#8428]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8428
[i915#8555]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8555
[i915#8562]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8562
[i915#8623]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8623
[i915#8708]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8708
[i915#8709]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8709
[i915#8806]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8806
[i915#8808]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8808
[i915#8809]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8809
[i915#8812]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8812
[i915#8813]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8813
[i915#8814]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8814
[i915#8821]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8821
[i915#8823]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8823
[i915#8826]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8826
[i915#9053]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9053
[i915#9100]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9100
[i915#9311]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9311
[i915#9412]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9412
[i915#9423]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9423
[i915#9424]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9424
[i915#9457]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9457
[i915#9519]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9519
[i915#9683]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9683
[i915#9685]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9685
[i915#9688]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9688
[i915#9732]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9732
[i915#9808]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9808
[i915#9906]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9906
[i915#9917]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9917
[i915#9934]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9934
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_8339 -> IGTPW_13046
CI-20190529: 20190529
CI_DRM_16465: 6e67a7af43567bb9f23fe156fde7efa3d214fd20 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_13046: 13046
IGT_8339: 8339
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13046/index.html
[-- Attachment #2: Type: text/html, Size: 119952 bytes --]
^ permalink raw reply [flat|nested] 16+ messages in thread
* ✗ Xe.CI.Full: failure for tests/xe: Add system_allocator test (rev5)
2025-04-25 18:20 [PATCH] tests/xe: Add system_allocator test Matthew Brost
` (2 preceding siblings ...)
2025-04-26 6:28 ` ✓ i915.CI.Full: " Patchwork
@ 2025-04-26 10:21 ` Patchwork
3 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2025-04-26 10:21 UTC (permalink / raw)
To: Matthew Brost; +Cc: igt-dev
[-- Attachment #1: Type: text/plain, Size: 279607 bytes --]
== Series Details ==
Series: tests/xe: Add system_allocator test (rev5)
URL : https://patchwork.freedesktop.org/series/137545/
State : failure
== Summary ==
CI Bug Log - changes from XEIGT_8339_FULL -> XEIGTPW_13046_FULL
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with XEIGTPW_13046_FULL absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in XEIGTPW_13046_FULL, please notify your bug team (I915-ci-infra@lists.freedesktop.org) to allow them
to document this new failure mode, which will reduce false positives in CI.
Participating hosts (4 -> 3)
------------------------------
Missing (1): shard-adlp
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in XEIGTPW_13046_FULL:
### IGT changes ###
#### Possible regressions ####
* igt@kms_hdr@static-toggle:
- shard-dg2-set2: [PASS][1] -> [FAIL][2] +1 other test fail
[1]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-433/igt@kms_hdr@static-toggle.html
[2]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-434/igt@kms_hdr@static-toggle.html
* igt@kms_plane_multiple@tiling-4@pipe-c-dp-4:
- shard-dg2-set2: NOTRUN -> [FAIL][3]
[3]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-434/igt@kms_plane_multiple@tiling-4@pipe-c-dp-4.html
* igt@xe_exec_basic@multigpu-no-exec-null-rebind:
- shard-dg2-set2: [PASS][4] -> [INCOMPLETE][5]
[4]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-433/igt@xe_exec_basic@multigpu-no-exec-null-rebind.html
[5]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-436/igt@xe_exec_basic@multigpu-no-exec-null-rebind.html
* {igt@xe_exec_system_allocator@threads-many-large-mmap-shared-remap-dontunmap-eocheck} (NEW):
- shard-dg2-set2: NOTRUN -> [SKIP][6] +1373 other tests skip
[6]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@xe_exec_system_allocator@threads-many-large-mmap-shared-remap-dontunmap-eocheck.html
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-free-huge (NEW):
- shard-lnl: NOTRUN -> [FAIL][7] +129 other tests fail
[7]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-free-huge.html
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-new-huge (NEW):
- shard-bmg: NOTRUN -> [FAIL][8] +100 other tests fail
[8]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-new-huge.html
New tests
---------
New tests have been introduced between XEIGT_8339_FULL and XEIGTPW_13046_FULL:
### New IGT tests (1536) ###
* igt@xe_exec_system_allocator@evict-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 26.77] s
* igt@xe_exec_system_allocator@evict-malloc-mix-bo:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.00] s
* igt@xe_exec_system_allocator@fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@fault-benchmark:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.33] s
* igt@xe_exec_system_allocator@fault-process-benchmark:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 6.16] s
* igt@xe_exec_system_allocator@fault-process-same-page-benchmark:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 7.54] s
* igt@xe_exec_system_allocator@fault-threads-benchmark:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.69] s
* igt@xe_exec_system_allocator@fault-threads-same-page-benchmark:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.87] s
* igt@xe_exec_system_allocator@many-execqueues-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.35] s
* igt@xe_exec_system_allocator@many-execqueues-free-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-execqueues-free-race:
- Statuses : 2 pass(s)
- Exec time: [0.26, 0.42] s
* igt@xe_exec_system_allocator@many-execqueues-free-race-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.28] s
* igt@xe_exec_system_allocator@many-execqueues-malloc:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.51] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.48] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-fork-read:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 14.00] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 13.81] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.51] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-mlock-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.54] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.51] s
* igt@xe_exec_system_allocator@many-execqueues-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.42] s
* igt@xe_exec_system_allocator@many-execqueues-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.61] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-file:
- Statuses : 2 pass(s)
- Exec time: [0.22, 0.29] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.55] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.50] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-free:
- Statuses : 1 pass(s)
- Exec time: [0.95] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-free-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.73] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.91] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.34] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.42] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.58] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.95] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.32] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.21] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.39] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.60] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.52] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.63] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.47] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.63] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.68] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.01] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-remap-ro-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.03] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.15] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-remap-ro-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.04] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.35] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-shared-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.33] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.38] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.72] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.72] s
* igt@xe_exec_system_allocator@many-execqueues-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.35] s
* igt@xe_exec_system_allocator@many-execqueues-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.69] s
* igt@xe_exec_system_allocator@many-execqueues-new-bo-map:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-execqueues-new-bo-map-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.77] s
* igt@xe_exec_system_allocator@many-execqueues-new-busy:
- Statuses : 1 pass(s)
- Exec time: [0.51] s
* igt@xe_exec_system_allocator@many-execqueues-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.58] s
* igt@xe_exec_system_allocator@many-execqueues-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.42] s
* igt@xe_exec_system_allocator@many-execqueues-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.55] s
* igt@xe_exec_system_allocator@many-execqueues-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.28] s
* igt@xe_exec_system_allocator@many-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@many-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@many-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.34] s
* igt@xe_exec_system_allocator@many-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.37] s
* igt@xe_exec_system_allocator@many-large-execqueues-free:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-large-execqueues-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.28] s
* igt@xe_exec_system_allocator@many-large-execqueues-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 9.48] s
* igt@xe_exec_system_allocator@many-large-execqueues-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.15] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.39] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.63] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.55] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-busy:
- Statuses : 2 pass(s)
- Exec time: [0.31, 0.59] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.68] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 13.66] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 14.73] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.48] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.85] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.47] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.35] s
* igt@xe_exec_system_allocator@many-large-execqueues-malloc-race-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.28, 0.50] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.55] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.50] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.61] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.64] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-free-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.76] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.78] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.27] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.62] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-mlock-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.44] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-new-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-new-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.42] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-new-race:
- Statuses : 1 pass(s)
- Exec time: [2.64] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.15] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.59] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-race-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.33] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 6.73] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-remap-dontunmap:
- Statuses : 2 pass(s)
- Exec time: [1.93, 7.84] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.84] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.76] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 9.27] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 9.98] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.85] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.79] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.54] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.02] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.43] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.02] s
* igt@xe_exec_system_allocator@many-large-execqueues-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.13] s
* igt@xe_exec_system_allocator@many-large-execqueues-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.84] s
* igt@xe_exec_system_allocator@many-large-execqueues-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.31] s
* igt@xe_exec_system_allocator@many-large-execqueues-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.67] s
* igt@xe_exec_system_allocator@many-large-execqueues-new-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.40] s
* igt@xe_exec_system_allocator@many-large-execqueues-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.81] s
* igt@xe_exec_system_allocator@many-large-execqueues-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.09] s
* igt@xe_exec_system_allocator@many-large-execqueues-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.68] s
* igt@xe_exec_system_allocator@many-large-execqueues-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.64] s
* igt@xe_exec_system_allocator@many-large-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.18] s
* igt@xe_exec_system_allocator@many-large-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.45] s
* igt@xe_exec_system_allocator@many-large-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 11.40] s
* igt@xe_exec_system_allocator@many-large-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.23] s
* igt@xe_exec_system_allocator@many-large-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.42] s
* igt@xe_exec_system_allocator@many-large-malloc-bo-unmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@many-large-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@many-large-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.40] s
* igt@xe_exec_system_allocator@many-large-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@many-large-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 15.31] s
* igt@xe_exec_system_allocator@many-large-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 14.80] s
* igt@xe_exec_system_allocator@many-large-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@many-large-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.43] s
* igt@xe_exec_system_allocator@many-large-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@many-large-malloc-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@many-large-malloc-race:
- Statuses : 1 pass(s)
- Exec time: [0.20] s
* igt@xe_exec_system_allocator@many-large-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.36] s
* igt@xe_exec_system_allocator@many-large-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.40] s
* igt@xe_exec_system_allocator@many-large-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.33] s
* igt@xe_exec_system_allocator@many-large-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@many-large-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.38] s
* igt@xe_exec_system_allocator@many-large-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@many-large-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.62] s
* igt@xe_exec_system_allocator@many-large-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-mmap-free-huge-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-large-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.52] s
* igt@xe_exec_system_allocator@many-large-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.05] s
* igt@xe_exec_system_allocator@many-large-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.32] s
* igt@xe_exec_system_allocator@many-large-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-mmap-huge-nomemset:
- Statuses : 2 fail(s)
- Exec time: [0.01] s
* igt@xe_exec_system_allocator@many-large-mmap-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@many-large-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.40] s
* igt@xe_exec_system_allocator@many-large-mmap-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.70] s
* igt@xe_exec_system_allocator@many-large-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-large-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.47] s
* igt@xe_exec_system_allocator@many-large-mmap-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.63] s
* igt@xe_exec_system_allocator@many-large-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.33] s
* igt@xe_exec_system_allocator@many-large-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@many-large-mmap-race:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-large-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.37] s
* igt@xe_exec_system_allocator@many-large-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.95] s
* igt@xe_exec_system_allocator@many-large-mmap-remap-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.05] s
* igt@xe_exec_system_allocator@many-large-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.49] s
* igt@xe_exec_system_allocator@many-large-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.36] s
* igt@xe_exec_system_allocator@many-large-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 6.05] s
* igt@xe_exec_system_allocator@many-large-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 7.92] s
* igt@xe_exec_system_allocator@many-large-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.88] s
* igt@xe_exec_system_allocator@many-large-mmap-remap-ro-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.00] s
* igt@xe_exec_system_allocator@many-large-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@many-large-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.41] s
* igt@xe_exec_system_allocator@many-large-mmap-shared-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.63] s
* igt@xe_exec_system_allocator@many-large-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.81] s
* igt@xe_exec_system_allocator@many-large-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.31] s
* igt@xe_exec_system_allocator@many-large-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s)
- Exec time: [1.88] s
* igt@xe_exec_system_allocator@many-large-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.11] s
* igt@xe_exec_system_allocator@many-large-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.42] s
* igt@xe_exec_system_allocator@many-large-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.29] s
* igt@xe_exec_system_allocator@many-large-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.85] s
* igt@xe_exec_system_allocator@many-large-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.03] s
* igt@xe_exec_system_allocator@many-large-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.97] s
* igt@xe_exec_system_allocator@many-large-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.19] s
* igt@xe_exec_system_allocator@many-large-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.69] s
* igt@xe_exec_system_allocator@many-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@many-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@many-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@many-malloc-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@many-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@many-malloc-fork-read:
- Statuses : 2 pass(s)
- Exec time: [6.72, 12.62] s
* igt@xe_exec_system_allocator@many-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 13.10] s
* igt@xe_exec_system_allocator@many-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.34] s
* igt@xe_exec_system_allocator@many-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.37] s
* igt@xe_exec_system_allocator@many-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@many-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.35] s
* igt@xe_exec_system_allocator@many-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@many-malloc-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@many-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@many-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@many-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.36] s
* igt@xe_exec_system_allocator@many-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@many-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@many-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.41] s
* igt@xe_exec_system_allocator@many-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.96] s
* igt@xe_exec_system_allocator@many-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.13] s
* igt@xe_exec_system_allocator@many-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.27] s
* igt@xe_exec_system_allocator@many-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.37] s
* igt@xe_exec_system_allocator@many-mmap-mlock-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.77] s
* igt@xe_exec_system_allocator@many-mmap-new-huge:
- Statuses : 1 fail(s)
- Exec time: [0.01] s
* igt@xe_exec_system_allocator@many-mmap-new-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.39] s
* igt@xe_exec_system_allocator@many-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.73] s
* igt@xe_exec_system_allocator@many-mmap-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.83] s
* igt@xe_exec_system_allocator@many-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.37] s
* igt@xe_exec_system_allocator@many-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.35] s
* igt@xe_exec_system_allocator@many-mmap-race-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.18, 0.23] s
* igt@xe_exec_system_allocator@many-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.83] s
* igt@xe_exec_system_allocator@many-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.90] s
* igt@xe_exec_system_allocator@many-mmap-remap-dontunmap-eocheck:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.20] s
* igt@xe_exec_system_allocator@many-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.62] s
* igt@xe_exec_system_allocator@many-mmap-remap-ro-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.24] s
* igt@xe_exec_system_allocator@many-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.31] s
* igt@xe_exec_system_allocator@many-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.38] s
* igt@xe_exec_system_allocator@many-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.38] s
* igt@xe_exec_system_allocator@many-mmap-shared-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.22] s
* igt@xe_exec_system_allocator@many-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.57] s
* igt@xe_exec_system_allocator@many-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.82] s
* igt@xe_exec_system_allocator@many-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.83] s
* igt@xe_exec_system_allocator@many-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.47] s
* igt@xe_exec_system_allocator@many-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.23] s
* igt@xe_exec_system_allocator@many-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.80] s
* igt@xe_exec_system_allocator@many-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.15] s
* igt@xe_exec_system_allocator@many-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.35] s
* igt@xe_exec_system_allocator@many-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.34] s
* igt@xe_exec_system_allocator@many-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.55] s
* igt@xe_exec_system_allocator@many-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.65] s
* igt@xe_exec_system_allocator@many-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.40] s
* igt@xe_exec_system_allocator@many-stride-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.88] s
* igt@xe_exec_system_allocator@many-stride-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.64] s
* igt@xe_exec_system_allocator@many-stride-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 6.36] s
* igt@xe_exec_system_allocator@many-stride-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.10] s
* igt@xe_exec_system_allocator@many-stride-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.70] s
* igt@xe_exec_system_allocator@many-stride-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.00] s
* igt@xe_exec_system_allocator@many-stride-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.65] s
* igt@xe_exec_system_allocator@many-stride-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.35] s
* igt@xe_exec_system_allocator@many-stride-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.38] s
* igt@xe_exec_system_allocator@many-stride-malloc-fork-read:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 12.68] s
* igt@xe_exec_system_allocator@many-stride-malloc-fork-read-after:
- Statuses : 2 pass(s)
- Exec time: [9.56, 15.04] s
* igt@xe_exec_system_allocator@many-stride-malloc-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.29] s
* igt@xe_exec_system_allocator@many-stride-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.72] s
* igt@xe_exec_system_allocator@many-stride-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@many-stride-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.69] s
* igt@xe_exec_system_allocator@many-stride-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.50] s
* igt@xe_exec_system_allocator@many-stride-malloc-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@many-stride-mmap:
- Statuses : 1 pass(s)
- Exec time: [0.35] s
* igt@xe_exec_system_allocator@many-stride-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@many-stride-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.42] s
* igt@xe_exec_system_allocator@many-stride-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.38] s
* igt@xe_exec_system_allocator@many-stride-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.45] s
* igt@xe_exec_system_allocator@many-stride-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.26] s
* igt@xe_exec_system_allocator@many-stride-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-stride-mmap-free-huge-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-stride-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.33] s
* igt@xe_exec_system_allocator@many-stride-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.84] s
* igt@xe_exec_system_allocator@many-stride-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.94] s
* igt@xe_exec_system_allocator@many-stride-mmap-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-stride-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@many-stride-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.81] s
* igt@xe_exec_system_allocator@many-stride-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.25] s
* igt@xe_exec_system_allocator@many-stride-mmap-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 4.86] s
* igt@xe_exec_system_allocator@many-stride-mmap-new-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-stride-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@many-stride-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.61] s
* igt@xe_exec_system_allocator@many-stride-mmap-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 5.81] s
* igt@xe_exec_system_allocator@many-stride-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.49] s
* igt@xe_exec_system_allocator@many-stride-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.58] s
* igt@xe_exec_system_allocator@many-stride-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.54] s
* igt@xe_exec_system_allocator@many-stride-mmap-race-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.34, 0.37] s
* igt@xe_exec_system_allocator@many-stride-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.91] s
* igt@xe_exec_system_allocator@many-stride-mmap-remap-dontunmap:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@many-stride-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.96] s
* igt@xe_exec_system_allocator@many-stride-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.75] s
* igt@xe_exec_system_allocator@many-stride-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.56] s
* igt@xe_exec_system_allocator@many-stride-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.20] s
* igt@xe_exec_system_allocator@many-stride-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.16] s
* igt@xe_exec_system_allocator@many-stride-mmap-remap-ro-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.38] s
* igt@xe_exec_system_allocator@many-stride-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.43] s
* igt@xe_exec_system_allocator@many-stride-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.41] s
* igt@xe_exec_system_allocator@many-stride-mmap-shared-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.06] s
* igt@xe_exec_system_allocator@many-stride-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.61] s
* igt@xe_exec_system_allocator@many-stride-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.49] s
* igt@xe_exec_system_allocator@many-stride-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.65] s
* igt@xe_exec_system_allocator@many-stride-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 6.16] s
* igt@xe_exec_system_allocator@many-stride-new-bo-map:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.35] s
* igt@xe_exec_system_allocator@many-stride-new-bo-map-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.53] s
* igt@xe_exec_system_allocator@many-stride-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 6.18] s
* igt@xe_exec_system_allocator@many-stride-new-busy-nomemset:
- Statuses : 2 pass(s)
- Exec time: [1.40, 2.83] s
* igt@xe_exec_system_allocator@many-stride-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.43] s
* igt@xe_exec_system_allocator@many-stride-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.82] s
* igt@xe_exec_system_allocator@many-stride-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.22] s
* igt@xe_exec_system_allocator@once-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-large-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@once-large-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-large-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-large-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-large-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@once-large-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-malloc-busy:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@once-large-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@once-large-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@once-large-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@once-large-malloc-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-large-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@once-large-malloc-multi-fault:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@once-large-malloc-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@once-large-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@once-large-malloc-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@once-large-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-large-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-large-mmap-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-large-mmap-free-huge:
- Statuses : 1 fail(s)
- Exec time: [0.01] s
* igt@xe_exec_system_allocator@once-large-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-large-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@once-large-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-large-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-large-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-large-mmap-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-large-mmap-mlock:
- Statuses : 2 pass(s)
- Exec time: [0.05, 0.07] s
* igt@xe_exec_system_allocator@once-large-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@once-large-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@once-large-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-large-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-large-mmap-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-large-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@once-large-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@once-large-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@once-large-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@once-large-mmap-remap-dontunmap:
- Statuses : 1 pass(s)
- Exec time: [0.05] s
* igt@xe_exec_system_allocator@once-large-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-large-mmap-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-large-mmap-remap-ro:
- Statuses : 2 pass(s)
- Exec time: [0.04, 0.06] s
* igt@xe_exec_system_allocator@once-large-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@once-large-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-large-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-large-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-large-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-large-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-large-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-large-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-large-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-large-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@once-large-new-bo-map-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@once-large-new-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@once-large-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@once-large-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-large-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@once-large-new-race-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@once-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-malloc-fork-read:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@once-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@once-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-malloc-multi-fault:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-mmap:
- Statuses : 2 pass(s)
- Exec time: [0.03] s
* igt@xe_exec_system_allocator@once-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-mmap-free-huge:
- Statuses : 1 fail(s)
- Exec time: [0.01] s
* igt@xe_exec_system_allocator@once-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-mmap-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@once-mmap-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-mmap-new-huge-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@once-mmap-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@once-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@once-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-mmap-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-mmap-remap-ro:
- Statuses : 2 pass(s)
- Exec time: [0.02, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@once-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@once-new:
- Statuses : 1 pass(s)
- Exec time: [0.01] s
* igt@xe_exec_system_allocator@once-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-new-bo-map-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@once-new-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.02] s
* igt@xe_exec_system_allocator@once-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@once-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@partial-middle-munmap-cpu-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@partial-middle-munmap-no-cpu-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@partial-middle-remap-cpu-fault:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@partial-middle-remap-no-cpu-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@partial-munmap-cpu-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@partial-munmap-no-cpu-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@partial-remap-cpu-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@partial-remap-no-cpu-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@process-many-execqueues-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-execqueues-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-execqueues-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-execqueues-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-bo-unmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-fork-read-after:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-mlock-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.14, 0.27] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@process-many-execqueues-malloc-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-free-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-free-race:
- Statuses : 1 pass(s)
- Exec time: [0.32] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-huge-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.41] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-new-huge:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-new-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-new-race:
- Statuses : 2 pass(s)
- Exec time: [0.24, 0.31] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.45] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.14] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-remap-ro:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-remap-ro-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-shared-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-shared-remap-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@process-many-execqueues-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-execqueues-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-execqueues-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.63] s
* igt@xe_exec_system_allocator@process-many-execqueues-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.33] s
* igt@xe_exec_system_allocator@process-many-execqueues-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@process-many-execqueues-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.37] s
* igt@xe_exec_system_allocator@process-many-execqueues-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@process-many-execqueues-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-execqueues-new-race-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.17] s
* igt@xe_exec_system_allocator@process-many-free:
- Statuses : 2 pass(s)
- Exec time: [0.09, 0.11] s
* igt@xe_exec_system_allocator@process-many-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@process-many-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.49] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.60] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.41] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.15] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-bo-unmap-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-fork-read-after:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-file-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.49] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.59] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-free-race:
- Statuses : 2 pass(s)
- Exec time: [0.53, 0.64] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-huge-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.67] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-new-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.85] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.78] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.86] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-race:
- Statuses : 1 pass(s)
- Exec time: [0.12] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.77] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.55] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.85] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.73] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.59] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.46] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s)
- Exec time: [0.28, 0.37] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.01] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-new-bo-map:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.63] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.52] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-new-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.02] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.73] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-new-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.74] s
* igt@xe_exec_system_allocator@process-many-large-execqueues-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.41] s
* igt@xe_exec_system_allocator@process-many-large-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.50] s
* igt@xe_exec_system_allocator@process-many-large-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.57] s
* igt@xe_exec_system_allocator@process-many-large-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.39] s
* igt@xe_exec_system_allocator@process-many-large-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.26] s
* igt@xe_exec_system_allocator@process-many-large-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-large-malloc-bo-unmap:
- Statuses : 2 pass(s)
- Exec time: [0.07, 0.12] s
* igt@xe_exec_system_allocator@process-many-large-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@process-many-large-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-large-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-large-malloc-fork-read:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-large-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-large-malloc-mlock:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-large-malloc-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@process-many-large-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-large-malloc-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@process-many-large-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@process-many-large-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-large-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@process-many-large-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@process-many-large-mmap-file-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@process-many-large-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@process-many-large-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@process-many-large-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.54] s
* igt@xe_exec_system_allocator@process-many-large-mmap-free-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@process-many-large-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-large-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@process-many-large-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.72] s
* igt@xe_exec_system_allocator@process-many-large-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.33] s
* igt@xe_exec_system_allocator@process-many-large-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@process-many-large-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@process-many-large-mmap-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@process-many-large-mmap-mlock-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-large-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.83] s
* igt@xe_exec_system_allocator@process-many-large-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-large-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@process-many-large-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.86] s
* igt@xe_exec_system_allocator@process-many-large-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.93] s
* igt@xe_exec_system_allocator@process-many-large-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.90] s
* igt@xe_exec_system_allocator@process-many-large-mmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@process-many-large-mmap-race:
- Statuses : 1 pass(s)
- Exec time: [0.07] s
* igt@xe_exec_system_allocator@process-many-large-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@process-many-large-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.73] s
* igt@xe_exec_system_allocator@process-many-large-mmap-remap-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.73] s
* igt@xe_exec_system_allocator@process-many-large-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s)
- Exec time: [0.26, 0.53] s
* igt@xe_exec_system_allocator@process-many-large-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.87] s
* igt@xe_exec_system_allocator@process-many-large-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.75] s
* igt@xe_exec_system_allocator@process-many-large-mmap-remap-ro-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.70] s
* igt@xe_exec_system_allocator@process-many-large-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-large-mmap-remap-ro-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-large-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@process-many-large-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-large-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.46] s
* igt@xe_exec_system_allocator@process-many-large-mmap-shared-remap-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@process-many-large-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-large-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.45] s
* igt@xe_exec_system_allocator@process-many-large-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.95] s
* igt@xe_exec_system_allocator@process-many-large-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.74] s
* igt@xe_exec_system_allocator@process-many-large-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.49] s
* igt@xe_exec_system_allocator@process-many-large-new-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.66] s
* igt@xe_exec_system_allocator@process-many-large-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.63] s
* igt@xe_exec_system_allocator@process-many-large-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.80] s
* igt@xe_exec_system_allocator@process-many-large-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.75] s
* igt@xe_exec_system_allocator@process-many-large-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.91] s
* igt@xe_exec_system_allocator@process-many-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-malloc-bo-unmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@process-many-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@process-many-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@process-many-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-malloc-mlock:
- Statuses : 1 pass(s)
- Exec time: [0.07] s
* igt@xe_exec_system_allocator@process-many-malloc-mlock-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.10, 0.12] s
* igt@xe_exec_system_allocator@process-many-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@process-many-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@process-many-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@process-many-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@process-many-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-mmap-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-mmap-free-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@process-many-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@process-many-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@process-many-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.57] s
* igt@xe_exec_system_allocator@process-many-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-mmap-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@process-many-mmap-mlock:
- Statuses : 2 pass(s)
- Exec time: [0.10, 0.19] s
* igt@xe_exec_system_allocator@process-many-mmap-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.37] s
* igt@xe_exec_system_allocator@process-many-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@process-many-mmap-new-huge-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-mmap-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@process-many-mmap-new-race:
- Statuses : 1 pass(s)
- Exec time: [0.27] s
* igt@xe_exec_system_allocator@process-many-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@process-many-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@process-many-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@process-many-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@process-many-mmap-remap-ro:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-mmap-remap-ro-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.33] s
* igt@xe_exec_system_allocator@process-many-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.56] s
* igt@xe_exec_system_allocator@process-many-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.46] s
* igt@xe_exec_system_allocator@process-many-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@process-many-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@process-many-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@process-many-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.35] s
* igt@xe_exec_system_allocator@process-many-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@process-many-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@process-many-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@process-many-new-race-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-stride-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.05] s
* igt@xe_exec_system_allocator@process-many-stride-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.40] s
* igt@xe_exec_system_allocator@process-many-stride-free-race:
- Statuses : 1 pass(s)
- Exec time: [0.93] s
* igt@xe_exec_system_allocator@process-many-stride-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.92] s
* igt@xe_exec_system_allocator@process-many-stride-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-bo-unmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-busy:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-fork-read:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@process-many-stride-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@process-many-stride-mmap:
- Statuses : 2 pass(s)
- Exec time: [0.09, 0.14] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-file-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.17] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.59] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.19] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.79] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.35] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-new-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-new-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.60] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.18] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.64] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.58] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.88] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.64] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-remap-ro:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.58] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.73] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.68] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.41] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@process-many-stride-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@process-many-stride-new:
- Statuses : 1 pass(s)
- Exec time: [0.93] s
* igt@xe_exec_system_allocator@process-many-stride-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.83] s
* igt@xe_exec_system_allocator@process-many-stride-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.51] s
* igt@xe_exec_system_allocator@process-many-stride-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.15] s
* igt@xe_exec_system_allocator@process-many-stride-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.77] s
* igt@xe_exec_system_allocator@process-many-stride-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.69] s
* igt@xe_exec_system_allocator@process-many-stride-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.90] s
* igt@xe_exec_system_allocator@process-many-stride-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.93] s
* igt@xe_exec_system_allocator@processes-evict-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 21.39] s
* igt@xe_exec_system_allocator@processes-evict-malloc-mix-bo:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-execqueues-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@threads-many-execqueues-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-many-execqueues-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-execqueues-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc:
- Statuses : 2 pass(s)
- Exec time: [0.07, 0.11] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-multi-fault:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.00] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@threads-many-execqueues-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-file:
- Statuses : 2 pass(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-file-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.59] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-huge:
- Statuses : 1 fail(s)
- Exec time: [0.02] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.33] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-new-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.36] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-new-race-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-remap-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.51] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s)
- Exec time: [0.26, 0.32] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.62] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s)
- Exec time: [0.23] s
* igt@xe_exec_system_allocator@threads-many-execqueues-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@threads-many-execqueues-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-many-execqueues-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@threads-many-execqueues-new-bo-map-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-execqueues-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-many-execqueues-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-many-execqueues-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-execqueues-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-execqueues-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-many-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@threads-many-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.53] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.03] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.09] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-free-race-nomemset:
- Statuses : 1 pass(s)
- Exec time: [1.17] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-mlock:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.53] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-free-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.21] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.63] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.00] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.37] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-new-race:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.41] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.32] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-remap-dontunmap:
- Statuses : 2 pass(s)
- Exec time: [1.27, 7.59] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.33] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.01] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 8.49] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 8.95] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.69] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.37] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-shared-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.10] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.41] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.67] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s)
- Exec time: [1.07] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.29] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.83] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.66] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.10] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.64] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.72] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.42] s
* igt@xe_exec_system_allocator@threads-many-large-execqueues-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.02] s
* igt@xe_exec_system_allocator@threads-many-large-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.58] s
* igt@xe_exec_system_allocator@threads-many-large-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.81] s
* igt@xe_exec_system_allocator@threads-many-large-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.57] s
* igt@xe_exec_system_allocator@threads-many-large-free-race-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.90, 4.81] s
* igt@xe_exec_system_allocator@threads-many-large-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-bo-unmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-busy:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-multi-fault:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@threads-many-large-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-many-large-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-file:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.53] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.36] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.60] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.94] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-new-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.56] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-new-race:
- Statuses : 1 pass(s)
- Exec time: [1.08] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.42] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-remap:
- Statuses : 2 pass(s)
- Exec time: [1.17, 6.57] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 7.81] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.37] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.25] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-remap-ro:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.29] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 9.06] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.34] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.49] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.95] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.10] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.20] s
* igt@xe_exec_system_allocator@threads-many-large-mmap-shared-remap-eocheck:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-large-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.25] s
* igt@xe_exec_system_allocator@threads-many-large-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.79] s
* igt@xe_exec_system_allocator@threads-many-large-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.25] s
* igt@xe_exec_system_allocator@threads-many-large-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.96] s
* igt@xe_exec_system_allocator@threads-many-large-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.77] s
* igt@xe_exec_system_allocator@threads-many-large-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.38] s
* igt@xe_exec_system_allocator@threads-many-large-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.41] s
* igt@xe_exec_system_allocator@threads-many-large-new-race-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-malloc-bo-unmap:
- Statuses : 2 pass(s)
- Exec time: [0.08, 0.16] s
* igt@xe_exec_system_allocator@threads-many-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@threads-many-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-malloc-fork-read-after:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-malloc-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@threads-many-malloc-mlock-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.00] s
* igt@xe_exec_system_allocator@threads-many-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-many-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-many-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-mmap-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-many-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-many-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-mmap-free-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.15] s
* igt@xe_exec_system_allocator@threads-many-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@threads-many-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@threads-many-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@threads-many-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-many-mmap-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@threads-many-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@threads-many-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-mmap-new-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@threads-many-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@threads-many-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.34] s
* igt@xe_exec_system_allocator@threads-many-mmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@threads-many-mmap-race:
- Statuses : 2 pass(s)
- Exec time: [0.08, 0.15] s
* igt@xe_exec_system_allocator@threads-many-mmap-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@threads-many-mmap-remap:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.55] s
* igt@xe_exec_system_allocator@threads-many-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-many-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@threads-many-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-many-mmap-remap-ro-dontunmap:
- Statuses : 1 pass(s)
- Exec time: [0.22] s
* igt@xe_exec_system_allocator@threads-many-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-many-mmap-remap-ro-eocheck:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@threads-many-mmap-shared-remap-dontunmap:
- Statuses : 1 pass(s)
- Exec time: [0.23] s
* igt@xe_exec_system_allocator@threads-many-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@threads-many-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-many-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@threads-many-new-bo-map:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@threads-many-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@threads-many-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-many-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@threads-many-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@threads-many-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-stride-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.17] s
* igt@xe_exec_system_allocator@threads-many-stride-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.59] s
* igt@xe_exec_system_allocator@threads-many-stride-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.09] s
* igt@xe_exec_system_allocator@threads-many-stride-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.98] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-bo-unmap:
- Statuses : 2 pass(s)
- Exec time: [0.12, 0.17] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-multi-fault:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-many-stride-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-file-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-file-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.18] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.74] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.23] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.74] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.36] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-new-huge:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.53] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.32] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.98] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.58] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.28] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s)
- Exec time: [0.82, 3.81] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.90] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.57] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.71] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-remap-ro-dontunmap-eocheck:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-remap-ro-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.89] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.16] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.00] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.86] s
* igt@xe_exec_system_allocator@threads-many-stride-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.81] s
* igt@xe_exec_system_allocator@threads-many-stride-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.85] s
* igt@xe_exec_system_allocator@threads-many-stride-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.15] s
* igt@xe_exec_system_allocator@threads-many-stride-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.95] s
* igt@xe_exec_system_allocator@threads-many-stride-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.18] s
* igt@xe_exec_system_allocator@threads-many-stride-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.79] s
* igt@xe_exec_system_allocator@threads-many-stride-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.86] s
* igt@xe_exec_system_allocator@threads-many-stride-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.53] s
* igt@xe_exec_system_allocator@threads-many-stride-new-race-nomemset:
- Statuses : 1 pass(s)
- Exec time: [1.22] s
* igt@xe_exec_system_allocator@threads-shared-alloc-many-stride-malloc:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-shared-alloc-many-stride-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-shared-alloc-many-stride-malloc-sync:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-free-race:
- Statuses : 2 pass(s)
- Exec time: [0.16, 0.31] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-fork-read-after:
- Statuses : 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-multi-fault:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.00] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-free:
- Statuses : 1 pass(s)
- Exec time: [0.36] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-free-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-free-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.46] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.66] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.81] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-mlock:
- Statuses : 2 pass(s)
- Exec time: [0.18, 0.34] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.51] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.97] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.53] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.47] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.28] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.51] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.57] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.68] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.53] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.71] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.49] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 1 pass(s)
- Exec time: [0.49] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.59] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.34] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.50] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s)
- Exec time: [0.51, 0.52] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-shared-remap-dontunmap-eocheck:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.68] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-new-bo-map:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.53] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-new-bo-map-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.47] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-new-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-execqueues-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.38] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-free:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-free-race-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.95] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.46] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.94] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 8.66] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-race:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.64] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-free-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-free-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-free-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.45] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.72] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.77] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-mlock-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.18, 0.26] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.86] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-new-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.36] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.84] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.22] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.30] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 9.52] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-remap-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.04] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.83] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.69] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-remap-ro:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 8.64] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 9.51] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 6.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-remap-ro-eocheck:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.46] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.70] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.74] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.83] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.31] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-new-bo-map:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-new-bo-map-nomemset:
- Statuses : 1 fail(s) 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.05] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-new-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.36] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.07] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.88] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.67] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-execqueues-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 7.19] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.70] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.27] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 7.92] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 8.12] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-fork-read:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-fork-read-after:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-malloc-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.56] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-free-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.50] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.68] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.76] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-new:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-new-huge-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.93] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.84] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.13] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.10] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-remap:
- Statuses : 2 pass(s)
- Exec time: [1.70, 8.58] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 9.40] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.70] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.90] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-remap-ro:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.53] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-remap-ro-dontunmap:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 5.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.71] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-shared:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.86] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s)
- Exec time: [1.65, 2.73] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.55] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.35] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-new-bo-map:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.29] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-new-bo-map-nomemset:
- Statuses : 1 fail(s) 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.42] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.37] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.43] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 3.06] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 7.00] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 4.98] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-fork-read-after:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-multi-fault:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.00] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-file-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-file-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.34] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-free-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-free-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.41] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.60] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.58] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-new:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.39] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.43] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.41] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.58] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-remap-dontunmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.49] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.42] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.58] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-remap-ro:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.37] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.47] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-remap-ro-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.31] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.17] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.43] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s)
- Exec time: [0.40, 0.45] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.47] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s)
- Exec time: [0.47] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.33] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-new-bo-map-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.04] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.33] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 6.55] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-bo-unmap-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.48] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-fork-read:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-fork-read-after:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-mlock:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.33] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-multi-fault:
- Statuses : 2 pass(s)
- Exec time: [0.00] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.32] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-malloc-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-free-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-free-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.30] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.14] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 2.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-huge:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.24] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-new:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-new-huge:
- Statuses : 1 fail(s)
- Exec time: [0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.11] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.80] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.44] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.34] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.40] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-remap-dontunmap:
- Statuses : 2 pass(s)
- Exec time: [1.02, 2.88] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.29] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.82] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.38] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.50] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 3.29] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.78] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-shared-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-shared-remap:
- Statuses : 2 pass(s)
- Exec time: [1.07, 1.42] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.00] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.34] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.84] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.79] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-new-bo-map:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.68] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 1.89] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.76] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.87] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 2.86] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 6.35] s
* igt@xe_exec_system_allocator@threads-shared-vm-many-stride-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 1.71] s
* igt@xe_exec_system_allocator@threads-shared-vm-shared-alloc-many-stride-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@threads-shared-vm-shared-alloc-many-stride-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.20] s
* igt@xe_exec_system_allocator@twice-free:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@twice-free-nomemset:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@twice-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-free-race-nomemset:
- Statuses : 1 pass(s)
- Exec time: [0.02] s
* igt@xe_exec_system_allocator@twice-large-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@twice-large-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-large-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.26] s
* igt@xe_exec_system_allocator@twice-large-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@twice-large-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@twice-large-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@twice-large-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@twice-large-malloc-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@twice-large-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@twice-large-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.29] s
* igt@xe_exec_system_allocator@twice-large-malloc-fork-read-after:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@twice-large-malloc-mlock:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-large-malloc-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.19] s
* igt@xe_exec_system_allocator@twice-large-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-large-malloc-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-large-mmap:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@twice-large-mmap-file:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-large-mmap-file-mlock:
- Statuses : 2 pass(s)
- Exec time: [0.04, 0.06] s
* igt@xe_exec_system_allocator@twice-large-mmap-file-mlock-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-large-mmap-free:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-large-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-large-mmap-free-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-large-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-mmap-free-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.14] s
* igt@xe_exec_system_allocator@twice-large-mmap-free-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-large-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-large-mmap-huge-nomemset:
- Statuses : 1 fail(s)
- Exec time: [0.01] s
* igt@xe_exec_system_allocator@twice-large-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@twice-large-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@twice-large-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@twice-large-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@twice-large-mmap-new-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-large-mmap-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.13] s
* igt@xe_exec_system_allocator@twice-large-mmap-new-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-mmap-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-mmap-nomemset:
- Statuses : 2 pass(s)
- Exec time: [0.05] s
* igt@xe_exec_system_allocator@twice-large-mmap-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.12] s
* igt@xe_exec_system_allocator@twice-large-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-large-mmap-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.18] s
* igt@xe_exec_system_allocator@twice-large-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.16] s
* igt@xe_exec_system_allocator@twice-large-mmap-remap-dontunmap-eocheck:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@twice-large-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-large-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.11] s
* igt@xe_exec_system_allocator@twice-large-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@twice-large-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-large-mmap-remap-ro-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-large-mmap-shared-remap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-large-mmap-shared-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@twice-large-mmap-shared-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@twice-large-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.10] s
* igt@xe_exec_system_allocator@twice-large-new-bo-map-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-large-new-busy:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.25] s
* igt@xe_exec_system_allocator@twice-large-new-busy-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-large-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.15] s
* igt@xe_exec_system_allocator@twice-large-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.21] s
* igt@xe_exec_system_allocator@twice-large-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-malloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-malloc-bo-unmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-malloc-bo-unmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-malloc-busy:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-malloc-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-malloc-fork-read:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.27] s
* igt@xe_exec_system_allocator@twice-malloc-fork-read-after:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.22] s
* igt@xe_exec_system_allocator@twice-malloc-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-malloc-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-malloc-multi-fault:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0] s
* igt@xe_exec_system_allocator@twice-malloc-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-malloc-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-malloc-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-mmap:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-mmap-file:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@twice-mmap-file-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-file-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-mmap-file-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-free:
- Statuses : 1 pass(s)
- Exec time: [0.03] s
* igt@xe_exec_system_allocator@twice-mmap-free-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-mmap-free-huge-nomemset:
- Statuses : 1 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-mmap-free-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-free-race:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-free-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-mmap-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-mmap-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-mmap-mlock:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-mmap-mlock-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.09] s
* igt@xe_exec_system_allocator@twice-mmap-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-new-huge:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-mmap-new-huge-nomemset:
- Statuses : 2 fail(s) 1 skip(s)
- Exec time: [0.0, 0.01] s
* igt@xe_exec_system_allocator@twice-mmap-new-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-new-race-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@twice-mmap-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@twice-mmap-race:
- Statuses : 2 pass(s)
- Exec time: [0.01, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-mmap-remap:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@twice-mmap-remap-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-remap-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-mmap-remap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-mmap-remap-ro:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-mmap-remap-ro-dontunmap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-mmap-remap-ro-dontunmap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-mmap-remap-ro-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.06] s
* igt@xe_exec_system_allocator@twice-mmap-shared:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-mmap-shared-nomemset:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-mmap-shared-remap:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-mmap-shared-remap-dontunmap:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@twice-mmap-shared-remap-dontunmap-eocheck:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-mmap-shared-remap-eocheck:
- Statuses : 1 pass(s) 1 skip(s)
- Exec time: [0.0, 0.07] s
* igt@xe_exec_system_allocator@twice-new:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-new-bo-map:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@twice-new-bo-map-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.08] s
* igt@xe_exec_system_allocator@twice-new-busy:
- Statuses :
- Exec time: [None] s
* igt@xe_exec_system_allocator@twice-new-busy-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.03] s
* igt@xe_exec_system_allocator@twice-new-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.05] s
* igt@xe_exec_system_allocator@twice-new-race:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.02] s
* igt@xe_exec_system_allocator@twice-new-race-nomemset:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.04] s
* igt@xe_exec_system_allocator@unaligned-alloc:
- Statuses : 2 pass(s) 1 skip(s)
- Exec time: [0.0, 0.23] s
Known issues
------------
Here are the changes found in XEIGTPW_13046_FULL that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@kms_addfb_basic@addfb25-y-tiled-small-legacy:
- shard-bmg: NOTRUN -> [SKIP][9] ([Intel XE#2233])
[9]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@kms_addfb_basic@addfb25-y-tiled-small-legacy.html
- shard-dg2-set2: NOTRUN -> [SKIP][10] ([Intel XE#623])
[10]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-436/igt@kms_addfb_basic@addfb25-y-tiled-small-legacy.html
- shard-lnl: NOTRUN -> [SKIP][11] ([Intel XE#1466])
[11]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@kms_addfb_basic@addfb25-y-tiled-small-legacy.html
* igt@kms_addfb_basic@invalid-smem-bo-on-discrete:
- shard-lnl: NOTRUN -> [SKIP][12] ([Intel XE#3157])
[12]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_addfb_basic@invalid-smem-bo-on-discrete.html
* igt@kms_async_flips@async-flip-with-page-flip-events@pipe-a-edp-1-linear:
- shard-lnl: [PASS][13] -> [FAIL][14] ([Intel XE#911]) +3 other tests fail
[13]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-lnl-1/igt@kms_async_flips@async-flip-with-page-flip-events@pipe-a-edp-1-linear.html
[14]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-8/igt@kms_async_flips@async-flip-with-page-flip-events@pipe-a-edp-1-linear.html
* igt@kms_atomic_transition@plane-all-modeset-transition-fencing:
- shard-lnl: NOTRUN -> [SKIP][15] ([Intel XE#3279])
[15]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@kms_atomic_transition@plane-all-modeset-transition-fencing.html
* igt@kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip:
- shard-lnl: NOTRUN -> [SKIP][16] ([Intel XE#3658])
[16]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip.html
* igt@kms_big_fb@linear-8bpp-rotate-90:
- shard-bmg: NOTRUN -> [SKIP][17] ([Intel XE#2327])
[17]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@kms_big_fb@linear-8bpp-rotate-90.html
* igt@kms_big_fb@x-tiled-64bpp-rotate-90:
- shard-dg2-set2: NOTRUN -> [SKIP][18] ([Intel XE#316]) +1 other test skip
[18]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-433/igt@kms_big_fb@x-tiled-64bpp-rotate-90.html
- shard-lnl: NOTRUN -> [SKIP][19] ([Intel XE#1407]) +3 other tests skip
[19]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_big_fb@x-tiled-64bpp-rotate-90.html
* igt@kms_big_fb@y-tiled-8bpp-rotate-90:
- shard-bmg: NOTRUN -> [SKIP][20] ([Intel XE#1124])
[20]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@kms_big_fb@y-tiled-8bpp-rotate-90.html
* igt@kms_big_fb@yf-tiled-64bpp-rotate-90:
- shard-dg2-set2: NOTRUN -> [SKIP][21] ([Intel XE#1124]) +3 other tests skip
[21]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-466/igt@kms_big_fb@yf-tiled-64bpp-rotate-90.html
* igt@kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-hflip:
- shard-lnl: NOTRUN -> [SKIP][22] ([Intel XE#1124]) +4 other tests skip
[22]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-8/igt@kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-hflip.html
* igt@kms_bw@connected-linear-tiling-3-displays-2160x1440p:
- shard-bmg: NOTRUN -> [SKIP][23] ([Intel XE#2314] / [Intel XE#2894]) +1 other test skip
[23]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_bw@connected-linear-tiling-3-displays-2160x1440p.html
- shard-dg2-set2: NOTRUN -> [SKIP][24] ([Intel XE#2191])
[24]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@kms_bw@connected-linear-tiling-3-displays-2160x1440p.html
* igt@kms_bw@connected-linear-tiling-4-displays-3840x2160p:
- shard-lnl: NOTRUN -> [SKIP][25] ([Intel XE#1512])
[25]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_bw@connected-linear-tiling-4-displays-3840x2160p.html
* igt@kms_bw@linear-tiling-2-displays-2560x1440p:
- shard-bmg: NOTRUN -> [SKIP][26] ([Intel XE#367]) +1 other test skip
[26]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@kms_bw@linear-tiling-2-displays-2560x1440p.html
* igt@kms_bw@linear-tiling-3-displays-2160x1440p:
- shard-dg2-set2: NOTRUN -> [SKIP][27] ([Intel XE#367]) +1 other test skip
[27]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-466/igt@kms_bw@linear-tiling-3-displays-2160x1440p.html
* igt@kms_bw@linear-tiling-3-displays-2560x1440p:
- shard-lnl: NOTRUN -> [SKIP][28] ([Intel XE#367]) +1 other test skip
[28]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_bw@linear-tiling-3-displays-2560x1440p.html
* igt@kms_ccs@bad-rotation-90-4-tiled-dg2-rc-ccs:
- shard-lnl: NOTRUN -> [SKIP][29] ([Intel XE#2887]) +8 other tests skip
[29]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_ccs@bad-rotation-90-4-tiled-dg2-rc-ccs.html
* igt@kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc@pipe-b-dp-2:
- shard-dg2-set2: NOTRUN -> [SKIP][30] ([Intel XE#787]) +195 other tests skip
[30]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc@pipe-b-dp-2.html
* igt@kms_ccs@crc-primary-suspend-4-tiled-mtl-mc-ccs:
- shard-lnl: NOTRUN -> [SKIP][31] ([Intel XE#3432])
[31]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@kms_ccs@crc-primary-suspend-4-tiled-mtl-mc-ccs.html
* igt@kms_ccs@crc-sprite-planes-basic-4-tiled-mtl-rc-ccs@pipe-d-dp-2:
- shard-dg2-set2: NOTRUN -> [SKIP][32] ([Intel XE#455] / [Intel XE#787]) +34 other tests skip
[32]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@kms_ccs@crc-sprite-planes-basic-4-tiled-mtl-rc-ccs@pipe-d-dp-2.html
* igt@kms_ccs@crc-sprite-planes-basic-yf-tiled-ccs:
- shard-bmg: NOTRUN -> [SKIP][33] ([Intel XE#2887]) +5 other tests skip
[33]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_ccs@crc-sprite-planes-basic-yf-tiled-ccs.html
* igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc@pipe-d-dp-4:
- shard-dg2-set2: NOTRUN -> [INCOMPLETE][34] ([Intel XE#1727] / [Intel XE#2705] / [Intel XE#3113] / [Intel XE#4212] / [Intel XE#4522])
[34]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-435/igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc@pipe-d-dp-4.html
* igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs:
- shard-dg2-set2: NOTRUN -> [SKIP][35] ([Intel XE#2907])
[35]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
* igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs@pipe-d-hdmi-a-3:
- shard-bmg: NOTRUN -> [SKIP][36] ([Intel XE#2652] / [Intel XE#787]) +8 other tests skip
[36]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs@pipe-d-hdmi-a-3.html
* igt@kms_chamelium_color@degamma:
- shard-dg2-set2: NOTRUN -> [SKIP][37] ([Intel XE#306]) +1 other test skip
[37]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-433/igt@kms_chamelium_color@degamma.html
* igt@kms_chamelium_color@gamma:
- shard-lnl: NOTRUN -> [SKIP][38] ([Intel XE#306]) +1 other test skip
[38]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-4/igt@kms_chamelium_color@gamma.html
* igt@kms_chamelium_edid@hdmi-edid-change-during-hibernate:
- shard-bmg: NOTRUN -> [SKIP][39] ([Intel XE#2252]) +3 other tests skip
[39]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_chamelium_edid@hdmi-edid-change-during-hibernate.html
* igt@kms_chamelium_frames@hdmi-crc-nonplanar-formats:
- shard-dg2-set2: NOTRUN -> [SKIP][40] ([Intel XE#373]) +3 other tests skip
[40]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@kms_chamelium_frames@hdmi-crc-nonplanar-formats.html
- shard-lnl: NOTRUN -> [SKIP][41] ([Intel XE#373]) +3 other tests skip
[41]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@kms_chamelium_frames@hdmi-crc-nonplanar-formats.html
* igt@kms_content_protection@atomic:
- shard-bmg: NOTRUN -> [FAIL][42] ([Intel XE#1178]) +1 other test fail
[42]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-1/igt@kms_content_protection@atomic.html
- shard-dg2-set2: NOTRUN -> [FAIL][43] ([Intel XE#1178]) +1 other test fail
[43]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@kms_content_protection@atomic.html
* igt@kms_content_protection@atomic-dpms:
- shard-bmg: NOTRUN -> [SKIP][44] ([Intel XE#2341])
[44]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_content_protection@atomic-dpms.html
* igt@kms_content_protection@dp-mst-type-0:
- shard-dg2-set2: NOTRUN -> [SKIP][45] ([Intel XE#307])
[45]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-435/igt@kms_content_protection@dp-mst-type-0.html
* igt@kms_content_protection@lic-type-0:
- shard-lnl: NOTRUN -> [SKIP][46] ([Intel XE#3278])
[46]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-2/igt@kms_content_protection@lic-type-0.html
* igt@kms_cursor_crc@cursor-offscreen-512x170:
- shard-lnl: NOTRUN -> [SKIP][47] ([Intel XE#2321])
[47]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@kms_cursor_crc@cursor-offscreen-512x170.html
* igt@kms_cursor_crc@cursor-onscreen-256x85:
- shard-lnl: NOTRUN -> [SKIP][48] ([Intel XE#1424]) +1 other test skip
[48]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@kms_cursor_crc@cursor-onscreen-256x85.html
* igt@kms_cursor_crc@cursor-rapid-movement-32x10:
- shard-bmg: NOTRUN -> [SKIP][49] ([Intel XE#2320])
[49]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_cursor_crc@cursor-rapid-movement-32x10.html
* igt@kms_cursor_crc@cursor-rapid-movement-512x170:
- shard-dg2-set2: NOTRUN -> [SKIP][50] ([Intel XE#308])
[50]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-466/igt@kms_cursor_crc@cursor-rapid-movement-512x170.html
- shard-bmg: NOTRUN -> [SKIP][51] ([Intel XE#2321])
[51]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_cursor_crc@cursor-rapid-movement-512x170.html
* igt@kms_cursor_legacy@basic-busy-flip-before-cursor-varying-size:
- shard-lnl: NOTRUN -> [SKIP][52] ([Intel XE#323])
[52]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-varying-size.html
* igt@kms_cursor_legacy@cursora-vs-flipb-atomic-transitions-varying-size:
- shard-bmg: [PASS][53] -> [SKIP][54] ([Intel XE#2291]) +4 other tests skip
[53]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-1/igt@kms_cursor_legacy@cursora-vs-flipb-atomic-transitions-varying-size.html
[54]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_cursor_legacy@cursora-vs-flipb-atomic-transitions-varying-size.html
* igt@kms_cursor_legacy@cursorb-vs-flipa-atomic-transitions-varying-size:
- shard-lnl: NOTRUN -> [SKIP][55] ([Intel XE#309]) +3 other tests skip
[55]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@kms_cursor_legacy@cursorb-vs-flipa-atomic-transitions-varying-size.html
* igt@kms_cursor_legacy@flip-vs-cursor-atomic:
- shard-bmg: [PASS][56] -> [FAIL][57] ([Intel XE#1475])
[56]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-5/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html
[57]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-1/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html
* igt@kms_dp_linktrain_fallback@dp-fallback:
- shard-bmg: [PASS][58] -> [SKIP][59] ([Intel XE#4294])
[58]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-3/igt@kms_dp_linktrain_fallback@dp-fallback.html
[59]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_dp_linktrain_fallback@dp-fallback.html
* igt@kms_dsc@dsc-fractional-bpp:
- shard-bmg: NOTRUN -> [SKIP][60] ([Intel XE#2244])
[60]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@kms_dsc@dsc-fractional-bpp.html
* igt@kms_dsc@dsc-with-output-formats-with-bpc:
- shard-dg2-set2: NOTRUN -> [SKIP][61] ([Intel XE#455]) +5 other tests skip
[61]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@kms_dsc@dsc-with-output-formats-with-bpc.html
- shard-lnl: NOTRUN -> [SKIP][62] ([Intel XE#2244]) +2 other tests skip
[62]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_dsc@dsc-with-output-formats-with-bpc.html
* igt@kms_feature_discovery@chamelium:
- shard-dg2-set2: NOTRUN -> [SKIP][63] ([Intel XE#701])
[63]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-434/igt@kms_feature_discovery@chamelium.html
* igt@kms_flip@2x-blocking-absolute-wf_vblank-interruptible:
- shard-lnl: NOTRUN -> [SKIP][64] ([Intel XE#1421]) +3 other tests skip
[64]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-4/igt@kms_flip@2x-blocking-absolute-wf_vblank-interruptible.html
* igt@kms_flip@2x-flip-vs-dpms-on-nop-interruptible:
- shard-bmg: [PASS][65] -> [SKIP][66] ([Intel XE#2316]) +3 other tests skip
[65]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-2/igt@kms_flip@2x-flip-vs-dpms-on-nop-interruptible.html
[66]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_flip@2x-flip-vs-dpms-on-nop-interruptible.html
* igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@ab-hdmi-a6-dp4:
- shard-dg2-set2: [PASS][67] -> [FAIL][68] ([Intel XE#301] / [Intel XE#3321]) +1 other test fail
[67]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-435/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@ab-hdmi-a6-dp4.html
[68]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-434/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@ab-hdmi-a6-dp4.html
* igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@bd-dp2-hdmi-a3:
- shard-bmg: [PASS][69] -> [FAIL][70] ([Intel XE#3321]) +2 other tests fail
[69]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-1/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@bd-dp2-hdmi-a3.html
[70]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-7/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@bd-dp2-hdmi-a3.html
* igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@cd-hdmi-a6-dp4:
- shard-dg2-set2: [PASS][71] -> [FAIL][72] ([Intel XE#301]) +7 other tests fail
[71]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-435/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@cd-hdmi-a6-dp4.html
[72]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-434/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible@cd-hdmi-a6-dp4.html
* igt@kms_flip@2x-flip-vs-expired-vblank@ad-hdmi-a6-dp4:
- shard-dg2-set2: NOTRUN -> [FAIL][73] ([Intel XE#301])
[73]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-436/igt@kms_flip@2x-flip-vs-expired-vblank@ad-hdmi-a6-dp4.html
* igt@kms_flip@2x-plain-flip-ts-check-interruptible:
- shard-bmg: NOTRUN -> [SKIP][74] ([Intel XE#2316])
[74]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_flip@2x-plain-flip-ts-check-interruptible.html
* igt@kms_flip@flip-vs-expired-vblank@a-edp1:
- shard-lnl: NOTRUN -> [FAIL][75] ([Intel XE#301]) +1 other test fail
[75]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-5/igt@kms_flip@flip-vs-expired-vblank@a-edp1.html
* igt@kms_flip@flip-vs-expired-vblank@c-edp1:
- shard-lnl: NOTRUN -> [FAIL][76] ([Intel XE#301] / [Intel XE#3149]) +1 other test fail
[76]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-5/igt@kms_flip@flip-vs-expired-vblank@c-edp1.html
* igt@kms_flip@plain-flip-fb-recreate-interruptible@a-edp1:
- shard-lnl: [PASS][77] -> [FAIL][78] ([Intel XE#886]) +4 other tests fail
[77]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-lnl-2/igt@kms_flip@plain-flip-fb-recreate-interruptible@a-edp1.html
[78]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-5/igt@kms_flip@plain-flip-fb-recreate-interruptible@a-edp1.html
* igt@kms_flip@plain-flip-fb-recreate-interruptible@b-hdmi-a3:
- shard-bmg: [PASS][79] -> [FAIL][80] ([Intel XE#2882]) +2 other tests fail
[79]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-3/igt@kms_flip@plain-flip-fb-recreate-interruptible@b-hdmi-a3.html
[80]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_flip@plain-flip-fb-recreate-interruptible@b-hdmi-a3.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling:
- shard-bmg: NOTRUN -> [SKIP][81] ([Intel XE#2293] / [Intel XE#2380])
[81]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-1/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling@pipe-a-valid-mode:
- shard-bmg: NOTRUN -> [SKIP][82] ([Intel XE#2293])
[82]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-1/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling@pipe-a-valid-mode.html
* igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling:
- shard-lnl: NOTRUN -> [SKIP][83] ([Intel XE#1401] / [Intel XE#1745]) +3 other tests skip
[83]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-2/igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling@pipe-a-default-mode:
- shard-lnl: NOTRUN -> [SKIP][84] ([Intel XE#1401]) +3 other tests skip
[84]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-2/igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling@pipe-a-default-mode.html
* igt@kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-mmap-wc:
- shard-bmg: NOTRUN -> [SKIP][85] ([Intel XE#2311]) +6 other tests skip
[85]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-blt:
- shard-bmg: NOTRUN -> [SKIP][86] ([Intel XE#2312]) +2 other tests skip
[86]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-blt.html
* igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-wc:
- shard-bmg: NOTRUN -> [SKIP][87] ([Intel XE#4141]) +2 other tests skip
[87]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-render:
- shard-dg2-set2: NOTRUN -> [SKIP][88] ([Intel XE#651]) +13 other tests skip
[88]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-render.html
- shard-lnl: NOTRUN -> [SKIP][89] ([Intel XE#651]) +10 other tests skip
[89]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-4/igt@kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-render:
- shard-dg2-set2: NOTRUN -> [SKIP][90] ([Intel XE#653]) +13 other tests skip
[90]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-blt:
- shard-bmg: NOTRUN -> [SKIP][91] ([Intel XE#2313]) +5 other tests skip
[91]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-msflip-blt:
- shard-lnl: NOTRUN -> [SKIP][92] ([Intel XE#656]) +23 other tests skip
[92]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-msflip-blt.html
* igt@kms_hdmi_inject@inject-4k:
- shard-lnl: NOTRUN -> [SKIP][93] ([Intel XE#1470])
[93]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-5/igt@kms_hdmi_inject@inject-4k.html
* igt@kms_hdr@invalid-metadata-sizes:
- shard-lnl: NOTRUN -> [SKIP][94] ([Intel XE#1503])
[94]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_hdr@invalid-metadata-sizes.html
* igt@kms_plane_cursor@primary@pipe-a-hdmi-a-2-size-256:
- shard-dg2-set2: NOTRUN -> [FAIL][95] ([Intel XE#616]) +3 other tests fail
[95]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@kms_plane_cursor@primary@pipe-a-hdmi-a-2-size-256.html
* igt@kms_plane_cursor@viewport:
- shard-dg2-set2: [PASS][96] -> [FAIL][97] ([Intel XE#616])
[96]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-432/igt@kms_plane_cursor@viewport.html
[97]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@kms_plane_cursor@viewport.html
* igt@kms_plane_lowres@tiling-x@pipe-b-edp-1:
- shard-lnl: NOTRUN -> [SKIP][98] ([Intel XE#599]) +3 other tests skip
[98]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@kms_plane_lowres@tiling-x@pipe-b-edp-1.html
* igt@kms_plane_multiple@2x-tiling-none:
- shard-bmg: [PASS][99] -> [SKIP][100] ([Intel XE#4596])
[99]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-8/igt@kms_plane_multiple@2x-tiling-none.html
[100]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_plane_multiple@2x-tiling-none.html
* igt@kms_plane_multiple@tiling-4:
- shard-dg2-set2: NOTRUN -> [FAIL][101] ([Intel XE#4427])
[101]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-434/igt@kms_plane_multiple@tiling-4.html
* igt@kms_plane_multiple@tiling-yf:
- shard-lnl: NOTRUN -> [SKIP][102] ([Intel XE#2493])
[102]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@kms_plane_multiple@tiling-yf.html
* igt@kms_plane_scaling@planes-downscale-factor-0-5:
- shard-lnl: NOTRUN -> [SKIP][103] ([Intel XE#2763]) +3 other tests skip
[103]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@kms_plane_scaling@planes-downscale-factor-0-5.html
* igt@kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25:
- shard-dg2-set2: NOTRUN -> [SKIP][104] ([Intel XE#2763] / [Intel XE#455]) +3 other tests skip
[104]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25.html
* igt@kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25@pipe-b:
- shard-dg2-set2: NOTRUN -> [SKIP][105] ([Intel XE#2763]) +2 other tests skip
[105]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25@pipe-b.html
* igt@kms_pm_rpm@dpms-non-lpsp:
- shard-lnl: NOTRUN -> [SKIP][106] ([Intel XE#1439] / [Intel XE#3141])
[106]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_pm_rpm@dpms-non-lpsp.html
* igt@kms_psr2_sf@fbc-pr-overlay-plane-move-continuous-exceed-sf:
- shard-lnl: NOTRUN -> [SKIP][107] ([Intel XE#2893]) +1 other test skip
[107]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@kms_psr2_sf@fbc-pr-overlay-plane-move-continuous-exceed-sf.html
- shard-bmg: NOTRUN -> [SKIP][108] ([Intel XE#1489])
[108]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-7/igt@kms_psr2_sf@fbc-pr-overlay-plane-move-continuous-exceed-sf.html
* igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf:
- shard-lnl: NOTRUN -> [SKIP][109] ([Intel XE#2893] / [Intel XE#4608]) +1 other test skip
[109]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-4/igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf.html
* igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf@pipe-a-edp-1:
- shard-lnl: NOTRUN -> [SKIP][110] ([Intel XE#4608]) +6 other tests skip
[110]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-4/igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf@pipe-a-edp-1.html
* igt@kms_psr2_sf@psr2-overlay-primary-update-sf-dmg-area:
- shard-dg2-set2: NOTRUN -> [SKIP][111] ([Intel XE#1489]) +3 other tests skip
[111]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@kms_psr2_sf@psr2-overlay-primary-update-sf-dmg-area.html
* igt@kms_psr2_su@page_flip-xrgb8888:
- shard-lnl: NOTRUN -> [SKIP][112] ([Intel XE#1128])
[112]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@kms_psr2_su@page_flip-xrgb8888.html
* igt@kms_psr@fbc-psr-cursor-blt:
- shard-bmg: NOTRUN -> [SKIP][113] ([Intel XE#2234] / [Intel XE#2850]) +5 other tests skip
[113]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@kms_psr@fbc-psr-cursor-blt.html
* igt@kms_psr@fbc-psr2-no-drrs:
- shard-dg2-set2: NOTRUN -> [SKIP][114] ([Intel XE#2850] / [Intel XE#929]) +7 other tests skip
[114]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-466/igt@kms_psr@fbc-psr2-no-drrs.html
* igt@kms_psr@fbc-psr2-no-drrs@edp-1:
- shard-lnl: NOTRUN -> [SKIP][115] ([Intel XE#4609]) +1 other test skip
[115]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@kms_psr@fbc-psr2-no-drrs@edp-1.html
* igt@kms_psr@fbc-psr2-primary-render:
- shard-lnl: NOTRUN -> [SKIP][116] ([Intel XE#1406]) +5 other tests skip
[116]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-8/igt@kms_psr@fbc-psr2-primary-render.html
* igt@kms_psr@psr2-primary-render:
- shard-bmg: NOTRUN -> [SKIP][117] ([Intel XE#2234])
[117]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_psr@psr2-primary-render.html
* igt@kms_rotation_crc@bad-pixel-format:
- shard-dg2-set2: NOTRUN -> [SKIP][118] ([Intel XE#3414])
[118]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-433/igt@kms_rotation_crc@bad-pixel-format.html
* igt@kms_rotation_crc@primary-4-tiled-reflect-x-180:
- shard-lnl: NOTRUN -> [SKIP][119] ([Intel XE#3414] / [Intel XE#3904])
[119]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-8/igt@kms_rotation_crc@primary-4-tiled-reflect-x-180.html
* igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0:
- shard-lnl: NOTRUN -> [SKIP][120] ([Intel XE#1127])
[120]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html
- shard-bmg: NOTRUN -> [SKIP][121] ([Intel XE#2330])
[121]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html
- shard-dg2-set2: NOTRUN -> [SKIP][122] ([Intel XE#1127])
[122]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-466/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html
* igt@kms_setmode@invalid-clone-single-crtc:
- shard-bmg: [PASS][123] -> [SKIP][124] ([Intel XE#1435])
[123]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-1/igt@kms_setmode@invalid-clone-single-crtc.html
[124]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_setmode@invalid-clone-single-crtc.html
* igt@kms_tiled_display@basic-test-pattern:
- shard-dg2-set2: NOTRUN -> [FAIL][125] ([Intel XE#1729])
[125]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@kms_tiled_display@basic-test-pattern.html
- shard-lnl: NOTRUN -> [SKIP][126] ([Intel XE#362])
[126]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-3/igt@kms_tiled_display@basic-test-pattern.html
* igt@kms_writeback@writeback-fb-id:
- shard-dg2-set2: NOTRUN -> [SKIP][127] ([Intel XE#756])
[127]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-435/igt@kms_writeback@writeback-fb-id.html
- shard-lnl: NOTRUN -> [SKIP][128] ([Intel XE#756])
[128]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@kms_writeback@writeback-fb-id.html
* igt@xe_copy_basic@mem-copy-linear-0x3fff:
- shard-dg2-set2: NOTRUN -> [SKIP][129] ([Intel XE#1123])
[129]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@xe_copy_basic@mem-copy-linear-0x3fff.html
* igt@xe_eudebug@basic-close:
- shard-dg2-set2: NOTRUN -> [SKIP][130] ([Intel XE#4837]) +9 other tests skip
[130]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-436/igt@xe_eudebug@basic-close.html
* igt@xe_eudebug@basic-vm-bind-extended-discovery:
- shard-lnl: NOTRUN -> [SKIP][131] ([Intel XE#4837]) +7 other tests skip
[131]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@xe_eudebug@basic-vm-bind-extended-discovery.html
* igt@xe_eudebug@discovery-empty:
- shard-bmg: NOTRUN -> [SKIP][132] ([Intel XE#4837]) +6 other tests skip
[132]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@xe_eudebug@discovery-empty.html
* igt@xe_eudebug_sriov@deny-sriov:
- shard-dg2-set2: NOTRUN -> [SKIP][133] ([Intel XE#4518])
[133]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@xe_eudebug_sriov@deny-sriov.html
* igt@xe_evict_ccs@evict-overcommit-standalone-nofree-reopen:
- shard-lnl: NOTRUN -> [SKIP][134] ([Intel XE#688]) +2 other tests skip
[134]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-2/igt@xe_evict_ccs@evict-overcommit-standalone-nofree-reopen.html
* igt@xe_exec_basic@multigpu-many-execqueues-many-vm-null-defer-bind:
- shard-bmg: NOTRUN -> [SKIP][135] ([Intel XE#2322]) +4 other tests skip
[135]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@xe_exec_basic@multigpu-many-execqueues-many-vm-null-defer-bind.html
* igt@xe_exec_basic@multigpu-many-execqueues-many-vm-rebind:
- shard-dg2-set2: NOTRUN -> [SKIP][136] ([Intel XE#1392]) +1 other test skip
[136]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@xe_exec_basic@multigpu-many-execqueues-many-vm-rebind.html
* igt@xe_exec_basic@multigpu-no-exec-null-defer-bind:
- shard-dg2-set2: [PASS][137] -> [SKIP][138] ([Intel XE#1392]) +2 other tests skip
[137]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-435/igt@xe_exec_basic@multigpu-no-exec-null-defer-bind.html
[138]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-432/igt@xe_exec_basic@multigpu-no-exec-null-defer-bind.html
* igt@xe_exec_basic@multigpu-once-bindexecqueue-userptr-rebind:
- shard-lnl: NOTRUN -> [SKIP][139] ([Intel XE#1392]) +4 other tests skip
[139]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@xe_exec_basic@multigpu-once-bindexecqueue-userptr-rebind.html
* igt@xe_exec_fault_mode@once-rebind-imm:
- shard-dg2-set2: NOTRUN -> [SKIP][140] ([Intel XE#288]) +16 other tests skip
[140]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-436/igt@xe_exec_fault_mode@once-rebind-imm.html
* igt@xe_exec_mix_modes@exec-spinner-interrupted-dma-fence:
- shard-dg2-set2: NOTRUN -> [SKIP][141] ([Intel XE#2360])
[141]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-466/igt@xe_exec_mix_modes@exec-spinner-interrupted-dma-fence.html
* igt@xe_gt_freq@freq_suspend:
- shard-lnl: NOTRUN -> [SKIP][142] ([Intel XE#584]) +1 other test skip
[142]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-2/igt@xe_gt_freq@freq_suspend.html
* igt@xe_media_fill@media-fill:
- shard-lnl: NOTRUN -> [SKIP][143] ([Intel XE#560])
[143]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@xe_media_fill@media-fill.html
* igt@xe_mmap@pci-membarrier-bad-object:
- shard-lnl: NOTRUN -> [SKIP][144] ([Intel XE#4045])
[144]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@xe_mmap@pci-membarrier-bad-object.html
* igt@xe_module_load@force-load:
- shard-lnl: NOTRUN -> [SKIP][145] ([Intel XE#378])
[145]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-8/igt@xe_module_load@force-load.html
* igt@xe_pat@pat-index-xelp:
- shard-lnl: NOTRUN -> [SKIP][146] ([Intel XE#977])
[146]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-8/igt@xe_pat@pat-index-xelp.html
* igt@xe_pm@d3cold-mocs:
- shard-dg2-set2: NOTRUN -> [SKIP][147] ([Intel XE#2284])
[147]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-464/igt@xe_pm@d3cold-mocs.html
* igt@xe_pm@s2idle-d3cold-basic-exec:
- shard-lnl: NOTRUN -> [SKIP][148] ([Intel XE#2284] / [Intel XE#366])
[148]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-6/igt@xe_pm@s2idle-d3cold-basic-exec.html
* igt@xe_pm@s4-vm-bind-prefetch:
- shard-lnl: [PASS][149] -> [ABORT][150] ([Intel XE#1794])
[149]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-lnl-3/igt@xe_pm@s4-vm-bind-prefetch.html
[150]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-2/igt@xe_pm@s4-vm-bind-prefetch.html
* igt@xe_query@multigpu-query-invalid-cs-cycles:
- shard-bmg: NOTRUN -> [SKIP][151] ([Intel XE#944])
[151]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-1/igt@xe_query@multigpu-query-invalid-cs-cycles.html
* igt@xe_sriov_flr@flr-vf1-clear:
- shard-dg2-set2: NOTRUN -> [SKIP][152] ([Intel XE#3342])
[152]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-433/igt@xe_sriov_flr@flr-vf1-clear.html
#### Possible fixes ####
* igt@kms_addfb_basic@master-rmfb:
- shard-dg2-set2: [SKIP][153] ([Intel XE#4208] / [i915#2575]) -> [PASS][154]
[153]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-432/igt@kms_addfb_basic@master-rmfb.html
[154]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-433/igt@kms_addfb_basic@master-rmfb.html
* igt@kms_async_flips@async-flip-with-page-flip-events-atomic@pipe-c-edp-1-linear:
- shard-lnl: [FAIL][155] ([Intel XE#3719] / [Intel XE#911]) -> [PASS][156]
[155]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-lnl-3/igt@kms_async_flips@async-flip-with-page-flip-events-atomic@pipe-c-edp-1-linear.html
[156]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@kms_async_flips@async-flip-with-page-flip-events-atomic@pipe-c-edp-1-linear.html
* igt@kms_bw@connected-linear-tiling-2-displays-2160x1440p:
- shard-bmg: [SKIP][157] ([Intel XE#2314] / [Intel XE#2894]) -> [PASS][158] +1 other test pass
[157]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@kms_bw@connected-linear-tiling-2-displays-2160x1440p.html
[158]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@kms_bw@connected-linear-tiling-2-displays-2160x1440p.html
* igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc@pipe-b-hdmi-a-6:
- shard-dg2-set2: [INCOMPLETE][159] ([Intel XE#1727] / [Intel XE#3113] / [Intel XE#4212] / [Intel XE#4522]) -> [PASS][160]
[159]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-466/igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc@pipe-b-hdmi-a-6.html
[160]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-435/igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc@pipe-b-hdmi-a-6.html
* igt@kms_cursor_crc@cursor-random-64x64:
- shard-bmg: [SKIP][161] ([Intel XE#2320]) -> [PASS][162] +1 other test pass
[161]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_cursor_crc@cursor-random-64x64.html
[162]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-7/igt@kms_cursor_crc@cursor-random-64x64.html
* igt@kms_cursor_legacy@2x-flip-vs-cursor-legacy:
- shard-bmg: [SKIP][163] ([Intel XE#2291]) -> [PASS][164] +1 other test pass
[163]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_cursor_legacy@2x-flip-vs-cursor-legacy.html
[164]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_cursor_legacy@2x-flip-vs-cursor-legacy.html
* igt@kms_draw_crc@draw-method-blt@rgb565-4tiled:
- shard-bmg: [FAIL][165] -> [PASS][166] +4 other tests pass
[165]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_draw_crc@draw-method-blt@rgb565-4tiled.html
[166]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_draw_crc@draw-method-blt@rgb565-4tiled.html
* igt@kms_feature_discovery@display-2x:
- shard-bmg: [SKIP][167] ([Intel XE#2373]) -> [PASS][168]
[167]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@kms_feature_discovery@display-2x.html
[168]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_feature_discovery@display-2x.html
* igt@kms_flip@2x-dpms-vs-vblank-race@cd-dp2-hdmi-a3:
- shard-bmg: [FAIL][169] ([Intel XE#2882]) -> [PASS][170] +9 other tests pass
[169]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_flip@2x-dpms-vs-vblank-race@cd-dp2-hdmi-a3.html
[170]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-7/igt@kms_flip@2x-dpms-vs-vblank-race@cd-dp2-hdmi-a3.html
* igt@kms_flip@2x-flip-vs-absolute-wf_vblank-interruptible:
- shard-bmg: [SKIP][171] ([Intel XE#2316]) -> [PASS][172] +5 other tests pass
[171]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@kms_flip@2x-flip-vs-absolute-wf_vblank-interruptible.html
[172]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@kms_flip@2x-flip-vs-absolute-wf_vblank-interruptible.html
* igt@kms_flip@blocking-absolute-wf_vblank-interruptible:
- shard-bmg: [FAIL][173] ([Intel XE#2882] / [Intel XE#4309]) -> [PASS][174]
[173]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_flip@blocking-absolute-wf_vblank-interruptible.html
[174]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_flip@blocking-absolute-wf_vblank-interruptible.html
* igt@kms_flip@blocking-absolute-wf_vblank-interruptible@a-dp2:
- shard-bmg: [FAIL][175] ([Intel XE#4309]) -> [PASS][176]
[175]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_flip@blocking-absolute-wf_vblank-interruptible@a-dp2.html
[176]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_flip@blocking-absolute-wf_vblank-interruptible@a-dp2.html
* igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1:
- shard-lnl: [FAIL][177] ([Intel XE#301]) -> [PASS][178]
[177]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-lnl-6/igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1.html
[178]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@kms_flip@flip-vs-expired-vblank-interruptible@b-edp1.html
* igt@kms_flip@flip-vs-expired-vblank@b-hdmi-a6:
- shard-dg2-set2: [FAIL][179] ([Intel XE#301]) -> [PASS][180] +2 other tests pass
[179]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-436/igt@kms_flip@flip-vs-expired-vblank@b-hdmi-a6.html
[180]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-435/igt@kms_flip@flip-vs-expired-vblank@b-hdmi-a6.html
* igt@kms_flip@flip-vs-expired-vblank@c-dp4:
- shard-dg2-set2: [FAIL][181] ([Intel XE#301] / [Intel XE#3321]) -> [PASS][182] +2 other tests pass
[181]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-436/igt@kms_flip@flip-vs-expired-vblank@c-dp4.html
[182]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-435/igt@kms_flip@flip-vs-expired-vblank@c-dp4.html
* igt@kms_hdr@static-swap:
- shard-bmg: [SKIP][183] ([Intel XE#1503]) -> [PASS][184] +1 other test pass
[183]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@kms_hdr@static-swap.html
[184]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@kms_hdr@static-swap.html
* igt@kms_pipe_stress@stress-xrgb8888-untiled:
- shard-bmg: [FAIL][185] ([Intel XE#4891]) -> [PASS][186]
[185]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_pipe_stress@stress-xrgb8888-untiled.html
[186]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@kms_pipe_stress@stress-xrgb8888-untiled.html
* igt@kms_psr_stress_test@invalidate-primary-flip-overlay:
- shard-lnl: [SKIP][187] ([Intel XE#4692]) -> [PASS][188]
[187]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-lnl-7/igt@kms_psr_stress_test@invalidate-primary-flip-overlay.html
[188]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-1/igt@kms_psr_stress_test@invalidate-primary-flip-overlay.html
* igt@kms_setmode@basic:
- shard-bmg: [FAIL][189] ([Intel XE#4794] / [Intel XE#4807]) -> [PASS][190]
[189]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_setmode@basic.html
[190]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_setmode@basic.html
* igt@kms_setmode@basic@pipe-a-dp-2:
- shard-bmg: [FAIL][191] ([Intel XE#4807]) -> [PASS][192]
[191]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_setmode@basic@pipe-a-dp-2.html
[192]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_setmode@basic@pipe-a-dp-2.html
* igt@kms_setmode@basic@pipe-a-hdmi-a-3:
- shard-bmg: [FAIL][193] ([Intel XE#4794]) -> [PASS][194] +4 other tests pass
[193]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_setmode@basic@pipe-a-hdmi-a-3.html
[194]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_setmode@basic@pipe-a-hdmi-a-3.html
* igt@kms_setmode@basic@pipe-a-hdmi-a-6:
- shard-dg2-set2: [FAIL][195] ([Intel XE#2883]) -> [PASS][196] +3 other tests pass
[195]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-434/igt@kms_setmode@basic@pipe-a-hdmi-a-6.html
[196]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-436/igt@kms_setmode@basic@pipe-a-hdmi-a-6.html
* igt@kms_setmode@clone-exclusive-crtc:
- shard-bmg: [SKIP][197] ([Intel XE#1435]) -> [PASS][198]
[197]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-5/igt@kms_setmode@clone-exclusive-crtc.html
[198]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@kms_setmode@clone-exclusive-crtc.html
* igt@kms_vblank@query-busy:
- shard-bmg: [FAIL][199] ([Intel XE#4892]) -> [PASS][200]
[199]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_vblank@query-busy.html
[200]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_vblank@query-busy.html
* igt@xe_ccs@block-copy-compressed-inc-dimension@xmajor-compressed-compfmt0-system-vram01-241x241:
- shard-bmg: [DMESG-WARN][201] ([Intel XE#3428]) -> [PASS][202] +13 other tests pass
[201]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@xe_ccs@block-copy-compressed-inc-dimension@xmajor-compressed-compfmt0-system-vram01-241x241.html
[202]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@xe_ccs@block-copy-compressed-inc-dimension@xmajor-compressed-compfmt0-system-vram01-241x241.html
* igt@xe_exec_basic@multigpu-once-bindexecqueue-userptr-invalidate-race:
- shard-dg2-set2: [SKIP][203] ([Intel XE#1392]) -> [PASS][204] +6 other tests pass
[203]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-432/igt@xe_exec_basic@multigpu-once-bindexecqueue-userptr-invalidate-race.html
[204]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-435/igt@xe_exec_basic@multigpu-once-bindexecqueue-userptr-invalidate-race.html
* igt@xe_pm@s4-basic:
- shard-lnl: [ABORT][205] ([Intel XE#1794]) -> [PASS][206] +1 other test pass
[205]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-lnl-2/igt@xe_pm@s4-basic.html
[206]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-lnl-7/igt@xe_pm@s4-basic.html
* igt@xe_vm@munmap-style-unbind-one-partial:
- shard-dg2-set2: [SKIP][207] ([Intel XE#4208]) -> [PASS][208] +1 other test pass
[207]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-dg2-432/igt@xe_vm@munmap-style-unbind-one-partial.html
[208]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-dg2-433/igt@xe_vm@munmap-style-unbind-one-partial.html
#### Warnings ####
* igt@kms_flip@2x-flip-vs-rmfb-interruptible:
- shard-bmg: [DMESG-WARN][209] ([Intel XE#3428]) -> [SKIP][210] ([Intel XE#2316])
[209]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@kms_flip@2x-flip-vs-rmfb-interruptible.html
[210]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_flip@2x-flip-vs-rmfb-interruptible.html
* igt@kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-render:
- shard-bmg: [SKIP][211] ([Intel XE#2311]) -> [SKIP][212] ([Intel XE#2312]) +9 other tests skip
[211]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-8/igt@kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-render.html
[212]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-fullscreen:
- shard-bmg: [SKIP][213] ([Intel XE#2312]) -> [SKIP][214] ([Intel XE#4141]) +5 other tests skip
[213]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-fullscreen.html
[214]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-fullscreen.html
* igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-render:
- shard-bmg: [SKIP][215] ([Intel XE#4141]) -> [SKIP][216] ([Intel XE#2312]) +7 other tests skip
[215]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-2/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-render.html
[216]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc:
- shard-bmg: [SKIP][217] ([Intel XE#2312]) -> [SKIP][218] ([Intel XE#2311]) +17 other tests skip
[217]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc.html
[218]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-blt:
- shard-bmg: [SKIP][219] ([Intel XE#2313]) -> [SKIP][220] ([Intel XE#2312]) +9 other tests skip
[219]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-2/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-blt.html
[220]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-fullscreen:
- shard-bmg: [SKIP][221] ([Intel XE#2312]) -> [SKIP][222] ([Intel XE#2313]) +20 other tests skip
[221]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-5/igt@kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-fullscreen.html
[222]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-fullscreen.html
* igt@kms_plane_multiple@2x-tiling-yf:
- shard-bmg: [SKIP][223] ([Intel XE#4596]) -> [SKIP][224] ([Intel XE#2493])
[223]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-5/igt@kms_plane_multiple@2x-tiling-yf.html
[224]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-7/igt@kms_plane_multiple@2x-tiling-yf.html
* igt@xe_module_load@load:
- shard-bmg: ([PASS][225], [PASS][226], [PASS][227], [PASS][228], [PASS][229], [PASS][230], [PASS][231], [PASS][232], [PASS][233], [PASS][234], [PASS][235], [PASS][236], [PASS][237], [PASS][238], [PASS][239], [PASS][240], [PASS][241], [PASS][242], [PASS][243], [PASS][244], [PASS][245], [PASS][246], [PASS][247], [SKIP][248], [PASS][249], [PASS][250]) ([Intel XE#2457]) -> ([PASS][251], [SKIP][252], [PASS][253], [PASS][254], [PASS][255], [DMESG-WARN][256], [DMESG-WARN][257], [PASS][258], [DMESG-WARN][259], [DMESG-WARN][260], [DMESG-WARN][261], [DMESG-WARN][262], [PASS][263], [PASS][264], [PASS][265], [PASS][266], [PASS][267], [PASS][268], [PASS][269], [PASS][270], [PASS][271], [PASS][272], [PASS][273], [PASS][274], [PASS][275], [PASS][276]) ([Intel XE#2457] / [Intel XE#3428])
[225]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-3/igt@xe_module_load@load.html
[226]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@xe_module_load@load.html
[227]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-3/igt@xe_module_load@load.html
[228]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-3/igt@xe_module_load@load.html
[229]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-3/igt@xe_module_load@load.html
[230]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-5/igt@xe_module_load@load.html
[231]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-5/igt@xe_module_load@load.html
[232]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-8/igt@xe_module_load@load.html
[233]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-8/igt@xe_module_load@load.html
[234]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-5/igt@xe_module_load@load.html
[235]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-5/igt@xe_module_load@load.html
[236]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-8/igt@xe_module_load@load.html
[237]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-6/igt@xe_module_load@load.html
[238]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@xe_module_load@load.html
[239]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@xe_module_load@load.html
[240]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-2/igt@xe_module_load@load.html
[241]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-2/igt@xe_module_load@load.html
[242]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@xe_module_load@load.html
[243]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-4/igt@xe_module_load@load.html
[244]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-7/igt@xe_module_load@load.html
[245]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-7/igt@xe_module_load@load.html
[246]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-7/igt@xe_module_load@load.html
[247]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-2/igt@xe_module_load@load.html
[248]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-7/igt@xe_module_load@load.html
[249]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-1/igt@xe_module_load@load.html
[250]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8339/shard-bmg-1/igt@xe_module_load@load.html
[251]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-1/igt@xe_module_load@load.html
[252]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@xe_module_load@load.html
[253]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@xe_module_load@load.html
[254]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@xe_module_load@load.html
[255]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@xe_module_load@load.html
[256]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-6/igt@xe_module_load@load.html
[257]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-6/igt@xe_module_load@load.html
[258]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@xe_module_load@load.html
[259]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-6/igt@xe_module_load@load.html
[260]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-6/igt@xe_module_load@load.html
[261]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-6/igt@xe_module_load@load.html
[262]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-6/igt@xe_module_load@load.html
[263]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@xe_module_load@load.html
[264]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-3/igt@xe_module_load@load.html
[265]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@xe_module_load@load.html
[266]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@xe_module_load@load.html
[267]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@xe_module_load@load.html
[268]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-4/igt@xe_module_load@load.html
[269]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@xe_module_load@load.html
[270]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-2/igt@xe_module_load@load.html
[271]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-7/igt@xe_module_load@load.html
[272]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-7/igt@xe_module_load@load.html
[273]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-5/igt@xe_module_load@load.html
[274]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-1/igt@xe_module_load@load.html
[275]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-1/igt@xe_module_load@load.html
[276]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/shard-bmg-8/igt@xe_module_load@load.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[Intel XE#1123]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1123
[Intel XE#1124]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1124
[Intel XE#1127]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1127
[Intel XE#1128]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1128
[Intel XE#1178]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1178
[Intel XE#1392]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1392
[Intel XE#1401]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1401
[Intel XE#1406]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1406
[Intel XE#1407]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1407
[Intel XE#1421]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1421
[Intel XE#1424]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1424
[Intel XE#1435]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1435
[Intel XE#1439]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1439
[Intel XE#1466]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1466
[Intel XE#1470]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1470
[Intel XE#1475]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1475
[Intel XE#1489]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1489
[Intel XE#1503]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1503
[Intel XE#1512]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1512
[Intel XE#1727]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1727
[Intel XE#1729]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1729
[Intel XE#1745]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1745
[Intel XE#1794]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1794
[Intel XE#2191]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2191
[Intel XE#2233]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2233
[Intel XE#2234]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2234
[Intel XE#2244]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2244
[Intel XE#2252]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2252
[Intel XE#2284]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2284
[Intel XE#2291]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2291
[Intel XE#2293]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2293
[Intel XE#2311]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2311
[Intel XE#2312]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2312
[Intel XE#2313]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2313
[Intel XE#2314]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2314
[Intel XE#2316]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2316
[Intel XE#2320]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2320
[Intel XE#2321]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2321
[Intel XE#2322]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2322
[Intel XE#2327]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2327
[Intel XE#2330]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2330
[Intel XE#2341]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2341
[Intel XE#2360]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2360
[Intel XE#2373]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2373
[Intel XE#2380]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2380
[Intel XE#2457]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2457
[Intel XE#2493]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2493
[Intel XE#2652]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2652
[Intel XE#2705]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2705
[Intel XE#2763]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2763
[Intel XE#2850]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2850
[Intel XE#288]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/288
[Intel XE#2882]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2882
[Intel XE#2883]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2883
[Intel XE#2887]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2887
[Intel XE#2893]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2893
[Intel XE#2894]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2894
[Intel XE#2907]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2907
[Intel XE#301]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/301
[Intel XE#306]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/306
[Intel XE#307]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/307
[Intel XE#308]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/308
[Intel XE#309]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/309
[Intel XE#3113]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3113
[Intel XE#3141]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3141
[Intel XE#3149]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3149
[Intel XE#3157]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3157
[Intel XE#316]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/316
[Intel XE#323]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/323
[Intel XE#3278]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3278
[Intel XE#3279]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3279
[Intel XE#3321]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3321
[Intel XE#3342]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3342
[Intel XE#3414]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3414
[Intel XE#3428]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3428
[Intel XE#3432]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3432
[Intel XE#362]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/362
[Intel XE#3658]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3658
[Intel XE#366]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/366
[Intel XE#367]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/367
[Intel XE#3719]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3719
[Intel XE#373]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/373
[Intel XE#378]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/378
[Intel XE#3904]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3904
[Intel XE#4045]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4045
[Intel XE#4141]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4141
[Intel XE#4208]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4208
[Intel XE#4212]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4212
[Intel XE#4294]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4294
[Intel XE#4309]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4309
[Intel XE#4427]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4427
[Intel XE#4518]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4518
[Intel XE#4522]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4522
[Intel XE#455]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/455
[Intel XE#4596]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4596
[Intel XE#4608]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4608
[Intel XE#4609]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4609
[Intel XE#4692]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4692
[Intel XE#4794]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4794
[Intel XE#4807]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4807
[Intel XE#4837]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4837
[Intel XE#4891]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4891
[Intel XE#4892]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4892
[Intel XE#560]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/560
[Intel XE#584]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/584
[Intel XE#599]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/599
[Intel XE#616]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/616
[Intel XE#623]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/623
[Intel XE#651]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/651
[Intel XE#653]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/653
[Intel XE#656]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/656
[Intel XE#688]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/688
[Intel XE#701]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/701
[Intel XE#756]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/756
[Intel XE#787]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/787
[Intel XE#886]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/886
[Intel XE#911]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/911
[Intel XE#929]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/929
[Intel XE#944]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/944
[Intel XE#977]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/977
[i915#2575]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2575
Build changes
-------------
* IGT: IGT_8339 -> IGTPW_13046
IGTPW_13046: 13046
IGT_8339: 8339
xe-2999-6e67a7af43567bb9f23fe156fde7efa3d214fd20: 6e67a7af43567bb9f23fe156fde7efa3d214fd20
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13046/index.html
[-- Attachment #2: Type: text/html, Size: 347035 bytes --]
^ permalink raw reply [flat|nested] 16+ messages in thread
end of thread, other threads:[~2025-04-26 10:21 UTC | newest]
Thread overview: 16+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-04-25 18:20 [PATCH] tests/xe: Add system_allocator test Matthew Brost
2025-04-25 21:03 ` ✓ Xe.CI.BAT: success for tests/xe: Add system_allocator test (rev5) Patchwork
2025-04-25 21:21 ` ✓ i915.CI.BAT: " Patchwork
2025-04-26 6:28 ` ✓ i915.CI.Full: " Patchwork
2025-04-26 10:21 ` ✗ Xe.CI.Full: failure " Patchwork
-- strict thread matches above, loose matches on Subject: below --
2025-04-16 2:20 [PATCH] tests/xe: Add system_allocator test Matthew Brost
2025-04-16 17:09 ` Thomas Hellström
2025-04-16 18:36 ` Matthew Brost
2025-04-18 15:47 ` Francois Dugast
2025-04-18 19:44 ` Matthew Brost
2025-04-24 19:28 ` Francois Dugast
2025-04-24 19:46 ` Matthew Brost
2024-10-16 3:04 Matthew Brost
2024-08-27 23:16 Matthew Brost
2024-08-21 1:41 Matthew Brost
2024-05-21 4:18 [PATCH] tests/xe: Add System Allocator test Matthew Brost
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox