* [igt-dev] [PATCH 0/5] IGT updates for upstreaming prep
@ 2023-05-01 23:04 Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 1/5] xe: Update to latest uAPI Matthew Brost
From: Matthew Brost @ 2023-05-01 23:04 UTC
To: igt-dev
Main changes:
- Add tests for NULL bindings (VK Sparse)
- Test new GPUVA features (MMAP style binds, avoiding rebinds, doing rebinds)
- Update to VM bind uAPI + error handling flow
KMD series associated with these changes:
https://patchwork.freedesktop.org/series/117156/
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Matthew Brost (5):
xe: Update to latest uAPI
xe_exec_basic: Add NULL VM bind section
xe_vm: MMAP style VM binds section
xe_vm: Add mmap / munmap sections that split large pages
xe: Update to new VM bind uAPI
include/drm-uapi/xe_drm.h | 106 ++----
lib/intel_batchbuffer.c | 2 +-
lib/xe/xe_compute.c | 2 +-
lib/xe/xe_ioctl.c | 15 +-
lib/xe/xe_ioctl.h | 2 +-
lib/xe/xe_query.c | 2 +-
tests/xe/xe_create.c | 2 +-
tests/xe/xe_evict.c | 23 +-
tests/xe/xe_exec_balancer.c | 6 +-
tests/xe/xe_exec_basic.c | 38 ++-
tests/xe/xe_exec_compute_mode.c | 6 +-
tests/xe/xe_exec_fault_mode.c | 6 +-
tests/xe/xe_exec_reset.c | 8 +-
tests/xe/xe_exec_threads.c | 112 ++----
tests/xe/xe_guc_pc.c | 2 +-
tests/xe/xe_huc_copy.c | 2 +-
tests/xe/xe_intel_bb.c | 2 +-
tests/xe/xe_pm.c | 2 +-
tests/xe/xe_vm.c | 583 ++++++++++++++++++++++----------
tests/xe/xe_waitfence.c | 18 +-
20 files changed, 532 insertions(+), 407 deletions(-)
--
2.34.1
* [igt-dev] [PATCH 1/5] xe: Update to latest uAPI
2023-05-01 23:04 [igt-dev] [PATCH 0/5] IGT updates for upstreaming prep Matthew Brost
@ 2023-05-01 23:04 ` Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 2/5] xe_exec_basic: Add NULL VM bind section Matthew Brost
From: Matthew Brost @ 2023-05-01 23:04 UTC
To: igt-dev
Includes support for NULL bindings.
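A minimal sketch of a NULL bind under this header, assuming a VM already
created with async bind ops and using the IGT helper __xe_vm_bind_assert()
that later patches in this series exercise (address and size are
illustrative):

	struct drm_xe_sync sync = {
		.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
		.handle = syncobj_create(fd, 0),
	};

	/* BO handle and offset are zero as required (MBZ); once the bind
	 * completes, writes to the range are dropped and reads return zero.
	 */
	__xe_vm_bind_assert(fd, vm, 0 /* default bind engine */,
			    0 /* BO handle, MBZ */, 0 /* BO offset, MBZ */,
			    0x1a0000 /* addr */, 0x10000 /* size */,
			    XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC |
			    XE_VM_BIND_FLAG_NULL, &sync, 1, 0, 0);
	igt_assert(syncobj_wait(fd, &sync.handle, 1, INT64_MAX, 0, NULL));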
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
include/drm-uapi/xe_drm.h | 27 ++++++++++++++++++---------
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 593b01ba5..27c51946f 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -37,7 +37,7 @@ extern "C" {
*/
/**
- * struct i915_user_extension - Base class for defining a chain of extensions
+ * struct xe_user_extension - Base class for defining a chain of extensions
*
* Many interfaces need to grow over time. In most cases we can simply
* extend the struct and have userspace pass in more data. Another option,
@@ -55,20 +55,20 @@ extern "C" {
*
* .. code-block:: C
*
- * struct i915_user_extension ext3 {
+ * struct xe_user_extension ext3 {
* .next_extension = 0, // end
* .name = ...,
* };
- * struct i915_user_extension ext2 {
+ * struct xe_user_extension ext2 {
* .next_extension = (uintptr_t)&ext3,
* .name = ...,
* };
- * struct i915_user_extension ext1 {
+ * struct xe_user_extension ext1 {
* .next_extension = (uintptr_t)&ext2,
* .name = ...,
* };
*
- * Typically the struct i915_user_extension would be embedded in some uAPI
+ * Typically the struct xe_user_extension would be embedded in some uAPI
* struct, and in this case we would feed it the head of the chain(i.e ext1),
* which would then apply all of the above extensions.
*
@@ -77,7 +77,7 @@ struct xe_user_extension {
/**
* @next_extension:
*
- * Pointer to the next struct i915_user_extension, or zero if the end.
+ * Pointer to the next struct xe_user_extension, or zero if the end.
*/
__u64 next_extension;
/**
@@ -87,7 +87,7 @@ struct xe_user_extension {
*
* Also note that the name space for this is not global for the whole
* driver, but rather its scope/meaning is limited to the specific piece
- * of uAPI which has embedded the struct i915_user_extension.
+ * of uAPI which has embedded the struct xe_user_extension.
*/
__u32 name;
/**
@@ -99,7 +99,7 @@ struct xe_user_extension {
};
/*
- * i915 specific ioctls.
+ * xe specific ioctls.
*
* The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
* [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
@@ -184,7 +184,8 @@ struct drm_xe_query_config {
#define XE_QUERY_CONFIG_VA_BITS 3
#define XE_QUERY_CONFIG_GT_COUNT 4
#define XE_QUERY_CONFIG_MEM_REGION_COUNT 5
-#define XE_QUERY_CONFIG_NUM_PARAM XE_QUERY_CONFIG_MEM_REGION_COUNT + 1
+#define XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY 6
+#define XE_QUERY_CONFIG_NUM_PARAM XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY + 1
__u64 info[];
};
@@ -446,6 +447,14 @@ struct drm_xe_vm_bind_op {
* than differing the MAP to the page fault handler.
*/
#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 18)
+ /*
+ * When the NULL flag is set, the page tables are set up with a special
+ * bit which indicates writes are dropped and all reads return zero. The
+ * NULL flag is only valid for XE_VM_BIND_OP_MAP operations, the BO
+ * handle MBZ, and the BO offset MBZ. This flag is intended to implement
+ * VK sparse bindings.
+ */
+#define XE_VM_BIND_FLAG_NULL (0x1 << 19)
/** @reserved: Reserved */
__u64 reserved[2];
--
2.34.1
* [igt-dev] [PATCH 2/5] xe_exec_basic: Add NULL VM bind section
2023-05-01 23:04 [igt-dev] [PATCH 0/5] IGT updates for upstreaming prep Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 1/5] xe: Update to latest uAPI Matthew Brost
@ 2023-05-01 23:04 ` Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 3/5] xe_vm: MMAP style VM binds section Matthew Brost
From: Matthew Brost @ 2023-05-01 23:04 UTC
To: igt-dev
A NULL VM bind results in writes being dropped and reads returning zero.
Verify that the uAPI for NULL VM binds works as designed.
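As a sketch of what the new sections check (reusing this test's data[] and
flags naming): with SPARSE set, the batch's MI_STORE_DWORD_IMM targets the
NULL-bound sparse address, so the store is dropped and the CPU-visible
payload stays zero. The test therefore skips the 0xc0ffee check; it could
equally assert the write never landed:

	if (flags & SPARSE)
		igt_assert_eq(data[i].data, 0);		/* write dropped by NULL bind */
	else
		igt_assert_eq(data[i].data, 0xc0ffee);	/* write landed normally */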
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
tests/xe/xe_exec_basic.c | 33 ++++++++++++++++++++++++++++-----
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/tests/xe/xe_exec_basic.c b/tests/xe/xe_exec_basic.c
index 2a176a5b3..f7ad6aeeb 100644
--- a/tests/xe/xe_exec_basic.c
+++ b/tests/xe/xe_exec_basic.c
@@ -27,6 +27,7 @@
#define BIND_ENGINE (0x1 << 4)
#define DEFER_ALLOC (0x1 << 5)
#define DEFER_BIND (0x1 << 6)
+#define SPARSE (0x1 << 7)
/**
* SUBTEST: once-%s
@@ -67,6 +68,10 @@
* @userptr-rebind: userptr rebind
* @userptr-invalidate: userptr invalidate
* @userptr-invalidate-race: userptr invalidate racy
+ * @null: null
+ * @null-defer-mmap: null defer mmap
+ * @null-defer-bind: null defer bind
+ * @null-rebind: null rebind
* @bindengine: bind engine
* @bindengine-userptr: bind engine userptr description
* @bindengine-rebind: bind engine rebind description
@@ -89,6 +94,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
.syncs = to_user_pointer(sync),
};
uint64_t addr[MAX_N_ENGINES];
+ uint64_t sparse_addr[MAX_N_ENGINES];
uint32_t vm[MAX_N_ENGINES];
uint32_t engines[MAX_N_ENGINES];
uint32_t bind_engines[MAX_N_ENGINES];
@@ -113,8 +119,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
xe_get_default_alignment(fd));
addr[0] = 0x1a0000;
- for (i = 1; i < MAX_N_ENGINES; ++i)
+ sparse_addr[0] = 0x301a0000;
+ for (i = 1; i < MAX_N_ENGINES; ++i) {
addr[i] = addr[i - 1] + (0x1ull << 32);
+ sparse_addr[i] = sparse_addr[i - 1] + (0x1ull << 32);
+ }
if (flags & USERPTR) {
#define MAP_ADDRESS 0x00007fadeadbe000
@@ -163,6 +172,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
xe_vm_bind_userptr_async(fd, vm[i], bind_engines[i],
to_user_pointer(data), addr[i],
bo_size, sync, 1);
+ if (flags & SPARSE)
+ __xe_vm_bind_assert(fd, vm[i], bind_engines[i],
+ 0, 0, sparse_addr[i], bo_size,
+ XE_VM_BIND_OP_MAP |
+ XE_VM_BIND_FLAG_ASYNC |
+ XE_VM_BIND_FLAG_NULL, sync,
+ 1, 0, 0);
}
if (flags & DEFER_BIND)
@@ -174,7 +190,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
uint64_t batch_addr = __addr + batch_offset;
uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
- uint64_t sdi_addr = __addr + sdi_offset;
+ uint64_t sdi_addr = (flags & SPARSE ? sparse_addr[i % n_vm] :
+ __addr) + sdi_offset;
int e = i % n_engines;
b = 0;
@@ -261,9 +278,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
INT64_MAX, 0, NULL));
}
- for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
- i < n_execs; i++)
- igt_assert_eq(data[i].data, 0xc0ffee);
+ if (!(flags & SPARSE)) {
+ for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
+ i < n_execs; i++)
+ igt_assert_eq(data[i].data, 0xc0ffee);
+ }
for (i = 0; i < n_engines; i++) {
syncobj_destroy(fd, syncobjs[i]);
@@ -296,6 +315,10 @@ igt_main
{ "basic-defer-bind", DEFER_ALLOC | DEFER_BIND },
{ "userptr", USERPTR },
{ "rebind", REBIND },
+ { "null", SPARSE },
+ { "null-defer-mmap", SPARSE | DEFER_ALLOC },
+ { "null-defer-bind", SPARSE | DEFER_ALLOC | DEFER_BIND },
+ { "null-rebind", SPARSE | REBIND },
{ "userptr-rebind", USERPTR | REBIND },
{ "userptr-invalidate", USERPTR | INVALIDATE },
{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
--
2.34.1
* [igt-dev] [PATCH 3/5] xe_vm: MMAP style VM binds section
2023-05-01 23:04 [igt-dev] [PATCH 0/5] IGT updates for upstreaming prep Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 1/5] xe: Update to latest uAPI Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 2/5] xe_exec_basic: Add NULL VM bind section Matthew Brost
@ 2023-05-01 23:04 ` Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 4/5] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
From: Matthew Brost @ 2023-05-01 23:04 UTC
To: igt-dev
GPUVA added support for MMAP style VM binds, so let's test it.
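An mmap style bind overwrites part of an existing mapping with a new one,
splitting the original VMA the same way mmap(MAP_FIXED) splits an existing
CPU mapping. A minimal sketch with the existing IGT helpers (sizes are
illustrative, mirroring the new test below):

	/* bo0 initially backs four pages at addr; binding bo1 over the
	 * middle two pages splits the original mapping into front / new /
	 * back VMAs.
	 */
	xe_vm_bind_async(fd, vm, 0, bo0, 0, addr, 4 * page_size, sync, 1);
	xe_vm_bind_async(fd, vm, 0, bo1, 0, addr + page_size,
			 2 * page_size, sync, 1);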
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
tests/xe/xe_vm.c | 350 ++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 314 insertions(+), 36 deletions(-)
diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index d4cec104e..038a80600 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -1233,9 +1233,9 @@ static void *hammer_thread(void *tdata)
return NULL;
}
-#define MUNMAP_FLAG_USERPTR (0x1 << 0)
-#define MUNMAP_FLAG_INVALIDATE (0x1 << 1)
-#define MUNMAP_FLAG_HAMMER_FIRST_PAGE (0x1 << 2)
+#define MAP_FLAG_USERPTR (0x1 << 0)
+#define MAP_FLAG_INVALIDATE (0x1 << 1)
+#define MAP_FLAG_HAMMER_FIRST_PAGE (0x1 << 2)
/**
@@ -1327,7 +1327,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
bo_size = page_size * bo_n_pages;
- if (flags & MUNMAP_FLAG_USERPTR) {
+ if (flags & MAP_FLAG_USERPTR) {
map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
PROT_WRITE, MAP_SHARED | MAP_FIXED |
MAP_ANONYMOUS, -1, 0);
@@ -1346,7 +1346,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
/* Do initial binds */
bind_size = (page_size * bo_n_pages) / n_binds;
for (i = 0; i < n_binds; ++i) {
- if (flags & MUNMAP_FLAG_USERPTR)
+ if (flags & MAP_FLAG_USERPTR)
xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
bind_size, sync, 1);
else
@@ -1361,7 +1361,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
* cause a fault if a rebind occurs during munmap style VM unbind
* (partial VMAs unbound).
*/
- if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+ if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
t.fd = fd;
t.vm = vm;
#define PAGE_SIZE 4096
@@ -1420,7 +1420,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
data = map + i * page_size;
igt_assert_eq(data->data, 0xc0ffee);
}
- if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+ if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
memset(map, 0, PAGE_SIZE / 2);
memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
} else {
@@ -1470,7 +1470,7 @@ try_again_after_invalidate:
igt_assert_eq(data->data, 0xc0ffee);
}
}
- if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+ if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
memset(map, 0, PAGE_SIZE / 2);
memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
} else {
@@ -1481,7 +1481,7 @@ try_again_after_invalidate:
* The munmap style VM unbind can create new VMAs, make sure those are
* in the bookkeeping for another rebind after a userptr invalidate.
*/
- if (flags & MUNMAP_FLAG_INVALIDATE && !invalidate++) {
+ if (flags & MAP_FLAG_INVALIDATE && !invalidate++) {
map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
PROT_WRITE, MAP_SHARED | MAP_FIXED |
MAP_ANONYMOUS, -1, 0);
@@ -1492,7 +1492,7 @@ try_again_after_invalidate:
/* Confirm unbound region can be rebound */
syncobj_reset(fd, &sync[0].handle, 1);
sync[0].flags |= DRM_XE_SYNC_SIGNAL;
- if (flags & MUNMAP_FLAG_USERPTR)
+ if (flags & MAP_FLAG_USERPTR)
xe_vm_bind_userptr_async(fd, vm, 0,
addr + unbind_n_page_offfset * page_size,
addr + unbind_n_page_offfset * page_size,
@@ -1540,7 +1540,7 @@ try_again_after_invalidate:
igt_assert_eq(data->data, 0xc0ffee);
}
- if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+ if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
exit = 1;
pthread_join(t.thread, NULL);
pthread_barrier_destroy(&barrier);
@@ -1555,6 +1555,251 @@ try_again_after_invalidate:
xe_vm_destroy(fd, vm);
}
+/**
+ * SUBTEST: mmap-style-bind-%s
+ * Description: Test mmap style bind with %arg[1]
+ * Run type: FULL
+ * TODO: change ``'Run type' == FULL`` to a better category
+ *
+ * arg[1]:
+ *
+ * @all: all
+ * @one-partial: one partial
+ * @either-side-partial: either side partial
+ * @either-side-full: either side full
+ * @either-side-partial-hammer: either side partial hammer
+ * @end: end
+ * @front: front
+ * @many-all: many all
+ * @many-either-side-partial: many either side partial
+ * @many-either-side-partial-hammer: many either side partial hammer
+ * @userptr-all: userptr all
+ * @userptr-one-partial: userptr one partial
+ * @userptr-either-side-partial: userptr either side partial
+ * @userptr-either-side-full: userptr either side full
+ */
+
+static void
+test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
+ int bo_n_pages, int n_binds, int unbind_n_page_offfset,
+ int unbind_n_pages, unsigned int flags)
+{
+ struct drm_xe_sync sync[2] = {
+ { .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+ { .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 2,
+ .syncs = to_user_pointer(sync),
+ };
+ uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+ uint32_t vm;
+ uint32_t engine;
+ size_t bo_size;
+ uint32_t bo0 = 0, bo1 = 0;
+ uint64_t bind_size;
+ uint64_t page_size = xe_get_default_alignment(fd);
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ } *data;
+ void *map0, *map1;
+ int i, b;
+ struct thread_data t;
+ pthread_barrier_t barrier;
+ int exit = 0;
+
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ bo_size = page_size * bo_n_pages;
+
+ if (flags & MAP_FLAG_USERPTR) {
+ map0 = mmap(from_user_pointer(addr), bo_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED | MAP_FIXED |
+ MAP_ANONYMOUS, -1, 0);
+ map1 = mmap(from_user_pointer(addr + bo_size),
+ bo_size, PROT_READ | PROT_WRITE, MAP_SHARED |
+ MAP_FIXED | MAP_ANONYMOUS, -1, 0);
+ igt_assert(map0 != MAP_FAILED);
+ igt_assert(map1 != MAP_FAILED);
+ } else {
+ bo0 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+ map0 = xe_bo_map(fd, bo0, bo_size);
+ bo1 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+ map1 = xe_bo_map(fd, bo1, bo_size);
+ }
+ memset(map0, 0, bo_size);
+ memset(map1, 0, bo_size);
+
+ engine = xe_engine_create(fd, vm, eci, 0);
+
+ sync[0].handle = syncobj_create(fd, 0);
+ sync[1].handle = syncobj_create(fd, 0);
+
+ /* Do initial binds */
+ bind_size = (page_size * bo_n_pages) / n_binds;
+ for (i = 0; i < n_binds; ++i) {
+ if (flags & MAP_FLAG_USERPTR)
+ xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
+ bind_size, sync, 1);
+ else
+ xe_vm_bind_async(fd, vm, 0, bo0, i * bind_size,
+ addr, bind_size, sync, 1);
+ addr += bind_size;
+ }
+ addr = base_addr;
+
+ /*
+ * Kick a thread to write the first page continuously to ensure we can't
+ * cause a fault if a rebind occurs during an mmap style VM bind
+ * (partial VMAs replaced).
+ */
+ if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+ t.fd = fd;
+ t.vm = vm;
+#define PAGE_SIZE 4096
+ t.addr = addr + PAGE_SIZE / 2;
+ t.eci = eci;
+ t.exit = &exit;
+ t.map = map0 + PAGE_SIZE / 2;
+ t.barrier = &barrier;
+ pthread_barrier_init(&barrier, NULL, 2);
+ pthread_create(&t.thread, 0, hammer_thread, &t);
+ pthread_barrier_wait(&barrier);
+ }
+
+ /* Verify we can use every page */
+ for (i = 0; i < n_binds; ++i) {
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+ data = map0 + i * page_size;
+
+ b = 0;
+ data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+ data->batch[b++] = sdi_addr;
+ data->batch[b++] = sdi_addr >> 32;
+ data->batch[b++] = 0xc0ffee;
+ data->batch[b++] = MI_BATCH_BUFFER_END;
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+ if (i)
+ syncobj_reset(fd, &sync[1].handle, 1);
+ sync[1].flags |= DRM_XE_SYNC_SIGNAL;
+
+ exec.engine_id = engine;
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+
+ addr += page_size;
+ }
+ addr = base_addr;
+
+ /* Bind some of the pages to different BO / userptr */
+ syncobj_reset(fd, &sync[0].handle, 1);
+ sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+ sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
+ if (flags & MAP_FLAG_USERPTR)
+ xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
+ unbind_n_page_offfset * page_size,
+ addr + unbind_n_page_offfset * page_size,
+ unbind_n_pages * page_size, sync, 2);
+ else
+ xe_vm_bind_async(fd, vm, 0, bo1,
+ unbind_n_page_offfset * page_size,
+ addr + unbind_n_page_offfset * page_size,
+ unbind_n_pages * page_size, sync, 2);
+ igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+ igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
+
+ /* Verify all pages written */
+ for (i = 0; i < n_binds; ++i) {
+ data = map0 + i * page_size;
+ igt_assert_eq(data->data, 0xc0ffee);
+ }
+ if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+ memset(map0, 0, PAGE_SIZE / 2);
+ memset(map0 + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
+ } else {
+ memset(map0, 0, bo_size);
+ memset(map1, 0, bo_size);
+ }
+
+ /* Verify we can use every page */
+ for (i = 0; i < n_binds; ++i) {
+ uint64_t batch_offset = (char *)&data->batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data->data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+
+ data = map0 + i * page_size;
+ b = 0;
+ data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+ data->batch[b++] = sdi_addr;
+ data->batch[b++] = sdi_addr >> 32;
+ data->batch[b++] = 0xc0ffee;
+ data->batch[b++] = MI_BATCH_BUFFER_END;
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ data = map1 + i * page_size;
+ b = 0;
+ data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+ data->batch[b++] = sdi_addr;
+ data->batch[b++] = sdi_addr >> 32;
+ data->batch[b++] = 0xc0ffee;
+ data->batch[b++] = MI_BATCH_BUFFER_END;
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+ if (i)
+ syncobj_reset(fd, &sync[1].handle, 1);
+ sync[1].flags |= DRM_XE_SYNC_SIGNAL;
+
+ exec.engine_id = engine;
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+
+ addr += page_size;
+ }
+ addr = base_addr;
+
+ igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+ igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
+
+ /* Verify all pages written */
+ for (i = 0; i < n_binds; ++i) {
+ uint32_t result = 0;
+
+ data = map0 + i * page_size;
+ result |= data->data;
+
+ data = map1 + i * page_size;
+ result |= data->data;
+
+ igt_assert_eq(result, 0xc0ffee);
+ }
+
+ if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+ exit = 1;
+ pthread_join(t.thread, NULL);
+ pthread_barrier_destroy(&barrier);
+ }
+
+ syncobj_destroy(fd, sync[0].handle);
+ syncobj_destroy(fd, sync[1].handle);
+ xe_engine_destroy(fd, engine);
+ munmap(map0, bo_size);
+ munmap(map1, bo_size);
+ if (bo0)
+ gem_close(fd, bo0);
+ if (bo1)
+ gem_close(fd, bo1);
+ xe_vm_destroy(fd, vm);
+}
+
igt_main
{
struct drm_xe_engine_class_instance *hwe, *hwe_non_copy = NULL;
@@ -1567,55 +1812,74 @@ igt_main
int unbind_n_page_offfset;
int unbind_n_pages;
unsigned int flags;
- } sections[] = {
+ } munmap_sections[] = {
{ "all", 4, 2, 0, 4, 0 },
{ "one-partial", 4, 1, 1, 2, 0 },
{ "either-side-partial", 4, 2, 1, 2, 0 },
{ "either-side-partial-hammer", 4, 2, 1, 2,
- MUNMAP_FLAG_HAMMER_FIRST_PAGE },
+ MAP_FLAG_HAMMER_FIRST_PAGE },
{ "either-side-full", 4, 4, 1, 2, 0 },
{ "end", 4, 2, 0, 3, 0 },
{ "front", 4, 2, 1, 3, 0 },
{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
- MUNMAP_FLAG_HAMMER_FIRST_PAGE },
+ MAP_FLAG_HAMMER_FIRST_PAGE },
{ "many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8, 0 },
{ "many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2, 0 },
{ "many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2, 0 },
- { "userptr-all", 4, 2, 0, 4, MUNMAP_FLAG_USERPTR },
- { "userptr-one-partial", 4, 1, 1, 2, MUNMAP_FLAG_USERPTR },
+ { "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
+ { "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
{ "userptr-either-side-partial", 4, 2, 1, 2,
- MUNMAP_FLAG_USERPTR },
+ MAP_FLAG_USERPTR },
{ "userptr-either-side-full", 4, 4, 1, 2,
- MUNMAP_FLAG_USERPTR },
- { "userptr-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR },
- { "userptr-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR },
+ MAP_FLAG_USERPTR },
+ { "userptr-end", 4, 2, 0, 3, MAP_FLAG_USERPTR },
+ { "userptr-front", 4, 2, 1, 3, MAP_FLAG_USERPTR },
{ "userptr-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
- MUNMAP_FLAG_USERPTR },
+ MAP_FLAG_USERPTR },
{ "userptr-many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8,
- MUNMAP_FLAG_USERPTR },
+ MAP_FLAG_USERPTR },
{ "userptr-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
- MUNMAP_FLAG_USERPTR },
+ MAP_FLAG_USERPTR },
{ "userptr-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
- MUNMAP_FLAG_USERPTR },
+ MAP_FLAG_USERPTR },
{ "userptr-inval-either-side-full", 4, 4, 1, 2,
- MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
- { "userptr-inval-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR |
- MUNMAP_FLAG_INVALIDATE },
- { "userptr-inval-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR |
- MUNMAP_FLAG_INVALIDATE },
+ MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
+ { "userptr-inval-end", 4, 2, 0, 3, MAP_FLAG_USERPTR |
+ MAP_FLAG_INVALIDATE },
+ { "userptr-inval-front", 4, 2, 1, 3, MAP_FLAG_USERPTR |
+ MAP_FLAG_INVALIDATE },
{ "userptr-inval-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
- MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+ MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
{ "userptr-inval-many-either-side-partial", 4 * 8, 2 * 8, 1,
- 4 * 8 - 2, MUNMAP_FLAG_USERPTR |
- MUNMAP_FLAG_INVALIDATE },
+ 4 * 8 - 2, MAP_FLAG_USERPTR |
+ MAP_FLAG_INVALIDATE },
{ "userptr-inval-many-either-side-full", 4 * 8, 4 * 8, 1 * 8,
- 2 * 8, MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+ 2 * 8, MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
{ "userptr-inval-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
- MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+ MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
{ "userptr-inval-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
- MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+ MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
+ { NULL },
+ };
+ const struct section mmap_sections[] = {
+ { "all", 4, 2, 0, 4, 0 },
+ { "one-partial", 4, 1, 1, 2, 0 },
+ { "either-side-partial", 4, 2, 1, 2, 0 },
+ { "either-side-full", 4, 4, 1, 2, 0 },
+ { "either-side-partial-hammer", 4, 2, 1, 2,
+ MAP_FLAG_HAMMER_FIRST_PAGE },
+ { "end", 4, 2, 0, 3, 0 },
+ { "front", 4, 2, 1, 3, 0 },
+ { "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
+ { "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
+ { "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
+ MAP_FLAG_HAMMER_FIRST_PAGE },
+ { "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
+ { "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
+ { "userptr-either-side-partial", 4, 2, 1, 2, MAP_FLAG_USERPTR },
+ { "userptr-either-side-full", 4, 4, 1, 2, MAP_FLAG_USERPTR },
{ NULL },
};
@@ -1823,7 +2087,7 @@ igt_main
break;
}
- for (const struct section *s = sections; s->name; s++) {
+ for (const struct section *s = munmap_sections; s->name; s++) {
igt_subtest_f("munmap-style-unbind-%s", s->name) {
igt_require_f(hwe_non_copy,
"Requires non-copy engine to run\n");
@@ -1837,6 +2101,20 @@ igt_main
}
}
+ for (const struct section *s = mmap_sections; s->name; s++) {
+ igt_subtest_f("mmap-style-bind-%s", s->name) {
+ igt_require_f(hwe_non_copy,
+ "Requires non-copy engine to run\n");
+
+ test_mmap_style_bind(fd, hwe_non_copy,
+ s->bo_n_pages,
+ s->n_binds,
+ s->unbind_n_page_offfset,
+ s->unbind_n_pages,
+ s->flags);
+ }
+ }
+
igt_fixture {
xe_device_put(fd);
close(fd);
--
2.34.1
* [igt-dev] [PATCH 4/5] xe_vm: Add mmap / munmap sections that split large pages
2023-05-01 23:04 [igt-dev] [PATCH 0/5] IGT updates for upstreaming prep Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 3/5] xe_vm: MMAP style VM binds section Matthew Brost
@ 2023-05-01 23:04 ` Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 5/5] xe: Update to new VM bind uAPI Matthew Brost
2023-05-01 23:10 ` [igt-dev] ✗ Fi.CI.BUILD: failure for IGT updates for upstreaming prep Patchwork
From: Matthew Brost @ 2023-05-01 23:04 UTC
To: igt-dev
Splitting large pages involves using dma-resv slots for ordering; make
sure this works.
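The new sections reuse the existing geometries scaled up to 2 MiB huge
pages. A sketch of the arithmetic, assuming xe_get_default_alignment()
returns 64 KiB so n_page_per_2mb is 32 (this mirrors the hunks below):

	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd); /* 32 */

	bo_n_pages *= n_page_per_2mb;	  /* BO now spans whole 2 MiB pages */
	unbind_n_pages *= n_page_per_2mb; /* partial op covers 2 MiB worth */
	if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
		/* 2 MiB aligned offset: huge PTEs survive, no split */
		unbind_n_page_offset *= n_page_per_2mb;
	/* else the offset stays mid huge page, forcing the kernel to split it */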
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
tests/xe/xe_vm.c | 81 +++++++++++++++++++++++++++++++++++-------------
1 file changed, 60 insertions(+), 21 deletions(-)
diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 038a80600..694f829b3 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -1236,7 +1236,8 @@ static void *hammer_thread(void *tdata)
#define MAP_FLAG_USERPTR (0x1 << 0)
#define MAP_FLAG_INVALIDATE (0x1 << 1)
#define MAP_FLAG_HAMMER_FIRST_PAGE (0x1 << 2)
-
+#define MAP_FLAG_LARGE_PAGE (0x1 << 3)
+#define MAP_FLAG_LARGE_PAGE_NO_SPLIT (0x1 << 4)
/**
* SUBTEST: munmap-style-unbind-%s
@@ -1288,12 +1289,16 @@ static void *hammer_thread(void *tdata)
* userptr inval many either side full
* @userptr-inval-many-end: userptr inval many end
* @userptr-inval-many-front: userptr inval many front
+ * @either-side-partial-large-page-hammer:
+ * either side partial large page hammer
+ * @either-side-partial-split-page-hammer:
+ * either side partial split page hammer
*/
static void
test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
int bo_n_pages, int n_binds,
- int unbind_n_page_offfset, int unbind_n_pages,
+ int unbind_n_page_offset, int unbind_n_pages,
unsigned int flags)
{
struct drm_xe_sync sync[2] = {
@@ -1305,7 +1310,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
.num_syncs = 2,
.syncs = to_user_pointer(sync),
};
- uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+ uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
uint32_t vm;
uint32_t engine;
size_t bo_size;
@@ -1323,6 +1328,14 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
struct thread_data t;
pthread_barrier_t barrier;
int exit = 0;
+ int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
+
+ if (flags & MAP_FLAG_LARGE_PAGE) {
+ bo_n_pages *= n_page_per_2mb;
+ unbind_n_pages *= n_page_per_2mb;
+ if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
+ unbind_n_page_offset *= n_page_per_2mb;
+ }
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
bo_size = page_size * bo_n_pages;
@@ -1409,7 +1422,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
sync[0].flags |= DRM_XE_SYNC_SIGNAL;
sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
xe_vm_unbind_async(fd, vm, 0, 0,
- addr + unbind_n_page_offfset * page_size,
+ addr + unbind_n_page_offset * page_size,
unbind_n_pages * page_size, sync, 2);
igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
@@ -1438,8 +1451,8 @@ try_again_after_invalidate:
data = map + i * page_size;
addr += page_size;
- if (i < unbind_n_page_offfset ||
- i + 1 > unbind_n_page_offfset + unbind_n_pages) {
+ if (i < unbind_n_page_offset ||
+ i + 1 > unbind_n_page_offset + unbind_n_pages) {
b = 0;
data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
data->batch[b++] = sdi_addr;
@@ -1464,8 +1477,8 @@ try_again_after_invalidate:
/* Verify all pages still bound written */
for (i = 0; i < n_binds; ++i) {
- if (i < unbind_n_page_offfset ||
- i + 1 > unbind_n_page_offfset + unbind_n_pages) {
+ if (i < unbind_n_page_offset ||
+ i + 1 > unbind_n_page_offset + unbind_n_pages) {
data = map + i * page_size;
igt_assert_eq(data->data, 0xc0ffee);
}
@@ -1494,13 +1507,13 @@ try_again_after_invalidate:
sync[0].flags |= DRM_XE_SYNC_SIGNAL;
if (flags & MAP_FLAG_USERPTR)
xe_vm_bind_userptr_async(fd, vm, 0,
- addr + unbind_n_page_offfset * page_size,
- addr + unbind_n_page_offfset * page_size,
+ addr + unbind_n_page_offset * page_size,
+ addr + unbind_n_page_offset * page_size,
unbind_n_pages * page_size, sync, 1);
else
xe_vm_bind_async(fd, vm, 0, bo,
- unbind_n_page_offfset * page_size,
- addr + unbind_n_page_offfset * page_size,
+ unbind_n_page_offset * page_size,
+ addr + unbind_n_page_offset * page_size,
unbind_n_pages * page_size, sync, 1);
/* Verify we can use every page */
@@ -1577,11 +1590,15 @@ try_again_after_invalidate:
* @userptr-one-partial: userptr one partial
* @userptr-either-side-partial: userptr either side partial
* @userptr-either-side-full: userptr either side full
+ * @either-side-partial-large-page-hammer:
+ * either side partial large page hammer
+ * @either-side-partial-split-page-hammer:
+ * either side partial split page hammer
*/
static void
test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
- int bo_n_pages, int n_binds, int unbind_n_page_offfset,
+ int bo_n_pages, int n_binds, int unbind_n_page_offset,
int unbind_n_pages, unsigned int flags)
{
struct drm_xe_sync sync[2] = {
@@ -1593,7 +1610,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
.num_syncs = 2,
.syncs = to_user_pointer(sync),
};
- uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+ uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
uint32_t vm;
uint32_t engine;
size_t bo_size;
@@ -1610,6 +1627,14 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
struct thread_data t;
pthread_barrier_t barrier;
int exit = 0;
+ int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
+
+ if (flags & MAP_FLAG_LARGE_PAGE) {
+ bo_n_pages *= n_page_per_2mb;
+ unbind_n_pages *= n_page_per_2mb;
+ if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
+ unbind_n_page_offset *= n_page_per_2mb;
+ }
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
bo_size = page_size * bo_n_pages;
@@ -1704,13 +1729,13 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
if (flags & MAP_FLAG_USERPTR)
xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
- unbind_n_page_offfset * page_size,
- addr + unbind_n_page_offfset * page_size,
+ unbind_n_page_offset * page_size,
+ addr + unbind_n_page_offset * page_size,
unbind_n_pages * page_size, sync, 2);
else
xe_vm_bind_async(fd, vm, 0, bo1,
- unbind_n_page_offfset * page_size,
- addr + unbind_n_page_offfset * page_size,
+ unbind_n_page_offset * page_size,
+ addr + unbind_n_page_offset * page_size,
unbind_n_pages * page_size, sync, 2);
igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
@@ -1809,7 +1834,7 @@ igt_main
const char *name;
int bo_n_pages;
int n_binds;
- int unbind_n_page_offfset;
+ int unbind_n_page_offset;
int unbind_n_pages;
unsigned int flags;
} munmap_sections[] = {
@@ -1818,6 +1843,13 @@ igt_main
{ "either-side-partial", 4, 2, 1, 2, 0 },
{ "either-side-partial-hammer", 4, 2, 1, 2,
MAP_FLAG_HAMMER_FIRST_PAGE },
+ { "either-side-partial-split-page-hammer", 4, 2, 1, 2,
+ MAP_FLAG_HAMMER_FIRST_PAGE |
+ MAP_FLAG_LARGE_PAGE },
+ { "either-side-partial-large-page-hammer", 4, 2, 1, 2,
+ MAP_FLAG_HAMMER_FIRST_PAGE |
+ MAP_FLAG_LARGE_PAGE |
+ MAP_FLAG_LARGE_PAGE_NO_SPLIT },
{ "either-side-full", 4, 4, 1, 2, 0 },
{ "end", 4, 2, 0, 3, 0 },
{ "front", 4, 2, 1, 3, 0 },
@@ -1870,6 +1902,13 @@ igt_main
{ "either-side-full", 4, 4, 1, 2, 0 },
{ "either-side-partial-hammer", 4, 2, 1, 2,
MAP_FLAG_HAMMER_FIRST_PAGE },
+ { "either-side-partial-split-page-hammer", 4, 2, 1, 2,
+ MAP_FLAG_HAMMER_FIRST_PAGE |
+ MAP_FLAG_LARGE_PAGE },
+ { "either-side-partial-large-page-hammer", 4, 2, 1, 2,
+ MAP_FLAG_HAMMER_FIRST_PAGE |
+ MAP_FLAG_LARGE_PAGE |
+ MAP_FLAG_LARGE_PAGE_NO_SPLIT },
{ "end", 4, 2, 0, 3, 0 },
{ "front", 4, 2, 1, 3, 0 },
{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
@@ -2095,7 +2134,7 @@ igt_main
test_munmap_style_unbind(fd, hwe_non_copy,
s->bo_n_pages,
s->n_binds,
- s->unbind_n_page_offfset,
+ s->unbind_n_page_offset,
s->unbind_n_pages,
s->flags);
}
@@ -2109,7 +2148,7 @@ igt_main
test_mmap_style_bind(fd, hwe_non_copy,
s->bo_n_pages,
s->n_binds,
- s->unbind_n_page_offfset,
+ s->unbind_n_page_offset,
s->unbind_n_pages,
s->flags);
}
--
2.34.1
* [igt-dev] [PATCH 5/5] xe: Update to new VM bind uAPI
2023-05-01 23:04 [igt-dev] [PATCH 0/5] IGT updates for upstreaming prep Matthew Brost
2023-05-01 23:04 ` [igt-dev] [PATCH 4/5] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
@ 2023-05-01 23:04 ` Matthew Brost
2023-05-01 23:10 ` [igt-dev] ✗ Fi.CI.BUILD: failure for IGT updates for upstreaming prep Patchwork
From: Matthew Brost @ 2023-05-01 23:04 UTC
To: igt-dev
Update to the new VM bind uAPI: sync vs. async bind engine changes and the
new error handling flow.
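A sketch of the new recovery flow, mirroring the xe_exec_threads.c hunk
below: bind engines are now created explicitly sync or async, and a failed
async bind is recovered by issuing a RESTART with the new RECLAIM flag on a
sync bind engine:

	uint32_t restart_engine = xe_bind_engine_create(fd, vm, 0, false /* sync */);

	/* ... an async bind on another engine has failed ... */
	__xe_vm_bind_assert(fd, vm, restart_engine, 0, 0, 0, 0,
			    XE_VM_BIND_OP_RESTART | XE_VM_BIND_FLAG_RECLAIM,
			    NULL, 0, 0, 0);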
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
include/drm-uapi/xe_drm.h | 79 ++-------------
lib/intel_batchbuffer.c | 2 +-
lib/xe/xe_compute.c | 2 +-
lib/xe/xe_ioctl.c | 15 +--
lib/xe/xe_ioctl.h | 2 +-
lib/xe/xe_query.c | 2 +-
tests/xe/xe_create.c | 2 +-
tests/xe/xe_evict.c | 23 +++--
tests/xe/xe_exec_balancer.c | 6 +-
tests/xe/xe_exec_basic.c | 5 +-
tests/xe/xe_exec_compute_mode.c | 6 +-
tests/xe/xe_exec_fault_mode.c | 6 +-
tests/xe/xe_exec_reset.c | 8 +-
tests/xe/xe_exec_threads.c | 112 ++++++---------------
tests/xe/xe_guc_pc.c | 2 +-
tests/xe/xe_huc_copy.c | 2 +-
tests/xe/xe_intel_bb.c | 2 +-
tests/xe/xe_pm.c | 2 +-
tests/xe/xe_vm.c | 168 +++++++-------------------------
tests/xe/xe_waitfence.c | 18 +---
20 files changed, 120 insertions(+), 344 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 27c51946f..cb4debe4e 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -145,10 +145,11 @@ struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
#define DRM_XE_ENGINE_CLASS_COMPUTE 4
/*
- * Kernel only class (not actual hardware engine class). Used for
+ * Kernel only classes (not actual hardware engine classes). Used for
* creating ordered queues of VM bind operations.
*/
-#define DRM_XE_ENGINE_CLASS_VM_BIND 5
+#define DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC 5
+#define DRM_XE_ENGINE_CLASS_VM_BIND_SYNC 6
__u16 engine_instance;
__u16 gt_id;
@@ -312,39 +313,8 @@ struct drm_xe_gem_mmap_offset {
__u64 reserved[2];
};
-/**
- * struct drm_xe_vm_bind_op_error_capture - format of VM bind op error capture
- */
-struct drm_xe_vm_bind_op_error_capture {
- /** @error: errno that occured */
- __s32 error;
- /** @op: operation that encounter an error */
- __u32 op;
- /** @addr: address of bind op */
- __u64 addr;
- /** @size: size of bind */
- __u64 size;
-};
-
-/** struct drm_xe_ext_vm_set_property - VM set property extension */
-struct drm_xe_ext_vm_set_property {
- /** @base: base user extension */
- struct xe_user_extension base;
-
- /** @property: property to set */
-#define XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS 0
- __u32 property;
-
- /** @value: property value */
- __u64 value;
-
- /** @reserved: Reserved */
- __u64 reserved[2];
-};
-
struct drm_xe_vm_create {
/** @extensions: Pointer to the first extension struct, if any */
-#define XE_VM_EXTENSION_SET_PROPERTY 0
__u64 extensions;
/** @flags: Flags */
@@ -352,7 +322,7 @@ struct drm_xe_vm_create {
#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0)
#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1)
-#define DRM_XE_VM_CREATE_ASYNC_BIND_OPS (0x1 << 2)
+#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2)
#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3)
/** @vm_id: Returned VM ID */
@@ -417,30 +387,6 @@ struct drm_xe_vm_bind_op {
#define XE_VM_BIND_OP_PREFETCH 0x5
#define XE_VM_BIND_FLAG_READONLY (0x1 << 16)
- /*
- * A bind ops completions are always async, hence the support for out
- * sync. This flag indicates the allocation of the memory for new page
- * tables and the job to program the pages tables is asynchronous
- * relative to the IOCTL. That part of a bind operation can fail under
- * memory pressure, the job in practice can't fail unless the system is
- * totally shot.
- *
- * If this flag is clear and the IOCTL doesn't return an error, in
- * practice the bind op is good and will complete.
- *
- * If this flag is set and doesn't return return an error, the bind op
- * can still fail and recovery is needed. If configured, the bind op that
- * caused the error will be captured in drm_xe_vm_bind_op_error_capture.
- * Once the user sees the error (via a ufence +
- * XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS), it should free memory
- * via non-async unbinds, and then restart all queue'd async binds op via
- * XE_VM_BIND_OP_RESTART. Or alternatively the user should destroy the
- * VM.
- *
- * This flag is only allowed when DRM_XE_VM_CREATE_ASYNC_BIND_OPS is
- * configured in the VM and must be set if the VM is configured with
- * DRM_XE_VM_CREATE_ASYNC_BIND_OPS and not in an error state.
- */
#define XE_VM_BIND_FLAG_ASYNC (0x1 << 17)
/*
* Valid on a faulting VM only, do the MAP operation immediately rather
@@ -455,6 +401,7 @@ struct drm_xe_vm_bind_op {
* VK sparse bindings.
*/
#define XE_VM_BIND_FLAG_NULL (0x1 << 19)
+#define XE_VM_BIND_FLAG_RECLAIM (0x1 << 20)
/** @reserved: Reserved */
__u64 reserved[2];
@@ -702,17 +649,10 @@ struct drm_xe_mmio {
struct drm_xe_wait_user_fence {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
- union {
- /**
- * @addr: user pointer address to wait on, must qword aligned
- */
- __u64 addr;
- /**
- * @vm_id: The ID of the VM which encounter an error used with
- * DRM_XE_UFENCE_WAIT_VM_ERROR. Upper 32 bits must be clear.
- */
- __u64 vm_id;
- };
+ /**
+ * @addr: user pointer address to wait on, must be qword aligned
+ */
+ __u64 addr;
/** @op: wait operation (type of comparison) */
#define DRM_XE_UFENCE_WAIT_EQ 0
#define DRM_XE_UFENCE_WAIT_NEQ 1
@@ -724,7 +664,6 @@ struct drm_xe_wait_user_fence {
/** @flags: wait flags */
#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1)
-#define DRM_XE_UFENCE_WAIT_VM_ERROR (1 << 2)
__u16 flags;
/** @value: compare value */
__u64 value;
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 545d17054..b4a6db566 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -940,7 +940,7 @@ __intel_bb_create(int fd, uint32_t ctx, const intel_ctx_cfg_t *cfg,
ibb->gtt_size = 1ull << xe_va_bits(fd);
if (!ctx)
- ctx = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ ctx = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
ibb->uses_full_ppgtt = true;
ibb->allocator_handle =
diff --git a/lib/xe/xe_compute.c b/lib/xe/xe_compute.c
index 2a3686a1b..e5a8ffcbb 100644
--- a/lib/xe/xe_compute.c
+++ b/lib/xe/xe_compute.c
@@ -406,7 +406,7 @@ static void tgl_compute_exec(int fd, const unsigned char *kernel,
/* Sets Kernel size */
bo_dict[0].size = ALIGN(size, 0x1000);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
engine = xe_engine_create_class(fd, vm, DRM_XE_ENGINE_CLASS_RENDER);
sync.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL;
sync.handle = syncobj_create(fd, 0);
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 66a8393fe..77e6d2d13 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -199,16 +199,8 @@ void xe_vm_unbind_async(int fd, uint32_t vm, uint32_t engine,
static void __xe_vm_bind_sync(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
uint64_t addr, uint64_t size, uint32_t op)
{
- struct drm_xe_sync sync = {
- .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
- .handle = syncobj_create(fd, 0),
- };
-
- __xe_vm_bind_assert(fd, vm, 0, bo, offset, addr, size, op, &sync, 1, 0,
+ __xe_vm_bind_assert(fd, vm, 0, bo, offset, addr, size, op, NULL, 0, 0,
0);
-
- igt_assert(syncobj_wait(fd, &sync.handle, 1, INT64_MAX, 0, NULL));
- syncobj_destroy(fd, sync.handle);
}
void xe_vm_bind_sync(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
@@ -260,10 +252,11 @@ uint32_t xe_bo_create(int fd, int gt, uint32_t vm, uint64_t size)
return create.handle;
}
-uint32_t xe_bind_engine_create(int fd, uint32_t vm, uint64_t ext)
+uint32_t xe_bind_engine_create(int fd, uint32_t vm, uint64_t ext, bool async)
{
struct drm_xe_engine_class_instance instance = {
- .engine_class = DRM_XE_ENGINE_CLASS_VM_BIND,
+ .engine_class = async ? DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC :
+ DRM_XE_ENGINE_CLASS_VM_BIND_SYNC,
};
struct drm_xe_engine_create create = {
.extensions = ext,
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index 049cd183d..f5d39e81c 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -69,7 +69,7 @@ uint32_t xe_bo_create(int fd, int gt, uint32_t vm, uint64_t size);
uint32_t xe_engine_create(int fd, uint32_t vm,
struct drm_xe_engine_class_instance *instance,
uint64_t ext);
-uint32_t xe_bind_engine_create(int fd, uint32_t vm, uint64_t ext);
+uint32_t xe_bind_engine_create(int fd, uint32_t vm, uint64_t ext, bool async);
uint32_t xe_engine_create_class(int fd, uint32_t vm, uint16_t class);
void xe_engine_destroy(int fd, uint32_t engine);
uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
diff --git a/lib/xe/xe_query.c b/lib/xe/xe_query.c
index bd5eb1d18..b3a6c5546 100644
--- a/lib/xe/xe_query.c
+++ b/lib/xe/xe_query.c
@@ -300,7 +300,7 @@ bool xe_supports_faults(int fd)
bool supports_faults;
struct drm_xe_vm_create create = {
- .flags = DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ .flags = DRM_XE_VM_CREATE_ASYNC_DEFAULT |
DRM_XE_VM_CREATE_FAULT_MODE,
};
diff --git a/tests/xe/xe_create.c b/tests/xe/xe_create.c
index ae841f809..07744753b 100644
--- a/tests/xe/xe_create.c
+++ b/tests/xe/xe_create.c
@@ -54,7 +54,7 @@ static void create_invalid_size(int fd)
uint32_t handle;
int ret;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
xe_for_each_mem_region(fd, memreg, region) {
memregion = xe_mem_region(fd, region);
diff --git a/tests/xe/xe_evict.c b/tests/xe/xe_evict.c
index 5687cce30..d5ddf9003 100644
--- a/tests/xe/xe_evict.c
+++ b/tests/xe/xe_evict.c
@@ -64,15 +64,17 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
fd = drm_open_driver(DRIVER_XE);
xe_device_get(fd);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
if (flags & BIND_ENGINE)
- bind_engines[0] = xe_bind_engine_create(fd, vm, 0);
+ bind_engines[0] = xe_bind_engine_create(fd, vm, 0, true);
if (flags & MULTI_VM) {
- vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
- vm3 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
+ vm3 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
if (flags & BIND_ENGINE) {
- bind_engines[1] = xe_bind_engine_create(fd, vm2, 0);
- bind_engines[2] = xe_bind_engine_create(fd, vm3, 0);
+ bind_engines[1] = xe_bind_engine_create(fd, vm2, 0,
+ true);
+ bind_engines[2] = xe_bind_engine_create(fd, vm3, 0,
+ true);
}
}
@@ -241,15 +243,16 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
fd = drm_open_driver(DRIVER_XE);
xe_device_get(fd);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT |
DRM_XE_VM_CREATE_COMPUTE_MODE, 0);
if (flags & BIND_ENGINE)
- bind_engines[0] = xe_bind_engine_create(fd, vm, 0);
+ bind_engines[0] = xe_bind_engine_create(fd, vm, 0, true);
if (flags & MULTI_VM) {
- vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT |
DRM_XE_VM_CREATE_COMPUTE_MODE, 0);
if (flags & BIND_ENGINE)
- bind_engines[1] = xe_bind_engine_create(fd, vm2, 0);
+ bind_engines[1] = xe_bind_engine_create(fd, vm2, 0,
+ true);
}
for (i = 0; i < n_engines; i++) {
diff --git a/tests/xe/xe_exec_balancer.c b/tests/xe/xe_exec_balancer.c
index 2018c8104..7096243d7 100644
--- a/tests/xe/xe_exec_balancer.c
+++ b/tests/xe/xe_exec_balancer.c
@@ -66,7 +66,7 @@ static void test_all_active(int fd, int gt, int class)
if (num_placements < 2)
return;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*data) * num_placements;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
@@ -212,7 +212,7 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
if (num_placements < 2)
return;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
@@ -435,7 +435,7 @@ test_cm(int fd, int gt, int class, int n_engines, int n_execs,
if (num_placements < 2)
return;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT |
DRM_XE_VM_CREATE_COMPUTE_MODE, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
diff --git a/tests/xe/xe_exec_basic.c b/tests/xe/xe_exec_basic.c
index f7ad6aeeb..98b7252fa 100644
--- a/tests/xe/xe_exec_basic.c
+++ b/tests/xe/xe_exec_basic.c
@@ -113,7 +113,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(n_vm <= MAX_N_ENGINES);
for (i = 0; i < n_vm; ++i)
- vm[i] = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm[i] = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
@@ -156,7 +156,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
engines[i] = xe_engine_create(fd, __vm, eci, 0);
if (flags & BIND_ENGINE)
- bind_engines[i] = xe_bind_engine_create(fd, __vm, 0);
+ bind_engines[i] = xe_bind_engine_create(fd, __vm, 0,
+ true);
else
bind_engines[i] = 0;
syncobjs[i] = syncobj_create(fd, 0);
diff --git a/tests/xe/xe_exec_compute_mode.c b/tests/xe/xe_exec_compute_mode.c
index 685193990..750815764 100644
--- a/tests/xe/xe_exec_compute_mode.c
+++ b/tests/xe/xe_exec_compute_mode.c
@@ -116,7 +116,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(n_engines <= MAX_N_ENGINES);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT |
DRM_XE_VM_CREATE_COMPUTE_MODE, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
@@ -134,7 +134,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
to_user_pointer(&ext));
if (flags & BIND_ENGINE)
bind_engines[i] =
- xe_bind_engine_create(fd, vm, 0);
+ xe_bind_engine_create(fd, vm, 0, true);
else
bind_engines[i] = 0;
};
@@ -170,7 +170,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
to_user_pointer(&ext));
if (flags & BIND_ENGINE)
bind_engines[i] =
- xe_bind_engine_create(fd, vm, 0);
+ xe_bind_engine_create(fd, vm, 0, true);
else
bind_engines[i] = 0;
};
diff --git a/tests/xe/xe_exec_fault_mode.c b/tests/xe/xe_exec_fault_mode.c
index a3ab17270..9950ab542 100644
--- a/tests/xe/xe_exec_fault_mode.c
+++ b/tests/xe/xe_exec_fault_mode.c
@@ -136,7 +136,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(n_engines <= MAX_N_ENGINES);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT |
DRM_XE_VM_CREATE_FAULT_MODE, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
@@ -168,7 +168,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
engines[i] = xe_engine_create(fd, vm, eci, 0);
if (flags & BIND_ENGINE)
bind_engines[i] =
- xe_bind_engine_create(fd, vm, 0);
+ xe_bind_engine_create(fd, vm, 0, true);
else
bind_engines[i] = 0;
};
@@ -382,7 +382,7 @@ test_atomic(int fd, struct drm_xe_engine_class_instance *eci,
uint32_t *ptr;
int i, b, wait_idx = 0;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT |
DRM_XE_VM_CREATE_FAULT_MODE, 0);
bo_size = sizeof(*data) * n_atomic;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
diff --git a/tests/xe/xe_exec_reset.c b/tests/xe/xe_exec_reset.c
index 0d72a3f20..6007bcf6b 100644
--- a/tests/xe/xe_exec_reset.c
+++ b/tests/xe/xe_exec_reset.c
@@ -46,7 +46,7 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
uint32_t bo = 0;
struct xe_spin *spin;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*spin);
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
@@ -192,7 +192,7 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
if (num_placements < 2)
return;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
@@ -393,7 +393,7 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
xe_device_get(fd);
}
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
@@ -571,7 +571,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
xe_device_get(fd);
}
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT |
DRM_XE_VM_CREATE_COMPUTE_MODE, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
diff --git a/tests/xe/xe_exec_threads.c b/tests/xe/xe_exec_threads.c
index 3f2c2de9e..01bfa6d9e 100644
--- a/tests/xe/xe_exec_threads.c
+++ b/tests/xe/xe_exec_threads.c
@@ -78,7 +78,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
}
if (!vm) {
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
owns_vm = true;
}
@@ -287,7 +287,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
}
if (!vm) {
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT |
XE_ENGINE_SET_PROPERTY_COMPUTE_MODE, 0);
owns_vm = true;
}
@@ -475,6 +475,7 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
};
uint32_t engines[MAX_N_ENGINES];
uint32_t bind_engines[MAX_N_ENGINES];
+ uint32_t restart_engine;
uint32_t syncobjs[MAX_N_ENGINES];
size_t bo_size;
uint32_t bo = 0;
@@ -496,7 +497,7 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
}
if (!vm) {
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
owns_vm = true;
}
@@ -537,13 +538,15 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
else
engines[i] = xe_engine_create(fd, vm, eci, 0);
if (flags & BIND_ENGINE)
- bind_engines[i] = xe_bind_engine_create(fd, vm, 0);
+ bind_engines[i] = xe_bind_engine_create(fd, vm, 0,
+ true);
else
bind_engines[i] = 0;
syncobjs[i] = syncobj_create(fd, 0);
sync_all[i].flags = DRM_XE_SYNC_SYNCOBJ;
sync_all[i].handle = syncobjs[i];
- };
+ }
+ restart_engine = xe_bind_engine_create(fd, vm, 0, false);
pthread_barrier_wait(&barrier);
@@ -603,17 +606,23 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
if (flags & REBIND && i &&
(!(i & 0x1f) || rebind_error_inject == i)) {
#define INJECT_ERROR (0x1 << 31)
- if (rebind_error_inject == i)
- __xe_vm_bind_assert(fd, vm, bind_engines[e],
- 0, 0, addr, bo_size,
- XE_VM_BIND_OP_UNMAP |
- XE_VM_BIND_FLAG_ASYNC |
- INJECT_ERROR, sync_all,
- n_engines, 0, 0);
- else
+ if (rebind_error_inject == i) {
+ __xe_vm_bind(fd, vm, bind_engines[e],
+ 0, 0, addr, bo_size,
+ XE_VM_BIND_OP_UNMAP |
+ XE_VM_BIND_FLAG_ASYNC |
+ INJECT_ERROR, sync_all,
+ n_engines, 0, 0);
+ __xe_vm_bind_assert(fd, vm, restart_engine,
+ 0, 0, 0, 0,
+ XE_VM_BIND_OP_RESTART |
+ XE_VM_BIND_FLAG_RECLAIM, NULL,
+ 0, 0, 0);
+ } else {
xe_vm_unbind_async(fd, vm, bind_engines[e],
0, addr, bo_size,
sync_all, n_engines);
+ }
sync[0].flags |= DRM_XE_SYNC_SIGNAL;
addr += bo_size;
@@ -687,6 +696,7 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
if (bind_engines[i])
xe_engine_destroy(fd, bind_engines[i]);
}
+ xe_engine_destroy(fd, restart_engine);
if (bo) {
munmap(data, bo_size);
@@ -746,47 +756,6 @@ static void *thread(void *data)
return NULL;
}
-struct vm_thread_data {
- pthread_t thread;
- struct drm_xe_vm_bind_op_error_capture *capture;
- int fd;
- int vm;
-};
-
-static void *vm_async_ops_err_thread(void *data)
-{
- struct vm_thread_data *args = data;
- int fd = args->fd;
- int ret;
-
- struct drm_xe_wait_user_fence wait = {
- .vm_id = args->vm,
- .op = DRM_XE_UFENCE_WAIT_NEQ,
- .flags = DRM_XE_UFENCE_WAIT_VM_ERROR,
- .mask = DRM_XE_UFENCE_WAIT_U32,
-#define BASICALLY_FOREVER 0xffffffffffff
- .timeout = BASICALLY_FOREVER,
- };
-
- ret = igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
-
- while (!ret) {
- struct drm_xe_vm_bind bind = {
- .vm_id = args->vm,
- .num_binds = 1,
- .bind.op = XE_VM_BIND_OP_RESTART,
- };
-
- /* Restart and wait for next error */
- igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_BIND,
- &bind), 0);
- args->capture->error = 0;
- ret = igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
- }
-
- return NULL;
-}
-
/**
* SUBTEST: threads-%s
* Description: Run threads %arg[1] test with multi threads
@@ -1032,8 +1001,6 @@ static void threads(int fd, int flags)
int n_hw_engines = 0, class;
uint64_t i = 0;
uint32_t vm_legacy_mode = 0, vm_compute_mode = 0;
- struct drm_xe_vm_bind_op_error_capture capture = {};
- struct vm_thread_data vm_err_thread = {};
bool go = false;
int n_threads = 0;
int gt;
@@ -1065,28 +1032,13 @@ static void threads(int fd, int flags)
pthread_cond_init(&cond, 0);
if (flags & SHARED_VM) {
- struct drm_xe_ext_vm_set_property ext = {
- .base.next_extension = 0,
- .base.name = XE_VM_EXTENSION_SET_PROPERTY,
- .property =
- XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS,
- .value = to_user_pointer(&capture),
- };
-
vm_legacy_mode = xe_vm_create(fd,
- DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
- to_user_pointer(&ext));
+ DRM_XE_VM_CREATE_ASYNC_DEFAULT,
+ 0);
vm_compute_mode = xe_vm_create(fd,
- DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
+ DRM_XE_VM_CREATE_ASYNC_DEFAULT |
XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
0);
-
- vm_err_thread.capture = &capture;
- vm_err_thread.fd = fd;
- vm_err_thread.vm = vm_legacy_mode;
- pthread_create(&vm_err_thread.thread, 0,
- vm_async_ops_err_thread, &vm_err_thread);
-
}
xe_for_each_hw_engine(fd, hwe) {
@@ -1213,8 +1165,6 @@ static void threads(int fd, int flags)
if (vm_compute_mode)
xe_vm_destroy(fd, vm_compute_mode);
free(threads_data);
- if (flags & SHARED_VM)
- pthread_join(vm_err_thread.thread, NULL);
pthread_barrier_destroy(&barrier);
}
@@ -1237,9 +1187,8 @@ igt_main
{ "shared-vm-rebind-bindengine", SHARED_VM | REBIND |
BIND_ENGINE },
{ "shared-vm-userptr-rebind", SHARED_VM | USERPTR | REBIND },
- { "shared-vm-rebind-err", SHARED_VM | REBIND | REBIND_ERROR },
- { "shared-vm-userptr-rebind-err", SHARED_VM | USERPTR |
- REBIND | REBIND_ERROR},
+ { "rebind-err", REBIND | REBIND_ERROR },
+ { "userptr-rebind-err", USERPTR | REBIND | REBIND_ERROR},
{ "shared-vm-userptr-invalidate", SHARED_VM | USERPTR |
INVALIDATE },
{ "shared-vm-userptr-invalidate-race", SHARED_VM | USERPTR |
@@ -1263,10 +1212,9 @@ igt_main
{ "hang-shared-vm-rebind", HANG | SHARED_VM | REBIND },
{ "hang-shared-vm-userptr-rebind", HANG | SHARED_VM | USERPTR |
REBIND },
- { "hang-shared-vm-rebind-err", HANG | SHARED_VM | REBIND |
+ { "hang-rebind-err", HANG | REBIND | REBIND_ERROR },
+ { "hang-userptr-rebind-err", HANG | USERPTR | REBIND |
REBIND_ERROR },
- { "hang-shared-vm-userptr-rebind-err", HANG | SHARED_VM |
- USERPTR | REBIND | REBIND_ERROR },
{ "hang-shared-vm-userptr-invalidate", HANG | SHARED_VM |
USERPTR | INVALIDATE },
{ "hang-shared-vm-userptr-invalidate-race", HANG | SHARED_VM |
diff --git a/tests/xe/xe_guc_pc.c b/tests/xe/xe_guc_pc.c
index 5c71ae147..4025bb312 100644
--- a/tests/xe/xe_guc_pc.c
+++ b/tests/xe/xe_guc_pc.c
@@ -59,7 +59,7 @@ static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(n_engines <= MAX_N_ENGINES);
igt_assert(n_execs > 0);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
diff --git a/tests/xe/xe_huc_copy.c b/tests/xe/xe_huc_copy.c
index fdac907d6..cff212180 100644
--- a/tests/xe/xe_huc_copy.c
+++ b/tests/xe/xe_huc_copy.c
@@ -118,7 +118,7 @@ test_huc_copy(int fd)
{ .addr = ADDR_BATCH, .size = SIZE_BATCH }, // batch
};
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
engine = xe_engine_create_class(fd, vm, DRM_XE_ENGINE_CLASS_VIDEO_DECODE);
sync.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL;
sync.handle = syncobj_create(fd, 0);
diff --git a/tests/xe/xe_intel_bb.c b/tests/xe/xe_intel_bb.c
index 35d61608e..5e24360c2 100644
--- a/tests/xe/xe_intel_bb.c
+++ b/tests/xe/xe_intel_bb.c
@@ -195,7 +195,7 @@ static void simple_bb(struct buf_ops *bops, bool new_context)
intel_bb_reset(ibb, true);
if (new_context) {
- ctx = xe_vm_create(xe, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ ctx = xe_vm_create(xe, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
intel_bb_destroy(ibb);
ibb = intel_bb_create_with_context(xe, ctx, NULL, PAGE_SIZE);
intel_bb_out(ibb, MI_BATCH_BUFFER_END);
diff --git a/tests/xe/xe_pm.c b/tests/xe/xe_pm.c
index 44154143c..97e73e82a 100644
--- a/tests/xe/xe_pm.c
+++ b/tests/xe/xe_pm.c
@@ -238,7 +238,7 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
if (check_rpm)
igt_assert(in_d3(device, d_state));
- vm = xe_vm_create(device.fd_xe, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(device.fd_xe, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
if (check_rpm)
igt_assert(out_of_d3(device, d_state));
diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 694f829b3..96cb06779 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -275,7 +275,7 @@ static void unbind_all(int fd, int n_vmas)
{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
};
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo = xe_bo_create(fd, 0, vm, bo_size);
for (i = 0; i < n_vmas; ++i)
@@ -344,57 +344,6 @@ struct vm_thread_data {
* TODO: change ``'Run type' == FULL`` to a better category
*/
-static void *vm_async_ops_err_thread(void *data)
-{
- struct vm_thread_data *args = data;
- int fd = args->fd;
- uint64_t addr = 0x201a0000;
- int num_binds = 0;
- int ret;
-
- struct drm_xe_wait_user_fence wait = {
- .vm_id = args->vm,
- .op = DRM_XE_UFENCE_WAIT_NEQ,
- .flags = DRM_XE_UFENCE_WAIT_VM_ERROR,
- .mask = DRM_XE_UFENCE_WAIT_U32,
- .timeout = 1000,
- };
-
- igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE,
- &wait), 0);
- if (args->destroy) {
- usleep(5000); /* Wait other binds to queue up */
- xe_vm_destroy(fd, args->vm);
- return NULL;
- }
-
- while (!ret) {
- struct drm_xe_vm_bind bind = {
- .vm_id = args->vm,
- .num_binds = 1,
- .bind.op = XE_VM_BIND_OP_RESTART,
- };
-
- /* VM sync ops should work */
- if (!(num_binds++ % 2)) {
- xe_vm_bind_sync(fd, args->vm, args->bo, 0, addr,
- args->bo_size);
- } else {
- xe_vm_unbind_sync(fd, args->vm, 0, addr,
- args->bo_size);
- addr += args->bo_size * 2;
- }
-
- /* Restart and wait for next error */
- igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_BIND,
- &bind), 0);
- args->capture->error = 0;
- ret = igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
- }
-
- return NULL;
-}
-
static void vm_async_ops_err(int fd, bool destroy)
{
uint32_t vm;
@@ -403,99 +352,56 @@ static void vm_async_ops_err(int fd, bool destroy)
.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
};
#define N_BINDS 32
- struct drm_xe_vm_bind_op_error_capture capture = {};
- struct drm_xe_ext_vm_set_property ext = {
- .base.next_extension = 0,
- .base.name = XE_VM_EXTENSION_SET_PROPERTY,
- .property = XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS,
- .value = to_user_pointer(&capture),
- };
- struct vm_thread_data thread = {};
uint32_t syncobjs[N_BINDS];
+ uint32_t restart_engine;
size_t bo_size = 0x1000 * 32;
uint32_t bo;
- int i, j;
+ int i, j = 0;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
- to_user_pointer(&ext));
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo = xe_bo_create(fd, 0, vm, bo_size);
-
- thread.capture = &capture;
- thread.fd = fd;
- thread.vm = vm;
- thread.bo = bo;
- thread.bo_size = bo_size;
- thread.destroy = destroy;
- pthread_create(&thread.thread, 0, vm_async_ops_err_thread, &thread);
+ restart_engine = xe_bind_engine_create(fd, vm, 0, false);
for (i = 0; i < N_BINDS; i++)
syncobjs[i] = syncobj_create(fd, 0);
- for (j = 0, i = 0; i < N_BINDS / 4; i++, j++) {
- sync.handle = syncobjs[j];
+ for (i = 0; i < N_BINDS; i++) {
+ sync.handle = syncobjs[i];
#define INJECT_ERROR (0x1 << 31)
- if (i == N_BINDS / 8) /* Inject error on this bind */
- __xe_vm_bind_assert(fd, vm, 0, bo, 0,
- addr + i * bo_size * 2,
- bo_size, XE_VM_BIND_OP_MAP |
- XE_VM_BIND_FLAG_ASYNC |
- INJECT_ERROR, &sync, 1, 0, 0);
- else
- xe_vm_bind_async(fd, vm, 0, bo, 0,
- addr + i * bo_size * 2,
- bo_size, &sync, 1);
- }
+ if ((i == N_BINDS / 8 && destroy) ||
+ (!((i + 1) % (N_BINDS / 8)) && !destroy)) { /* Inject error on this bind */
+ __xe_vm_bind(fd, vm, 0, bo, 0,
+ addr + i * bo_size * 2,
+ bo_size, XE_VM_BIND_OP_MAP |
+ XE_VM_BIND_FLAG_ASYNC | INJECT_ERROR,
+ &sync, 1, 0, 0);
+
+ if (destroy)
+ break;
- for (i = 0; i < N_BINDS / 4; i++, j++) {
- sync.handle = syncobjs[j];
- if (i == N_BINDS / 8)
- __xe_vm_bind_assert(fd, vm, 0, 0, 0,
- addr + i * bo_size * 2,
+ __xe_vm_bind_assert(fd, vm, restart_engine, 0, 0,
+ addr + j++ * bo_size * 2,
bo_size, XE_VM_BIND_OP_UNMAP |
- XE_VM_BIND_FLAG_ASYNC |
- INJECT_ERROR, &sync, 1, 0, 0);
- else
- xe_vm_unbind_async(fd, vm, 0, 0,
- addr + i * bo_size * 2,
- bo_size, &sync, 1);
- }
-
- for (i = 0; i < N_BINDS / 4; i++, j++) {
- sync.handle = syncobjs[j];
- if (i == N_BINDS / 8)
- __xe_vm_bind_assert(fd, vm, 0, bo, 0,
- addr + i * bo_size * 2,
- bo_size, XE_VM_BIND_OP_MAP |
- XE_VM_BIND_FLAG_ASYNC |
- INJECT_ERROR, &sync, 1, 0, 0);
- else
+ XE_VM_BIND_FLAG_RECLAIM,
+ 0, 0, 0, 0);
+ __xe_vm_bind_assert(fd, vm, restart_engine,
+ 0, 0, 0, 0,
+ XE_VM_BIND_OP_RESTART |
+ XE_VM_BIND_FLAG_RECLAIM, NULL,
+ 0, 0, 0);
+ } else {
xe_vm_bind_async(fd, vm, 0, bo, 0,
addr + i * bo_size * 2,
bo_size, &sync, 1);
+ }
}
- for (i = 0; i < N_BINDS / 4; i++, j++) {
- sync.handle = syncobjs[j];
- if (i == N_BINDS / 8)
- __xe_vm_bind_assert(fd, vm, 0, 0, 0,
- addr + i * bo_size * 2,
- bo_size, XE_VM_BIND_OP_UNMAP |
- XE_VM_BIND_FLAG_ASYNC |
- INJECT_ERROR, &sync, 1, 0, 0);
- else
- xe_vm_unbind_async(fd, vm, 0, 0,
- addr + i * bo_size * 2,
- bo_size, &sync, 1);
- }
-
- for (i = 0; i < N_BINDS; i++)
+ for (i = 0; i < (destroy ? (N_BINDS / 8 - 1) : N_BINDS); i++)
igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
NULL));
if (!destroy)
xe_vm_destroy(fd, vm);
-
- pthread_join(thread.thread, NULL);
}
/**
@@ -547,7 +453,7 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
data = malloc(sizeof(*data) * n_bo);
igt_assert(data);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(struct shared_pte_page_data);
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
@@ -717,7 +623,7 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
} *data;
int i, b;
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*data) * N_ENGINES;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
@@ -726,7 +632,7 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
for (i = 0; i < N_ENGINES; i++) {
engines[i] = xe_engine_create(fd, vm, eci, 0);
- bind_engines[i] = xe_bind_engine_create(fd, vm, 0);
+ bind_engines[i] = xe_bind_engine_create(fd, vm, 0, true);
syncobjs[i] = syncobj_create(fd, 0);
}
syncobjs[N_ENGINES] = syncobj_create(fd, 0);
@@ -873,7 +779,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
igt_assert(n_execs <= BIND_ARRAY_MAX_N_EXEC);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
@@ -882,7 +788,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
data = xe_bo_map(fd, bo, bo_size);
if (flags & BIND_ARRAY_BIND_ENGINE_FLAG)
- bind_engine = xe_bind_engine_create(fd, vm, 0);
+ bind_engine = xe_bind_engine_create(fd, vm, 0, true);
engine = xe_engine_create(fd, vm, eci, 0);
for (i = 0; i < n_execs; ++i) {
@@ -1050,7 +956,7 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
}
igt_assert(n_engines <= MAX_N_ENGINES);
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
if (flags & LARGE_BIND_FLAG_USERPTR) {
map = aligned_alloc(xe_get_default_alignment(fd), bo_size);
@@ -1337,7 +1243,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
unbind_n_page_offset *= n_page_per_2mb;
}
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = page_size * bo_n_pages;
if (flags & MAP_FLAG_USERPTR) {
@@ -1636,7 +1542,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
unbind_n_page_offset *= n_page_per_2mb;
}
- vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+ vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_size = page_size * bo_n_pages;
if (flags & MAP_FLAG_USERPTR) {
diff --git a/tests/xe/xe_waitfence.c b/tests/xe/xe_waitfence.c
index cdfcacdb4..917957442 100644
--- a/tests/xe/xe_waitfence.c
+++ b/tests/xe/xe_waitfence.c
@@ -34,7 +34,7 @@ static void do_bind(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
sync[0].addr = to_user_pointer(&wait_fence);
sync[0].timeline_value = val;
- xe_vm_bind(fd, vm, bo, offset, addr, size, sync, 1);
+ xe_vm_bind_async(fd, vm, 0, bo, offset, addr, size, sync, 1);
}
/**
@@ -52,7 +52,7 @@ test(int fd)
uint32_t bo_6;
uint32_t bo_7;
- uint32_t vm = xe_vm_create(fd, 0, 0);
+ uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_DEFAULT, 0);
bo_1 = xe_bo_create_flags(fd, vm, 0x40000, MY_FLAG);
do_bind(fd, vm, bo_1, 0, 0x200000, 0x40000, 1);
bo_2 = xe_bo_create_flags(fd, vm, 0x40000, MY_FLAG);
@@ -68,20 +68,6 @@ test(int fd)
bo_7 = xe_bo_create_flags(fd, vm, 0x10000, MY_FLAG);
do_bind(fd, vm, bo_7, 0, 0xeffff0000, 0x10000, 7);
xe_wait_ufence(fd, &wait_fence, 7, NULL, 2000);
- xe_vm_unbind_sync(fd, vm, 0, 0x200000, 0x40000);
- xe_vm_unbind_sync(fd, vm, 0, 0xc0000000, 0x40000);
- xe_vm_unbind_sync(fd, vm, 0, 0x180000000, 0x40000);
- xe_vm_unbind_sync(fd, vm, 0, 0x140000000, 0x10000);
- xe_vm_unbind_sync(fd, vm, 0, 0x100000000, 0x100000);
- xe_vm_unbind_sync(fd, vm, 0, 0xc0040000, 0x1c0000);
- xe_vm_unbind_sync(fd, vm, 0, 0xeffff0000, 0x10000);
- gem_close(fd, bo_7);
- gem_close(fd, bo_6);
- gem_close(fd, bo_5);
- gem_close(fd, bo_4);
- gem_close(fd, bo_3);
- gem_close(fd, bo_2);
- gem_close(fd, bo_1);
}
igt_main
--
2.34.1
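The rework above replaces the user-fence error-capture thread with an
inline recovery sequence: when an async bind faults, the test unmaps the
faulted range and restarts bind processing from a dedicated bind engine.
A minimal standalone sketch of that sequence, assuming the helper
signatures and flag semantics shown in the hunks of this series; the
wrapper name recover_failed_bind is illustrative, not part of the series:

/*
 * Sketch of the recovery flow exercised by the reworked
 * vm_async_ops_err() above. Helper signatures follow the hunks in
 * this series; the wrapper itself is hypothetical.
 */
static void recover_failed_bind(int fd, uint32_t vm, uint64_t addr,
				size_t bo_size)
{
	/* Separate bind engine so recovery ops do not queue behind the
	 * faulted async binds. */
	uint32_t restart_engine = xe_bind_engine_create(fd, vm, 0, false);

	/* Unmap the faulted range; RECLAIM allows the operation to run
	 * while the VM is still in its error state. */
	__xe_vm_bind_assert(fd, vm, restart_engine, 0, 0, addr, bo_size,
			    XE_VM_BIND_OP_UNMAP | XE_VM_BIND_FLAG_RECLAIM,
			    NULL, 0, 0, 0);

	/* Resume processing of the remaining queued binds. */
	__xe_vm_bind_assert(fd, vm, restart_engine, 0, 0, 0, 0,
			    XE_VM_BIND_OP_RESTART | XE_VM_BIND_FLAG_RECLAIM,
			    NULL, 0, 0, 0);

	xe_engine_destroy(fd, restart_engine);
}

With this flow, the drm_xe_vm_bind_op_error_capture extension and the
watcher thread removed above are no longer needed: the submitter itself
observes the failure and recovers on the same ioctl path.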
* [igt-dev] ✗ Fi.CI.BUILD: failure for IGT updates for upstreaming prep
2023-05-01 23:04 [igt-dev] [PATCH 0/5] IGT updates for upstreaming prep Matthew Brost
` (4 preceding siblings ...)
2023-05-01 23:04 ` [igt-dev] [PATCH 5/5] xe: Update to new VM bind uAPI Matthew Brost
@ 2023-05-01 23:10 ` Patchwork
5 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2023-05-01 23:10 UTC (permalink / raw)
To: Matthew Brost; +Cc: igt-dev
== Series Details ==
Series: IGT updates for upstreaming prep
URL : https://patchwork.freedesktop.org/series/117177/
State : failure
== Summary ==
IGT patchset build failed on latest successful build
1cb3507f3ff28d11bd5cfabcde576fe78ddab571 intel-ci/xe-fast-feedback.testlist: Add eviction tests
ninja: Entering directory `/opt/igt/build'
[1/436] Generating version.h with a custom command.
[2/4] Generating xe_tests.rst with a custom command.
FAILED: docs/testplan/xe_tests.rst
/usr/src/igt-gpu-tools/scripts/igt_doc.py --config /usr/src/igt-gpu-tools/tests/xe/xe_test_config.json --rest docs/testplan/xe_tests.rst --check-testlist --igt-build-path /opt/igt/build
Warning: Missing documentation for igt@xe_exec_threads@threads-hang-rebind-err
Warning: Missing documentation for igt@xe_exec_threads@threads-hang-userptr-rebind-err
Warning: Missing documentation for igt@xe_exec_threads@threads-rebind-err
Warning: Missing documentation for igt@xe_exec_threads@threads-userptr-rebind-err
ninja: build stopped: subcommand failed.
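The four warnings map to the subtests renamed in patch 5 (rebind-err,
userptr-rebind-err, hang-rebind-err, hang-userptr-rebind-err): the
section tables were updated, but the doc comments that igt_doc.py
validates against tests/xe/xe_test_config.json were not. A follow-up
would extend the SUBTEST documentation in tests/xe/xe_exec_threads.c
along these lines (a sketch: the @value list convention is assumed from
igt_doc.py's checks, and the descriptions are illustrative):

/**
 * SUBTEST: threads-%s
 * Description: Run threads %arg[1] test with multi threads
 *
 * arg[1]:
 *
 * @rebind-err:			rebind error path
 * @userptr-rebind-err:		userptr rebind error path
 * @hang-rebind-err:		rebind error path with hang
 * @hang-userptr-rebind-err:	userptr rebind error path with hang
 */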