Igt-dev Archive on lore.kernel.org
* [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section
  2023-07-10 14:58 [igt-dev] [PATCH 1/5] " Matthew Brost
@ 2023-07-10 14:58 ` Matthew Brost
  2023-07-13 15:24   ` Rodrigo Vivi
  0 siblings, 1 reply; 11+ messages in thread
From: Matthew Brost @ 2023-07-10 14:58 UTC (permalink / raw)
  To: igt-dev

GPUVA added support for this, so let's test it.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 350 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 314 insertions(+), 36 deletions(-)
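
An mmap-style bind replaces part of an existing mapping in place, much as
mmap() with MAP_FIXED replaces part of an already-mapped range: the old VMA
is split and the overlap is rebound without an explicit unbind first. A
minimal sketch of the idea using the helpers exercised by this test (the
addresses and sizes are illustrative, not taken from the patch):

	/* Initial bind: [addr, addr + bo_size) backed by bo0 */
	xe_vm_bind_async(fd, vm, 0, bo0, 0, addr, bo_size, sync, 1);

	/* mmap-style bind: rebind one page in the middle to bo1 without
	 * unbinding first; the old VMA is split and the overlap replaced.
	 */
	xe_vm_bind_async(fd, vm, 0, bo1, 0, addr + page_size, page_size,
			 sync, 1);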

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 04d6c3956..434bdb332 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -1249,9 +1249,9 @@ static void *hammer_thread(void *tdata)
 	return NULL;
 }
 
-#define MUNMAP_FLAG_USERPTR		(0x1 << 0)
-#define MUNMAP_FLAG_INVALIDATE		(0x1 << 1)
-#define MUNMAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
+#define MAP_FLAG_USERPTR		(0x1 << 0)
+#define MAP_FLAG_INVALIDATE		(0x1 << 1)
+#define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
 
 
 /**
@@ -1344,7 +1344,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
 	bo_size = page_size * bo_n_pages;
 
-	if (flags & MUNMAP_FLAG_USERPTR) {
+	if (flags & MAP_FLAG_USERPTR) {
 		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
 			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
 			    MAP_ANONYMOUS, -1, 0);
@@ -1363,7 +1363,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	/* Do initial binds */
 	bind_size = (page_size * bo_n_pages) / n_binds;
 	for (i = 0; i < n_binds; ++i) {
-		if (flags & MUNMAP_FLAG_USERPTR)
+		if (flags & MAP_FLAG_USERPTR)
 			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
 						 bind_size, sync, 1);
 		else
@@ -1378,7 +1378,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	 * cause a fault if a rebind occurs during munmap style VM unbind
 	 * (partial VMAs unbound).
 	 */
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		t.fd = fd;
 		t.vm = vm;
 #define PAGE_SIZE	4096
@@ -1437,7 +1437,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 		data = map + i * page_size;
 		igt_assert_eq(data->data, 0xc0ffee);
 	}
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		memset(map, 0, PAGE_SIZE / 2);
 		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
 	} else {
@@ -1487,7 +1487,7 @@ try_again_after_invalidate:
 			igt_assert_eq(data->data, 0xc0ffee);
 		}
 	}
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		memset(map, 0, PAGE_SIZE / 2);
 		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
 	} else {
@@ -1498,7 +1498,7 @@ try_again_after_invalidate:
 	 * The munmap style VM unbind can create new VMAs, make sure those are
 	 * in the bookkeeping for another rebind after a userptr invalidate.
 	 */
-	if (flags & MUNMAP_FLAG_INVALIDATE && !invalidate++) {
+	if (flags & MAP_FLAG_INVALIDATE && !invalidate++) {
 		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
 			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
 			    MAP_ANONYMOUS, -1, 0);
@@ -1509,7 +1509,7 @@ try_again_after_invalidate:
 	/* Confirm unbound region can be rebound */
 	syncobj_reset(fd, &sync[0].handle, 1);
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
-	if (flags & MUNMAP_FLAG_USERPTR)
+	if (flags & MAP_FLAG_USERPTR)
 		xe_vm_bind_userptr_async(fd, vm, 0,
 					 addr + unbind_n_page_offfset * page_size,
 					 addr + unbind_n_page_offfset * page_size,
@@ -1557,7 +1557,7 @@ try_again_after_invalidate:
 		igt_assert_eq(data->data, 0xc0ffee);
 	}
 
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		exit = 1;
 		pthread_join(t.thread, NULL);
 		pthread_barrier_destroy(&barrier);
@@ -1572,6 +1572,251 @@ try_again_after_invalidate:
 	xe_vm_destroy(fd, vm);
 }
 
+/**
+ * SUBTEST: mmap-style-bind-%s
+ * Description: Test mmap style bind with %arg[1]
+ * Run type: FULL
+ * TODO: change ``'Run type' == FULL`` to a better category
+ *
+ * arg[1]:
+ *
+ * @all:				all
+ * @one-partial:			one partial
+ * @either-side-partial:		either side partial
+ * @either-side-full:			either side full
+ * @either-side-partial-hammer:		either side partial hammer
+ * @end:				end
+ * @front:				front
+ * @many-all:				many all
+ * @many-either-side-partial:		many either side partial
+ * @many-either-side-partial-hammer:	many either side partial hammer
+ * @userptr-all:			userptr all
+ * @userptr-one-partial:		userptr one partial
+ * @userptr-either-side-partial:	userptr either side partial
+ * @userptr-either-side-full:		userptr either side full
+ */
+
+static void
+test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
+		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
+		     int unbind_n_pages, unsigned int flags)
+{
+	struct drm_xe_sync sync[2] = {
+		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+	};
+	struct drm_xe_exec exec = {
+		.num_batch_buffer = 1,
+		.num_syncs = 2,
+		.syncs = to_user_pointer(sync),
+	};
+	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+	uint32_t vm;
+	uint32_t engine;
+	size_t bo_size;
+	uint32_t bo0 = 0, bo1 = 0;
+	uint64_t bind_size;
+	uint64_t page_size = xe_get_default_alignment(fd);
+	struct {
+		uint32_t batch[16];
+		uint64_t pad;
+		uint32_t data;
+	} *data;
+	void *map0, *map1;
+	int i, b;
+	struct thread_data t;
+	pthread_barrier_t barrier;
+	int exit = 0;
+
+	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+	bo_size = page_size * bo_n_pages;
+
+	if (flags & MAP_FLAG_USERPTR) {
+		map0 = mmap(from_user_pointer(addr), bo_size, PROT_READ |
+			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
+			    MAP_ANONYMOUS, -1, 0);
+		map1 = mmap(from_user_pointer(addr + bo_size),
+			    bo_size, PROT_READ | PROT_WRITE, MAP_SHARED |
+			    MAP_FIXED | MAP_ANONYMOUS, -1, 0);
+		igt_assert(map0 != MAP_FAILED);
+		igt_assert(map1 != MAP_FAILED);
+	} else {
+		bo0 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+		map0 = xe_bo_map(fd, bo0, bo_size);
+		bo1 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+		map1 = xe_bo_map(fd, bo1, bo_size);
+	}
+	memset(map0, 0, bo_size);
+	memset(map1, 0, bo_size);
+
+	engine = xe_engine_create(fd, vm, eci, 0);
+
+	sync[0].handle = syncobj_create(fd, 0);
+	sync[1].handle = syncobj_create(fd, 0);
+
+	/* Do initial binds */
+	bind_size = (page_size * bo_n_pages) / n_binds;
+	for (i = 0; i < n_binds; ++i) {
+		if (flags & MAP_FLAG_USERPTR)
+			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
+						 bind_size, sync, 1);
+		else
+			xe_vm_bind_async(fd, vm, 0, bo0, i * bind_size,
+					 addr, bind_size, sync, 1);
+		addr += bind_size;
+	}
+	addr = base_addr;
+
+	/*
+	 * Kick a thread to write the first page continuously to ensure we can't
+	 * cause a fault if a rebind occurs during munmap style VM unbind
+	 * (partial VMAs unbound).
+	 */
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		t.fd = fd;
+		t.vm = vm;
+#define PAGE_SIZE	4096
+		t.addr = addr + PAGE_SIZE / 2;
+		t.eci = eci;
+		t.exit = &exit;
+		t.map = map0 + PAGE_SIZE / 2;
+		t.barrier = &barrier;
+		pthread_barrier_init(&barrier, NULL, 2);
+		pthread_create(&t.thread, 0, hammer_thread, &t);
+		pthread_barrier_wait(&barrier);
+	}
+
+	/* Verify we can use every page */
+	for (i = 0; i < n_binds; ++i) {
+		uint64_t batch_offset = (char *)&data->batch - (char *)data;
+		uint64_t batch_addr = addr + batch_offset;
+		uint64_t sdi_offset = (char *)&data->data - (char *)data;
+		uint64_t sdi_addr = addr + sdi_offset;
+		data = map0 + i * page_size;
+
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+		if (i)
+			syncobj_reset(fd, &sync[1].handle, 1);
+		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
+
+		exec.engine_id = engine;
+		exec.address = batch_addr;
+		xe_exec(fd, &exec);
+
+		addr += page_size;
+	}
+	addr = base_addr;
+
+	/* Bind some of the pages to different BO / userptr */
+	syncobj_reset(fd, &sync[0].handle, 1);
+	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
+	if (flags & MAP_FLAG_USERPTR)
+		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
+					 unbind_n_page_offfset * page_size,
+					 addr + unbind_n_page_offfset * page_size,
+					 unbind_n_pages * page_size, sync, 2);
+	else
+		xe_vm_bind_async(fd, vm, 0, bo1,
+				 unbind_n_page_offfset * page_size,
+				 addr + unbind_n_page_offfset * page_size,
+				 unbind_n_pages * page_size, sync, 2);
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
+
+	/* Verify all pages written */
+	for (i = 0; i < n_binds; ++i) {
+		data = map0 + i * page_size;
+		igt_assert_eq(data->data, 0xc0ffee);
+	}
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		memset(map0, 0, PAGE_SIZE / 2);
+		memset(map0 + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
+	} else {
+		memset(map0, 0, bo_size);
+		memset(map1, 0, bo_size);
+	}
+
+	/* Verify we can use every page */
+	for (i = 0; i < n_binds; ++i) {
+		uint64_t batch_offset = (char *)&data->batch - (char *)data;
+		uint64_t batch_addr = addr + batch_offset;
+		uint64_t sdi_offset = (char *)&data->data - (char *)data;
+		uint64_t sdi_addr = addr + sdi_offset;
+
+		data = map0 + i * page_size;
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+		data = map1 + i * page_size;
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+		if (i)
+			syncobj_reset(fd, &sync[1].handle, 1);
+		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
+
+		exec.engine_id = engine;
+		exec.address = batch_addr;
+		xe_exec(fd, &exec);
+
+		addr += page_size;
+	}
+	addr = base_addr;
+
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
+
+	/* Verify all pages written */
+	for (i = 0; i < n_binds; ++i) {
+		uint32_t result = 0;
+
+		data = map0 + i * page_size;
+		result |= data->data;
+
+		data = map1 + i * page_size;
+		result |= data->data;
+
+		igt_assert_eq(result, 0xc0ffee);
+	}
+
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		exit = 1;
+		pthread_join(t.thread, NULL);
+		pthread_barrier_destroy(&barrier);
+	}
+
+	syncobj_destroy(fd, sync[0].handle);
+	syncobj_destroy(fd, sync[1].handle);
+	xe_engine_destroy(fd, engine);
+	munmap(map0, bo_size);
+	munmap(map1, bo_size);
+	if (bo0)
+		gem_close(fd, bo0);
+	if (bo1)
+		gem_close(fd, bo1);
+	xe_vm_destroy(fd, vm);
+}
+
 igt_main
 {
 	struct drm_xe_engine_class_instance *hwe, *hwe_non_copy = NULL;
@@ -1584,55 +1829,74 @@ igt_main
 		int unbind_n_page_offfset;
 		int unbind_n_pages;
 		unsigned int flags;
-	} sections[] = {
+	} munmap_sections[] = {
 		{ "all", 4, 2, 0, 4, 0 },
 		{ "one-partial", 4, 1, 1, 2, 0 },
 		{ "either-side-partial", 4, 2, 1, 2, 0 },
 		{ "either-side-partial-hammer", 4, 2, 1, 2,
-			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
+			MAP_FLAG_HAMMER_FIRST_PAGE },
 		{ "either-side-full", 4, 4, 1, 2, 0 },
 		{ "end", 4, 2, 0, 3, 0 },
 		{ "front", 4, 2, 1, 3, 0 },
 		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
 		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
 		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
-			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
+			MAP_FLAG_HAMMER_FIRST_PAGE },
 		{ "many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8, 0 },
 		{ "many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2, 0 },
 		{ "many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2, 0 },
-		{ "userptr-all", 4, 2, 0, 4, MUNMAP_FLAG_USERPTR },
-		{ "userptr-one-partial", 4, 1, 1, 2, MUNMAP_FLAG_USERPTR },
+		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
+		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
 		{ "userptr-either-side-partial", 4, 2, 1, 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-either-side-full", 4, 4, 1, 2,
-			MUNMAP_FLAG_USERPTR },
-		{ "userptr-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR },
-		{ "userptr-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
+		{ "userptr-end", 4, 2, 0, 3, MAP_FLAG_USERPTR },
+		{ "userptr-front", 4, 2, 1, 3, MAP_FLAG_USERPTR },
 		{ "userptr-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-inval-either-side-full", 4, 4, 1, 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
-		{ "userptr-inval-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR |
-			MUNMAP_FLAG_INVALIDATE },
-		{ "userptr-inval-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR |
-			MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
+		{ "userptr-inval-end", 4, 2, 0, 3, MAP_FLAG_USERPTR |
+			MAP_FLAG_INVALIDATE },
+		{ "userptr-inval-front", 4, 2, 1, 3, MAP_FLAG_USERPTR |
+			MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-either-side-partial", 4 * 8, 2 * 8, 1,
-			4 * 8 - 2, MUNMAP_FLAG_USERPTR |
-				MUNMAP_FLAG_INVALIDATE },
+			4 * 8 - 2, MAP_FLAG_USERPTR |
+				MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-either-side-full", 4 * 8, 4 * 8, 1 * 8,
-			2 * 8, MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			2 * 8, MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
+		{ NULL },
+	};
+	const struct section mmap_sections[] = {
+		{ "all", 4, 2, 0, 4, 0 },
+		{ "one-partial", 4, 1, 1, 2, 0 },
+		{ "either-side-partial", 4, 2, 1, 2, 0 },
+		{ "either-side-full", 4, 4, 1, 2, 0 },
+		{ "either-side-partial-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "end", 4, 2, 0, 3, 0 },
+		{ "front", 4, 2, 1, 3, 0 },
+		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
+		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
+		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
+		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
+		{ "userptr-either-side-partial", 4, 2, 1, 2, MAP_FLAG_USERPTR },
+		{ "userptr-either-side-full", 4, 4, 1, 2, MAP_FLAG_USERPTR },
 		{ NULL },
 	};
 
@@ -1839,7 +2103,7 @@ igt_main
 			break;
 		}
 
-	for (const struct section *s = sections; s->name; s++) {
+	for (const struct section *s = munmap_sections; s->name; s++) {
 		igt_subtest_f("munmap-style-unbind-%s", s->name) {
 			igt_require_f(hwe_non_copy,
 				      "Requires non-copy engine to run\n");
@@ -1853,6 +2117,20 @@ igt_main
 		}
 	}
 
+	for (const struct section *s = mmap_sections; s->name; s++) {
+		igt_subtest_f("mmap-style-bind-%s", s->name) {
+			igt_require_f(hwe_non_copy,
+				      "Requires non-copy engine to run\n");
+
+			test_mmap_style_bind(fd, hwe_non_copy,
+					     s->bo_n_pages,
+					     s->n_binds,
+					     s->unbind_n_page_offfset,
+					     s->unbind_n_pages,
+					     s->flags);
+		}
+	}
+
 	igt_fixture
 		drm_close_driver(fd);
 }
-- 
2.34.1

* Re: [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section
  2023-07-10 14:58 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
@ 2023-07-13 15:24   ` Rodrigo Vivi
  2023-07-14  4:13     ` Matthew Brost
  0 siblings, 1 reply; 11+ messages in thread
From: Rodrigo Vivi @ 2023-07-13 15:24 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Mon, Jul 10, 2023 at 07:58:53AM -0700, Matthew Brost wrote:
> GPUVA added support for this, so let's test it.
> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  tests/xe/xe_vm.c | 350 ++++++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 314 insertions(+), 36 deletions(-)
> 
> diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> index 04d6c3956..434bdb332 100644
> --- a/tests/xe/xe_vm.c
> +++ b/tests/xe/xe_vm.c
> @@ -1249,9 +1249,9 @@ static void *hammer_thread(void *tdata)
>  	return NULL;
>  }
>  
> -#define MUNMAP_FLAG_USERPTR		(0x1 << 0)
> -#define MUNMAP_FLAG_INVALIDATE		(0x1 << 1)
> -#define MUNMAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
> +#define MAP_FLAG_USERPTR		(0x1 << 0)
> +#define MAP_FLAG_INVALIDATE		(0x1 << 1)
> +#define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
>  
>  
>  /**
> @@ -1344,7 +1344,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
>  	bo_size = page_size * bo_n_pages;
>  
> -	if (flags & MUNMAP_FLAG_USERPTR) {
> +	if (flags & MAP_FLAG_USERPTR) {
>  		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
>  			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
>  			    MAP_ANONYMOUS, -1, 0);
> @@ -1363,7 +1363,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  	/* Do initial binds */
>  	bind_size = (page_size * bo_n_pages) / n_binds;
>  	for (i = 0; i < n_binds; ++i) {
> -		if (flags & MUNMAP_FLAG_USERPTR)
> +		if (flags & MAP_FLAG_USERPTR)
>  			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
>  						 bind_size, sync, 1);
>  		else
> @@ -1378,7 +1378,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  	 * cause a fault if a rebind occurs during munmap style VM unbind
>  	 * (partial VMAs unbound).
>  	 */
> -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
>  		t.fd = fd;
>  		t.vm = vm;
>  #define PAGE_SIZE	4096
> @@ -1437,7 +1437,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  		data = map + i * page_size;
>  		igt_assert_eq(data->data, 0xc0ffee);
>  	}
> -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
>  		memset(map, 0, PAGE_SIZE / 2);
>  		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
>  	} else {
> @@ -1487,7 +1487,7 @@ try_again_after_invalidate:
>  			igt_assert_eq(data->data, 0xc0ffee);
>  		}
>  	}
> -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
>  		memset(map, 0, PAGE_SIZE / 2);
>  		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
>  	} else {
> @@ -1498,7 +1498,7 @@ try_again_after_invalidate:
>  	 * The munmap style VM unbind can create new VMAs, make sure those are
>  	 * in the bookkeeping for another rebind after a userptr invalidate.
>  	 */
> -	if (flags & MUNMAP_FLAG_INVALIDATE && !invalidate++) {
> +	if (flags & MAP_FLAG_INVALIDATE && !invalidate++) {
>  		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
>  			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
>  			    MAP_ANONYMOUS, -1, 0);
> @@ -1509,7 +1509,7 @@ try_again_after_invalidate:
>  	/* Confirm unbound region can be rebound */
>  	syncobj_reset(fd, &sync[0].handle, 1);
>  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> -	if (flags & MUNMAP_FLAG_USERPTR)
> +	if (flags & MAP_FLAG_USERPTR)
>  		xe_vm_bind_userptr_async(fd, vm, 0,
>  					 addr + unbind_n_page_offfset * page_size,
>  					 addr + unbind_n_page_offfset * page_size,
> @@ -1557,7 +1557,7 @@ try_again_after_invalidate:
>  		igt_assert_eq(data->data, 0xc0ffee);
>  	}
>  
> -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
>  		exit = 1;
>  		pthread_join(t.thread, NULL);
>  		pthread_barrier_destroy(&barrier);
> @@ -1572,6 +1572,251 @@ try_again_after_invalidate:
>  	xe_vm_destroy(fd, vm);
>  }
>  
> +/**
> + * SUBTEST: mmap-style-bind-%s
> + * Description: Test mmap style bind with %arg[1]
> + * Run type: FULL
> + * TODO: change ``'Run type' == FULL`` to a better category
> + *
> + * arg[1]:
> + *
> + * @all:				all
> + * @one-partial:			one partial
> + * @either-side-partial:		either side partial
> + * @either-side-full:			either side full
> + * @either-side-partial-hammer:		either side partial hammer
> + * @end:				end
> + * @front:				front
> + * @many-all:				many all
> + * @many-either-side-partial:		many either side partial
> + * @many-either-side-partial-hammer:	many either side partial hammer
> + * @userptr-all:			userptr all
> + * @userptr-one-partial:		userptr one partial
> + * @userptr-either-side-partial:	userptr either side partial
> + * @userptr-either-side-full:		userptr either side full
> + */
> +
> +static void
> +test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> +		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
> +		     int unbind_n_pages, unsigned int flags)
> +{
> +	struct drm_xe_sync sync[2] = {
> +		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> +		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> +	};
> +	struct drm_xe_exec exec = {
> +		.num_batch_buffer = 1,
> +		.num_syncs = 2,
> +		.syncs = to_user_pointer(sync),
> +	};
> +	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
> +	uint32_t vm;
> +	uint32_t engine;
> +	size_t bo_size;
> +	uint32_t bo0 = 0, bo1 = 0;
> +	uint64_t bind_size;
> +	uint64_t page_size = xe_get_default_alignment(fd);

Do we have plans to add coverage for the other supported cases with different
offsets, alignments, etc.?

Anyway, this test looks correct to me.

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

* Re: [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section
  2023-07-13 15:24   ` Rodrigo Vivi
@ 2023-07-14  4:13     ` Matthew Brost
  0 siblings, 0 replies; 11+ messages in thread
From: Matthew Brost @ 2023-07-14  4:13 UTC (permalink / raw)
  To: Rodrigo Vivi; +Cc: igt-dev

On Thu, Jul 13, 2023 at 11:24:47AM -0400, Rodrigo Vivi wrote:
> On Mon, Jul 10, 2023 at 07:58:53AM -0700, Matthew Brost wrote:
> > GPUVA added support for this, so let's test it.
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >  tests/xe/xe_vm.c | 350 ++++++++++++++++++++++++++++++++++++++++++-----
> >  1 file changed, 314 insertions(+), 36 deletions(-)
> > 
> > diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> > index 04d6c3956..434bdb332 100644
> > --- a/tests/xe/xe_vm.c
> > +++ b/tests/xe/xe_vm.c
> > @@ -1249,9 +1249,9 @@ static void *hammer_thread(void *tdata)
> >  	return NULL;
> >  }
> >  
> > -#define MUNMAP_FLAG_USERPTR		(0x1 << 0)
> > -#define MUNMAP_FLAG_INVALIDATE		(0x1 << 1)
> > -#define MUNMAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
> > +#define MAP_FLAG_USERPTR		(0x1 << 0)
> > +#define MAP_FLAG_INVALIDATE		(0x1 << 1)
> > +#define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
> >  
> >  
> >  /**
> > @@ -1344,7 +1344,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> >  	bo_size = page_size * bo_n_pages;
> >  
> > -	if (flags & MUNMAP_FLAG_USERPTR) {
> > +	if (flags & MAP_FLAG_USERPTR) {
> >  		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
> >  			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
> >  			    MAP_ANONYMOUS, -1, 0);
> > @@ -1363,7 +1363,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	/* Do initial binds */
> >  	bind_size = (page_size * bo_n_pages) / n_binds;
> >  	for (i = 0; i < n_binds; ++i) {
> > -		if (flags & MUNMAP_FLAG_USERPTR)
> > +		if (flags & MAP_FLAG_USERPTR)
> >  			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
> >  						 bind_size, sync, 1);
> >  		else
> > @@ -1378,7 +1378,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	 * cause a fault if a rebind occurs during munmap style VM unbind
> >  	 * (partial VMAs unbound).
> >  	 */
> > -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> >  		t.fd = fd;
> >  		t.vm = vm;
> >  #define PAGE_SIZE	4096
> > @@ -1437,7 +1437,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  		data = map + i * page_size;
> >  		igt_assert_eq(data->data, 0xc0ffee);
> >  	}
> > -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> >  		memset(map, 0, PAGE_SIZE / 2);
> >  		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
> >  	} else {
> > @@ -1487,7 +1487,7 @@ try_again_after_invalidate:
> >  			igt_assert_eq(data->data, 0xc0ffee);
> >  		}
> >  	}
> > -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> >  		memset(map, 0, PAGE_SIZE / 2);
> >  		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
> >  	} else {
> > @@ -1498,7 +1498,7 @@ try_again_after_invalidate:
> >  	 * The munmap style VM unbind can create new VMAs, make sure those are
> >  	 * in the bookkeeping for another rebind after a userptr invalidate.
> >  	 */
> > -	if (flags & MUNMAP_FLAG_INVALIDATE && !invalidate++) {
> > +	if (flags & MAP_FLAG_INVALIDATE && !invalidate++) {
> >  		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
> >  			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
> >  			    MAP_ANONYMOUS, -1, 0);
> > @@ -1509,7 +1509,7 @@ try_again_after_invalidate:
> >  	/* Confirm unbound region can be rebound */
> >  	syncobj_reset(fd, &sync[0].handle, 1);
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> > -	if (flags & MUNMAP_FLAG_USERPTR)
> > +	if (flags & MAP_FLAG_USERPTR)
> >  		xe_vm_bind_userptr_async(fd, vm, 0,
> >  					 addr + unbind_n_page_offfset * page_size,
> >  					 addr + unbind_n_page_offfset * page_size,
> > @@ -1557,7 +1557,7 @@ try_again_after_invalidate:
> >  		igt_assert_eq(data->data, 0xc0ffee);
> >  	}
> >  
> > -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> >  		exit = 1;
> >  		pthread_join(t.thread, NULL);
> >  		pthread_barrier_destroy(&barrier);
> > @@ -1572,6 +1572,251 @@ try_again_after_invalidate:
> >  	xe_vm_destroy(fd, vm);
> >  }
> >  
> > +/**
> > + * SUBTEST: mmap-style-bind-%s
> > + * Description: Test mmap style bind with %arg[1]
> > + * Run type: FULL
> > + * TODO: change ``'Run type' == FULL`` to a better category
> > + *
> > + * arg[1]:
> > + *
> > + * @all:				all
> > + * @one-partial:			one partial
> > + * @either-side-partial:		either side partial
> > + * @either-side-full:			either side full
> > + * @either-side-partial-hammer:		either side partial hammer
> > + * @end:				end
> > + * @front:				front
> > + * @many-all:				many all
> > + * @many-either-side-partial:		many either side partial
> > + * @many-either-side-partial-hammer:	many either side partial hammer
> > + * @userptr-all:			userptr all
> > + * @userptr-one-partial:		userptr one partial
> > + * @userptr-either-side-partial:	userptr either side partial
> > + * @userptr-either-side-full:		userptr either side full
> > + */
> > +
> > +static void
> > +test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> > +		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
> > +		     int unbind_n_pages, unsigned int flags)
> > +{
> > +	struct drm_xe_sync sync[2] = {
> > +		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> > +		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> > +	};
> > +	struct drm_xe_exec exec = {
> > +		.num_batch_buffer = 1,
> > +		.num_syncs = 2,
> > +		.syncs = to_user_pointer(sync),
> > +	};
> > +	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
> > +	uint32_t vm;
> > +	uint32_t engine;
> > +	size_t bo_size;
> > +	uint32_t bo0 = 0, bo1 = 0;
> > +	uint64_t bind_size;
> > +	uint64_t page_size = xe_get_default_alignment(fd);
> 
> Do we have plans to add coverage for the other supported cases with different
> offsets, alignments, etc.?
> 

The next patch does some of this. The way the test is structured, adding a
new case is just a matter of adding another table entry.
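
For instance, a hypothetical mmap_sections entry covering a different offset
might look like the sketch below; the name and values are illustrative, not
part of this series:

	/* name, bo_n_pages, n_binds, unbind_n_page_offfset,
	 * unbind_n_pages, flags
	 */
	{ "either-side-partial-offset", 8, 2, 3, 2, 0 },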

> Anyway, this test looks correct to me.
> 
> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> 

Thanks.

Matt


* [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
@ 2023-07-21  4:37 Matthew Brost
  2023-07-21  4:37 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
                   ` (5 more replies)
  0 siblings, 6 replies; 11+ messages in thread
From: Matthew Brost @ 2023-07-21  4:37 UTC (permalink / raw)
  To: igt-dev; +Cc: Rodrigo Vivi

Update xe_exec_basic to create a NULL binding for the store data address;
the store should just be dropped.

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Mauro Carvalho Chehab <mchehab@kernel.org>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_exec_basic.c | 33 ++++++++++++++++++++++++++++-----
 1 file changed, 28 insertions(+), 5 deletions(-)
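
A NULL (sparse) binding maps a VA range with no backing object, so GPU
stores to it are discarded rather than faulting. A minimal sketch using the
flags added below (the arguments mirror the __xe_vm_bind_assert() call in
this patch; bind_engine and sparse_addr stand in for the per-engine values):

	/* Sparse bind: BO handle 0 plus the NULL flag; stores that land in
	 * [sparse_addr, sparse_addr + bo_size) are silently dropped.
	 */
	__xe_vm_bind_assert(fd, vm, bind_engine, 0, 0, sparse_addr, bo_size,
			    XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC |
			    XE_VM_BIND_FLAG_NULL, sync, 1, 0, 0);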

diff --git a/tests/xe/xe_exec_basic.c b/tests/xe/xe_exec_basic.c
index af581c327d..5624d31aa7 100644
--- a/tests/xe/xe_exec_basic.c
+++ b/tests/xe/xe_exec_basic.c
@@ -28,6 +28,7 @@
 #define BIND_ENGINE	(0x1 << 4)
 #define DEFER_ALLOC	(0x1 << 5)
 #define DEFER_BIND	(0x1 << 6)
+#define SPARSE		(0x1 << 7)
 
 /**
  * SUBTEST: once-%s
@@ -70,6 +71,10 @@
  * @bindengine-userptr-rebind:		bind engine userptr rebind
  * @bindengine-userptr-invalidate:	bind engine userptr invalidate
  * @bindengine-userptr-invalidate-race:	bind engine userptr invalidate racy
+ * @null:				null
+ * @null-defer-mmap:			null defer mmap
+ * @null-defer-bind:			null defer bind
+ * @null-rebind:			null rebind
  */
 
 static void
@@ -86,6 +91,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		.syncs = to_user_pointer(sync),
 	};
 	uint64_t addr[MAX_N_ENGINES];
+	uint64_t sparse_addr[MAX_N_ENGINES];
 	uint32_t vm[MAX_N_ENGINES];
 	uint32_t engines[MAX_N_ENGINES];
 	uint32_t bind_engines[MAX_N_ENGINES];
@@ -110,8 +116,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 			xe_get_default_alignment(fd));
 
 	addr[0] = 0x1a0000;
-	for (i = 1; i < MAX_N_ENGINES; ++i)
+	sparse_addr[0] = 0x301a0000;
+	for (i = 1; i < MAX_N_ENGINES; ++i) {
 		addr[i] = addr[i - 1] + (0x1ull << 32);
+		sparse_addr[i] = sparse_addr[i - 1] + (0x1ull << 32);
+	}
 
 	if (flags & USERPTR) {
 #define	MAP_ADDRESS	0x00007fadeadbe000
@@ -160,6 +169,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 			xe_vm_bind_userptr_async(fd, vm[i], bind_engines[i],
 						 to_user_pointer(data), addr[i],
 						 bo_size, sync, 1);
+		if (flags & SPARSE)
+			__xe_vm_bind_assert(fd, vm[i], bind_engines[i],
+					    0, 0, sparse_addr[i], bo_size,
+					    XE_VM_BIND_OP_MAP |
+					    XE_VM_BIND_FLAG_ASYNC |
+					    XE_VM_BIND_FLAG_NULL, sync,
+					    1, 0, 0);
 	}
 
 	if (flags & DEFER_BIND)
@@ -171,7 +187,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
 		uint64_t batch_addr = __addr + batch_offset;
 		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
-		uint64_t sdi_addr = __addr + sdi_offset;
+		uint64_t sdi_addr = (flags & SPARSE ? sparse_addr[i % n_vm] :
+				     __addr) + sdi_offset;
 		int e = i % n_engines;
 
 		b = 0;
@@ -258,9 +275,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 					INT64_MAX, 0, NULL));
 	}
 
-	for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
-	     i < n_execs; i++)
-		igt_assert_eq(data[i].data, 0xc0ffee);
+	if (!(flags & SPARSE)) {
+		for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
+		     i < n_execs; i++)
+			igt_assert_eq(data[i].data, 0xc0ffee);
+	}
 
 	for (i = 0; i < n_engines; i++) {
 		syncobj_destroy(fd, syncobjs[i]);
@@ -293,6 +312,10 @@ igt_main
 		{ "basic-defer-bind", DEFER_ALLOC | DEFER_BIND },
 		{ "userptr", USERPTR },
 		{ "rebind", REBIND },
+		{ "null", SPARSE },
+		{ "null-defer-mmap", SPARSE | DEFER_ALLOC },
+		{ "null-defer-bind", SPARSE | DEFER_ALLOC | DEFER_BIND },
+		{ "null-rebind", SPARSE | REBIND },
 		{ "userptr-rebind", USERPTR | REBIND },
 		{ "userptr-invalidate", USERPTR | INVALIDATE },
 		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
-- 
2.34.1

* [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section
  2023-07-21  4:37 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
@ 2023-07-21  4:37 ` Matthew Brost
  2023-07-21  4:37 ` [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: Matthew Brost @ 2023-07-21  4:37 UTC (permalink / raw)
  To: igt-dev; +Cc: Rodrigo Vivi

GPUVA added support for this; let's test it.
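
As background for readers new to the term: an mmap-style VM bind
overwrites part of an existing GPU mapping in place, splitting the old
VMA much the way mmap(MAP_FIXED) splits a CPU mapping. A minimal
user-space analogy (editor's sketch, not part of the patch; assumes
Linux and a 4 KiB page size):

#include <assert.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	const size_t page = 4096;
	char *base, *mid;

	/* Map four pages and fill them with a known pattern. */
	base = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(base != MAP_FAILED);
	memset(base, 0xaa, 4 * page);

	/* Overlay the middle two pages; the old mapping is split and
	 * the covered range is replaced by fresh zero pages. */
	mid = mmap(base + page, 2 * page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(mid == base + page);
	assert(mid[0] == 0);		/* new zero-filled pages */
	assert(base[0] == (char)0xaa);	/* front of old mapping intact */

	munmap(base, 4 * page);
	return 0;
}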

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 350 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 314 insertions(+), 36 deletions(-)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 04d6c39568..434bdb3322 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -1249,9 +1249,9 @@ static void *hammer_thread(void *tdata)
 	return NULL;
 }
 
-#define MUNMAP_FLAG_USERPTR		(0x1 << 0)
-#define MUNMAP_FLAG_INVALIDATE		(0x1 << 1)
-#define MUNMAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
+#define MAP_FLAG_USERPTR		(0x1 << 0)
+#define MAP_FLAG_INVALIDATE		(0x1 << 1)
+#define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
 
 
 /**
@@ -1344,7 +1344,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
 	bo_size = page_size * bo_n_pages;
 
-	if (flags & MUNMAP_FLAG_USERPTR) {
+	if (flags & MAP_FLAG_USERPTR) {
 		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
 			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
 			    MAP_ANONYMOUS, -1, 0);
@@ -1363,7 +1363,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	/* Do initial binds */
 	bind_size = (page_size * bo_n_pages) / n_binds;
 	for (i = 0; i < n_binds; ++i) {
-		if (flags & MUNMAP_FLAG_USERPTR)
+		if (flags & MAP_FLAG_USERPTR)
 			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
 						 bind_size, sync, 1);
 		else
@@ -1378,7 +1378,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	 * cause a fault if a rebind occurs during munmap style VM unbind
 	 * (partial VMAs unbound).
 	 */
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		t.fd = fd;
 		t.vm = vm;
 #define PAGE_SIZE	4096
@@ -1437,7 +1437,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 		data = map + i * page_size;
 		igt_assert_eq(data->data, 0xc0ffee);
 	}
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		memset(map, 0, PAGE_SIZE / 2);
 		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
 	} else {
@@ -1487,7 +1487,7 @@ try_again_after_invalidate:
 			igt_assert_eq(data->data, 0xc0ffee);
 		}
 	}
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		memset(map, 0, PAGE_SIZE / 2);
 		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
 	} else {
@@ -1498,7 +1498,7 @@ try_again_after_invalidate:
 	 * The munmap style VM unbind can create new VMAs, make sure those are
 	 * in the bookkeeping for another rebind after a userptr invalidate.
 	 */
-	if (flags & MUNMAP_FLAG_INVALIDATE && !invalidate++) {
+	if (flags & MAP_FLAG_INVALIDATE && !invalidate++) {
 		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
 			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
 			    MAP_ANONYMOUS, -1, 0);
@@ -1509,7 +1509,7 @@ try_again_after_invalidate:
 	/* Confirm unbound region can be rebound */
 	syncobj_reset(fd, &sync[0].handle, 1);
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
-	if (flags & MUNMAP_FLAG_USERPTR)
+	if (flags & MAP_FLAG_USERPTR)
 		xe_vm_bind_userptr_async(fd, vm, 0,
 					 addr + unbind_n_page_offfset * page_size,
 					 addr + unbind_n_page_offfset * page_size,
@@ -1557,7 +1557,7 @@ try_again_after_invalidate:
 		igt_assert_eq(data->data, 0xc0ffee);
 	}
 
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		exit = 1;
 		pthread_join(t.thread, NULL);
 		pthread_barrier_destroy(&barrier);
@@ -1572,6 +1572,251 @@ try_again_after_invalidate:
 	xe_vm_destroy(fd, vm);
 }
 
+/**
+ * SUBTEST: mmap-style-bind-%s
+ * Description: Test mmap style bind with %arg[1]
+ * Run type: FULL
+ * TODO: change ``'Run type' == FULL`` to a better category
+ *
+ * arg[1]:
+ *
+ * @all:				all
+ * @one-partial:			one partial
+ * @either-side-partial:		either side partial
+ * @either-side-full:			either side full
+ * @either-side-partial-hammer:		either side partial hammer
+ * @end:				end
+ * @front:				front
+ * @many-all:				many all
+ * @many-either-side-partial:		many either side partial
+ * @many-either-side-partial-hammer:	many either side partial hammer
+ * @userptr-all:			userptr all
+ * @userptr-one-partial:		userptr one partial
+ * @userptr-either-side-partial:	userptr either side partial
+ * @userptr-either-side-full:		userptr either side full
+ */
+
+static void
+test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
+		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
+		     int unbind_n_pages, unsigned int flags)
+{
+	struct drm_xe_sync sync[2] = {
+		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+	};
+	struct drm_xe_exec exec = {
+		.num_batch_buffer = 1,
+		.num_syncs = 2,
+		.syncs = to_user_pointer(sync),
+	};
+	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+	uint32_t vm;
+	uint32_t engine;
+	size_t bo_size;
+	uint32_t bo0 = 0, bo1 = 0;
+	uint64_t bind_size;
+	uint64_t page_size = xe_get_default_alignment(fd);
+	struct {
+		uint32_t batch[16];
+		uint64_t pad;
+		uint32_t data;
+	} *data;
+	void *map0, *map1;
+	int i, b;
+	struct thread_data t;
+	pthread_barrier_t barrier;
+	int exit = 0;
+
+	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+	bo_size = page_size * bo_n_pages;
+
+	if (flags & MAP_FLAG_USERPTR) {
+		map0 = mmap(from_user_pointer(addr), bo_size, PROT_READ |
+			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
+			    MAP_ANONYMOUS, -1, 0);
+		map1 = mmap(from_user_pointer(addr + bo_size),
+			    bo_size, PROT_READ | PROT_WRITE, MAP_SHARED |
+			    MAP_FIXED | MAP_ANONYMOUS, -1, 0);
+		igt_assert(map0 != MAP_FAILED);
+		igt_assert(map1 != MAP_FAILED);
+	} else {
+		bo0 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+		map0 = xe_bo_map(fd, bo0, bo_size);
+		bo1 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+		map1 = xe_bo_map(fd, bo1, bo_size);
+	}
+	memset(map0, 0, bo_size);
+	memset(map1, 0, bo_size);
+
+	engine = xe_engine_create(fd, vm, eci, 0);
+
+	sync[0].handle = syncobj_create(fd, 0);
+	sync[1].handle = syncobj_create(fd, 0);
+
+	/* Do initial binds */
+	bind_size = (page_size * bo_n_pages) / n_binds;
+	for (i = 0; i < n_binds; ++i) {
+		if (flags & MAP_FLAG_USERPTR)
+			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
+						 bind_size, sync, 1);
+		else
+			xe_vm_bind_async(fd, vm, 0, bo0, i * bind_size,
+					 addr, bind_size, sync, 1);
+		addr += bind_size;
+	}
+	addr = base_addr;
+
+	/*
+	 * Kick a thread to write the first page continuously to ensure we
+	 * can't cause a fault if a rebind occurs during mmap style VM bind
+	 * (partial VMAs rebound).
+	 */
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		t.fd = fd;
+		t.vm = vm;
+#define PAGE_SIZE	4096
+		t.addr = addr + PAGE_SIZE / 2;
+		t.eci = eci;
+		t.exit = &exit;
+		t.map = map0 + PAGE_SIZE / 2;
+		t.barrier = &barrier;
+		pthread_barrier_init(&barrier, NULL, 2);
+		pthread_create(&t.thread, 0, hammer_thread, &t);
+		pthread_barrier_wait(&barrier);
+	}
+
+	/* Verify we can use every page */
+	for (i = 0; i < n_binds; ++i) {
+		data = map0 + i * page_size;
+		uint64_t batch_offset = (char *)&data->batch - (char *)data;
+		uint64_t batch_addr = addr + batch_offset;
+		uint64_t sdi_offset = (char *)&data->data - (char *)data;
+		uint64_t sdi_addr = addr + sdi_offset;
+
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data->batch));
+
+		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+		if (i)
+			syncobj_reset(fd, &sync[1].handle, 1);
+		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
+
+		exec.engine_id = engine;
+		exec.address = batch_addr;
+		xe_exec(fd, &exec);
+
+		addr += page_size;
+	}
+	addr = base_addr;
+
+	/* Bind some of the pages to different BO / userptr */
+	syncobj_reset(fd, &sync[0].handle, 1);
+	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
+	if (flags & MAP_FLAG_USERPTR)
+		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
+					 unbind_n_page_offfset * page_size,
+					 addr + unbind_n_page_offfset * page_size,
+					 unbind_n_pages * page_size, sync, 2);
+	else
+		xe_vm_bind_async(fd, vm, 0, bo1,
+				 unbind_n_page_offfset * page_size,
+				 addr + unbind_n_page_offfset * page_size,
+				 unbind_n_pages * page_size, sync, 2);
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
+
+	/* Verify all pages written */
+	for (i = 0; i < n_binds; ++i) {
+		data = map0 + i * page_size;
+		igt_assert_eq(data->data, 0xc0ffee);
+	}
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		memset(map0, 0, PAGE_SIZE / 2);
+		memset(map0 + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
+	} else {
+		memset(map0, 0, bo_size);
+		memset(map1, 0, bo_size);
+	}
+
+	/* Verify we can use every page */
+	for (i = 0; i < n_binds; ++i) {
+		uint64_t batch_offset = (char *)&data->batch - (char *)data;
+		uint64_t batch_addr = addr + batch_offset;
+		uint64_t sdi_offset = (char *)&data->data - (char *)data;
+		uint64_t sdi_addr = addr + sdi_offset;
+
+		data = map0 + i * page_size;
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data->batch));
+
+		data = map1 + i * page_size;
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data->batch));
+
+		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+		if (i)
+			syncobj_reset(fd, &sync[1].handle, 1);
+		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
+
+		exec.engine_id = engine;
+		exec.address = batch_addr;
+		xe_exec(fd, &exec);
+
+		addr += page_size;
+	}
+	addr = base_addr;
+
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
+
+	/* Verify all pages written */
+	for (i = 0; i < n_binds; ++i) {
+		uint32_t result = 0;
+
+		data = map0 + i * page_size;
+		result |= data->data;
+
+		data = map1 + i * page_size;
+		result |= data->data;
+
+		igt_assert_eq(result, 0xc0ffee);
+	}
+
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		exit = 1;
+		pthread_join(t.thread, NULL);
+		pthread_barrier_destroy(&barrier);
+	}
+
+	syncobj_destroy(fd, sync[0].handle);
+	syncobj_destroy(fd, sync[1].handle);
+	xe_engine_destroy(fd, engine);
+	munmap(map0, bo_size);
+	munmap(map1, bo_size);
+	if (bo0)
+		gem_close(fd, bo0);
+	if (bo1)
+		gem_close(fd, bo1);
+	xe_vm_destroy(fd, vm);
+}
+
 igt_main
 {
 	struct drm_xe_engine_class_instance *hwe, *hwe_non_copy = NULL;
@@ -1584,55 +1829,74 @@ igt_main
 		int unbind_n_page_offfset;
 		int unbind_n_pages;
 		unsigned int flags;
-	} sections[] = {
+	} munmap_sections[] = {
 		{ "all", 4, 2, 0, 4, 0 },
 		{ "one-partial", 4, 1, 1, 2, 0 },
 		{ "either-side-partial", 4, 2, 1, 2, 0 },
 		{ "either-side-partial-hammer", 4, 2, 1, 2,
-			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
+			MAP_FLAG_HAMMER_FIRST_PAGE },
 		{ "either-side-full", 4, 4, 1, 2, 0 },
 		{ "end", 4, 2, 0, 3, 0 },
 		{ "front", 4, 2, 1, 3, 0 },
 		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
 		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
 		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
-			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
+			MAP_FLAG_HAMMER_FIRST_PAGE },
 		{ "many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8, 0 },
 		{ "many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2, 0 },
 		{ "many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2, 0 },
-		{ "userptr-all", 4, 2, 0, 4, MUNMAP_FLAG_USERPTR },
-		{ "userptr-one-partial", 4, 1, 1, 2, MUNMAP_FLAG_USERPTR },
+		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
+		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
 		{ "userptr-either-side-partial", 4, 2, 1, 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-either-side-full", 4, 4, 1, 2,
-			MUNMAP_FLAG_USERPTR },
-		{ "userptr-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR },
-		{ "userptr-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
+		{ "userptr-end", 4, 2, 0, 3, MAP_FLAG_USERPTR },
+		{ "userptr-front", 4, 2, 1, 3, MAP_FLAG_USERPTR },
 		{ "userptr-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-inval-either-side-full", 4, 4, 1, 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
-		{ "userptr-inval-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR |
-			MUNMAP_FLAG_INVALIDATE },
-		{ "userptr-inval-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR |
-			MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
+		{ "userptr-inval-end", 4, 2, 0, 3, MAP_FLAG_USERPTR |
+			MAP_FLAG_INVALIDATE },
+		{ "userptr-inval-front", 4, 2, 1, 3, MAP_FLAG_USERPTR |
+			MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-either-side-partial", 4 * 8, 2 * 8, 1,
-			4 * 8 - 2, MUNMAP_FLAG_USERPTR |
-				MUNMAP_FLAG_INVALIDATE },
+			4 * 8 - 2, MAP_FLAG_USERPTR |
+				MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-either-side-full", 4 * 8, 4 * 8, 1 * 8,
-			2 * 8, MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			2 * 8, MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
+		{ NULL },
+	};
+	const struct section mmap_sections[] = {
+		{ "all", 4, 2, 0, 4, 0 },
+		{ "one-partial", 4, 1, 1, 2, 0 },
+		{ "either-side-partial", 4, 2, 1, 2, 0 },
+		{ "either-side-full", 4, 4, 1, 2, 0 },
+		{ "either-side-partial-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "end", 4, 2, 0, 3, 0 },
+		{ "front", 4, 2, 1, 3, 0 },
+		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
+		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
+		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
+		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
+		{ "userptr-either-side-partial", 4, 2, 1, 2, MAP_FLAG_USERPTR },
+		{ "userptr-either-side-full", 4, 4, 1, 2, MAP_FLAG_USERPTR },
 		{ NULL },
 	};
 
@@ -1839,7 +2103,7 @@ igt_main
 			break;
 		}
 
-	for (const struct section *s = sections; s->name; s++) {
+	for (const struct section *s = munmap_sections; s->name; s++) {
 		igt_subtest_f("munmap-style-unbind-%s", s->name) {
 			igt_require_f(hwe_non_copy,
 				      "Requires non-copy engine to run\n");
@@ -1853,6 +2117,20 @@ igt_main
 		}
 	}
 
+	for (const struct section *s = mmap_sections; s->name; s++) {
+		igt_subtest_f("mmap-style-bind-%s", s->name) {
+			igt_require_f(hwe_non_copy,
+				      "Requires non-copy engine to run\n");
+
+			test_mmap_style_bind(fd, hwe_non_copy,
+					     s->bo_n_pages,
+					     s->n_binds,
+					     s->unbind_n_page_offfset,
+					     s->unbind_n_pages,
+					     s->flags);
+		}
+	}
+
 	igt_fixture
 		drm_close_driver(fd);
 }
-- 
2.34.1

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages
  2023-07-21  4:37 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
  2023-07-21  4:37 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
@ 2023-07-21  4:37 ` Matthew Brost
  2023-07-21  4:37 ` [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test Matthew Brost
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: Matthew Brost @ 2023-07-21  4:37 UTC (permalink / raw)
  To: igt-dev; +Cc: Rodrigo Vivi

Splitting large pages involves using dma-resv slots for ordering; make
sure this works.
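
For clarity on what the two new section flavours exercise: the
split-page variant leaves the unbind offset in small-page units so the
range cuts into the middle of a 2 MiB page, while the large-page
variant scales the offset so the range stays 2 MiB aligned and no
split is needed. The distinction boils down to alignment (editor's
sketch of the predicate, not IGT API):

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M	(2ull << 20)

/* A partial unbind forces a large-page split whenever either end of
 * the unbound range falls inside a 2 MiB page instead of on a
 * boundary. */
static bool unbind_splits_large_page(uint64_t start, uint64_t length)
{
	return ((start | (start + length)) & (SZ_2M - 1)) != 0;
}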

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 81 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 21 deletions(-)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 434bdb3322..5fd511f0b8 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -1252,7 +1252,8 @@ static void *hammer_thread(void *tdata)
 #define MAP_FLAG_USERPTR		(0x1 << 0)
 #define MAP_FLAG_INVALIDATE		(0x1 << 1)
 #define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
-
+#define MAP_FLAG_LARGE_PAGE		(0x1 << 3)
+#define MAP_FLAG_LARGE_PAGE_NO_SPLIT	(0x1 << 4)
 
 /**
  * SUBTEST: munmap-style-unbind-%s
@@ -1305,12 +1306,16 @@ static void *hammer_thread(void *tdata)
  *					userptr inval many either side full
  * @userptr-inval-many-end:		userptr inval many end
  * @userptr-inval-many-front:		userptr inval many front
+ * @either-side-partial-large-page-hammer:
+ *					either side partial large page hammer
+ * @either-side-partial-split-page-hammer:
+ *					either side partial split page hammer
  */
 
 static void
 test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 			 int bo_n_pages, int n_binds,
-			 int unbind_n_page_offfset, int unbind_n_pages,
+			 int unbind_n_page_offset, int unbind_n_pages,
 			 unsigned int flags)
 {
 	struct drm_xe_sync sync[2] = {
@@ -1322,7 +1327,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 		.num_syncs = 2,
 		.syncs = to_user_pointer(sync),
 	};
-	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
 	uint32_t vm;
 	uint32_t engine;
 	size_t bo_size;
@@ -1340,6 +1345,14 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	struct thread_data t;
 	pthread_barrier_t barrier;
 	int exit = 0;
+	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
+
+	if (flags & MAP_FLAG_LARGE_PAGE) {
+		bo_n_pages *= n_page_per_2mb;
+		unbind_n_pages *= n_page_per_2mb;
+		if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
+			unbind_n_page_offset *= n_page_per_2mb;
+	}
 
 	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
 	bo_size = page_size * bo_n_pages;
@@ -1426,7 +1439,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
 	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
 	xe_vm_unbind_async(fd, vm, 0, 0,
-			   addr + unbind_n_page_offfset * page_size,
+			   addr + unbind_n_page_offset * page_size,
 			   unbind_n_pages * page_size, sync, 2);
 
 	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
@@ -1455,8 +1468,8 @@ try_again_after_invalidate:
 		data = map + i * page_size;
 		addr += page_size;
 
-		if (i < unbind_n_page_offfset ||
-		    i + 1 > unbind_n_page_offfset + unbind_n_pages) {
+		if (i < unbind_n_page_offset ||
+		    i + 1 > unbind_n_page_offset + unbind_n_pages) {
 			b = 0;
 			data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
 			data->batch[b++] = sdi_addr;
@@ -1481,8 +1494,8 @@ try_again_after_invalidate:
 
 	/* Verify all pages still bound written */
 	for (i = 0; i < n_binds; ++i) {
-		if (i < unbind_n_page_offfset ||
-		    i + 1 > unbind_n_page_offfset + unbind_n_pages) {
+		if (i < unbind_n_page_offset ||
+		    i + 1 > unbind_n_page_offset + unbind_n_pages) {
 			data = map + i * page_size;
 			igt_assert_eq(data->data, 0xc0ffee);
 		}
@@ -1511,13 +1524,13 @@ try_again_after_invalidate:
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
 	if (flags & MAP_FLAG_USERPTR)
 		xe_vm_bind_userptr_async(fd, vm, 0,
-					 addr + unbind_n_page_offfset * page_size,
-					 addr + unbind_n_page_offfset * page_size,
+					 addr + unbind_n_page_offset * page_size,
+					 addr + unbind_n_page_offset * page_size,
 					 unbind_n_pages * page_size, sync, 1);
 	else
 		xe_vm_bind_async(fd, vm, 0, bo,
-				 unbind_n_page_offfset * page_size,
-				 addr + unbind_n_page_offfset * page_size,
+				 unbind_n_page_offset * page_size,
+				 addr + unbind_n_page_offset * page_size,
 				 unbind_n_pages * page_size, sync, 1);
 
 	/* Verify we can use every page */
@@ -1594,11 +1607,15 @@ try_again_after_invalidate:
  * @userptr-one-partial:		userptr one partial
  * @userptr-either-side-partial:	userptr either side partial
  * @userptr-either-side-full:		userptr either side full
+ * @either-side-partial-large-page-hammer:
+ *					either side partial large page hammer
+ * @either-side-partial-split-page-hammer:
+ *					either side partial split page hammer
  */
 
 static void
 test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
-		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
+		     int bo_n_pages, int n_binds, int unbind_n_page_offset,
 		     int unbind_n_pages, unsigned int flags)
 {
 	struct drm_xe_sync sync[2] = {
@@ -1610,7 +1627,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
 		.num_syncs = 2,
 		.syncs = to_user_pointer(sync),
 	};
-	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
 	uint32_t vm;
 	uint32_t engine;
 	size_t bo_size;
@@ -1627,6 +1644,14 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
 	struct thread_data t;
 	pthread_barrier_t barrier;
 	int exit = 0;
+	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
+
+	if (flags & MAP_FLAG_LARGE_PAGE) {
+		bo_n_pages *= n_page_per_2mb;
+		unbind_n_pages *= n_page_per_2mb;
+		if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
+			unbind_n_page_offset *= n_page_per_2mb;
+	}
 
 	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
 	bo_size = page_size * bo_n_pages;
@@ -1721,13 +1746,13 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
 	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
 	if (flags & MAP_FLAG_USERPTR)
 		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
-					 unbind_n_page_offfset * page_size,
-					 addr + unbind_n_page_offfset * page_size,
+					 unbind_n_page_offset * page_size,
+					 addr + unbind_n_page_offset * page_size,
 					 unbind_n_pages * page_size, sync, 2);
 	else
 		xe_vm_bind_async(fd, vm, 0, bo1,
-				 unbind_n_page_offfset * page_size,
-				 addr + unbind_n_page_offfset * page_size,
+				 unbind_n_page_offset * page_size,
+				 addr + unbind_n_page_offset * page_size,
 				 unbind_n_pages * page_size, sync, 2);
 	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
 	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
@@ -1826,7 +1851,7 @@ igt_main
 		const char *name;
 		int bo_n_pages;
 		int n_binds;
-		int unbind_n_page_offfset;
+		int unbind_n_page_offset;
 		int unbind_n_pages;
 		unsigned int flags;
 	} munmap_sections[] = {
@@ -1835,6 +1860,13 @@ igt_main
 		{ "either-side-partial", 4, 2, 1, 2, 0 },
 		{ "either-side-partial-hammer", 4, 2, 1, 2,
 			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "either-side-partial-split-page-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE |
+			MAP_FLAG_LARGE_PAGE },
+		{ "either-side-partial-large-page-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE |
+			MAP_FLAG_LARGE_PAGE |
+			MAP_FLAG_LARGE_PAGE_NO_SPLIT },
 		{ "either-side-full", 4, 4, 1, 2, 0 },
 		{ "end", 4, 2, 0, 3, 0 },
 		{ "front", 4, 2, 1, 3, 0 },
@@ -1887,6 +1919,13 @@ igt_main
 		{ "either-side-full", 4, 4, 1, 2, 0 },
 		{ "either-side-partial-hammer", 4, 2, 1, 2,
 			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "either-side-partial-split-page-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE |
+			MAP_FLAG_LARGE_PAGE },
+		{ "either-side-partial-large-page-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE |
+			MAP_FLAG_LARGE_PAGE |
+			MAP_FLAG_LARGE_PAGE_NO_SPLIT },
 		{ "end", 4, 2, 0, 3, 0 },
 		{ "front", 4, 2, 1, 3, 0 },
 		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
@@ -2111,7 +2150,7 @@ igt_main
 			test_munmap_style_unbind(fd, hwe_non_copy,
 						 s->bo_n_pages,
 						 s->n_binds,
-						 s->unbind_n_page_offfset,
+						 s->unbind_n_page_offset,
 						 s->unbind_n_pages,
 						 s->flags);
 		}
@@ -2125,7 +2164,7 @@ igt_main
 			test_mmap_style_bind(fd, hwe_non_copy,
 					     s->bo_n_pages,
 					     s->n_binds,
-					     s->unbind_n_page_offfset,
+					     s->unbind_n_page_offset,
 					     s->unbind_n_pages,
 					     s->flags);
 		}
-- 
2.34.1

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test
  2023-07-21  4:37 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
  2023-07-21  4:37 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
  2023-07-21  4:37 ` [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
@ 2023-07-21  4:37 ` Matthew Brost
  2023-07-21 20:25   ` Rodrigo Vivi
  2023-07-21  4:37 ` [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section Matthew Brost
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 11+ messages in thread
From: Matthew Brost @ 2023-07-21  4:37 UTC (permalink / raw)
  To: igt-dev

Unmaps also exercise the bind conflict logic in Xe; add an unmap to the
bind queue independent test to further test the conflict logic.
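
The conflict logic keys off overlapping GPU VA ranges: an unmap whose
range intersects an in-flight bind from another queue has to be
ordered against it. Roughly (editor's model of the idea; the real
check lives in Xe's VM code, not in this test):

#include <stdbool.h>
#include <stdint.h>

/* Two VM operations conflict when their VA ranges intersect; Xe must
 * then serialize them even across otherwise independent bind queues. */
static bool va_ranges_conflict(uint64_t a_start, uint64_t a_len,
			       uint64_t b_start, uint64_t b_len)
{
	return a_start < b_start + b_len && b_start < a_start + a_len;
}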

v2: new commit message (Rodrigo)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 5fd511f0b8..36cd803577 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -811,6 +811,12 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 				NULL));
 	igt_assert_eq(data[0].data, 0xc0ffee);
 
+	syncobj_destroy(fd, sync[0].handle);
+	sync[0].handle = syncobj_create(fd, 0);
+	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+	xe_vm_unbind_all_async(fd, vm, 0, bo, sync, 1);
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+
 	syncobj_destroy(fd, sync[0].handle);
 	for (i = 0; i < N_ENGINES; i++) {
 		syncobj_destroy(fd, syncobjs[i]);
-- 
2.34.1

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section
  2023-07-21  4:37 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
                   ` (2 preceding siblings ...)
  2023-07-21  4:37 ` [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test Matthew Brost
@ 2023-07-21  4:37 ` Matthew Brost
  2023-07-21  5:27 ` [igt-dev] ○ CI.xeBAT: info for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Patchwork
  2023-07-21  5:34 ` [igt-dev] ✗ Fi.CI.BAT: failure " Patchwork
  5 siblings, 0 replies; 11+ messages in thread
From: Matthew Brost @ 2023-07-21  4:37 UTC (permalink / raw)
  To: igt-dev; +Cc: Rodrigo Vivi

Verify bind engines can't race and corrupt page tables.
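
In the new CONFLICT mode the test strides successive binds by 2 MiB
instead of by the BO size, so binds issued from different engines land
in adjacent slots of the same page-table structures. A back-of-envelope
check of that sharing (editor's sketch, assuming a conventional 4-level
layout where a leaf page table spans 2 MiB and a directory page spans
1 GiB):

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M	(2ull << 20)
#define SZ_1G	(1ull << 30)

/* Addresses one 2 MiB stride apart use different leaf page tables but
 * share a page-directory page, which is exactly the state that racing
 * bind engines must not corrupt. */
static bool share_directory_page(uint64_t va_a, uint64_t va_b)
{
	return va_a / SZ_1G == va_b / SZ_1G &&
	       va_a / SZ_2M != va_b / SZ_2M;
}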

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 43 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 36 insertions(+), 7 deletions(-)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 36cd803577..315cb70094 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -689,10 +689,18 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
  * Description: Test independent bind engines
  * Functionality: bind engines
  * Run type: BAT
+ *
+ * SUBTEST: bind-engines-conflict
+ * Description: Test conflicting bind engines
+ * Functionality: bind engines
+ * Run type: BAT
  */
 
+#define CONFLICT	(0x1 << 0)
+
 static void
-test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
+test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
+			      unsigned int flags)
 {
 	uint32_t vm;
 	uint64_t addr = 0x1a0000;
@@ -759,7 +767,7 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 			xe_spin_wait_started(&data[i].spin);
 
 			/* Do bind to 1st engine blocked on cork */
-			addr += bo_size;
+			addr += (flags & CONFLICT) ? (0x1 << 21) : bo_size;
 			sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
 			sync[1].handle = syncobjs[e];
 			xe_vm_bind_async(fd, vm, bind_engines[e], bo, 0, addr,
@@ -795,10 +803,20 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 		xe_exec(fd, &exec);
 	}
 
-	/* Verify initial bind, bind + write to 2nd engine done */
-	igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0, NULL));
-	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
-	igt_assert_eq(data[1].data, 0xc0ffee);
+	if (!(flags & CONFLICT)) {
+		/* Verify initial bind, bind + write to 2nd engine done */
+		igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0,
+					NULL));
+		igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0,
+					NULL));
+		igt_assert_eq(data[1].data, 0xc0ffee);
+	} else {
+		/* Let jobs run for a bit */
+		usleep(100000);
+		/* bind + write to 2nd engine waiting */
+		igt_assert(!syncobj_wait(fd, &syncobjs[1], 1, 1, 0, NULL));
+		igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
+	}
 
 	/* Verify bind + write to 1st engine still inflight */
 	igt_assert(!syncobj_wait(fd, &syncobjs[0], 1, 1, 0, NULL));
@@ -811,6 +829,13 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 				NULL));
 	igt_assert_eq(data[0].data, 0xc0ffee);
 
+	if (flags & CONFLICT) {
+		/* Verify bind + write to 2nd engine done */
+		igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0,
+					NULL));
+		igt_assert_eq(data[1].data, 0xc0ffee);
+	}
+
 	syncobj_destroy(fd, sync[0].handle);
 	sync[0].handle = syncobj_create(fd, 0);
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
@@ -2001,7 +2026,11 @@ igt_main
 
 	igt_subtest("bind-engines-independent")
 		xe_for_each_hw_engine(fd, hwe)
-			test_bind_engines_independent(fd, hwe);
+			test_bind_engines_independent(fd, hwe, 0);
+
+	igt_subtest("bind-engines-conflict")
+		xe_for_each_hw_engine(fd, hwe)
+			test_bind_engines_independent(fd, hwe, CONFLICT);
 
 	igt_subtest("bind-array-twice")
 		xe_for_each_hw_engine(fd, hwe)
-- 
2.34.1

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [igt-dev] ○ CI.xeBAT: info for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
  2023-07-21  4:37 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
                   ` (3 preceding siblings ...)
  2023-07-21  4:37 ` [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section Matthew Brost
@ 2023-07-21  5:27 ` Patchwork
  2023-07-21  5:34 ` [igt-dev] ✗ Fi.CI.BAT: failure " Patchwork
  5 siblings, 0 replies; 11+ messages in thread
From: Patchwork @ 2023-07-21  5:27 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

[-- Attachment #1: Type: text/plain, Size: 374 bytes --]

== Series Details ==

Series: series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
URL   : https://patchwork.freedesktop.org/series/121090/
State : info

== Summary ==

Participating hosts:
bat-atsm-2
bat-dg2-oem2
bat-adlp-7
Missing hosts results[0]:
Results: [IGTPW_9445](https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_9445/index.html)




^ permalink raw reply	[flat|nested] 11+ messages in thread

* [igt-dev] ✗ Fi.CI.BAT: failure for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
  2023-07-21  4:37 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
                   ` (4 preceding siblings ...)
  2023-07-21  5:27 ` [igt-dev] ○ CI.xeBAT: info for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Patchwork
@ 2023-07-21  5:34 ` Patchwork
  5 siblings, 0 replies; 11+ messages in thread
From: Patchwork @ 2023-07-21  5:34 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

[-- Attachment #1: Type: text/plain, Size: 11096 bytes --]

== Series Details ==

Series: series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
URL   : https://patchwork.freedesktop.org/series/121090/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_13402 -> IGTPW_9445
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with IGTPW_9445 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in IGTPW_9445, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/index.html

Participating hosts (43 -> 43)
------------------------------

  Additional (1): fi-kbl-soraka 
  Missing    (1): fi-snb-2520m 

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in IGTPW_9445:

### IGT changes ###

#### Possible regressions ####

  * igt@gem_lmem_swapping@parallel-random-engines:
    - fi-kbl-soraka:      NOTRUN -> [INCOMPLETE][1]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-kbl-soraka/igt@gem_lmem_swapping@parallel-random-engines.html

  
Known issues
------------

  Here are the changes found in IGTPW_9445 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_huc_copy@huc-copy:
    - fi-kbl-soraka:      NOTRUN -> [SKIP][2] ([fdo#109271] / [i915#2190])
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-kbl-soraka/igt@gem_huc_copy@huc-copy.html

  * igt@gem_lmem_swapping@basic:
    - fi-kbl-soraka:      NOTRUN -> [SKIP][3] ([fdo#109271] / [i915#4613]) +2 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-kbl-soraka/igt@gem_lmem_swapping@basic.html

  * igt@i915_pm_rpm@basic-rte:
    - fi-cfl-guc:         [PASS][4] -> [FAIL][5] ([i915#7940])
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/fi-cfl-guc/igt@i915_pm_rpm@basic-rte.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-cfl-guc/igt@i915_pm_rpm@basic-rte.html
    - fi-cfl-8109u:       [PASS][6] -> [FAIL][7] ([i915#7940])
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/fi-cfl-8109u/igt@i915_pm_rpm@basic-rte.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-cfl-8109u/igt@i915_pm_rpm@basic-rte.html

  * igt@i915_pm_rpm@module-reload:
    - fi-cfl-8700k:       [PASS][8] -> [FAIL][9] ([i915#7940])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/fi-cfl-8700k/igt@i915_pm_rpm@module-reload.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-cfl-8700k/igt@i915_pm_rpm@module-reload.html

  * igt@i915_selftest@live@gt_pm:
    - fi-kbl-soraka:      NOTRUN -> [DMESG-FAIL][10] ([i915#1886] / [i915#7913])
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-kbl-soraka/igt@i915_selftest@live@gt_pm.html

  * igt@i915_selftest@live@mman:
    - bat-rpls-2:         NOTRUN -> [TIMEOUT][11] ([i915#6794] / [i915#7392])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-rpls-2/igt@i915_selftest@live@mman.html

  * igt@i915_selftest@live@requests:
    - bat-rpls-1:         [PASS][12] -> [ABORT][13] ([i915#4983] / [i915#7911] / [i915#7920])
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/bat-rpls-1/igt@i915_selftest@live@requests.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-rpls-1/igt@i915_selftest@live@requests.html

  * igt@i915_suspend@basic-s2idle-without-i915:
    - bat-rpls-2:         NOTRUN -> [WARN][14] ([i915#8747])
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-rpls-2/igt@i915_suspend@basic-s2idle-without-i915.html

  * igt@kms_chamelium_hpd@common-hpd-after-suspend:
    - bat-adlp-9:         NOTRUN -> [SKIP][15] ([i915#7828])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-adlp-9/igt@kms_chamelium_hpd@common-hpd-after-suspend.html
    - bat-rpls-2:         NOTRUN -> [SKIP][16] ([i915#7828])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-rpls-2/igt@kms_chamelium_hpd@common-hpd-after-suspend.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
    - fi-kbl-soraka:      NOTRUN -> [SKIP][17] ([fdo#109271]) +15 similar issues
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-kbl-soraka/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html

  * igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-d-edp-1:
    - bat-rplp-1:         [PASS][18] -> [ABORT][19] ([i915#8442] / [i915#8668])
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/bat-rplp-1/igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-d-edp-1.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-rplp-1/igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-d-edp-1.html

  * igt@kms_pipe_crc_basic@suspend-read-crc:
    - bat-rpls-2:         NOTRUN -> [SKIP][20] ([i915#1845])
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-rpls-2/igt@kms_pipe_crc_basic@suspend-read-crc.html

  
#### Possible fixes ####

  * igt@i915_pm_rpm@module-reload:
    - fi-rkl-11600:       [FAIL][21] ([i915#7940]) -> [PASS][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/fi-rkl-11600/igt@i915_pm_rpm@module-reload.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/fi-rkl-11600/igt@i915_pm_rpm@module-reload.html

  * igt@i915_selftest@live@gt_lrc:
    - bat-adlp-9:         [INCOMPLETE][23] ([i915#4983] / [i915#7913]) -> [PASS][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/bat-adlp-9/igt@i915_selftest@live@gt_lrc.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-adlp-9/igt@i915_selftest@live@gt_lrc.html

  * igt@i915_selftest@live@requests:
    - bat-rpls-2:         [ABORT][25] ([i915#7913] / [i915#7982]) -> [PASS][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/bat-rpls-2/igt@i915_selftest@live@requests.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-rpls-2/igt@i915_selftest@live@requests.html

  * igt@i915_selftest@live@workarounds:
    - bat-dg2-11:         [DMESG-FAIL][27] ([i915#7913]) -> [PASS][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/bat-dg2-11/igt@i915_selftest@live@workarounds.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-dg2-11/igt@i915_selftest@live@workarounds.html

  
#### Warnings ####

  * igt@i915_selftest@live@gt_tlb:
    - bat-mtlp-8:         [DMESG-FAIL][29] ([i915#8937]) -> [DMESG-WARN][30] ([i915#8937])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/bat-mtlp-8/igt@i915_selftest@live@gt_tlb.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-mtlp-8/igt@i915_selftest@live@gt_tlb.html

  * igt@i915_selftest@live@slpc:
    - bat-mtlp-8:         [DMESG-WARN][31] ([i915#8937]) -> [DMESG-WARN][32] ([i915#6367] / [i915#8937])
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13402/bat-mtlp-8/igt@i915_selftest@live@slpc.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/bat-mtlp-8/igt@i915_selftest@live@slpc.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [i915#1845]: https://gitlab.freedesktop.org/drm/intel/issues/1845
  [i915#1886]: https://gitlab.freedesktop.org/drm/intel/issues/1886
  [i915#2190]: https://gitlab.freedesktop.org/drm/intel/issues/2190
  [i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
  [i915#4983]: https://gitlab.freedesktop.org/drm/intel/issues/4983
  [i915#6367]: https://gitlab.freedesktop.org/drm/intel/issues/6367
  [i915#6794]: https://gitlab.freedesktop.org/drm/intel/issues/6794
  [i915#7392]: https://gitlab.freedesktop.org/drm/intel/issues/7392
  [i915#7828]: https://gitlab.freedesktop.org/drm/intel/issues/7828
  [i915#7911]: https://gitlab.freedesktop.org/drm/intel/issues/7911
  [i915#7913]: https://gitlab.freedesktop.org/drm/intel/issues/7913
  [i915#7920]: https://gitlab.freedesktop.org/drm/intel/issues/7920
  [i915#7940]: https://gitlab.freedesktop.org/drm/intel/issues/7940
  [i915#7982]: https://gitlab.freedesktop.org/drm/intel/issues/7982
  [i915#8442]: https://gitlab.freedesktop.org/drm/intel/issues/8442
  [i915#8668]: https://gitlab.freedesktop.org/drm/intel/issues/8668
  [i915#8747]: https://gitlab.freedesktop.org/drm/intel/issues/8747
  [i915#8937]: https://gitlab.freedesktop.org/drm/intel/issues/8937


Build changes
-------------

  * CI: CI-20190529 -> None
  * IGT: IGT_7396 -> IGTPW_9445

  CI-20190529: 20190529
  CI_DRM_13402: 7a742f6e1e77987a00325c93e41ade65ad85f0d5 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_9445: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/index.html
  IGT_7396: 8e84faf33c2cf3482c7dff814d256089bc03db5d @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git


Testlist changes
----------------

+igt@xe_exec_basic@many-engines-many-vm-null
+igt@xe_exec_basic@many-engines-many-vm-null-defer-bind
+igt@xe_exec_basic@many-engines-many-vm-null-defer-mmap
+igt@xe_exec_basic@many-engines-many-vm-null-rebind
+igt@xe_exec_basic@many-engines-null
+igt@xe_exec_basic@many-engines-null-defer-bind
+igt@xe_exec_basic@many-engines-null-defer-mmap
+igt@xe_exec_basic@many-engines-null-rebind
+igt@xe_exec_basic@many-null
+igt@xe_exec_basic@many-null-defer-bind
+igt@xe_exec_basic@many-null-defer-mmap
+igt@xe_exec_basic@many-null-rebind
+igt@xe_exec_basic@no-exec-null
+igt@xe_exec_basic@no-exec-null-defer-bind
+igt@xe_exec_basic@no-exec-null-defer-mmap
+igt@xe_exec_basic@no-exec-null-rebind
+igt@xe_exec_basic@once-null
+igt@xe_exec_basic@once-null-defer-bind
+igt@xe_exec_basic@once-null-defer-mmap
+igt@xe_exec_basic@once-null-rebind
+igt@xe_exec_basic@twice-null
+igt@xe_exec_basic@twice-null-defer-bind
+igt@xe_exec_basic@twice-null-defer-mmap
+igt@xe_exec_basic@twice-null-rebind
+igt@xe_vm@bind-engines-conflict
+igt@xe_vm@mmap-style-bind-all
+igt@xe_vm@mmap-style-bind-either-side-full
+igt@xe_vm@mmap-style-bind-either-side-partial
+igt@xe_vm@mmap-style-bind-either-side-partial-hammer
+igt@xe_vm@mmap-style-bind-either-side-partial-large-page-hammer
+igt@xe_vm@mmap-style-bind-either-side-partial-split-page-hammer
+igt@xe_vm@mmap-style-bind-end
+igt@xe_vm@mmap-style-bind-front
+igt@xe_vm@mmap-style-bind-many-all
+igt@xe_vm@mmap-style-bind-many-either-side-partial
+igt@xe_vm@mmap-style-bind-many-either-side-partial-hammer
+igt@xe_vm@mmap-style-bind-one-partial
+igt@xe_vm@mmap-style-bind-userptr-all
+igt@xe_vm@mmap-style-bind-userptr-either-side-full
+igt@xe_vm@mmap-style-bind-userptr-either-side-partial
+igt@xe_vm@mmap-style-bind-userptr-one-partial
+igt@xe_vm@munmap-style-unbind-either-side-partial-large-page-hammer
+igt@xe_vm@munmap-style-unbind-either-side-partial-split-page-hammer

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9445/index.html


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test
  2023-07-21  4:37 ` [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test Matthew Brost
@ 2023-07-21 20:25   ` Rodrigo Vivi
  0 siblings, 0 replies; 11+ messages in thread
From: Rodrigo Vivi @ 2023-07-21 20:25 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Thu, Jul 20, 2023 at 09:37:34PM -0700, Matthew Brost wrote:
> Unmaps also exercise the bind conflict logic in Xe; add an unmap to the
> bind queue independent test to further test the conflict logic.
> 
> v2: new commit message (Rodrigo)

Thanks for this and for the extra explanation offline.

> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  tests/xe/xe_vm.c | 6 ++++++
>  1 file changed, 6 insertions(+)
> 
> diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> index 5fd511f0b8..36cd803577 100644
> --- a/tests/xe/xe_vm.c
> +++ b/tests/xe/xe_vm.c
> @@ -811,6 +811,12 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
>  				NULL));
>  	igt_assert_eq(data[0].data, 0xc0ffee);
>  
> +	syncobj_destroy(fd, sync[0].handle);
> +	sync[0].handle = syncobj_create(fd, 0);
> +	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> +	xe_vm_unbind_all_async(fd, vm, 0, bo, sync, 1);
> +	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));

it looks like this would work even without the conflict logic, but yes,
it will exercise that piece of the tree, and it doesn't hurt. So, let's
move ahead with it:

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>


> +
>  	syncobj_destroy(fd, sync[0].handle);
>  	for (i = 0; i < N_ENGINES; i++) {
>  		syncobj_destroy(fd, syncobjs[i]);
> -- 
> 2.34.1
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2023-07-21 20:25 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-07-21  4:37 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
2023-07-21  4:37 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
2023-07-21  4:37 ` [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
2023-07-21  4:37 ` [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test Matthew Brost
2023-07-21 20:25   ` Rodrigo Vivi
2023-07-21  4:37 ` [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section Matthew Brost
2023-07-21  5:27 ` [igt-dev] ○ CI.xeBAT: info for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Patchwork
2023-07-21  5:34 ` [igt-dev] ✗ Fi.CI.BAT: failure " Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2023-07-10 14:58 [igt-dev] [PATCH 1/5] " Matthew Brost
2023-07-10 14:58 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
2023-07-13 15:24   ` Rodrigo Vivi
2023-07-14  4:13     ` Matthew Brost

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox