From: Matthew Brost <matthew.brost@intel.com>
To: igt-dev@lists.freedesktop.org
Subject: [igt-dev] [PATCH v2 6/7] xe_exec_compute_mode: Allow dma-fences as in-syncs to compute execs
Date: Mon, 1 May 2023 23:55:35 -0700 [thread overview]
Message-ID: <20230502065536.3223489-7-matthew.brost@intel.com> (raw)
In-Reply-To: <20230502065536.3223489-1-matthew.brost@intel.com>
We allow this behavior now, let's test it.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
tests/xe/xe_exec_compute_mode.c | 159 ++++++++++++++++++++++++++++----
1 file changed, 141 insertions(+), 18 deletions(-)
diff --git a/tests/xe/xe_exec_compute_mode.c b/tests/xe/xe_exec_compute_mode.c
index 750815764..750e65b5d 100644
--- a/tests/xe/xe_exec_compute_mode.c
+++ b/tests/xe/xe_exec_compute_mode.c
@@ -30,6 +30,7 @@
#define BIND_ENGINE (0x1 << 4)
#define VM_FOR_BO (0x1 << 5)
#define ENGINE_EARLY (0x1 << 6)
+#define DMA_FENCES_FOR_BINDS (0x1 << 7)
/**
* SUBTEST: twice-%s
@@ -61,6 +62,32 @@
* @bindengine-userptr-rebind: bindengine userptr rebind
* @bindengine-userptr-invalidate: bindengine userptr invalidate
* @bindengine-userptr-invalidate-race: bindengine-userptr invalidate race
+ * @basic-dma-fences:
+ * basic dma fences
+ * @preempt-fence-early-dma-fences:
+ * preempt fence early dma fences
+ * @userptr-dma-fences:
+ * userptr dma fences
+ * @rebind-dma-fences:
+ * rebind dma fences
+ * @userptr-rebind-dma-fences:
+ * userptr rebind dma fences
+ * @userptr-invalidate-dma-fences:
+ * userptr invalidate dma fences
+ * @userptr-invalidate-race-dma-fences:
+ * userptr invalidate race dma fences
+ * @bindengine-dma-fences:
+ * bindengine dma fences
+ * @bindengine-userptr-dma-fences:
+ * bindengine userptr dma fences
+ * @bindengine-rebind-dma-fences:
+ * bindengine rebind dma fences
+ * @bindengine-userptr-rebind-dma-fences:
+ * bindengine userptr rebind dma fences
+ * @bindengine-userptr-invalidate-dma-fences:
+ * bindengine userptr invalidate dma fences
+ * @bindengine-userptr-invalidate-race-dma-fences:
+ * bindengine-userptr invalidate race dma fences
*/
/**
@@ -83,6 +110,28 @@
* @bindengine-rebind: bindengine rebind
* @bindengine-userptr-rebind: bindengine userptr rebind
* @bindengine-userptr-invalidate: bindengine userptr invalidate
+ * @basic-dma-fences:
+ * basic dma fences
+ * @preempt-fence-early-dma-fences:
+ * preempt fence early dma fences
+ * @userptr-dma-fences:
+ * userptr dma fences
+ * @rebind-dma-fences:
+ * rebind dma fences
+ * @userptr-rebind-dma-fences:
+ * userptr rebind dma fences
+ * @userptr-invalidate-dma-fences:
+ * userptr invalidate dma fences
+ * @bindengine-dma-fences:
+ * bindengine dma fences
+ * @bindengine-userptr-dma-fences:
+ * bindengine userptr dma fences
+ * @bindengine-rebind-dma-fences:
+ * bindengine rebind dma fences
+ * @bindengine-userptr-rebind-dma-fences:
+ * bindengine userptr rebind dma fences
+ * @bindengine-userptr-invalidate-dma-fences:
+ * bindengine userptr invalidate dma fences
*/
static void
test_exec(int fd, struct drm_xe_engine_class_instance *eci,
@@ -91,14 +140,21 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
uint32_t vm;
uint64_t addr = 0x1a0000;
#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+ bool dma_fences = flags & DMA_FENCES_FOR_BINDS;
struct drm_xe_sync sync[1] = {
{ .flags = DRM_XE_SYNC_USER_FENCE | DRM_XE_SYNC_SIGNAL,
.timeline_value = USER_FENCE_VALUE },
};
+ struct drm_xe_sync dma_sync[2] = {
+ { .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+ { .flags = DRM_XE_SYNC_USER_FENCE | DRM_XE_SYNC_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
+ };
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
- .num_syncs = 1,
- .syncs = to_user_pointer(sync),
+ .num_syncs = dma_fences ? 2 : 1,
+ .syncs = dma_fences ? to_user_pointer(dma_sync) :
+ to_user_pointer(sync),
};
uint32_t engines[MAX_N_ENGINES];
uint32_t bind_engines[MAX_N_ENGINES];
@@ -113,6 +169,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
} *data;
int i, j, b;
int map_fd = -1;
+ uint32_t syncobj;
igt_assert(n_engines <= MAX_N_ENGINES);
@@ -175,17 +232,29 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
bind_engines[i] = 0;
};
- sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (dma_fences) {
+ syncobj = syncobj_create(fd, 0);
+ dma_sync[0].handle = syncobj;
+ } else {
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ }
+
if (bo)
xe_vm_bind_async(fd, vm, bind_engines[0], bo, 0, addr,
- bo_size, sync, 1);
+ bo_size, dma_fences ? dma_sync : sync, 1);
else
xe_vm_bind_userptr_async(fd, vm, bind_engines[0],
to_user_pointer(data), addr,
- bo_size, sync, 1);
+ bo_size, dma_fences ? dma_sync : sync,
+ 1);
#define ONE_SEC 1000
- xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, ONE_SEC);
- data[0].vm_sync = 0;
+ if (!dma_fences) {
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
+ ONE_SEC);
+ data[0].vm_sync = 0;
+ } else {
+ dma_sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+ }
for (i = 0; i < n_execs; i++) {
uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
@@ -202,7 +271,12 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
data[i].batch[b++] = MI_BATCH_BUFFER_END;
igt_assert(b <= ARRAY_SIZE(data[i].batch));
- sync[0].addr = addr + (char *)&data[i].exec_sync - (char *)data;
+ if (dma_fences)
+ dma_sync[1].addr = addr +
+ (char *)&data[i].exec_sync - (char *)data;
+ else
+ sync[0].addr = addr +
+ (char *)&data[i].exec_sync - (char *)data;
exec.engine_id = engines[e];
exec.address = batch_addr;
@@ -214,20 +288,31 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
xe_vm_unbind_async(fd, vm, bind_engines[e], 0,
addr, bo_size, NULL, 0);
- sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (dma_fences)
+ dma_sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+ else
+ sync[0].addr =
+ to_user_pointer(&data[0].vm_sync);
addr += bo_size;
if (bo)
xe_vm_bind_async(fd, vm, bind_engines[e], bo,
- 0, addr, bo_size, sync, 1);
+ 0, addr, bo_size,
+ dma_fences ? dma_sync : sync,
+ 1);
else
xe_vm_bind_userptr_async(fd, vm,
bind_engines[e],
to_user_pointer(data),
- addr, bo_size, sync,
- 1);
- xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
- NULL, ONE_SEC);
- data[0].vm_sync = 0;
+ addr, bo_size,
+ dma_fences ? dma_sync :
+ sync, 1);
+ if (!dma_fences) {
+ xe_wait_ufence(fd, &data[0].vm_sync,
+ USER_FENCE_VALUE, NULL, ONE_SEC);
+ data[0].vm_sync = 0;
+ } else {
+ dma_sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+ }
}
if (flags & INVALIDATE && i + 1 != n_execs) {
@@ -275,10 +360,21 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
if (flags & INVALIDATE)
usleep(250000);
- sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ if (dma_fences) {
+ syncobj_reset(fd, &syncobj, 1);
+ dma_sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+ } else {
+ sync[0].addr = to_user_pointer(&data[0].vm_sync);
+ }
+
xe_vm_unbind_async(fd, vm, bind_engines[0], 0, addr, bo_size,
- sync, 1);
- xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, ONE_SEC);
+ dma_fences ? dma_sync : sync, 1);
+ if (dma_fences)
+ igt_assert(syncobj_wait(fd, &syncobj, 1, INT64_MAX, 0,
+ NULL));
+ else
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
+ ONE_SEC);
for (i = j; i < n_execs; i++)
igt_assert_eq(data[i].data, 0xc0ffee);
@@ -289,6 +385,9 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
xe_engine_destroy(fd, bind_engines[i]);
}
+ if (dma_fences)
+ syncobj_destroy(fd, syncobj);
+
if (bo) {
munmap(data, bo_size);
gem_close(fd, bo);
@@ -323,6 +422,30 @@ igt_main
INVALIDATE },
{ "bindengine-userptr-invalidate-race", BIND_ENGINE | USERPTR |
INVALIDATE | RACE },
+ { "basic-dma-fences", DMA_FENCES_FOR_BINDS | 0 },
+ { "preempt-fence-early-dma-fences", DMA_FENCES_FOR_BINDS |
+ VM_FOR_BO | ENGINE_EARLY },
+ { "userptr-dma-fences", DMA_FENCES_FOR_BINDS | USERPTR },
+ { "rebind-dma-fences", DMA_FENCES_FOR_BINDS | REBIND },
+ { "userptr-rebind-dma-fences", DMA_FENCES_FOR_BINDS | USERPTR |
+ REBIND },
+ { "userptr-invalidate-dma-fences", DMA_FENCES_FOR_BINDS |
+ USERPTR | INVALIDATE },
+ { "userptr-invalidate-race-dma-fences", DMA_FENCES_FOR_BINDS |
+ USERPTR | INVALIDATE | RACE },
+ { "bindengine-dma-fences", DMA_FENCES_FOR_BINDS | BIND_ENGINE },
+ { "bindengine-userptr-dma-fences", DMA_FENCES_FOR_BINDS |
+ BIND_ENGINE | USERPTR },
+ { "bindengine-rebind-dma-fences", DMA_FENCES_FOR_BINDS |
+ BIND_ENGINE | REBIND },
+ { "bindengine-userptr-rebind-dma-fences", DMA_FENCES_FOR_BINDS |
+ BIND_ENGINE | USERPTR | REBIND },
+ { "bindengine-userptr-invalidate-dma-fences",
+ DMA_FENCES_FOR_BINDS | BIND_ENGINE | USERPTR |
+ INVALIDATE },
+ { "bindengine-userptr-invalidate-race-dma-fences",
+ DMA_FENCES_FOR_BINDS | BIND_ENGINE | USERPTR |
+ INVALIDATE | RACE },
{ NULL },
};
int fd;
--
2.34.1
next prev parent reply other threads:[~2023-05-02 6:55 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-05-02 6:55 [igt-dev] [PATCH v2 0/7] IGT updates for upstreaming prep Matthew Brost
2023-05-02 6:55 ` [igt-dev] [PATCH v2 1/7] xe: Update to latest uAPI Matthew Brost
2023-05-02 6:55 ` [igt-dev] [PATCH v2 2/7] xe_exec_basic: Add NULL VM bind section Matthew Brost
2023-05-02 6:55 ` [igt-dev] [PATCH v2 3/7] xe_vm: MMAP style VM binds section Matthew Brost
2023-05-02 6:55 ` [igt-dev] [PATCH v2 4/7] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
2023-05-02 6:55 ` [igt-dev] [PATCH v2 5/7] xe: Update to new VM bind uAPI Matthew Brost
2023-05-02 6:55 ` Matthew Brost [this message]
2023-05-02 6:55 ` [igt-dev] [PATCH v2 7/7] xe_vm: Add EIO test Matthew Brost
2023-05-02 7:30 ` [igt-dev] ✓ Fi.CI.BAT: success for IGT updates for upstreaming prep (rev2) Patchwork
2023-05-02 8:42 ` [igt-dev] ✓ Fi.CI.IGT: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230502065536.3223489-7-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=igt-dev@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox