* [PATCH v2 2/2] tests/xe/xe_exec_basic: Add section to test zero number of BB in exec IOCTL
2024-07-19 19:41 [PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds Matthew Brost
@ 2024-07-19 19:41 ` Matthew Brost
2024-07-23 12:27 ` Kamil Konieczny
2024-07-19 20:33 ` ✗ Fi.CI.BUILD: failure for series starting with [v2,1/2] tests/xe/xe_vm: Add section to test zero number of VM binds Patchwork
` (2 subsequent siblings)
3 siblings, 1 reply; 8+ messages in thread
From: Matthew Brost @ 2024-07-19 19:41 UTC (permalink / raw)
To: igt-dev
Verify the exec queue ordering works wrt out-sync signaling when zero
number of BB passed to exec IOCTL.
v2:
- Fix build error
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
tests/intel/xe_exec_basic.c | 108 ++++++++++++++++++++++++++++++++++++
1 file changed, 108 insertions(+)
diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
index 0fd1ae062c..02b78d55f6 100644
--- a/tests/intel/xe_exec_basic.c
+++ b/tests/intel/xe_exec_basic.c
@@ -19,6 +19,7 @@
#include "xe/xe_ioctl.h"
#include "xe/xe_query.h"
+#include "xe/xe_spin.h"
#include <string.h>
#define MAX_N_EXEC_QUEUES 16
@@ -314,6 +315,109 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
}
}
+/**
+ * SUBTEST: zero-execs
+ * Description: Test zero execs in IOCTL
+ * Functionality: exec IOCTL
+ * Run type: BAT
+ */
+
+static void test_zero_execs(int fd, struct drm_xe_engine_class_instance *eci,
+ int n_execs)
+{
+ uint32_t vm;
+ uint64_t addr = 0x1a0000;
+ struct drm_xe_sync sync[2] = {
+ { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ };
+ struct drm_xe_exec exec = {
+ .num_batch_buffer = 1,
+ .num_syncs = 1,
+ .syncs = to_user_pointer(sync),
+ };
+ size_t bo_size;
+ uint32_t bo = 0;
+ uint32_t syncobj;
+ uint32_t exec_queue;
+ struct xe_cork cork;
+ struct {
+ uint32_t batch[16];
+ uint64_t pad;
+ uint32_t data;
+ } *data;
+ int i, b;
+
+ vm = xe_vm_create(fd, 0, 0);
+ bo_size = sizeof(*data) * n_execs;
+ bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
+ xe_get_default_alignment(fd));
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ data = xe_bo_map(fd, bo, bo_size);
+ exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+
+ xe_cork_init(fd, eci, &cork);
+ xe_cork_wait_started(&cork);
+
+ /* Initial bind behind cork */
+ sync[0].handle = syncobj = syncobj_create(fd, 0);
+ sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
+ sync[1].handle = cork.syncobj;
+ sync[1].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
+ xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 2);
+
+ /* Exec behind bind */
+ for (i = 0; i < n_execs; i++) {
+ uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
+ uint64_t batch_addr = addr + batch_offset;
+ uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
+ uint64_t sdi_addr = addr + sdi_offset;
+
+ b = 0;
+ data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+ data[i].batch[b++] = sdi_addr;
+ data[i].batch[b++] = sdi_addr >> 32;
+ data[i].batch[b++] = 0xc0ffee;
+ data[i].batch[b++] = MI_BATCH_BUFFER_END;
+ igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+ sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
+
+ exec.exec_queue_id = exec_queue;
+ exec.address = batch_addr;
+ xe_exec(fd, &exec);
+ }
+
+ /* Exec with no batch buffer */
+ sync[0].handle = syncobj_create(fd, 0);
+ sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
+ exec.num_batch_buffer = 0;
+ exec.address = 0;
+ xe_exec(fd, &exec);
+
+ /* Let jobs run for a bit */
+ usleep(100000);
+
+ /* both bind and execs are waiting */
+ igt_assert(!syncobj_wait(fd, &syncobj, 1, 0, 0, NULL));
+ igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
+
+ /* Release cork */
+ xe_cork_end(&cork);
+ xe_cork_wait_done(&cork);
+ xe_cork_fini(&cork);
+
+ /* both binds are done */
+ igt_assert(syncobj_wait(fd, &syncobj, 1, INT64_MAX, 0, NULL));
+ igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+
+ syncobj_destroy(fd, sync[0].handle);
+ gem_close(fd, bo);
+ xe_vm_destroy(fd, vm);
+}
+
igt_main
{
struct drm_xe_engine_class_instance *hwe;
@@ -383,6 +487,10 @@ igt_main
test_exec(fd, hwe, 1, 0, 1, s->flags);
}
+ igt_subtest("zero-execs")
+ xe_for_each_engine(fd, hwe)
+ test_zero_execs(fd, hwe, 1);
+
igt_fixture
drm_close_driver(fd);
--
2.34.1
^ permalink raw reply related [flat|nested] 8+ messages in thread* Re: [PATCH v2 2/2] tests/xe/xe_exec_basic: Add section to test zero number of BB in exec IOCTL
2024-07-19 19:41 ` [PATCH v2 2/2] tests/xe/xe_exec_basic: Add section to test zero number of BB in exec IOCTL Matthew Brost
@ 2024-07-23 12:27 ` Kamil Konieczny
2024-07-23 18:39 ` Matthew Brost
0 siblings, 1 reply; 8+ messages in thread
From: Kamil Konieczny @ 2024-07-23 12:27 UTC (permalink / raw)
To: igt-dev; +Cc: Matthew Brost
Hi Matthew,
On 2024-07-19 at 12:41:07 -0700, Matthew Brost wrote:
in subject you use 'tests/xe/':
[PATCH v2 2/2] tests/xe/xe_exec_basic: Add section to test zero number of BB in exec IOCTL
imho this should be:
[PATCH v2 2/2] tests/intel/xe_exec_basic: Add test for zero number of BB
Regards,
Kamil
> Verify the exec queue ordering works wrt out-sync signaling when zero
> number of BB passed to exec IOCTL.
>
> v2;
> - Fix build error
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> tests/intel/xe_exec_basic.c | 108 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 108 insertions(+)
>
> diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
> index 0fd1ae062c..02b78d55f6 100644
> --- a/tests/intel/xe_exec_basic.c
> +++ b/tests/intel/xe_exec_basic.c
> @@ -19,6 +19,7 @@
>
> #include "xe/xe_ioctl.h"
> #include "xe/xe_query.h"
> +#include "xe/xe_spin.h"
> #include <string.h>
>
> #define MAX_N_EXEC_QUEUES 16
> @@ -314,6 +315,109 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> }
> }
>
> +/**
> + * SUBTEST: zero-execs
> + * Description: Test zero execs in IOCTL
> + * Functionality: exec IOCTL
> + * Run type: BAT
> + */
> +
> +static void test_zero_execs(int fd, struct drm_xe_engine_class_instance *eci,
> + int n_execs)
> +{
> + uint32_t vm;
> + uint64_t addr = 0x1a0000;
> + struct drm_xe_sync sync[2] = {
> + { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
> + { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
> + };
> + struct drm_xe_exec exec = {
> + .num_batch_buffer = 1,
> + .num_syncs = 1,
> + .syncs = to_user_pointer(sync),
> + };
> + size_t bo_size;
> + uint32_t bo = 0;
> + uint32_t syncobj;
> + uint32_t exec_queue;
> + struct xe_cork cork;
> + struct {
> + uint32_t batch[16];
> + uint64_t pad;
> + uint32_t data;
> + } *data;
> + int i, b;
> +
> + vm = xe_vm_create(fd, 0, 0);
> + bo_size = sizeof(*data) * n_execs;
> + bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
> + xe_get_default_alignment(fd));
> + bo = xe_bo_create(fd, vm, bo_size,
> + vram_if_possible(fd, eci->gt_id),
> + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
> + data = xe_bo_map(fd, bo, bo_size);
> + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> +
> + xe_cork_init(fd, eci, &cork);
> + xe_cork_wait_started(&cork);
> +
> + /* Initial bind behind cork */
> + sync[0].handle = syncobj = syncobj_create(fd, 0);
> + sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
> + sync[1].handle = cork.syncobj;
> + sync[1].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
> + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 2);
> +
> + /* Exec behind bind */
> + for (i = 0; i < n_execs; i++) {
> + uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> + uint64_t batch_addr = addr + batch_offset;
> + uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> + uint64_t sdi_addr = addr + sdi_offset;
> +
> + b = 0;
> + data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> + data[i].batch[b++] = sdi_addr;
> + data[i].batch[b++] = sdi_addr >> 32;
> + data[i].batch[b++] = 0xc0ffee;
> + data[i].batch[b++] = MI_BATCH_BUFFER_END;
> + igt_assert(b <= ARRAY_SIZE(data[i].batch));
> +
> + sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
> +
> + exec.exec_queue_id = exec_queue;
> + exec.address = batch_addr;
> + xe_exec(fd, &exec);
> + }
> +
> + /* Exec with no batch buffer */
> + sync[0].handle = syncobj_create(fd, 0);
> + sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
> + exec.num_batch_buffer = 0;
> + exec.address = 0;
> + xe_exec(fd, &exec);
> +
> + /* Let jobs runs for a bit */
> + usleep(100000);
> +
> + /* both bind and execs are waiting */
> + igt_assert(!syncobj_wait(fd, &syncobj, 1, 0, 0, NULL));
> + igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
> +
> + /* Release cork */
> + xe_cork_end(&cork);
> + xe_cork_wait_done(&cork);
> + xe_cork_fini(&cork);
> +
> + /* both binds are done */
> + igt_assert(syncobj_wait(fd, &syncobj, 1, INT64_MAX, 0, NULL));
> + igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> +
> + syncobj_destroy(fd, sync[0].handle);
> + gem_close(fd, bo);
> + xe_vm_destroy(fd, vm);
> +}
> +
> igt_main
> {
> struct drm_xe_engine_class_instance *hwe;
> @@ -383,6 +487,10 @@ igt_main
> test_exec(fd, hwe, 1, 0, 1, s->flags);
> }
>
> + igt_subtest("zero-execs")
> + xe_for_each_engine(fd, hwe)
> + test_zero_execs(fd, hwe, 1);
> +
> igt_fixture
> drm_close_driver(fd);
>
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH v2 2/2] tests/xe/xe_exec_basic: Add section to test zero number of BB in exec IOCTL
2024-07-23 12:27 ` Kamil Konieczny
@ 2024-07-23 18:39 ` Matthew Brost
0 siblings, 0 replies; 8+ messages in thread
From: Matthew Brost @ 2024-07-23 18:39 UTC (permalink / raw)
To: Kamil Konieczny; +Cc: igt-dev
On Tue, Jul 23, 2024 at 02:27:33PM +0200, Kamil Konieczny wrote:
> Hi Matthew,
> On 2024-07-19 at 12:41:07 -0700, Matthew Brost wrote:
>
> in subject you use 'tests/xe/':
> [PATCH v2 2/2] tests/xe/xe_exec_basic: Add section to test zero number of BB in exec IOCTL
>
> imho this should be:
> [PATCH v2 2/2] tests/intel/xe_exec_basic: Add test for zero number of BB
>
Will change.
Matt
> Regards,
> Kamil
>
> > Verify the exec queue ordering works wrt out-sync signaling when zero
> > number of BB passed to exec IOCTL.
> >
> > v2;
> > - Fix build error
> >
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > tests/intel/xe_exec_basic.c | 108 ++++++++++++++++++++++++++++++++++++
> > 1 file changed, 108 insertions(+)
> >
> > diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
> > index 0fd1ae062c..02b78d55f6 100644
> > --- a/tests/intel/xe_exec_basic.c
> > +++ b/tests/intel/xe_exec_basic.c
> > @@ -19,6 +19,7 @@
> >
> > #include "xe/xe_ioctl.h"
> > #include "xe/xe_query.h"
> > +#include "xe/xe_spin.h"
> > #include <string.h>
> >
> > #define MAX_N_EXEC_QUEUES 16
> > @@ -314,6 +315,109 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > }
> > }
> >
> > +/**
> > + * SUBTEST: zero-execs
> > + * Description: Test zero execs in IOCTL
> > + * Functionality: exec IOCTL
> > + * Run type: BAT
> > + */
> > +
> > +static void test_zero_execs(int fd, struct drm_xe_engine_class_instance *eci,
> > + int n_execs)
> > +{
> > + uint32_t vm;
> > + uint64_t addr = 0x1a0000;
> > + struct drm_xe_sync sync[2] = {
> > + { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
> > + { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
> > + };
> > + struct drm_xe_exec exec = {
> > + .num_batch_buffer = 1,
> > + .num_syncs = 1,
> > + .syncs = to_user_pointer(sync),
> > + };
> > + size_t bo_size;
> > + uint32_t bo = 0;
> > + uint32_t syncobj;
> > + uint32_t exec_queue;
> > + struct xe_cork cork;
> > + struct {
> > + uint32_t batch[16];
> > + uint64_t pad;
> > + uint32_t data;
> > + } *data;
> > + int i, b;
> > +
> > + vm = xe_vm_create(fd, 0, 0);
> > + bo_size = sizeof(*data) * n_execs;
> > + bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
> > + xe_get_default_alignment(fd));
> > + bo = xe_bo_create(fd, vm, bo_size,
> > + vram_if_possible(fd, eci->gt_id),
> > + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
> > + data = xe_bo_map(fd, bo, bo_size);
> > + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> > +
> > + xe_cork_init(fd, eci, &cork);
> > + xe_cork_wait_started(&cork);
> > +
> > + /* Initial bind behind cork */
> > + sync[0].handle = syncobj = syncobj_create(fd, 0);
> > + sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
> > + sync[1].handle = cork.syncobj;
> > + sync[1].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
> > + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 2);
> > +
> > + /* Exec behind bind */
> > + for (i = 0; i < n_execs; i++) {
> > + uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> > + uint64_t batch_addr = addr + batch_offset;
> > + uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> > + uint64_t sdi_addr = addr + sdi_offset;
> > +
> > + b = 0;
> > + data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > + data[i].batch[b++] = sdi_addr;
> > + data[i].batch[b++] = sdi_addr >> 32;
> > + data[i].batch[b++] = 0xc0ffee;
> > + data[i].batch[b++] = MI_BATCH_BUFFER_END;
> > + igt_assert(b <= ARRAY_SIZE(data[i].batch));
> > +
> > + sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
> > +
> > + exec.exec_queue_id = exec_queue;
> > + exec.address = batch_addr;
> > + xe_exec(fd, &exec);
> > + }
> > +
> > + /* Exec with no batch buffer */
> > + sync[0].handle = syncobj_create(fd, 0);
> > + sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
> > + exec.num_batch_buffer = 0;
> > + exec.address = 0;
> > + xe_exec(fd, &exec);
> > +
> > + /* Let jobs runs for a bit */
> > + usleep(100000);
> > +
> > + /* both bind and execs are waiting */
> > + igt_assert(!syncobj_wait(fd, &syncobj, 1, 0, 0, NULL));
> > + igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
> > +
> > + /* Release cork */
> > + xe_cork_end(&cork);
> > + xe_cork_wait_done(&cork);
> > + xe_cork_fini(&cork);
> > +
> > + /* both binds are done */
> > + igt_assert(syncobj_wait(fd, &syncobj, 1, INT64_MAX, 0, NULL));
> > + igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > +
> > + syncobj_destroy(fd, sync[0].handle);
> > + gem_close(fd, bo);
> > + xe_vm_destroy(fd, vm);
> > +}
> > +
> > igt_main
> > {
> > struct drm_xe_engine_class_instance *hwe;
> > @@ -383,6 +487,10 @@ igt_main
> > test_exec(fd, hwe, 1, 0, 1, s->flags);
> > }
> >
> > + igt_subtest("zero-execs")
> > + xe_for_each_engine(fd, hwe)
> > + test_zero_execs(fd, hwe, 1);
> > +
> > igt_fixture
> > drm_close_driver(fd);
> >
> > --
> > 2.34.1
> >
^ permalink raw reply [flat|nested] 8+ messages in thread
* ✗ Fi.CI.BUILD: failure for series starting with [v2,1/2] tests/xe/xe_vm: Add section to test zero number of VM binds
2024-07-19 19:41 [PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds Matthew Brost
2024-07-19 19:41 ` [PATCH v2 2/2] tests/xe/xe_exec_basic: Add section to test zero number of BB in exec IOCTL Matthew Brost
@ 2024-07-19 20:33 ` Patchwork
2024-07-23 7:55 ` [PATCH v2 1/2] " Kamil Konieczny
2024-07-23 12:46 ` Kamil Konieczny
3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2024-07-19 20:33 UTC (permalink / raw)
To: Matthew Brost; +Cc: igt-dev
== Series Details ==
Series: series starting with [v2,1/2] tests/xe/xe_vm: Add section to test zero number of VM binds
URL : https://patchwork.freedesktop.org/series/136303/
State : failure
== Summary ==
Applying: tests/xe/xe_vm: Add section to test zero number of VM binds
Patch failed at 0001 tests/xe/xe_vm: Add section to test zero number of VM binds
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds
2024-07-19 19:41 [PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds Matthew Brost
2024-07-19 19:41 ` [PATCH v2 2/2] tests/xe/xe_exec_basic: Add section to test zero number of BB in exec IOCTL Matthew Brost
2024-07-19 20:33 ` ✗ Fi.CI.BUILD: failure for series starting with [v2,1/2] tests/xe/xe_vm: Add section to test zero number of VM binds Patchwork
@ 2024-07-23 7:55 ` Kamil Konieczny
2024-07-23 12:46 ` Kamil Konieczny
3 siblings, 0 replies; 8+ messages in thread
From: Kamil Konieczny @ 2024-07-23 7:55 UTC (permalink / raw)
To: igt-dev; +Cc: Matthew Brost
Hi Matthew,
On 2024-07-19 at 12:41:06 -0700, Matthew Brost wrote:
> Verify the bind queue ordering works wrt out-sync signaling when zero
> number of binds passed to VM bind IOCTL.
>
> v2:
> - Fix build error
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Your patch does not apply. If it is part of a bigger set of changes,
consider sending them all in one patch series, or wait for the
other ones to be merged.
Regards,
Kamil
> ---
> lib/xe/xe_ioctl.c | 13 ++++++++++
> lib/xe/xe_ioctl.h | 2 ++
> tests/intel/xe_vm.c | 63 +++++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 78 insertions(+)
>
> diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> index ae43ffd15e..27a4a4e70d 100644
> --- a/lib/xe/xe_ioctl.c
> +++ b/lib/xe/xe_ioctl.c
> @@ -154,6 +154,19 @@ void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> num_syncs, 0, 0);
> }
>
> +void xe_vm_bind_zero(int fd, uint32_t vm, uint32_t exec_queue,
> + struct drm_xe_sync *sync, uint32_t num_syncs)
> +{
> + struct drm_xe_vm_bind bind = {
> + .vm_id = vm,
> + .num_syncs = num_syncs,
> + .syncs = (uintptr_t)sync,
> + .exec_queue_id = exec_queue,
> + };
> +
> + igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind), 0);
> +}
> +
> void xe_vm_bind_async_flags(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> uint64_t offset, uint64_t addr, uint64_t size,
> struct drm_xe_sync *sync, uint32_t num_syncs,
> diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
> index b27c0053f0..6dfed1b330 100644
> --- a/lib/xe/xe_ioctl.h
> +++ b/lib/xe/xe_ioctl.h
> @@ -35,6 +35,8 @@ void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t exec_queue,
> void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> uint64_t offset, uint64_t addr, uint64_t size,
> struct drm_xe_sync *sync, uint32_t num_syncs);
> +void xe_vm_bind_zero(int fd, uint32_t vm, uint32_t exec_queue,
> + struct drm_xe_sync *sync, uint32_t num_syncs);
> void xe_vm_bind_userptr_async(int fd, uint32_t vm, uint32_t exec_queue,
> uint64_t userptr, uint64_t addr, uint64_t size,
> struct drm_xe_sync *sync, uint32_t num_syncs);
> diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
> index a088bd5db8..ede75f72aa 100644
> --- a/tests/intel/xe_vm.c
> +++ b/tests/intel/xe_vm.c
> @@ -868,6 +868,63 @@ static void xe_vm_bind_array_err(int fd, uint32_t vm, uint32_t exec_queue,
> do_ioctl_err(fd, DRM_IOCTL_XE_VM_BIND, &bind, err);
> }
>
> +/**
> + * SUBTEST: zero-binds
> + * Description: Test zero binds in IOCTL
> + * Functionality: bind engines
> + * Run type: BAT
> + */
> +
> +static void test_zero_binds(int fd, struct drm_xe_engine_class_instance *eci)
> +{
> + uint32_t vm;
> + uint64_t addr = 0x1a0000;
> + struct drm_xe_sync sync[1] = {
> + { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
> + };
> + size_t bo_size;
> + uint32_t bo = 0;
> + struct xe_cork cork;
> +
> + vm = xe_vm_create(fd, 0, 0);
> + bo_size = ALIGN(xe_cs_prefetch_size(fd),
> + xe_get_default_alignment(fd));
> + bo = xe_bo_create(fd, vm, bo_size,
> + vram_if_possible(fd, eci->gt_id),
> + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
> +
> + xe_cork_init(fd, eci, &cork);
> + xe_cork_wait_started(&cork);
> +
> + /* Initial bind behind cork */
> + sync[0].handle = cork.syncobj;
> + sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
> + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 1);
> +
> + /* Bind /w num_binds == 0 */
> + sync[0].handle = syncobj_create(fd, 0);
> + sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
> + xe_vm_bind_zero(fd, vm, 0, sync, 1);
> +
> + /* Let jobs runs for a bit */
> + usleep(100000);
> +
> + /* both binds are waiting */
> + igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
> +
> + /* Release cork */
> + xe_cork_end(&cork);
> + xe_cork_wait_done(&cork);
> + xe_cork_fini(&cork);
> +
> + /* both binds are done */
> + igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> +
> + syncobj_destroy(fd, sync[0].handle);
> + gem_close(fd, bo);
> + xe_vm_destroy(fd, vm);
> +}
> +
> #define BIND_ARRAY_BIND_EXEC_QUEUE_FLAG (0x1 << 0)
> #define BIND_ARRAY_ENOBUFS_FLAG (0x1 << 1)
>
> @@ -2416,6 +2473,12 @@ igt_main
> xe_for_each_engine(fd, hwe)
> test_bind_execqueues_independent(fd, hwe, CONFLICT);
>
> + igt_subtest("zero-binds")
> + xe_for_each_engine(fd, hwe) {
> + test_zero_binds(fd, hwe);
> + break;
> + }
> +
> igt_subtest("bind-array-twice")
> xe_for_each_engine(fd, hwe)
> test_bind_array(fd, hwe, 2, 0x1a0000, 0, 0);
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds
2024-07-19 19:41 [PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds Matthew Brost
` (2 preceding siblings ...)
2024-07-23 7:55 ` [PATCH v2 1/2] " Kamil Konieczny
@ 2024-07-23 12:46 ` Kamil Konieczny
2024-07-23 18:41 ` Matthew Brost
3 siblings, 1 reply; 8+ messages in thread
From: Kamil Konieczny @ 2024-07-23 12:46 UTC (permalink / raw)
To: igt-dev; +Cc: Matthew Brost
Hi Matthew,
On 2024-07-19 at 12:41:06 -0700, Matthew Brost wrote:
a nit about a subject, you wrote:
[PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds
imho this could be:
[PATCH v2 1/2] tests/intel/xe_vm: Test zero number of VM binds
> Verify the bind queue ordering works wrt out-sync signaling when zero
> number of binds passed to VM bind IOCTL.
>
> v2:
> - Fix build error
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> lib/xe/xe_ioctl.c | 13 ++++++++++
> lib/xe/xe_ioctl.h | 2 ++
> tests/intel/xe_vm.c | 63 +++++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 78 insertions(+)
>
> diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> index ae43ffd15e..27a4a4e70d 100644
> --- a/lib/xe/xe_ioctl.c
> +++ b/lib/xe/xe_ioctl.c
> @@ -154,6 +154,19 @@ void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> num_syncs, 0, 0);
> }
>
You should document each new public lib function.
> +void xe_vm_bind_zero(int fd, uint32_t vm, uint32_t exec_queue,
> + struct drm_xe_sync *sync, uint32_t num_syncs)
> +{
> + struct drm_xe_vm_bind bind = {
> + .vm_id = vm,
> + .num_syncs = num_syncs,
> + .syncs = (uintptr_t)sync,
> + .exec_queue_id = exec_queue,
> + };
> +
> + igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind), 0);
> +}
> +
> Why a new lib function? Why not just use __xe_vm_bind?
Regards,
Kamil
> void xe_vm_bind_async_flags(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> uint64_t offset, uint64_t addr, uint64_t size,
> struct drm_xe_sync *sync, uint32_t num_syncs,
> diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
> index b27c0053f0..6dfed1b330 100644
> --- a/lib/xe/xe_ioctl.h
> +++ b/lib/xe/xe_ioctl.h
> @@ -35,6 +35,8 @@ void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t exec_queue,
> void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> uint64_t offset, uint64_t addr, uint64_t size,
> struct drm_xe_sync *sync, uint32_t num_syncs);
> +void xe_vm_bind_zero(int fd, uint32_t vm, uint32_t exec_queue,
> + struct drm_xe_sync *sync, uint32_t num_syncs);
> void xe_vm_bind_userptr_async(int fd, uint32_t vm, uint32_t exec_queue,
> uint64_t userptr, uint64_t addr, uint64_t size,
> struct drm_xe_sync *sync, uint32_t num_syncs);
> diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
> index a088bd5db8..ede75f72aa 100644
> --- a/tests/intel/xe_vm.c
> +++ b/tests/intel/xe_vm.c
> @@ -868,6 +868,63 @@ static void xe_vm_bind_array_err(int fd, uint32_t vm, uint32_t exec_queue,
> do_ioctl_err(fd, DRM_IOCTL_XE_VM_BIND, &bind, err);
> }
>
> +/**
> + * SUBTEST: zero-binds
> + * Description: Test zero binds in IOCTL
> + * Functionality: bind engines
> + * Run type: BAT
> + */
> +
> +static void test_zero_binds(int fd, struct drm_xe_engine_class_instance *eci)
> +{
> + uint32_t vm;
> + uint64_t addr = 0x1a0000;
> + struct drm_xe_sync sync[1] = {
> + { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
> + };
> + size_t bo_size;
> + uint32_t bo = 0;
> + struct xe_cork cork;
> +
> + vm = xe_vm_create(fd, 0, 0);
> + bo_size = ALIGN(xe_cs_prefetch_size(fd),
> + xe_get_default_alignment(fd));
> + bo = xe_bo_create(fd, vm, bo_size,
> + vram_if_possible(fd, eci->gt_id),
> + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
> +
> + xe_cork_init(fd, eci, &cork);
> + xe_cork_wait_started(&cork);
> +
> + /* Initial bind behind cork */
> + sync[0].handle = cork.syncobj;
> + sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
> + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 1);
> +
> + /* Bind /w num_binds == 0 */
> + sync[0].handle = syncobj_create(fd, 0);
> + sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
> + xe_vm_bind_zero(fd, vm, 0, sync, 1);
> +
> + /* Let jobs runs for a bit */
> + usleep(100000);
> +
> + /* both binds are waiting */
> + igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
> +
> + /* Release cork */
> + xe_cork_end(&cork);
> + xe_cork_wait_done(&cork);
> + xe_cork_fini(&cork);
> +
> + /* both binds are done */
> + igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> +
> + syncobj_destroy(fd, sync[0].handle);
> + gem_close(fd, bo);
> + xe_vm_destroy(fd, vm);
> +}
> +
> #define BIND_ARRAY_BIND_EXEC_QUEUE_FLAG (0x1 << 0)
> #define BIND_ARRAY_ENOBUFS_FLAG (0x1 << 1)
>
> @@ -2416,6 +2473,12 @@ igt_main
> xe_for_each_engine(fd, hwe)
> test_bind_execqueues_independent(fd, hwe, CONFLICT);
>
> + igt_subtest("zero-binds")
> + xe_for_each_engine(fd, hwe) {
> + test_zero_binds(fd, hwe);
> + break;
> + }
> +
> igt_subtest("bind-array-twice")
> xe_for_each_engine(fd, hwe)
> test_bind_array(fd, hwe, 2, 0x1a0000, 0, 0);
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds
2024-07-23 12:46 ` Kamil Konieczny
@ 2024-07-23 18:41 ` Matthew Brost
0 siblings, 0 replies; 8+ messages in thread
From: Matthew Brost @ 2024-07-23 18:41 UTC (permalink / raw)
To: Kamil Konieczny; +Cc: igt-dev
On Tue, Jul 23, 2024 at 02:46:11PM +0200, Kamil Konieczny wrote:
> Hi Matthew,
> On 2024-07-19 at 12:41:06 -0700, Matthew Brost wrote:
>
> a nit about a subject, you wrote:
>
> [PATCH v2 1/2] tests/xe/xe_vm: Add section to test zero number of VM binds
>
> imho this could be:
>
> [PATCH v2 1/2] tests/intel/xe_vm: Test zero number of VM binds
>
Will fix.
> > Verify the bind queue ordering works wrt out-sync signaling when zero
> > number of binds passed to VM bind IOCTL.
> >
> > v2:
> > - Fix build error
> >
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > lib/xe/xe_ioctl.c | 13 ++++++++++
> > lib/xe/xe_ioctl.h | 2 ++
> > tests/intel/xe_vm.c | 63 +++++++++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 78 insertions(+)
> >
> > diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> > index ae43ffd15e..27a4a4e70d 100644
> > --- a/lib/xe/xe_ioctl.c
> > +++ b/lib/xe/xe_ioctl.c
> > @@ -154,6 +154,19 @@ void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> > num_syncs, 0, 0);
> > }
> >
>
> You should document each new public lib function.
>
> > +void xe_vm_bind_zero(int fd, uint32_t vm, uint32_t exec_queue,
> > + struct drm_xe_sync *sync, uint32_t num_syncs)
> > +{
> > + struct drm_xe_vm_bind bind = {
> > + .vm_id = vm,
> > + .num_syncs = num_syncs,
> > + .syncs = (uintptr_t)sync,
> > + .exec_queue_id = exec_queue,
> > + };
> > +
> > + igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind), 0);
> > +}
> > +
>
> Why new lib function? Why not just using __xe_vm_bind?
>
Maybe I'll just move this to xe_vm.c as a static function, as I don't think any
other test would use this.
Matt
> Regards,
> Kamil
>
> > void xe_vm_bind_async_flags(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> > uint64_t offset, uint64_t addr, uint64_t size,
> > struct drm_xe_sync *sync, uint32_t num_syncs,
> > diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
> > index b27c0053f0..6dfed1b330 100644
> > --- a/lib/xe/xe_ioctl.h
> > +++ b/lib/xe/xe_ioctl.h
> > @@ -35,6 +35,8 @@ void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t exec_queue,
> > void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> > uint64_t offset, uint64_t addr, uint64_t size,
> > struct drm_xe_sync *sync, uint32_t num_syncs);
> > +void xe_vm_bind_zero(int fd, uint32_t vm, uint32_t exec_queue,
> > + struct drm_xe_sync *sync, uint32_t num_syncs);
> > void xe_vm_bind_userptr_async(int fd, uint32_t vm, uint32_t exec_queue,
> > uint64_t userptr, uint64_t addr, uint64_t size,
> > struct drm_xe_sync *sync, uint32_t num_syncs);
> > diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
> > index a088bd5db8..ede75f72aa 100644
> > --- a/tests/intel/xe_vm.c
> > +++ b/tests/intel/xe_vm.c
> > @@ -868,6 +868,63 @@ static void xe_vm_bind_array_err(int fd, uint32_t vm, uint32_t exec_queue,
> > do_ioctl_err(fd, DRM_IOCTL_XE_VM_BIND, &bind, err);
> > }
> >
> > +/**
> > + * SUBTEST: zero-binds
> > + * Description: Test zero binds in IOCTL
> > + * Functionality: bind engines
> > + * Run type: BAT
> > + */
> > +
> > +static void test_zero_binds(int fd, struct drm_xe_engine_class_instance *eci)
> > +{
> > + uint32_t vm;
> > + uint64_t addr = 0x1a0000;
> > + struct drm_xe_sync sync[1] = {
> > + { .flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
> > + };
> > + size_t bo_size;
> > + uint32_t bo = 0;
> > + struct xe_cork cork;
> > +
> > + vm = xe_vm_create(fd, 0, 0);
> > + bo_size = ALIGN(xe_cs_prefetch_size(fd),
> > + xe_get_default_alignment(fd));
> > + bo = xe_bo_create(fd, vm, bo_size,
> > + vram_if_possible(fd, eci->gt_id),
> > + DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
> > +
> > + xe_cork_init(fd, eci, &cork);
> > + xe_cork_wait_started(&cork);
> > +
> > + /* Initial bind behind cork */
> > + sync[0].handle = cork.syncobj;
> > + sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
> > + xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 1);
> > +
> > + /* Bind /w num_binds == 0 */
> > + sync[0].handle = syncobj_create(fd, 0);
> > + sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
> > + xe_vm_bind_zero(fd, vm, 0, sync, 1);
> > +
> > + /* Let jobs runs for a bit */
> > + usleep(100000);
> > +
> > + /* both binds are waiting */
> > + igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
> > +
> > + /* Release cork */
> > + xe_cork_end(&cork);
> > + xe_cork_wait_done(&cork);
> > + xe_cork_fini(&cork);
> > +
> > + /* both binds are done */
> > + igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > +
> > + syncobj_destroy(fd, sync[0].handle);
> > + gem_close(fd, bo);
> > + xe_vm_destroy(fd, vm);
> > +}
> > +
> > #define BIND_ARRAY_BIND_EXEC_QUEUE_FLAG (0x1 << 0)
> > #define BIND_ARRAY_ENOBUFS_FLAG (0x1 << 1)
> >
> > @@ -2416,6 +2473,12 @@ igt_main
> > xe_for_each_engine(fd, hwe)
> > test_bind_execqueues_independent(fd, hwe, CONFLICT);
> >
> > + igt_subtest("zero-binds")
> > + xe_for_each_engine(fd, hwe) {
> > + test_zero_binds(fd, hwe);
> > + break;
> > + }
> > +
> > igt_subtest("bind-array-twice")
> > xe_for_each_engine(fd, hwe)
> > test_bind_array(fd, hwe, 2, 0x1a0000, 0, 0);
> > --
> > 2.34.1
> >
^ permalink raw reply [flat|nested] 8+ messages in thread