From: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
To: igt-dev@lists.freedesktop.org,
Lucas De Marchi <lucas.demarchi@intel.com>
Subject: [PATCH i-g-t 3/8] tests/intel/xe_drm_fdinfo: Add helpers for spinning batches
Date: Sat, 22 Jun 2024 07:00:57 +0800 [thread overview]
Message-ID: <20240621230102.238397-4-umesh.nerlige.ramappa@intel.com> (raw)
In-Reply-To: <20240621230102.238397-1-umesh.nerlige.ramappa@intel.com>
Add helpers for submitting spinning batches and waiting for them to start and complete, so that subsequent per-client engine utilization tests can measure busyness while work is known to be running.
Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
---
tests/intel/xe_drm_fdinfo.c | 135 ++++++++++++++++++++++++++++++++++++
1 file changed, 135 insertions(+)
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index 41409b2d2..27459b7f1 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -51,6 +51,17 @@ static const char *engine_map[] = {
"vecs",
"ccs",
};
+
+static const uint64_t batch_addr[] = {
+ 0x170000,
+ 0x180000,
+ 0x190000,
+ 0x1a0000,
+ 0x1b0000,
+ 0x1c0000,
+ 0x1d0000,
+ 0x1e0000,
+};
static void read_engine_cycles(int xe, struct pceu_cycles *pceu)
{
struct drm_client_fdinfo info = { };
@@ -316,6 +327,130 @@ static void basic(int xe, unsigned int num_classes)
}
}
+#define MAX_PARALLEL 8
+struct xe_spin_ctx {
+ uint32_t vm;
+ uint64_t addr[MAX_PARALLEL];
+ struct drm_xe_sync sync[2];
+ struct drm_xe_exec exec;
+ uint32_t exec_queue;
+ size_t bo_size;
+ uint32_t bo;
+ struct xe_spin *spin;
+ struct xe_spin_opts spin_opts;
+ bool ended;
+ uint16_t class;
+ uint16_t width;
+ uint16_t num_placements;
+};
+
+static struct xe_spin_ctx *
+xe_spin_ctx_init(int fd, struct drm_xe_engine_class_instance *hwe, uint32_t vm,
+ uint16_t width, uint16_t num_placements)
+{
+ struct xe_spin_ctx *ctx = calloc(1, sizeof(*ctx));
+
+ igt_assert(width && num_placements &&
+ (width == 1 || num_placements == 1));
+
+ igt_assert(width <= MAX_PARALLEL);
+
+ ctx->class = hwe->engine_class;
+ ctx->width = width;
+ ctx->num_placements = num_placements;
+ ctx->vm = vm;
+ for (int i = 0; i < ctx->width; i++)
+ ctx->addr[i] = batch_addr[hwe->engine_class];
+
+ ctx->exec.num_batch_buffer = width;
+ ctx->exec.num_syncs = 2;
+ ctx->exec.syncs = to_user_pointer(ctx->sync);
+
+ ctx->sync[0].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
+ ctx->sync[0].flags = DRM_XE_SYNC_FLAG_SIGNAL;
+ ctx->sync[0].handle = syncobj_create(fd, 0);
+
+ ctx->sync[1].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
+ ctx->sync[1].flags = DRM_XE_SYNC_FLAG_SIGNAL;
+ ctx->sync[1].handle = syncobj_create(fd, 0);
+
+ ctx->bo_size = sizeof(struct xe_spin);
+ ctx->bo_size = xe_bb_size(fd, ctx->bo_size);
+ ctx->bo = xe_bo_create(fd, ctx->vm, ctx->bo_size,
+ vram_if_possible(fd, hwe->gt_id),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ ctx->spin = xe_bo_map(fd, ctx->bo, ctx->bo_size);
+
+ igt_assert_eq(__xe_exec_queue_create(fd, ctx->vm, width, num_placements,
+ hwe, 0, &ctx->exec_queue), 0);
+
+ xe_vm_bind_async(fd, ctx->vm, 0, ctx->bo, 0, ctx->addr[0], ctx->bo_size,
+ ctx->sync, 1);
+
+ return ctx;
+}
+
+static void
+xe_spin_sync_start(int fd, struct xe_spin_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ ctx->spin_opts.addr = ctx->addr[0];
+ ctx->spin_opts.preempt = true;
+ xe_spin_init(ctx->spin, &ctx->spin_opts);
+
+ /* re-use sync[0] for exec */
+ ctx->sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
+
+ ctx->exec.exec_queue_id = ctx->exec_queue;
+ if (ctx->width > 1)
+ ctx->exec.address = to_user_pointer(ctx->addr);
+ else
+ ctx->exec.address = ctx->addr[0];
+ xe_exec(fd, &ctx->exec);
+
+ xe_spin_wait_started(ctx->spin);
+ igt_assert(!syncobj_wait(fd, &ctx->sync[1].handle, 1, 1, 0, NULL));
+
+ igt_debug("%s: spinner started\n", engine_map[ctx->class]);
+}
+
+static void
+xe_spin_sync_end(int fd, struct xe_spin_ctx *ctx)
+{
+ if (!ctx || ctx->ended)
+ return;
+
+ xe_spin_end(ctx->spin);
+
+ igt_assert(syncobj_wait(fd, &ctx->sync[1].handle, 1, INT64_MAX, 0, NULL));
+ igt_assert(syncobj_wait(fd, &ctx->sync[0].handle, 1, INT64_MAX, 0, NULL));
+
+ ctx->sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
+ xe_vm_unbind_async(fd, ctx->vm, 0, 0, ctx->addr[0], ctx->bo_size, ctx->sync, 1);
+ igt_assert(syncobj_wait(fd, &ctx->sync[0].handle, 1, INT64_MAX, 0, NULL));
+
+ ctx->ended = true;
+ igt_debug("%s: spinner ended\n", engine_map[ctx->class]);
+}
+
+static void
+xe_spin_ctx_destroy(int fd, struct xe_spin_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ syncobj_destroy(fd, ctx->sync[0].handle);
+ syncobj_destroy(fd, ctx->sync[1].handle);
+ xe_exec_queue_destroy(fd, ctx->exec_queue);
+
+ munmap(ctx->spin, ctx->bo_size);
+ gem_close(fd, ctx->bo);
+
+ free(ctx);
+}
+
igt_main
{
struct drm_xe_engine_class_instance *hwe;
--
2.34.1
next prev parent reply other threads:[~2024-06-21 23:02 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-06-21 23:00 [PATCH i-g-t 0/8] Add per-client engine utilization tests Umesh Nerlige Ramappa
2024-06-21 23:00 ` [PATCH i-g-t 1/8] tests/intel/xe_drm_fdinfo: Update basic test to include client utilization Umesh Nerlige Ramappa
2024-07-01 16:27 ` Riana Tauro
2024-07-01 18:30 ` Umesh Nerlige Ramappa
2024-07-01 16:52 ` Lucas De Marchi
2024-06-21 23:00 ` [PATCH i-g-t 2/8] tests/intel/xe_drm_fdinfo: Add helper to read utilization for all classes Umesh Nerlige Ramappa
2024-06-21 23:00 ` Umesh Nerlige Ramappa [this message]
2024-07-01 16:57 ` [PATCH i-g-t 3/8] tests/intel/xe_drm_fdinfo: Add helpers for spinning batches Lucas De Marchi
2024-07-01 17:27 ` Umesh Nerlige Ramappa
2024-07-01 18:08 ` Lucas De Marchi
2024-06-21 23:00 ` [PATCH i-g-t 4/8] tests/intel/xe_drm_fdinfo: Add single engine tests Umesh Nerlige Ramappa
2024-07-01 17:35 ` Lucas De Marchi
2024-07-01 18:26 ` Umesh Nerlige Ramappa
2024-07-02 18:18 ` Umesh Nerlige Ramappa
2024-07-02 20:57 ` Umesh Nerlige Ramappa
2024-07-02 20:57 ` Lucas De Marchi
2024-06-21 23:00 ` [PATCH i-g-t 5/8] tests/intel/xe_drm_fdinfo: Add tests to verify all class utilization Umesh Nerlige Ramappa
2024-06-21 23:01 ` [PATCH i-g-t 6/8] tests/intel/xe_drm_fdinfo: Add an iterator for virtual engines Umesh Nerlige Ramappa
2024-06-21 23:01 ` [PATCH i-g-t 7/8] tests/intel/xe_drm_fdinfo: Add tests " Umesh Nerlige Ramappa
2024-06-21 23:01 ` [PATCH i-g-t 8/8] tests/intel/xe_drm_fdinfo: Add tests for parallel engines Umesh Nerlige Ramappa
2024-06-21 23:35 ` ✓ CI.xeBAT: success for Add per-client engine utilization tests Patchwork
2024-06-21 23:44 ` ✓ Fi.CI.BAT: " Patchwork
2024-06-22 0:40 ` ✓ CI.xeFULL: " Patchwork
2024-06-22 21:32 ` ✗ Fi.CI.IGT: failure " Patchwork
2024-06-28 21:27 ` ✓ CI.xeBAT: success for Add per-client engine utilization tests (rev2) Patchwork
2024-06-28 21:37 ` ✓ Fi.CI.BAT: " Patchwork
2024-06-28 22:21 ` ✓ CI.xeFULL: " Patchwork
2024-06-29 22:53 ` ✗ Fi.CI.IGT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240621230102.238397-4-umesh.nerlige.ramappa@intel.com \
--to=umesh.nerlige.ramappa@intel.com \
--cc=igt-dev@lists.freedesktop.org \
--cc=lucas.demarchi@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox