From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: igt-dev@lists.freedesktop.org, Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Subject: [igt-dev] [PATCH i-g-t] i915/gem_exec_balancer: Check for scheduling bonded-pairs on the same engine
Date: Fri, 20 Sep 2019 23:26:10 +0100 [thread overview]
Message-ID: <20190920222610.29282-1-chris@chris-wilson.co.uk> (raw)
The expectation for bonded submissions is that they are run concurrently,
in parallel on multiple engines. However, a lack of constraints in the
scheduler's selection, combined with timeslicing, could mean that the
bonded requests are run in the opposite order on the same engine. With
just the right pair of requests, this can cause a GPU hang (or at least
trigger the hangchecker); the best (worst) case would be execution
running several times slower than ideal.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
tests/i915/gem_exec_balancer.c | 151 +++++++++++++++++++++++++++++++++
1 file changed, 151 insertions(+)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 407dc0eca..e4fe75747 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -30,6 +30,15 @@
IGT_TEST_DESCRIPTION("Exercise in-kernel load-balancing");
+#define MI_SEMAPHORE_WAIT (0x1c << 23)
+#define MI_SEMAPHORE_POLL (1 << 15)
+#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
+#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12)
+#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
+#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
+#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
+
#define INSTANCE_COUNT (1 << I915_PMU_SAMPLE_INSTANCE_BITS)
static size_t sizeof_load_balance(int count)
@@ -694,6 +703,145 @@ static void bonded(int i915, unsigned int flags)
gem_context_destroy(i915, master);
}
+/* Return the byte offset of @addr within its 4KiB page. */
+static unsigned int offset_in_page(void *addr)
+{
+ return (uintptr_t)addr & 4095;
+}
+
+/*
+ * Build a small batch buffer that:
+ *   1. busy-polls (MI_SEMAPHORE_WAIT, NEQ 0) on the spinner's poll dword
+ *      until the spinner reports it has started executing, then
+ *   2. uses MI_STORE_DWORD_IMM to write MI_BATCH_BUFFER_END over the
+ *      spinner's terminating condition, cancelling it.
+ *
+ * Returns the handle of the newly created 4KiB batch object; the caller
+ * owns the handle and must gem_close() it.
+ *
+ * NOTE(review): the batch bakes in spin->obj[].offset as GPU addresses --
+ * this assumes those offsets are stable (pinned/already bound); confirm
+ * for the relocation case.
+ */
+static uint32_t create_semaphore_to_spinner(int i915, igt_spin_t *spin)
+{
+ uint32_t *cs, *map;
+ uint32_t handle;
+
+ handle = gem_create(i915, 4096);
+ cs = map = gem_mmap__cpu(i915, handle, 0, 4096, PROT_WRITE);
+
+ /* Wait until the spinner is running */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD |
+ (4 - 2);
+ *cs++ = 0; /* semaphore data: wait while *addr == 0 */
+ *cs++ = spin->obj[0].offset + 4 * SPIN_POLL_START_IDX; /* addr lo */
+ *cs++ = 0; /* addr hi */
+
+ /* Then cancel the spinner */
+ *cs++ = MI_STORE_DWORD_IMM;
+ *cs++ = spin->obj[IGT_SPIN_BATCH].offset +
+ offset_in_page(spin->condition); /* addr of spinner's condition */
+ *cs++ = 0; /* addr hi */
+ *cs++ = MI_BATCH_BUFFER_END; /* payload: terminates the spinner */
+
+ *cs++ = MI_BATCH_BUFFER_END; /* end of *this* batch */
+ munmap(map, 4096);
+
+ return handle;
+}
+
+/*
+ * bonded_slice: check that a bonded (submit-fence) pair is never
+ * timesliced onto one engine in the wrong order.
+ *
+ * For each engine class with >= 2 instances, build a load-balanced
+ * context and repeatedly submit:
+ *   A: a batch that semaphore-waits for B to start, then cancels B;
+ *   B: an unpreemptable polling spinner, submitted with
+ *      I915_EXEC_FENCE_SUBMIT so it starts alongside A.
+ * If the scheduler ever places A and B on the same engine (A first),
+ * A waits forever for B, which cannot run behind it -> hang/hangcheck.
+ * Forked children keep all engines busy with short-lived spinners to
+ * provoke timeslicing while the parent loops for 5 seconds.
+ */
+static void bonded_slice(int i915)
+{
+ uint32_t ctx;
+ int *stop; /* shared stop flag for the forked load generators */
+
+ igt_require(gem_scheduler_has_semaphores(i915));
+
+ /* MAP_SHARED so parent and igt_fork children see the same flag */
+ stop = mmap(0, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+ igt_assert(stop != MAP_FAILED);
+
+ ctx = gem_context_create(i915);
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *siblings;
+ struct drm_i915_gem_exec_object2 obj[3] = {};
+ struct drm_i915_gem_execbuffer2 eb = {};
+ unsigned int count;
+ igt_spin_t *spin;
+
+ siblings = list_engines(i915, 1u << class, &count);
+ if (!siblings)
+ continue;
+
+ /* Need at least two instances for a bonded pair */
+ if (count < 2) {
+ free(siblings);
+ continue;
+ }
+
+ /*
+ * A: semaphore wait on spinner; cancel spinner
+ * B: unpreemptable spinner
+ *
+ * A waits for running ack from B, if scheduled on the same
+ * engine -> hang.
+ *
+ * C+: background load across engines
+ */
+
+ set_load_balancer(i915, ctx, siblings, count, NULL);
+
+ spin = __igt_spin_new(i915,
+ .ctx = ctx,
+ .flags = (IGT_SPIN_NO_PREEMPTION |
+ IGT_SPIN_POLL_RUN));
+ igt_spin_end(spin); /* we just want its address for later */
+ gem_sync(i915, spin->handle);
+ igt_spin_reset(spin);
+
+ /* obj[0..1]: the spinner's objects; obj[2]: the semaphore batch */
+ obj[0] = spin->obj[0];
+ obj[1] = spin->obj[1];
+ obj[2].handle = create_semaphore_to_spinner(i915, spin);
+
+ eb.buffers_ptr = to_user_pointer(obj);
+ eb.rsvd1 = ctx;
+
+ *stop = 0;
+ /* C+: background load, one child per sibling (plus one) */
+ igt_fork(child, count + 1) {
+ /* detach from parent's spinner list before reusing 'spin' */
+ igt_list_del(&spin->link);
+
+ ctx = gem_context_clone(i915, ctx,
+ I915_CONTEXT_CLONE_ENGINES, 0);
+
+ while (!READ_ONCE(*stop)) {
+ spin = igt_spin_new(i915,
+ .ctx = ctx,
+ .engine = (1 + rand() % count),
+ .flags = IGT_SPIN_POLL_RUN);
+ igt_spin_busywait_until_started(spin);
+ usleep(50000);
+ igt_spin_free(i915, spin);
+ }
+
+ gem_context_destroy(i915, ctx);
+ }
+
+ igt_until_timeout(5) {
+ igt_spin_reset(spin);
+
+ /* A: Submit the semaphore wait */
+ eb.buffer_count = 3;
+ eb.flags = (1 + rand() % count) | I915_EXEC_FENCE_OUT;
+ gem_execbuf_wr(i915, &eb);
+
+ /* B: Submit the spinner (in parallel) */
+ eb.buffer_count = 2;
+ eb.flags = 0 | I915_EXEC_FENCE_SUBMIT;
+ eb.rsvd2 >>= 32; /* out-fence from A becomes submit-fence for B */
+ gem_execbuf(i915, &eb);
+ close(eb.rsvd2);
+
+ /* wait for A; hangs here if A and B share an engine */
+ gem_sync(i915, obj[2].handle);
+ }
+
+ *stop = 1;
+ igt_waitchildren();
+
+ gem_close(i915, obj[2].handle);
+ igt_spin_free(i915, spin);
+ }
+
+ gem_context_destroy(i915, ctx);
+ munmap(stop, 4096);
+}
+
static void indices(int i915)
{
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
@@ -1320,6 +1468,9 @@ igt_main
igt_subtest("bonded-cork")
bonded(i915, CORK);
+ igt_subtest("bonded-slice")
+ bonded_slice(i915);
+
igt_fixture {
igt_stop_hang_detector();
}
--
2.23.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
next reply other threads:[~2019-09-20 22:26 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-09-20 22:26 Chris Wilson [this message]
2019-09-20 22:40 ` [igt-dev] ✗ GitLab.Pipeline: warning for i915/gem_exec_balancer: Check for scheduling bonded-pairs on the same engine Patchwork
2019-09-20 23:11 ` [igt-dev] ✓ Fi.CI.BAT: success " Patchwork
2019-09-22 12:24 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
2019-09-23 14:29 ` [igt-dev] [PATCH i-g-t] " Tvrtko Ursulin
2019-09-23 15:43 ` Chris Wilson
2019-09-23 16:21 ` Tvrtko Ursulin
2019-09-23 18:11 ` Chris Wilson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190920222610.29282-1-chris@chris-wilson.co.uk \
--to=chris@chris-wilson.co.uk \
--cc=igt-dev@lists.freedesktop.org \
--cc=intel-gfx@lists.freedesktop.org \
--cc=tvrtko.ursulin@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox