* [igt-dev] [PATCH] [PATCH i-g-t][V2] tests/i915/gem_sync.c: Added __for_each_physical_engine to utilize all available engines
@ 2020-04-06 9:12 Arjun Melkaveri
2020-04-06 9:19 ` Melkaveri, Arjun
0 siblings, 1 reply; 8+ messages in thread
From: Arjun Melkaveri @ 2020-04-06 9:12 UTC (permalink / raw)
To: arjun.melkaveri, igt-dev
Replaced the legacy for_each_engine* defines with the ones
implemented in the gem_engine_topology library.
Used gem_context_clone_with_engines
to make sure that the engine index is created
based on a default context with an engine map configured.
Added gem_reopen_driver and gem_context_copy_engines
to transfer the engine map from the parent fd's default
context.
V2:
Added legacy engine coverage for sync_ring and sync_all.
Cc: Dec Katarzyna <katarzyna.dec@intel.com>
Cc: Ursulin Tvrtko <tvrtko.ursulin@intel.com>
Signed-off-by: sai gowtham <sai.gowtham.ch@intel.com>
Signed-off-by: Arjun Melkaveri <arjun.melkaveri@intel.com>
---
tests/i915/gem_sync.c | 566 +++++++++++++++++++++++++-----------------
1 file changed, 343 insertions(+), 223 deletions(-)
diff --git a/tests/i915/gem_sync.c b/tests/i915/gem_sync.c
index 2ef55ecc..8efa0668 100644
--- a/tests/i915/gem_sync.c
+++ b/tests/i915/gem_sync.c
@@ -79,52 +79,56 @@ out:
}
static void
-sync_ring(int fd, unsigned ring, int num_children, int timeout)
+sync_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
double start, elapsed;
unsigned long cycles;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
start = gettime();
cycles = 0;
do {
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
@@ -132,14 +136,14 @@ sync_ring(int fd, unsigned ring, int num_children, int timeout)
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object.handle);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
static void
-idle_ring(int fd, unsigned ring, int timeout)
+idle_ring(int fd, const struct intel_execution_engine2 *e, int timeout)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
@@ -147,7 +151,6 @@ idle_ring(int fd, unsigned ring, int timeout)
double start, elapsed;
unsigned long cycles;
- gem_require_ring(fd, ring);
memset(&object, 0, sizeof(object));
object.handle = gem_create(fd, 4096);
@@ -156,7 +159,7 @@ idle_ring(int fd, unsigned ring, int timeout)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
gem_execbuf(fd, &execbuf);
gem_sync(fd, object.handle);
@@ -178,32 +181,34 @@ idle_ring(int fd, unsigned ring, int timeout)
}
static void
-wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
+wakeup_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout, int wlen)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -211,9 +216,11 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
unsigned long cycles;
igt_spin_t *spin;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
@@ -226,10 +233,10 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
IGT_SPIN_FAST));
igt_assert(igt_spin_has_poll(spin));
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_end(spin);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
for (int warmup = 0; warmup <= 1; warmup++) {
end = gettime() + timeout/10.;
@@ -238,12 +245,12 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
do {
igt_spin_reset(spin);
- gem_execbuf(fd, &spin->execbuf);
+ gem_execbuf(i915, &spin->execbuf);
igt_spin_busywait_until_started(spin);
this = gettime();
igt_spin_end(spin);
- gem_sync(fd, spin->handle);
+ gem_sync(i915, spin->handle);
now = gettime();
elapsed += now - this;
@@ -262,15 +269,15 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
do {
igt_spin_reset(spin);
- gem_execbuf(fd, &spin->execbuf);
+ gem_execbuf(i915, &spin->execbuf);
igt_spin_busywait_until_started(spin);
for (int n = 0; n < wlen; n++)
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
this = gettime();
igt_spin_end(spin);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
now = gettime();
elapsed += now - this;
@@ -283,49 +290,53 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
names[child % num_engines] ? " c" : "C",
cycles, 1e6*baseline, elapsed*1e6/cycles);
- igt_spin_free(fd, spin);
- gem_close(fd, object.handle);
+ igt_spin_free(i915, spin);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(2*timeout, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
-static void active_ring(int fd, unsigned ring, int timeout)
+static void active_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
double start, end, elapsed;
unsigned long cycles;
igt_spin_t *spin[2];
- spin[0] = __igt_spin_new(fd,
- .engine = ring,
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
+ spin[0] = __igt_spin_new(i915,
+ .engine = e->flags,
.flags = IGT_SPIN_FAST);
- spin[1] = __igt_spin_new(fd,
- .engine = ring,
+ spin[1] = __igt_spin_new(i915,
+ .engine = e->flags,
.flags = IGT_SPIN_FAST);
start = gettime();
@@ -336,16 +347,16 @@ static void active_ring(int fd, unsigned ring, int timeout)
igt_spin_t *s = spin[loop & 1];
igt_spin_end(s);
- gem_sync(fd, s->handle);
+ gem_sync(i915, s->handle);
igt_spin_reset(s);
- gem_execbuf(fd, &s->execbuf);
+ gem_execbuf(i915, &s->execbuf);
}
cycles += 1024;
} while ((elapsed = gettime()) < end);
- igt_spin_free(fd, spin[1]);
- igt_spin_free(fd, spin[0]);
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
names[child % num_engines] ?: "",
@@ -357,32 +368,34 @@ static void active_ring(int fd, unsigned ring, int timeout)
}
static void
-active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
+active_wakeup_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout, int wlen)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -390,36 +403,38 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
unsigned long cycles;
igt_spin_t *spin[2];
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
- spin[0] = __igt_spin_new(fd,
+ spin[0] = __igt_spin_new(i915,
.engine = execbuf.flags,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
igt_assert(igt_spin_has_poll(spin[0]));
- spin[1] = __igt_spin_new(fd,
+ spin[1] = __igt_spin_new(i915,
.engine = execbuf.flags,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_end(spin[1]);
igt_spin_end(spin[0]);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
for (int warmup = 0; warmup <= 1; warmup++) {
igt_spin_reset(spin[0]);
- gem_execbuf(fd, &spin[0]->execbuf);
+ gem_execbuf(i915, &spin[0]->execbuf);
end = gettime() + timeout/10.;
elapsed = 0;
@@ -429,11 +444,11 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_reset(spin[1]);
- gem_execbuf(fd, &spin[1]->execbuf);
+ gem_execbuf(i915, &spin[1]->execbuf);
this = gettime();
igt_spin_end(spin[0]);
- gem_sync(fd, spin[0]->handle);
+ gem_sync(i915, spin[0]->handle);
now = gettime();
elapsed += now - this;
@@ -450,7 +465,7 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_reset(spin[0]);
- gem_execbuf(fd, &spin[0]->execbuf);
+ gem_execbuf(i915, &spin[0]->execbuf);
end = gettime() + timeout;
elapsed = 0;
@@ -459,15 +474,15 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_busywait_until_started(spin[0]);
for (int n = 0; n < wlen; n++)
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_reset(spin[1]);
- gem_execbuf(fd, &spin[1]->execbuf);
+ gem_execbuf(i915, &spin[1]->execbuf);
this = gettime();
igt_spin_end(spin[0]);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
now = gettime();
elapsed += now - this;
@@ -482,43 +497,45 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
names[child % num_engines] ? " c" : "C",
cycles, 1e6*baseline, elapsed*1e6/cycles);
- igt_spin_free(fd, spin[1]);
- igt_spin_free(fd, spin[0]);
- gem_close(fd, object.handle);
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(2*timeout, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
static void
-store_ring(int fd, unsigned ring, int num_children, int timeout)
+store_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024];
@@ -527,6 +544,8 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
unsigned long cycles;
uint32_t *batch, *b;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
execbuf.flags = engines[child % num_engines];
@@ -536,20 +555,20 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
execbuf.flags |= I915_EXEC_SECURE;
memset(object, 0, sizeof(object));
- object[0].handle = gem_create(fd, 4096);
- gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
+ object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, object[0].handle, 0, &bbe, sizeof(bbe));
execbuf.buffer_count = 1;
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
object[0].flags |= EXEC_OBJECT_WRITE;
- object[1].handle = gem_create(fd, 20*1024);
+ object[1].handle = gem_create(i915, 20*1024);
object[1].relocs_ptr = to_user_pointer(reloc);
object[1].relocation_count = 1024;
- batch = gem_mmap__cpu(fd, object[1].handle, 0, 20*1024,
+ batch = gem_mmap__cpu(i915, object[1].handle, 0, 20*1024,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, object[1].handle,
+ gem_set_domain(i915, object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(reloc, 0, sizeof(reloc));
@@ -582,15 +601,15 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);
munmap(batch, 20*1024);
execbuf.buffer_count = 2;
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
start = gettime();
cycles = 0;
do {
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
@@ -598,16 +617,18 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object[1].handle);
- gem_close(fd, object[0].handle);
+ gem_close(i915, object[1].handle);
+ gem_close(i915, object[0].handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
static void
-switch_ring(int fd, unsigned ring, int num_children, int timeout)
+switch_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
const char *names[16];
@@ -615,27 +636,28 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
gem_require_contexts(fd);
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
+ uint32_t ctx;
struct context {
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024];
@@ -643,7 +665,13 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
} contexts[2];
double elapsed, baseline;
unsigned long cycles;
-
+ /*
+ * Ensure the gpu is idle by launching
+ * nop execbuf and stalling for it.
+ */
+ i915 = gem_reopen_driver(fd);
+ ctx = gem_context_create(fd);
+ gem_context_copy_engines(fd, 0, i915, ctx);
for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
const uint32_t bbe = MI_BATCH_BUFFER_END;
const uint32_t sz = 32 << 10;
@@ -657,23 +685,23 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
c->execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
if (gen < 6)
c->execbuf.flags |= I915_EXEC_SECURE;
- c->execbuf.rsvd1 = gem_context_create(fd);
+ c->execbuf.rsvd1 = ctx;
memset(c->object, 0, sizeof(c->object));
- c->object[0].handle = gem_create(fd, 4096);
- gem_write(fd, c->object[0].handle, 0, &bbe, sizeof(bbe));
+ c->object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, c->object[0].handle, 0, &bbe, sizeof(bbe));
c->execbuf.buffer_count = 1;
- gem_execbuf(fd, &c->execbuf);
+ gem_execbuf(i915, &c->execbuf);
c->object[0].flags |= EXEC_OBJECT_WRITE;
- c->object[1].handle = gem_create(fd, sz);
+ c->object[1].handle = gem_create(i915, sz);
c->object[1].relocs_ptr = to_user_pointer(c->reloc);
c->object[1].relocation_count = 1024 * i;
- batch = gem_mmap__cpu(fd, c->object[1].handle, 0, sz,
+ batch = gem_mmap__cpu(i915, c->object[1].handle, 0, sz,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, c->object[1].handle,
+ gem_set_domain(i915, c->object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(c->reloc, 0, sizeof(c->reloc));
@@ -707,8 +735,8 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < sz);
munmap(batch, sz);
c->execbuf.buffer_count = 2;
- gem_execbuf(fd, &c->execbuf);
- gem_sync(fd, c->object[1].handle);
+ gem_execbuf(i915, &c->execbuf);
+ gem_sync(i915, c->object[1].handle);
}
cycles = 0;
@@ -717,12 +745,12 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
do {
double this;
- gem_execbuf(fd, &contexts[1].execbuf);
- gem_execbuf(fd, &contexts[0].execbuf);
+ gem_execbuf(i915, &contexts[1].execbuf);
+ gem_execbuf(i915, &contexts[0].execbuf);
this = gettime();
- gem_sync(fd, contexts[1].object[1].handle);
- gem_sync(fd, contexts[0].object[1].handle);
+ gem_sync(i915, contexts[1].object[1].handle);
+ gem_sync(i915, contexts[0].object[1].handle);
baseline += gettime() - this;
} while (++cycles & 1023);
}
@@ -734,14 +762,14 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
do {
double this;
- gem_execbuf(fd, &contexts[1].execbuf);
- gem_execbuf(fd, &contexts[0].execbuf);
+ gem_execbuf(i915, &contexts[1].execbuf);
+ gem_execbuf(i915, &contexts[0].execbuf);
this = gettime();
- gem_sync(fd, contexts[0].object[1].handle);
+ gem_sync(i915, contexts[0].object[1].handle);
elapsed += gettime() - this;
- gem_sync(fd, contexts[1].object[1].handle);
+ gem_sync(i915, contexts[1].object[1].handle);
} while (++cycles & 1023);
}
elapsed /= cycles;
@@ -752,9 +780,9 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
cycles, elapsed*1e6, baseline*1e6);
for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
- gem_close(fd, contexts[i].object[1].handle);
- gem_close(fd, contexts[i].object[0].handle);
- gem_context_destroy(fd, contexts[i].execbuf.rsvd1);
+ gem_close(i915, contexts[i].object[1].handle);
+ gem_close(i915, contexts[i].object[0].handle);
+ gem_context_destroy(i915, contexts[i].execbuf.rsvd1);
}
}
igt_waitchildren_timeout(timeout+10, NULL);
@@ -803,7 +831,8 @@ static void *waiter(void *arg)
}
static void
-__store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
+__store_many(int fd, const struct intel_execution_engine2 *e,
+ int timeout, unsigned long *cycles)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -817,7 +846,7 @@ __store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
if (gen < 6)
@@ -931,8 +960,9 @@ __store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
}
static void
-store_many(int fd, unsigned ring, int timeout)
+store_many(int fd, const struct intel_execution_engine2 *e, int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned long *shared;
const char *names[16];
int n = 0;
@@ -942,24 +972,23 @@ store_many(int fd, unsigned ring, int timeout)
intel_detect_and_clear_missed_interrupts(fd);
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
igt_fork(child, 1)
__store_many(fd,
- eb_ring(e),
+ e2,
timeout,
&shared[n]);
- names[n++] = e->name;
+ names[n++] = e2->name;
}
igt_waitchildren();
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
- __store_many(fd, ring, timeout, &shared[n]);
+ igt_require(gem_class_can_store_dword(fd, e->class));
+ __store_many(fd, e, timeout, &shared[n]);
names[n++] = NULL;
}
@@ -1025,15 +1054,16 @@ sync_all(int fd, int num_children, int timeout)
static void
store_all(int fd, int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
int num_engines = 0;
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ __for_each_physical_engine(fd, e) {
+ if (!gem_class_can_store_dword(fd, e->class))
continue;
- engines[num_engines++] = eb_ring(e);
+ engines[num_engines++] = e->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
@@ -1041,6 +1071,7 @@ store_all(int fd, int num_children, int timeout)
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024];
@@ -1049,6 +1080,8 @@ store_all(int fd, int num_children, int timeout)
unsigned long cycles;
uint32_t *batch, *b;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
@@ -1057,20 +1090,20 @@ store_all(int fd, int num_children, int timeout)
execbuf.flags |= I915_EXEC_SECURE;
memset(object, 0, sizeof(object));
- object[0].handle = gem_create(fd, 4096);
- gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
+ object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, object[0].handle, 0, &bbe, sizeof(bbe));
execbuf.buffer_count = 1;
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
object[0].flags |= EXEC_OBJECT_WRITE;
- object[1].handle = gem_create(fd, 1024*16 + 4096);
+ object[1].handle = gem_create(i915, 1024*16 + 4096);
object[1].relocs_ptr = to_user_pointer(reloc);
object[1].relocation_count = 1024;
- batch = gem_mmap__cpu(fd, object[1].handle, 0, 16*1024 + 4096,
+ batch = gem_mmap__cpu(i915, object[1].handle, 0, 16*1024 + 4096,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, object[1].handle,
+ gem_set_domain(i915, object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(reloc, 0, sizeof(reloc));
@@ -1103,8 +1136,8 @@ store_all(int fd, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);
munmap(batch, 16*1024+4096);
execbuf.buffer_count = 2;
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
start = gettime();
cycles = 0;
@@ -1114,101 +1147,104 @@ store_all(int fd, int num_children, int timeout)
for (int n = 0; n < num_engines; n++) {
execbuf.flags &= ~ENGINE_MASK;
execbuf.flags |= engines[n];
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
}
- gem_sync(fd, object[1].handle);
+ gem_sync(i915, object[1].handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("Completed %ld cycles: %.3f us\n",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object[1].handle);
- gem_close(fd, object[0].handle);
+ gem_close(i915, object[1].handle);
+ gem_close(i915, object[0].handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
static void
-preempt(int fd, unsigned ring, int num_children, int timeout)
+preempt(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- uint32_t ctx[2];
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
- ctx[0] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[0], MIN_PRIO);
-
- ctx[1] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[1], MAX_PRIO);
-
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
+ uint32_t ctx[2];
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
double start, elapsed;
unsigned long cycles;
+ i915 = gem_reopen_driver(fd);
+ ctx[1] = gem_context_clone_with_engines(fd, 0);
+ gem_context_set_priority(fd, ctx[1], MAX_PRIO);
+ gem_context_copy_engines(fd, 0, i915, ctx[1]);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
execbuf.rsvd1 = ctx[1];
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
-
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
+ ctx[0] = gem_context_clone_with_engines(fd, 0);
+ gem_context_set_priority(fd, ctx[0], MIN_PRIO);
+ gem_context_copy_engines(fd, 0, i915, ctx[0]);
start = gettime();
cycles = 0;
do {
igt_spin_t *spin =
- __igt_spin_new(fd,
+ __igt_spin_new(i915,
.ctx = ctx[0],
.engine = execbuf.flags);
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
} while (++cycles & 1023);
- igt_spin_free(fd, spin);
+ igt_spin_free(i915, spin);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
names[child % num_engines] ?: "",
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object.handle);
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
- gem_context_destroy(fd, ctx[1]);
- gem_context_destroy(fd, ctx[0]);
}
igt_main
{
+ const struct intel_execution_engine2 *e2;
const struct intel_execution_engine *e;
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
int fd = -1;
@@ -1222,55 +1258,138 @@ igt_main
igt_fork_hang_detector(fd);
}
+ /* Legacy testing must be first. */
for (e = intel_execution_engines; e->name; e++) {
- igt_subtest_f("%s", e->name)
- sync_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("idle-%s", e->name)
- idle_ring(fd, eb_ring(e), 20);
- igt_subtest_f("active-%s", e->name)
- active_ring(fd, eb_ring(e), 20);
- igt_subtest_f("wakeup-%s", e->name)
- wakeup_ring(fd, eb_ring(e), 20, 1);
- igt_subtest_f("active-wakeup-%s", e->name)
- active_wakeup_ring(fd, eb_ring(e), 20, 1);
- igt_subtest_f("double-wakeup-%s", e->name)
- wakeup_ring(fd, eb_ring(e), 20, 2);
- igt_subtest_f("store-%s", e->name)
- store_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("switch-%s", e->name)
- switch_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("forked-switch-%s", e->name)
- switch_ring(fd, eb_ring(e), ncpus, 20);
- igt_subtest_f("many-%s", e->name)
- store_many(fd, eb_ring(e), 20);
- igt_subtest_f("forked-%s", e->name)
- sync_ring(fd, eb_ring(e), ncpus, 20);
- igt_subtest_f("forked-store-%s", e->name)
- store_ring(fd, eb_ring(e), ncpus, 20);
+ struct intel_execution_engine2 e2__;
+
+ e2__ = gem_eb_flags_to_engine(eb_ring(e));
+ if (e2__.flags == -1)
+ continue;
+ e2 = &e2__;
+
+ igt_subtest_f("legacy_%s", e->name)
+ sync_ring(fd, e2, 1, 20);
+
+ }
+
+ igt_subtest_with_dynamic("basic_sync_ring") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ sync_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("idle") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ idle_ring(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("active") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ active_ring(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ wakeup_ring(fd, e2, 150, 1);
+ }
+ }
+
+ igt_subtest_with_dynamic("active-wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ active_wakeup_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("double-wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ wakeup_ring(fd, e2, 20, 2);
+ }
+ }
+
+ igt_subtest_with_dynamic("store") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("switch") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ switch_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked-switch") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ switch_ring(fd, e2, ncpus, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("many") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_many(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ sync_ring(fd, e2, ncpus, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked-store") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_ring(fd, e2, ncpus, 20);
+ }
}
igt_subtest("basic-each")
- sync_ring(fd, ALL_ENGINES, 1, 2);
+ sync_ring(fd, NULL, 1, 2);
igt_subtest("basic-store-each")
- store_ring(fd, ALL_ENGINES, 1, 2);
+ store_ring(fd, NULL, 1, 2);
igt_subtest("basic-many-each")
- store_many(fd, ALL_ENGINES, 2);
+ store_many(fd, NULL, 2);
igt_subtest("switch-each")
- switch_ring(fd, ALL_ENGINES, 1, 20);
+ switch_ring(fd, NULL, 1, 20);
igt_subtest("forked-switch-each")
- switch_ring(fd, ALL_ENGINES, ncpus, 20);
+ switch_ring(fd, NULL, ncpus, 20);
igt_subtest("forked-each")
- sync_ring(fd, ALL_ENGINES, ncpus, 20);
+ sync_ring(fd, NULL, ncpus, 20);
igt_subtest("forked-store-each")
- store_ring(fd, ALL_ENGINES, ncpus, 20);
+ store_ring(fd, NULL, ncpus, 20);
igt_subtest("active-each")
- active_ring(fd, ALL_ENGINES, 20);
+ active_ring(fd, NULL, 20);
igt_subtest("wakeup-each")
- wakeup_ring(fd, ALL_ENGINES, 20, 1);
+ wakeup_ring(fd, NULL, 20, 1);
igt_subtest("active-wakeup-each")
- active_wakeup_ring(fd, ALL_ENGINES, 20, 1);
+ active_wakeup_ring(fd, NULL, 20, 1);
igt_subtest("double-wakeup-each")
- wakeup_ring(fd, ALL_ENGINES, 20, 2);
+ wakeup_ring(fd, NULL, 20, 2);
igt_subtest("basic-all")
sync_all(fd, 1, 2);
@@ -1294,14 +1413,15 @@ igt_main
}
igt_subtest("preempt-all")
- preempt(fd, ALL_ENGINES, 1, 20);
-
- for (e = intel_execution_engines; e->name; e++) {
- igt_subtest_f("preempt-%s", e->name)
- preempt(fd, eb_ring(e), ncpus, 20);
+ preempt(fd, NULL, 1, 20);
+ igt_subtest_with_dynamic("preempt") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ preempt(fd, e2, ncpus, 20);
+ }
}
}
-
igt_fixture {
igt_stop_hang_detector();
close(fd);
--
2.25.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available
2020-04-06 9:12 Arjun Melkaveri
@ 2020-04-06 9:19 ` Melkaveri, Arjun
0 siblings, 0 replies; 8+ messages in thread
From: Melkaveri, Arjun @ 2020-04-06 9:19 UTC (permalink / raw)
To: igt-dev@lists.freedesktop.org
This version continues the review of the changes from
https://patchwork.freedesktop.org/patch/355243/?series=73955&rev=1
Thanks
Arjun M
-----Original Message-----
From: Melkaveri, Arjun <arjun.melkaveri@intel.com>
Sent: Monday, April 6, 2020 2:43 PM
To: Melkaveri, Arjun <arjun.melkaveri@intel.com>; igt-dev@lists.freedesktop.org
Subject: [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available
Replaced the legacy for_each_engine* defines with the ones implemented in the gem_engine_topology library.
Used gem_context_clone_with_engines
to make sure that the engine index is created based on a default context with the engine map configured.
Added gem_reopen_driver and gem_context_copy_engines to transfer the engine map from parent fd default context.
V2:
Added Legacy engine coverage for sync_ring and sync_all.
Cc: Dec Katarzyna <katarzyna.dec@intel.com>
Cc: Ursulin Tvrtko <tvrtko.ursulin@intel.com>
Signed-off-by: sai gowtham <sai.gowtham.ch@intel.com>
Signed-off-by: Arjun Melkaveri <arjun.melkaveri@intel.com>
---
tests/i915/gem_sync.c | 566 +++++++++++++++++++++++++-----------------
1 file changed, 343 insertions(+), 223 deletions(-)
diff --git a/tests/i915/gem_sync.c b/tests/i915/gem_sync.c index 2ef55ecc..8efa0668 100644
--- a/tests/i915/gem_sync.c
+++ b/tests/i915/gem_sync.c
@@ -79,52 +79,56 @@ out:
}
static void
-sync_ring(int fd, unsigned ring, int num_children, int timeout)
+sync_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
double start, elapsed;
unsigned long cycles;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
start = gettime();
cycles = 0;
do {
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n", @@ -132,14 +136,14 @@ sync_ring(int fd, unsigned ring, int num_children, int timeout)
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object.handle);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0); }
static void
-idle_ring(int fd, unsigned ring, int timeout)
+idle_ring(int fd, const struct intel_execution_engine2 *e, int timeout)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object; @@ -147,7 +151,6 @@ idle_ring(int fd, unsigned ring, int timeout)
double start, elapsed;
unsigned long cycles;
- gem_require_ring(fd, ring);
memset(&object, 0, sizeof(object));
object.handle = gem_create(fd, 4096);
@@ -156,7 +159,7 @@ idle_ring(int fd, unsigned ring, int timeout)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
gem_execbuf(fd, &execbuf);
gem_sync(fd, object.handle);
@@ -178,32 +181,34 @@ idle_ring(int fd, unsigned ring, int timeout) }
static void
-wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
+wakeup_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout, int wlen)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf; @@ -211,9 +216,11 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
unsigned long cycles;
igt_spin_t *spin;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object); @@ -226,10 +233,10 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
IGT_SPIN_FAST));
igt_assert(igt_spin_has_poll(spin));
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_end(spin);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
for (int warmup = 0; warmup <= 1; warmup++) {
end = gettime() + timeout/10.;
@@ -238,12 +245,12 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
do {
igt_spin_reset(spin);
- gem_execbuf(fd, &spin->execbuf);
+ gem_execbuf(i915, &spin->execbuf);
igt_spin_busywait_until_started(spin);
this = gettime();
igt_spin_end(spin);
- gem_sync(fd, spin->handle);
+ gem_sync(i915, spin->handle);
now = gettime();
elapsed += now - this;
@@ -262,15 +269,15 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
do {
igt_spin_reset(spin);
- gem_execbuf(fd, &spin->execbuf);
+ gem_execbuf(i915, &spin->execbuf);
igt_spin_busywait_until_started(spin);
for (int n = 0; n < wlen; n++)
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
this = gettime();
igt_spin_end(spin);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
now = gettime();
elapsed += now - this;
@@ -283,49 +290,53 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
names[child % num_engines] ? " c" : "C",
cycles, 1e6*baseline, elapsed*1e6/cycles);
- igt_spin_free(fd, spin);
- gem_close(fd, object.handle);
+ igt_spin_free(i915, spin);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(2*timeout, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0); }
-static void active_ring(int fd, unsigned ring, int timeout)
+static void active_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
double start, end, elapsed;
unsigned long cycles;
igt_spin_t *spin[2];
- spin[0] = __igt_spin_new(fd,
- .engine = ring,
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
+ spin[0] = __igt_spin_new(i915,
+ .engine = e->flags,
.flags = IGT_SPIN_FAST);
- spin[1] = __igt_spin_new(fd,
- .engine = ring,
+ spin[1] = __igt_spin_new(i915,
+ .engine = e->flags,
.flags = IGT_SPIN_FAST);
start = gettime();
@@ -336,16 +347,16 @@ static void active_ring(int fd, unsigned ring, int timeout)
igt_spin_t *s = spin[loop & 1];
igt_spin_end(s);
- gem_sync(fd, s->handle);
+ gem_sync(i915, s->handle);
igt_spin_reset(s);
- gem_execbuf(fd, &s->execbuf);
+ gem_execbuf(i915, &s->execbuf);
}
cycles += 1024;
} while ((elapsed = gettime()) < end);
- igt_spin_free(fd, spin[1]);
- igt_spin_free(fd, spin[0]);
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
names[child % num_engines] ?: "",
@@ -357,32 +368,34 @@ static void active_ring(int fd, unsigned ring, int timeout) }
static void
-active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
+active_wakeup_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout, int wlen)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf; @@ -390,36 +403,38 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
unsigned long cycles;
igt_spin_t *spin[2];
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
- spin[0] = __igt_spin_new(fd,
+ spin[0] = __igt_spin_new(i915,
.engine = execbuf.flags,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
igt_assert(igt_spin_has_poll(spin[0]));
- spin[1] = __igt_spin_new(fd,
+ spin[1] = __igt_spin_new(i915,
.engine = execbuf.flags,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_end(spin[1]);
igt_spin_end(spin[0]);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
for (int warmup = 0; warmup <= 1; warmup++) {
igt_spin_reset(spin[0]);
- gem_execbuf(fd, &spin[0]->execbuf);
+ gem_execbuf(i915, &spin[0]->execbuf);
end = gettime() + timeout/10.;
elapsed = 0;
@@ -429,11 +444,11 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_reset(spin[1]);
- gem_execbuf(fd, &spin[1]->execbuf);
+ gem_execbuf(i915, &spin[1]->execbuf);
this = gettime();
igt_spin_end(spin[0]);
- gem_sync(fd, spin[0]->handle);
+ gem_sync(i915, spin[0]->handle);
now = gettime();
elapsed += now - this;
@@ -450,7 +465,7 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_reset(spin[0]);
- gem_execbuf(fd, &spin[0]->execbuf);
+ gem_execbuf(i915, &spin[0]->execbuf);
end = gettime() + timeout;
elapsed = 0;
@@ -459,15 +474,15 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_busywait_until_started(spin[0]);
for (int n = 0; n < wlen; n++)
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_reset(spin[1]);
- gem_execbuf(fd, &spin[1]->execbuf);
+ gem_execbuf(i915, &spin[1]->execbuf);
this = gettime();
igt_spin_end(spin[0]);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
now = gettime();
elapsed += now - this;
@@ -482,43 +497,45 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
names[child % num_engines] ? " c" : "C",
cycles, 1e6*baseline, elapsed*1e6/cycles);
- igt_spin_free(fd, spin[1]);
- igt_spin_free(fd, spin[0]);
- gem_close(fd, object.handle);
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(2*timeout, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0); }
static void
-store_ring(int fd, unsigned ring, int num_children, int timeout)
+store_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024]; @@ -527,6 +544,8 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
unsigned long cycles;
uint32_t *batch, *b;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
execbuf.flags = engines[child % num_engines]; @@ -536,20 +555,20 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
execbuf.flags |= I915_EXEC_SECURE;
memset(object, 0, sizeof(object));
- object[0].handle = gem_create(fd, 4096);
- gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
+ object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, object[0].handle, 0, &bbe, sizeof(bbe));
execbuf.buffer_count = 1;
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
object[0].flags |= EXEC_OBJECT_WRITE;
- object[1].handle = gem_create(fd, 20*1024);
+ object[1].handle = gem_create(i915, 20*1024);
object[1].relocs_ptr = to_user_pointer(reloc);
object[1].relocation_count = 1024;
- batch = gem_mmap__cpu(fd, object[1].handle, 0, 20*1024,
+ batch = gem_mmap__cpu(i915, object[1].handle, 0, 20*1024,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, object[1].handle,
+ gem_set_domain(i915, object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(reloc, 0, sizeof(reloc));
@@ -582,15 +601,15 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);
munmap(batch, 20*1024);
execbuf.buffer_count = 2;
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
start = gettime();
cycles = 0;
do {
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n", @@ -598,16 +617,18 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object[1].handle);
- gem_close(fd, object[0].handle);
+ gem_close(i915, object[1].handle);
+ gem_close(i915, object[0].handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0); }
static void
-switch_ring(int fd, unsigned ring, int num_children, int timeout)
+switch_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
const char *names[16];
@@ -615,27 +636,28 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
gem_require_contexts(fd);
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
+ uint32_t ctx;
struct context {
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024]; @@ -643,7 +665,13 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
} contexts[2];
double elapsed, baseline;
unsigned long cycles;
-
+ /*
+ * Ensure the gpu is idle by launching
+ * nop execbuf and stalling for it.
+ */
+ i915 = gem_reopen_driver(fd);
+ ctx = gem_context_create(fd);
+ gem_context_copy_engines(fd, 0, i915, ctx);
for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
const uint32_t bbe = MI_BATCH_BUFFER_END;
const uint32_t sz = 32 << 10;
@@ -657,23 +685,23 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
c->execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
if (gen < 6)
c->execbuf.flags |= I915_EXEC_SECURE;
- c->execbuf.rsvd1 = gem_context_create(fd);
+ c->execbuf.rsvd1 = ctx;
memset(c->object, 0, sizeof(c->object));
- c->object[0].handle = gem_create(fd, 4096);
- gem_write(fd, c->object[0].handle, 0, &bbe, sizeof(bbe));
+ c->object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, c->object[0].handle, 0, &bbe, sizeof(bbe));
c->execbuf.buffer_count = 1;
- gem_execbuf(fd, &c->execbuf);
+ gem_execbuf(i915, &c->execbuf);
c->object[0].flags |= EXEC_OBJECT_WRITE;
- c->object[1].handle = gem_create(fd, sz);
+ c->object[1].handle = gem_create(i915, sz);
c->object[1].relocs_ptr = to_user_pointer(c->reloc);
c->object[1].relocation_count = 1024 * i;
- batch = gem_mmap__cpu(fd, c->object[1].handle, 0, sz,
+ batch = gem_mmap__cpu(i915, c->object[1].handle, 0, sz,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, c->object[1].handle,
+ gem_set_domain(i915, c->object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(c->reloc, 0, sizeof(c->reloc)); @@ -707,8 +735,8 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < sz);
munmap(batch, sz);
c->execbuf.buffer_count = 2;
- gem_execbuf(fd, &c->execbuf);
- gem_sync(fd, c->object[1].handle);
+ gem_execbuf(i915, &c->execbuf);
+ gem_sync(i915, c->object[1].handle);
}
cycles = 0;
@@ -717,12 +745,12 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
do {
double this;
- gem_execbuf(fd, &contexts[1].execbuf);
- gem_execbuf(fd, &contexts[0].execbuf);
+ gem_execbuf(i915, &contexts[1].execbuf);
+ gem_execbuf(i915, &contexts[0].execbuf);
this = gettime();
- gem_sync(fd, contexts[1].object[1].handle);
- gem_sync(fd, contexts[0].object[1].handle);
+ gem_sync(i915, contexts[1].object[1].handle);
+ gem_sync(i915, contexts[0].object[1].handle);
baseline += gettime() - this;
} while (++cycles & 1023);
}
@@ -734,14 +762,14 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
do {
double this;
- gem_execbuf(fd, &contexts[1].execbuf);
- gem_execbuf(fd, &contexts[0].execbuf);
+ gem_execbuf(i915, &contexts[1].execbuf);
+ gem_execbuf(i915, &contexts[0].execbuf);
this = gettime();
- gem_sync(fd, contexts[0].object[1].handle);
+ gem_sync(i915, contexts[0].object[1].handle);
elapsed += gettime() - this;
- gem_sync(fd, contexts[1].object[1].handle);
+ gem_sync(i915, contexts[1].object[1].handle);
} while (++cycles & 1023);
}
elapsed /= cycles;
@@ -752,9 +780,9 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
cycles, elapsed*1e6, baseline*1e6);
for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
- gem_close(fd, contexts[i].object[1].handle);
- gem_close(fd, contexts[i].object[0].handle);
- gem_context_destroy(fd, contexts[i].execbuf.rsvd1);
+ gem_close(i915, contexts[i].object[1].handle);
+ gem_close(i915, contexts[i].object[0].handle);
+ gem_context_destroy(i915, contexts[i].execbuf.rsvd1);
}
}
igt_waitchildren_timeout(timeout+10, NULL); @@ -803,7 +831,8 @@ static void *waiter(void *arg) }
static void
-__store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
+__store_many(int fd, const struct intel_execution_engine2 *e,
+ int timeout, unsigned long *cycles)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
const uint32_t bbe = MI_BATCH_BUFFER_END; @@ -817,7 +846,7 @@ __store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
if (gen < 6)
@@ -931,8 +960,9 @@ __store_many(int fd, unsigned ring, int timeout, unsigned long *cycles) }
static void
-store_many(int fd, unsigned ring, int timeout)
+store_many(int fd, const struct intel_execution_engine2 *e, int
+timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned long *shared;
const char *names[16];
int n = 0;
@@ -942,24 +972,23 @@ store_many(int fd, unsigned ring, int timeout)
intel_detect_and_clear_missed_interrupts(fd);
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
igt_fork(child, 1)
__store_many(fd,
- eb_ring(e),
+ e2,
timeout,
&shared[n]);
- names[n++] = e->name;
+ names[n++] = e2->name;
}
igt_waitchildren();
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
- __store_many(fd, ring, timeout, &shared[n]);
+ igt_require(gem_class_can_store_dword(fd, e->class));
+ __store_many(fd, e, timeout, &shared[n]);
names[n++] = NULL;
}
@@ -1025,15 +1054,16 @@ sync_all(int fd, int num_children, int timeout) static void store_all(int fd, int num_children, int timeout) {
+ const struct intel_execution_engine2 *e;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
int num_engines = 0;
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ __for_each_physical_engine(fd, e) {
+ if (!gem_class_can_store_dword(fd, e->class))
continue;
- engines[num_engines++] = eb_ring(e);
+ engines[num_engines++] = e->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
@@ -1041,6 +1071,7 @@ store_all(int fd, int num_children, int timeout)
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024]; @@ -1049,6 +1080,8 @@ store_all(int fd, int num_children, int timeout)
unsigned long cycles;
uint32_t *batch, *b;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC; @@ -1057,20 +1090,20 @@ store_all(int fd, int num_children, int timeout)
execbuf.flags |= I915_EXEC_SECURE;
memset(object, 0, sizeof(object));
- object[0].handle = gem_create(fd, 4096);
- gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
+ object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, object[0].handle, 0, &bbe, sizeof(bbe));
execbuf.buffer_count = 1;
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
object[0].flags |= EXEC_OBJECT_WRITE;
- object[1].handle = gem_create(fd, 1024*16 + 4096);
+ object[1].handle = gem_create(i915, 1024*16 + 4096);
object[1].relocs_ptr = to_user_pointer(reloc);
object[1].relocation_count = 1024;
- batch = gem_mmap__cpu(fd, object[1].handle, 0, 16*1024 + 4096,
+ batch = gem_mmap__cpu(i915, object[1].handle, 0, 16*1024 + 4096,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, object[1].handle,
+ gem_set_domain(i915, object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(reloc, 0, sizeof(reloc));
@@ -1103,8 +1136,8 @@ store_all(int fd, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);
munmap(batch, 16*1024+4096);
execbuf.buffer_count = 2;
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
start = gettime();
cycles = 0;
@@ -1114,101 +1147,104 @@ store_all(int fd, int num_children, int timeout)
for (int n = 0; n < num_engines; n++) {
execbuf.flags &= ~ENGINE_MASK;
execbuf.flags |= engines[n];
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
}
- gem_sync(fd, object[1].handle);
+ gem_sync(i915, object[1].handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("Completed %ld cycles: %.3f us\n",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object[1].handle);
- gem_close(fd, object[0].handle);
+ gem_close(i915, object[1].handle);
+ gem_close(i915, object[0].handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0); }
static void
-preempt(int fd, unsigned ring, int num_children, int timeout)
+preempt(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- uint32_t ctx[2];
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
- ctx[0] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[0], MIN_PRIO);
-
- ctx[1] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[1], MAX_PRIO);
-
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
+ uint32_t ctx[2];
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
double start, elapsed;
unsigned long cycles;
+ i915 = gem_reopen_driver(fd);
+ ctx[1] = gem_context_clone_with_engines(fd, 0);
+ gem_context_set_priority(fd, ctx[1], MAX_PRIO);
+ gem_context_copy_engines(fd, 0, i915, ctx[1]);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
execbuf.rsvd1 = ctx[1];
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
-
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
+ ctx[0] = gem_context_clone_with_engines(fd, 0);
+ gem_context_set_priority(fd, ctx[0], MIN_PRIO);
+ gem_context_copy_engines(fd, 0, i915, ctx[0]);
start = gettime();
cycles = 0;
do {
igt_spin_t *spin =
- __igt_spin_new(fd,
+ __igt_spin_new(i915,
.ctx = ctx[0],
.engine = execbuf.flags);
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
} while (++cycles & 1023);
- igt_spin_free(fd, spin);
+ igt_spin_free(i915, spin);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
names[child % num_engines] ?: "",
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object.handle);
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
- gem_context_destroy(fd, ctx[1]);
- gem_context_destroy(fd, ctx[0]);
}
igt_main
{
+ const struct intel_execution_engine2 *e2;
const struct intel_execution_engine *e;
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
int fd = -1;
@@ -1222,55 +1258,138 @@ igt_main
igt_fork_hang_detector(fd);
}
+ /* Legacy testing must be first. */
for (e = intel_execution_engines; e->name; e++) {
- igt_subtest_f("%s", e->name)
- sync_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("idle-%s", e->name)
- idle_ring(fd, eb_ring(e), 20);
- igt_subtest_f("active-%s", e->name)
- active_ring(fd, eb_ring(e), 20);
- igt_subtest_f("wakeup-%s", e->name)
- wakeup_ring(fd, eb_ring(e), 20, 1);
- igt_subtest_f("active-wakeup-%s", e->name)
- active_wakeup_ring(fd, eb_ring(e), 20, 1);
- igt_subtest_f("double-wakeup-%s", e->name)
- wakeup_ring(fd, eb_ring(e), 20, 2);
- igt_subtest_f("store-%s", e->name)
- store_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("switch-%s", e->name)
- switch_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("forked-switch-%s", e->name)
- switch_ring(fd, eb_ring(e), ncpus, 20);
- igt_subtest_f("many-%s", e->name)
- store_many(fd, eb_ring(e), 20);
- igt_subtest_f("forked-%s", e->name)
- sync_ring(fd, eb_ring(e), ncpus, 20);
- igt_subtest_f("forked-store-%s", e->name)
- store_ring(fd, eb_ring(e), ncpus, 20);
+ struct intel_execution_engine2 e2__;
+
+ e2__ = gem_eb_flags_to_engine(eb_ring(e));
+ if (e2__.flags == -1)
+ continue;
+ e2 = &e2__;
+
+ igt_subtest_f("legacy_%s", e->name)
+ sync_ring(fd, e2, 1, 20);
+
+ }
+
+ igt_subtest_with_dynamic("basic_sync_ring") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ sync_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("idle") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ idle_ring(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("active") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ active_ring(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ wakeup_ring(fd, e2, 150, 1);
+ }
+ }
+
+ igt_subtest_with_dynamic("active-wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ active_wakeup_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("double-wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ wakeup_ring(fd, e2, 20, 2);
+ }
+ }
+
+ igt_subtest_with_dynamic("store") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("switch") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ switch_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked-switch") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ switch_ring(fd, e2, ncpus, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("many") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_many(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ sync_ring(fd, e2, ncpus, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked-store") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_ring(fd, e2, ncpus, 20);
+ }
}
igt_subtest("basic-each")
- sync_ring(fd, ALL_ENGINES, 1, 2);
+ sync_ring(fd, NULL, 1, 2);
igt_subtest("basic-store-each")
- store_ring(fd, ALL_ENGINES, 1, 2);
+ store_ring(fd, NULL, 1, 2);
igt_subtest("basic-many-each")
- store_many(fd, ALL_ENGINES, 2);
+ store_many(fd, NULL, 2);
igt_subtest("switch-each")
- switch_ring(fd, ALL_ENGINES, 1, 20);
+ switch_ring(fd, NULL, 1, 20);
igt_subtest("forked-switch-each")
- switch_ring(fd, ALL_ENGINES, ncpus, 20);
+ switch_ring(fd, NULL, ncpus, 20);
igt_subtest("forked-each")
- sync_ring(fd, ALL_ENGINES, ncpus, 20);
+ sync_ring(fd, NULL, ncpus, 20);
igt_subtest("forked-store-each")
- store_ring(fd, ALL_ENGINES, ncpus, 20);
+ store_ring(fd, NULL, ncpus, 20);
igt_subtest("active-each")
- active_ring(fd, ALL_ENGINES, 20);
+ active_ring(fd, NULL, 20);
igt_subtest("wakeup-each")
- wakeup_ring(fd, ALL_ENGINES, 20, 1);
+ wakeup_ring(fd, NULL, 20, 1);
igt_subtest("active-wakeup-each")
- active_wakeup_ring(fd, ALL_ENGINES, 20, 1);
+ active_wakeup_ring(fd, NULL, 20, 1);
igt_subtest("double-wakeup-each")
- wakeup_ring(fd, ALL_ENGINES, 20, 2);
+ wakeup_ring(fd, NULL, 20, 2);
igt_subtest("basic-all")
sync_all(fd, 1, 2);
@@ -1294,14 +1413,15 @@ igt_main
}
igt_subtest("preempt-all")
- preempt(fd, ALL_ENGINES, 1, 20);
-
- for (e = intel_execution_engines; e->name; e++) {
- igt_subtest_f("preempt-%s", e->name)
- preempt(fd, eb_ring(e), ncpus, 20);
+ preempt(fd, NULL, 1, 20);
+ igt_subtest_with_dynamic("preempt") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ preempt(fd, e2, ncpus, 20);
+ }
}
}
-
igt_fixture {
igt_stop_hang_detector();
close(fd);
--
2.25.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
* [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available
@ 2020-04-06 9:25 Arjun Melkaveri
2020-04-06 11:25 ` [igt-dev] ✗ GitLab.Pipeline: failure for tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3) Patchwork
` (3 more replies)
0 siblings, 4 replies; 8+ messages in thread
From: Arjun Melkaveri @ 2020-04-06 9:25 UTC (permalink / raw)
To: arjun.melkaveri, igt-dev
Replaced the legacy for_each_engine* defines with the ones
implemented in the gem_engine_topology library.
Used gem_context_clone_with_engines
to make sure that each new context was created
based on a default context with the engine map configured.
Added gem_reopen_driver and gem_context_copy_engines
to transfer the engine map from the parent fd's default
context.
V2:
Added legacy engine coverage for the sync_ring and sync_all subtests.
Cc: Dec Katarzyna <katarzyna.dec@intel.com>
Cc: Ursulin Tvrtko <tvrtko.ursulin@intel.com>
Signed-off-by: sai gowtham <sai.gowtham.ch@intel.com>
Signed-off-by: Arjun Melkaveri <arjun.melkaveri@intel.com>
---
tests/i915/gem_sync.c | 566 +++++++++++++++++++++++++-----------------
1 file changed, 343 insertions(+), 223 deletions(-)
diff --git a/tests/i915/gem_sync.c b/tests/i915/gem_sync.c
index 2ef55ecc..8efa0668 100644
--- a/tests/i915/gem_sync.c
+++ b/tests/i915/gem_sync.c
@@ -79,52 +79,56 @@ out:
}
static void
-sync_ring(int fd, unsigned ring, int num_children, int timeout)
+sync_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
double start, elapsed;
unsigned long cycles;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
start = gettime();
cycles = 0;
do {
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
@@ -132,14 +136,14 @@ sync_ring(int fd, unsigned ring, int num_children, int timeout)
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object.handle);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
static void
-idle_ring(int fd, unsigned ring, int timeout)
+idle_ring(int fd, const struct intel_execution_engine2 *e, int timeout)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
@@ -147,7 +151,6 @@ idle_ring(int fd, unsigned ring, int timeout)
double start, elapsed;
unsigned long cycles;
- gem_require_ring(fd, ring);
memset(&object, 0, sizeof(object));
object.handle = gem_create(fd, 4096);
@@ -156,7 +159,7 @@ idle_ring(int fd, unsigned ring, int timeout)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
gem_execbuf(fd, &execbuf);
gem_sync(fd, object.handle);
@@ -178,32 +181,34 @@ idle_ring(int fd, unsigned ring, int timeout)
}
static void
-wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
+wakeup_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout, int wlen)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -211,9 +216,11 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
unsigned long cycles;
igt_spin_t *spin;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
@@ -226,10 +233,10 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
IGT_SPIN_FAST));
igt_assert(igt_spin_has_poll(spin));
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_end(spin);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
for (int warmup = 0; warmup <= 1; warmup++) {
end = gettime() + timeout/10.;
@@ -238,12 +245,12 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
do {
igt_spin_reset(spin);
- gem_execbuf(fd, &spin->execbuf);
+ gem_execbuf(i915, &spin->execbuf);
igt_spin_busywait_until_started(spin);
this = gettime();
igt_spin_end(spin);
- gem_sync(fd, spin->handle);
+ gem_sync(i915, spin->handle);
now = gettime();
elapsed += now - this;
@@ -262,15 +269,15 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
do {
igt_spin_reset(spin);
- gem_execbuf(fd, &spin->execbuf);
+ gem_execbuf(i915, &spin->execbuf);
igt_spin_busywait_until_started(spin);
for (int n = 0; n < wlen; n++)
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
this = gettime();
igt_spin_end(spin);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
now = gettime();
elapsed += now - this;
@@ -283,49 +290,53 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
names[child % num_engines] ? " c" : "C",
cycles, 1e6*baseline, elapsed*1e6/cycles);
- igt_spin_free(fd, spin);
- gem_close(fd, object.handle);
+ igt_spin_free(i915, spin);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(2*timeout, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
-static void active_ring(int fd, unsigned ring, int timeout)
+static void active_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
double start, end, elapsed;
unsigned long cycles;
igt_spin_t *spin[2];
- spin[0] = __igt_spin_new(fd,
- .engine = ring,
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
+ spin[0] = __igt_spin_new(i915,
+ .engine = e->flags,
.flags = IGT_SPIN_FAST);
- spin[1] = __igt_spin_new(fd,
- .engine = ring,
+ spin[1] = __igt_spin_new(i915,
+ .engine = e->flags,
.flags = IGT_SPIN_FAST);
start = gettime();
@@ -336,16 +347,16 @@ static void active_ring(int fd, unsigned ring, int timeout)
igt_spin_t *s = spin[loop & 1];
igt_spin_end(s);
- gem_sync(fd, s->handle);
+ gem_sync(i915, s->handle);
igt_spin_reset(s);
- gem_execbuf(fd, &s->execbuf);
+ gem_execbuf(i915, &s->execbuf);
}
cycles += 1024;
} while ((elapsed = gettime()) < end);
- igt_spin_free(fd, spin[1]);
- igt_spin_free(fd, spin[0]);
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
names[child % num_engines] ?: "",
@@ -357,32 +368,34 @@ static void active_ring(int fd, unsigned ring, int timeout)
}
static void
-active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
+active_wakeup_ring(int fd, const struct intel_execution_engine2 *e,
+ int timeout, int wlen)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
igt_require(num_engines);
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_engines) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -390,36 +403,38 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
unsigned long cycles;
igt_spin_t *spin[2];
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
- spin[0] = __igt_spin_new(fd,
+ spin[0] = __igt_spin_new(i915,
.engine = execbuf.flags,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
igt_assert(igt_spin_has_poll(spin[0]));
- spin[1] = __igt_spin_new(fd,
+ spin[1] = __igt_spin_new(i915,
.engine = execbuf.flags,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_end(spin[1]);
igt_spin_end(spin[0]);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
for (int warmup = 0; warmup <= 1; warmup++) {
igt_spin_reset(spin[0]);
- gem_execbuf(fd, &spin[0]->execbuf);
+ gem_execbuf(i915, &spin[0]->execbuf);
end = gettime() + timeout/10.;
elapsed = 0;
@@ -429,11 +444,11 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_reset(spin[1]);
- gem_execbuf(fd, &spin[1]->execbuf);
+ gem_execbuf(i915, &spin[1]->execbuf);
this = gettime();
igt_spin_end(spin[0]);
- gem_sync(fd, spin[0]->handle);
+ gem_sync(i915, spin[0]->handle);
now = gettime();
elapsed += now - this;
@@ -450,7 +465,7 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_reset(spin[0]);
- gem_execbuf(fd, &spin[0]->execbuf);
+ gem_execbuf(i915, &spin[0]->execbuf);
end = gettime() + timeout;
elapsed = 0;
@@ -459,15 +474,15 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_spin_busywait_until_started(spin[0]);
for (int n = 0; n < wlen; n++)
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
igt_spin_reset(spin[1]);
- gem_execbuf(fd, &spin[1]->execbuf);
+ gem_execbuf(i915, &spin[1]->execbuf);
this = gettime();
igt_spin_end(spin[0]);
- gem_sync(fd, object.handle);
+ gem_sync(i915, object.handle);
now = gettime();
elapsed += now - this;
@@ -482,43 +497,45 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
names[child % num_engines] ? " c" : "C",
cycles, 1e6*baseline, elapsed*1e6/cycles);
- igt_spin_free(fd, spin[1]);
- igt_spin_free(fd, spin[0]);
- gem_close(fd, object.handle);
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(2*timeout, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
static void
-store_ring(int fd, unsigned ring, int num_children, int timeout)
+store_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024];
@@ -527,6 +544,8 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
unsigned long cycles;
uint32_t *batch, *b;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
execbuf.flags = engines[child % num_engines];
@@ -536,20 +555,20 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
execbuf.flags |= I915_EXEC_SECURE;
memset(object, 0, sizeof(object));
- object[0].handle = gem_create(fd, 4096);
- gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
+ object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, object[0].handle, 0, &bbe, sizeof(bbe));
execbuf.buffer_count = 1;
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
object[0].flags |= EXEC_OBJECT_WRITE;
- object[1].handle = gem_create(fd, 20*1024);
+ object[1].handle = gem_create(i915, 20*1024);
object[1].relocs_ptr = to_user_pointer(reloc);
object[1].relocation_count = 1024;
- batch = gem_mmap__cpu(fd, object[1].handle, 0, 20*1024,
+ batch = gem_mmap__cpu(i915, object[1].handle, 0, 20*1024,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, object[1].handle,
+ gem_set_domain(i915, object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(reloc, 0, sizeof(reloc));
@@ -582,15 +601,15 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);
munmap(batch, 20*1024);
execbuf.buffer_count = 2;
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
start = gettime();
cycles = 0;
do {
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
@@ -598,16 +617,18 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object[1].handle);
- gem_close(fd, object[0].handle);
+ gem_close(i915, object[1].handle);
+ gem_close(i915, object[0].handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
static void
-switch_ring(int fd, unsigned ring, int num_children, int timeout)
+switch_ring(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
const char *names[16];
@@ -615,27 +636,28 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
gem_require_contexts(fd);
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
+ igt_require(gem_class_can_store_dword(fd, e->class));
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
+ uint32_t ctx;
struct context {
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024];
@@ -643,7 +665,13 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
} contexts[2];
double elapsed, baseline;
unsigned long cycles;
-
+ /*
+ * Ensure the gpu is idle by launching
+ * nop execbuf and stalling for it.
+ */
+ i915 = gem_reopen_driver(fd);
+ ctx = gem_context_create(fd);
+ gem_context_copy_engines(fd, 0, i915, ctx);
for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
const uint32_t bbe = MI_BATCH_BUFFER_END;
const uint32_t sz = 32 << 10;
@@ -657,23 +685,23 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
c->execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
if (gen < 6)
c->execbuf.flags |= I915_EXEC_SECURE;
- c->execbuf.rsvd1 = gem_context_create(fd);
+ c->execbuf.rsvd1 = ctx;
memset(c->object, 0, sizeof(c->object));
- c->object[0].handle = gem_create(fd, 4096);
- gem_write(fd, c->object[0].handle, 0, &bbe, sizeof(bbe));
+ c->object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, c->object[0].handle, 0, &bbe, sizeof(bbe));
c->execbuf.buffer_count = 1;
- gem_execbuf(fd, &c->execbuf);
+ gem_execbuf(i915, &c->execbuf);
c->object[0].flags |= EXEC_OBJECT_WRITE;
- c->object[1].handle = gem_create(fd, sz);
+ c->object[1].handle = gem_create(i915, sz);
c->object[1].relocs_ptr = to_user_pointer(c->reloc);
c->object[1].relocation_count = 1024 * i;
- batch = gem_mmap__cpu(fd, c->object[1].handle, 0, sz,
+ batch = gem_mmap__cpu(i915, c->object[1].handle, 0, sz,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, c->object[1].handle,
+ gem_set_domain(i915, c->object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(c->reloc, 0, sizeof(c->reloc));
@@ -707,8 +735,8 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < sz);
munmap(batch, sz);
c->execbuf.buffer_count = 2;
- gem_execbuf(fd, &c->execbuf);
- gem_sync(fd, c->object[1].handle);
+ gem_execbuf(i915, &c->execbuf);
+ gem_sync(i915, c->object[1].handle);
}
cycles = 0;
@@ -717,12 +745,12 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
do {
double this;
- gem_execbuf(fd, &contexts[1].execbuf);
- gem_execbuf(fd, &contexts[0].execbuf);
+ gem_execbuf(i915, &contexts[1].execbuf);
+ gem_execbuf(i915, &contexts[0].execbuf);
this = gettime();
- gem_sync(fd, contexts[1].object[1].handle);
- gem_sync(fd, contexts[0].object[1].handle);
+ gem_sync(i915, contexts[1].object[1].handle);
+ gem_sync(i915, contexts[0].object[1].handle);
baseline += gettime() - this;
} while (++cycles & 1023);
}
@@ -734,14 +762,14 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
do {
double this;
- gem_execbuf(fd, &contexts[1].execbuf);
- gem_execbuf(fd, &contexts[0].execbuf);
+ gem_execbuf(i915, &contexts[1].execbuf);
+ gem_execbuf(i915, &contexts[0].execbuf);
this = gettime();
- gem_sync(fd, contexts[0].object[1].handle);
+ gem_sync(i915, contexts[0].object[1].handle);
elapsed += gettime() - this;
- gem_sync(fd, contexts[1].object[1].handle);
+ gem_sync(i915, contexts[1].object[1].handle);
} while (++cycles & 1023);
}
elapsed /= cycles;
@@ -752,9 +780,9 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
cycles, elapsed*1e6, baseline*1e6);
for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
- gem_close(fd, contexts[i].object[1].handle);
- gem_close(fd, contexts[i].object[0].handle);
- gem_context_destroy(fd, contexts[i].execbuf.rsvd1);
+ gem_close(i915, contexts[i].object[1].handle);
+ gem_close(i915, contexts[i].object[0].handle);
+ gem_context_destroy(i915, contexts[i].execbuf.rsvd1);
}
}
igt_waitchildren_timeout(timeout+10, NULL);
@@ -803,7 +831,8 @@ static void *waiter(void *arg)
}
static void
-__store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
+__store_many(int fd, const struct intel_execution_engine2 *e,
+ int timeout, unsigned long *cycles)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -817,7 +846,7 @@ __store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
if (gen < 6)
@@ -931,8 +960,9 @@ __store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
}
static void
-store_many(int fd, unsigned ring, int timeout)
+store_many(int fd, const struct intel_execution_engine2 *e, int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned long *shared;
const char *names[16];
int n = 0;
@@ -942,24 +972,23 @@ store_many(int fd, unsigned ring, int timeout)
intel_detect_and_clear_missed_interrupts(fd);
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ if (!gem_class_can_store_dword(fd, e2->class))
continue;
igt_fork(child, 1)
__store_many(fd,
- eb_ring(e),
+ e2,
timeout,
&shared[n]);
- names[n++] = e->name;
+ names[n++] = e2->name;
}
igt_waitchildren();
} else {
- gem_require_ring(fd, ring);
- igt_require(gem_can_store_dword(fd, ring));
- __store_many(fd, ring, timeout, &shared[n]);
+ igt_require(gem_class_can_store_dword(fd, e->class));
+ __store_many(fd, e, timeout, &shared[n]);
names[n++] = NULL;
}
@@ -1025,15 +1054,16 @@ sync_all(int fd, int num_children, int timeout)
static void
store_all(int fd, int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e;
const int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[16];
int num_engines = 0;
- for_each_physical_engine(e, fd) {
- if (!gem_can_store_dword(fd, eb_ring(e)))
+ __for_each_physical_engine(fd, e) {
+ if (!gem_class_can_store_dword(fd, e->class))
continue;
- engines[num_engines++] = eb_ring(e);
+ engines[num_engines++] = e->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
@@ -1041,6 +1071,7 @@ store_all(int fd, int num_children, int timeout)
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024];
@@ -1049,6 +1080,8 @@ store_all(int fd, int num_children, int timeout)
unsigned long cycles;
uint32_t *batch, *b;
+ i915 = gem_reopen_driver(fd);
+ gem_context_copy_engines(fd, 0, i915, 0);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(object);
execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
@@ -1057,20 +1090,20 @@ store_all(int fd, int num_children, int timeout)
execbuf.flags |= I915_EXEC_SECURE;
memset(object, 0, sizeof(object));
- object[0].handle = gem_create(fd, 4096);
- gem_write(fd, object[0].handle, 0, &bbe, sizeof(bbe));
+ object[0].handle = gem_create(i915, 4096);
+ gem_write(i915, object[0].handle, 0, &bbe, sizeof(bbe));
execbuf.buffer_count = 1;
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
object[0].flags |= EXEC_OBJECT_WRITE;
- object[1].handle = gem_create(fd, 1024*16 + 4096);
+ object[1].handle = gem_create(i915, 1024*16 + 4096);
object[1].relocs_ptr = to_user_pointer(reloc);
object[1].relocation_count = 1024;
- batch = gem_mmap__cpu(fd, object[1].handle, 0, 16*1024 + 4096,
+ batch = gem_mmap__cpu(i915, object[1].handle, 0, 16*1024 + 4096,
PROT_WRITE | PROT_READ);
- gem_set_domain(fd, object[1].handle,
+ gem_set_domain(i915, object[1].handle,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
memset(reloc, 0, sizeof(reloc));
@@ -1103,8 +1136,8 @@ store_all(int fd, int num_children, int timeout)
igt_assert((b - batch)*sizeof(uint32_t) < 20*1024);
munmap(batch, 16*1024+4096);
execbuf.buffer_count = 2;
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object[1].handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object[1].handle);
start = gettime();
cycles = 0;
@@ -1114,101 +1147,104 @@ store_all(int fd, int num_children, int timeout)
for (int n = 0; n < num_engines; n++) {
execbuf.flags &= ~ENGINE_MASK;
execbuf.flags |= engines[n];
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
}
- gem_sync(fd, object[1].handle);
+ gem_sync(i915, object[1].handle);
} while (++cycles & 1023);
} while ((elapsed = gettime() - start) < timeout);
igt_info("Completed %ld cycles: %.3f us\n",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object[1].handle);
- gem_close(fd, object[0].handle);
+ gem_close(i915, object[1].handle);
+ gem_close(i915, object[0].handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
static void
-preempt(int fd, unsigned ring, int num_children, int timeout)
+preempt(int fd, const struct intel_execution_engine2 *e,
+ int num_children, int timeout)
{
+ const struct intel_execution_engine2 *e2;
unsigned engines[16];
const char *names[16];
int num_engines = 0;
- uint32_t ctx[2];
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(e, fd) {
- names[num_engines] = e->name;
- engines[num_engines++] = eb_ring(e);
+ if (!e) {
+ __for_each_physical_engine(fd, e2) {
+ names[num_engines] = e2->name;
+ engines[num_engines++] = e2->flags;
if (num_engines == ARRAY_SIZE(engines))
break;
}
num_children *= num_engines;
} else {
- gem_require_ring(fd, ring);
names[num_engines] = NULL;
- engines[num_engines++] = ring;
+ engines[num_engines++] = e->flags;
}
- ctx[0] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[0], MIN_PRIO);
-
- ctx[1] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[1], MAX_PRIO);
-
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
+ int i915;
+ uint32_t ctx[2];
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
struct drm_i915_gem_execbuffer2 execbuf;
double start, elapsed;
unsigned long cycles;
+ i915 = gem_reopen_driver(fd);
+ ctx[1] = gem_context_clone_with_engines(fd, 0);
+ gem_context_set_priority(fd, ctx[1], MAX_PRIO);
+ gem_context_copy_engines(fd, 0, i915, ctx[1]);
memset(&object, 0, sizeof(object));
- object.handle = gem_create(fd, 4096);
- gem_write(fd, object.handle, 0, &bbe, sizeof(bbe));
+ object.handle = gem_create(i915, 4096);
+ gem_write(i915, object.handle, 0, &bbe, sizeof(bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = engines[child % num_engines];
execbuf.rsvd1 = ctx[1];
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
-
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
+ ctx[0] = gem_context_clone_with_engines(fd, 0);
+ gem_context_set_priority(fd, ctx[0], MIN_PRIO);
+ gem_context_copy_engines(fd, 0, i915, ctx[0]);
start = gettime();
cycles = 0;
do {
igt_spin_t *spin =
- __igt_spin_new(fd,
+ __igt_spin_new(i915,
.ctx = ctx[0],
.engine = execbuf.flags);
do {
- gem_execbuf(fd, &execbuf);
- gem_sync(fd, object.handle);
+ gem_execbuf(i915, &execbuf);
+ gem_sync(i915, object.handle);
} while (++cycles & 1023);
- igt_spin_free(fd, spin);
+ igt_spin_free(i915, spin);
} while ((elapsed = gettime() - start) < timeout);
igt_info("%s%sompleted %ld cycles: %.3f us\n",
names[child % num_engines] ?: "",
names[child % num_engines] ? " c" : "C",
cycles, elapsed*1e6/cycles);
- gem_close(fd, object.handle);
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
+ gem_close(i915, object.handle);
}
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
- gem_context_destroy(fd, ctx[1]);
- gem_context_destroy(fd, ctx[0]);
}
igt_main
{
+ const struct intel_execution_engine2 *e2;
const struct intel_execution_engine *e;
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
int fd = -1;
@@ -1222,55 +1258,138 @@ igt_main
igt_fork_hang_detector(fd);
}
+ /* Legacy testing must be first. */
for (e = intel_execution_engines; e->name; e++) {
- igt_subtest_f("%s", e->name)
- sync_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("idle-%s", e->name)
- idle_ring(fd, eb_ring(e), 20);
- igt_subtest_f("active-%s", e->name)
- active_ring(fd, eb_ring(e), 20);
- igt_subtest_f("wakeup-%s", e->name)
- wakeup_ring(fd, eb_ring(e), 20, 1);
- igt_subtest_f("active-wakeup-%s", e->name)
- active_wakeup_ring(fd, eb_ring(e), 20, 1);
- igt_subtest_f("double-wakeup-%s", e->name)
- wakeup_ring(fd, eb_ring(e), 20, 2);
- igt_subtest_f("store-%s", e->name)
- store_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("switch-%s", e->name)
- switch_ring(fd, eb_ring(e), 1, 20);
- igt_subtest_f("forked-switch-%s", e->name)
- switch_ring(fd, eb_ring(e), ncpus, 20);
- igt_subtest_f("many-%s", e->name)
- store_many(fd, eb_ring(e), 20);
- igt_subtest_f("forked-%s", e->name)
- sync_ring(fd, eb_ring(e), ncpus, 20);
- igt_subtest_f("forked-store-%s", e->name)
- store_ring(fd, eb_ring(e), ncpus, 20);
+ struct intel_execution_engine2 e2__;
+
+ e2__ = gem_eb_flags_to_engine(eb_ring(e));
+ if (e2__.flags == -1)
+ continue;
+ e2 = &e2__;
+
+ igt_subtest_f("legacy_%s", e->name)
+ sync_ring(fd, e2, 1, 20);
+
+ }
+
+ igt_subtest_with_dynamic("basic_sync_ring") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ sync_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("idle") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ idle_ring(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("active") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ active_ring(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ wakeup_ring(fd, e2, 150, 1);
+ }
+ }
+
+ igt_subtest_with_dynamic("active-wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ active_wakeup_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("double-wakeup") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ wakeup_ring(fd, e2, 20, 2);
+ }
+ }
+
+ igt_subtest_with_dynamic("store") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("switch") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ switch_ring(fd, e2, 1, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked-switch") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ switch_ring(fd, e2, ncpus, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("many") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_many(fd, e2, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ sync_ring(fd, e2, ncpus, 20);
+ }
+ }
+
+ igt_subtest_with_dynamic("forked-store") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ store_ring(fd, e2, ncpus, 20);
+ }
}
igt_subtest("basic-each")
- sync_ring(fd, ALL_ENGINES, 1, 2);
+ sync_ring(fd, NULL, 1, 2);
igt_subtest("basic-store-each")
- store_ring(fd, ALL_ENGINES, 1, 2);
+ store_ring(fd, NULL, 1, 2);
igt_subtest("basic-many-each")
- store_many(fd, ALL_ENGINES, 2);
+ store_many(fd, NULL, 2);
igt_subtest("switch-each")
- switch_ring(fd, ALL_ENGINES, 1, 20);
+ switch_ring(fd, NULL, 1, 20);
igt_subtest("forked-switch-each")
- switch_ring(fd, ALL_ENGINES, ncpus, 20);
+ switch_ring(fd, NULL, ncpus, 20);
igt_subtest("forked-each")
- sync_ring(fd, ALL_ENGINES, ncpus, 20);
+ sync_ring(fd, NULL, ncpus, 20);
igt_subtest("forked-store-each")
- store_ring(fd, ALL_ENGINES, ncpus, 20);
+ store_ring(fd, NULL, ncpus, 20);
igt_subtest("active-each")
- active_ring(fd, ALL_ENGINES, 20);
+ active_ring(fd, NULL, 20);
igt_subtest("wakeup-each")
- wakeup_ring(fd, ALL_ENGINES, 20, 1);
+ wakeup_ring(fd, NULL, 20, 1);
igt_subtest("active-wakeup-each")
- active_wakeup_ring(fd, ALL_ENGINES, 20, 1);
+ active_wakeup_ring(fd, NULL, 20, 1);
igt_subtest("double-wakeup-each")
- wakeup_ring(fd, ALL_ENGINES, 20, 2);
+ wakeup_ring(fd, NULL, 20, 2);
igt_subtest("basic-all")
sync_all(fd, 1, 2);
@@ -1294,14 +1413,15 @@ igt_main
}
igt_subtest("preempt-all")
- preempt(fd, ALL_ENGINES, 1, 20);
-
- for (e = intel_execution_engines; e->name; e++) {
- igt_subtest_f("preempt-%s", e->name)
- preempt(fd, eb_ring(e), ncpus, 20);
+ preempt(fd, NULL, 1, 20);
+ igt_subtest_with_dynamic("preempt") {
+ __for_each_physical_engine(fd, e2) {
+ /* Requires master for STORE_DWORD on gen4/5 */
+ igt_dynamic_f("%s", e2->name)
+ preempt(fd, e2, ncpus, 20);
+ }
}
}
-
igt_fixture {
igt_stop_hang_detector();
close(fd);
--
2.25.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [igt-dev] ✗ GitLab.Pipeline: failure for tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3)
2020-04-06 9:25 [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Arjun Melkaveri
@ 2020-04-06 11:25 ` Patchwork
2020-04-06 11:33 ` [igt-dev] ✓ Fi.CI.BAT: success " Patchwork
` (2 subsequent siblings)
3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-04-06 11:25 UTC (permalink / raw)
To: Arjun Melkaveri; +Cc: igt-dev
== Series Details ==
Series: tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3)
URL : https://patchwork.freedesktop.org/series/75536/
State : failure
== Summary ==
ERROR! This series introduces new undocumented tests:
gem_sync@active
gem_sync@active-wakeup
gem_sync@basic_sync_ring
gem_sync@double-wakeup
gem_sync@forked
gem_sync@forked-store
gem_sync@forked-switch
gem_sync@idle
gem_sync@legacy_blt
gem_sync@legacy_bsd1
gem_sync@legacy_bsd2
gem_sync@legacy_default
gem_sync@legacy_render
gem_sync@legacy_vebox
gem_sync@many
gem_sync@preempt
gem_sync@store
gem_sync@switch
gem_sync@wakeup
Can you document them as per the requirement in the [CONTRIBUTING.md]?
[Documentation] has more details on how to do this.
Here are few examples:
https://gitlab.freedesktop.org/drm/igt-gpu-tools/commit/0316695d03aa46108296b27f3982ec93200c7a6e
https://gitlab.freedesktop.org/drm/igt-gpu-tools/commit/443cc658e1e6b492ee17bf4f4d891029eb7a205d
Thanks in advance!
[CONTRIBUTING.md]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/blob/master/CONTRIBUTING.md#L19
[Documentation]: https://drm.pages.freedesktop.org/igt-gpu-tools/igt-gpu-tools-Core.html#igt-describe
Other than that, pipeline status: SUCCESS.
see https://gitlab.freedesktop.org/gfx-ci/igt-ci-tags/pipelines/128871 for the overview.
== Logs ==
For more details see: https://gitlab.freedesktop.org/gfx-ci/igt-ci-tags/pipelines/128871
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
* [igt-dev] ✓ Fi.CI.BAT: success for tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3)
2020-04-06 9:25 [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Arjun Melkaveri
2020-04-06 11:25 ` [igt-dev] ✗ GitLab.Pipeline: failure for tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3) Patchwork
@ 2020-04-06 11:33 ` Patchwork
2020-04-06 15:27 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
2020-04-06 15:29 ` [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Chris Wilson
3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-04-06 11:33 UTC (permalink / raw)
To: Arjun Melkaveri; +Cc: igt-dev
== Series Details ==
Series: tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3)
URL : https://patchwork.freedesktop.org/series/75536/
State : success
== Summary ==
CI Bug Log - changes from IGT_5572 -> IGTPW_4409
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/index.html
Known issues
------------
Here are the changes found in IGTPW_4409 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@i915_pm_rpm@basic-pci-d3-state:
- fi-icl-dsi: [PASS][1] -> [INCOMPLETE][2] ([i915#189])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/fi-icl-dsi/igt@i915_pm_rpm@basic-pci-d3-state.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/fi-icl-dsi/igt@i915_pm_rpm@basic-pci-d3-state.html
[i915#189]: https://gitlab.freedesktop.org/drm/intel/issues/189
Participating hosts (54 -> 47)
------------------------------
Missing (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-byt-clapper fi-bdw-samus
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5572 -> IGTPW_4409
CI-20190529: 20190529
CI_DRM_8259: 450fc86b62651336f9b5fde79c068df7b4c95aa4 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_4409: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/index.html
IGT_5572: 6c124b5c8501d900966c033ac86c3dc55c16a2da @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
== Testlist changes ==
+++ 19 lines
--- 91 lines
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/index.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
* [igt-dev] ✗ Fi.CI.IGT: failure for tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3)
2020-04-06 9:25 [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Arjun Melkaveri
2020-04-06 11:25 ` [igt-dev] ✗ GitLab.Pipeline: failure for tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3) Patchwork
2020-04-06 11:33 ` [igt-dev] ✓ Fi.CI.BAT: success " Patchwork
@ 2020-04-06 15:27 ` Patchwork
2020-04-06 15:29 ` [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Chris Wilson
3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-04-06 15:27 UTC (permalink / raw)
To: Arjun Melkaveri; +Cc: igt-dev
== Series Details ==
Series: tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3)
URL : https://patchwork.freedesktop.org/series/75536/
State : failure
== Summary ==
CI Bug Log - changes from IGT_5572_full -> IGTPW_4409_full
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with IGTPW_4409_full absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in IGTPW_4409_full, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/index.html
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in IGTPW_4409_full:
### IGT changes ###
#### Possible regressions ####
* igt@gem_workarounds@suspend-resume:
- shard-apl: [PASS][1] -> [INCOMPLETE][2]
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl7/igt@gem_workarounds@suspend-resume.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl6/igt@gem_workarounds@suspend-resume.html
New tests
---------
New tests have been introduced between IGT_5572_full and IGTPW_4409_full:
### New IGT tests (6) ###
* igt@gem_sync@basic_sync_ring:
- Statuses :
- Exec time: [None] s
* igt@gem_sync@basic_sync_ring@bcs0:
- Statuses : 5 pass(s)
- Exec time: [20.03, 20.14] s
* igt@gem_sync@basic_sync_ring@rcs0:
- Statuses : 5 pass(s)
- Exec time: [20.04, 20.24] s
* igt@gem_sync@basic_sync_ring@vcs0:
- Statuses : 5 pass(s)
- Exec time: [20.03, 20.13] s
* igt@gem_sync@basic_sync_ring@vcs1:
- Statuses : 1 pass(s)
- Exec time: [20.04] s
* igt@gem_sync@basic_sync_ring@vecs0:
- Statuses : 4 pass(s)
- Exec time: [20.02, 20.08] s
Known issues
------------
Here are the changes found in IGTPW_4409_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@i915_pm_dc@dc6-psr:
- shard-iclb: [PASS][3] -> [FAIL][4] ([i915#454])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-iclb2/igt@i915_pm_dc@dc6-psr.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-iclb2/igt@i915_pm_dc@dc6-psr.html
* igt@i915_pm_rpm@drm-resources-equal:
- shard-hsw: [PASS][5] -> [SKIP][6] ([fdo#109271]) +1 similar issue
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-hsw5/igt@i915_pm_rpm@drm-resources-equal.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-hsw4/igt@i915_pm_rpm@drm-resources-equal.html
- shard-iclb: [PASS][7] -> [SKIP][8] ([i915#1316] / [i915#579])
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-iclb5/igt@i915_pm_rpm@drm-resources-equal.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-iclb7/igt@i915_pm_rpm@drm-resources-equal.html
- shard-tglb: [PASS][9] -> [SKIP][10] ([i915#1316] / [i915#579])
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-tglb7/igt@i915_pm_rpm@drm-resources-equal.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-tglb6/igt@i915_pm_rpm@drm-resources-equal.html
* igt@kms_cursor_crc@pipe-a-cursor-128x42-random:
- shard-kbl: [PASS][11] -> [FAIL][12] ([i915#54] / [i915#93] / [i915#95])
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl1/igt@kms_cursor_crc@pipe-a-cursor-128x42-random.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl2/igt@kms_cursor_crc@pipe-a-cursor-128x42-random.html
* igt@kms_cursor_edge_walk@pipe-c-128x128-right-edge:
- shard-kbl: [PASS][13] -> [FAIL][14] ([i915#70])
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl4/igt@kms_cursor_edge_walk@pipe-c-128x128-right-edge.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl2/igt@kms_cursor_edge_walk@pipe-c-128x128-right-edge.html
- shard-apl: [PASS][15] -> [FAIL][16] ([i915#70])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl2/igt@kms_cursor_edge_walk@pipe-c-128x128-right-edge.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl7/igt@kms_cursor_edge_walk@pipe-c-128x128-right-edge.html
* igt@kms_draw_crc@draw-method-rgb565-render-untiled:
- shard-glk: [PASS][17] -> [FAIL][18] ([i915#52] / [i915#54]) +4 similar issues
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-glk5/igt@kms_draw_crc@draw-method-rgb565-render-untiled.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-glk1/igt@kms_draw_crc@draw-method-rgb565-render-untiled.html
* igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled:
- shard-kbl: [PASS][19] -> [FAIL][20] ([i915#177] / [i915#52] / [i915#54] / [i915#93] / [i915#95])
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl2/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl3/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
- shard-apl: [PASS][21] -> [FAIL][22] ([i915#52] / [i915#54] / [i915#95])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl4/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl1/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
* igt@kms_hdmi_inject@inject-audio:
- shard-tglb: [PASS][23] -> [SKIP][24] ([i915#433])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-tglb5/igt@kms_hdmi_inject@inject-audio.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-tglb6/igt@kms_hdmi_inject@inject-audio.html
* igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
- shard-apl: [PASS][25] -> [DMESG-WARN][26] ([i915#180])
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl1/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl4/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
* igt@kms_psr@psr2_sprite_plane_move:
- shard-iclb: [PASS][27] -> [SKIP][28] ([fdo#109441]) +3 similar issues
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-iclb8/igt@kms_psr@psr2_sprite_plane_move.html
* igt@kms_vblank@pipe-a-ts-continuation-suspend:
- shard-kbl: [PASS][29] -> [DMESG-WARN][30] ([i915#180]) +4 similar issues
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl2/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl1/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
* igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm:
- shard-glk: [PASS][31] -> [SKIP][32] ([fdo#109271]) +1 similar issue
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-glk6/igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm.html
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-glk4/igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm.html
- shard-tglb: [PASS][33] -> [SKIP][34] ([fdo#112015])
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-tglb5/igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm.html
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-tglb6/igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm.html
- shard-iclb: [PASS][35] -> [SKIP][36] ([fdo#109278])
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-iclb1/igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm.html
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-iclb7/igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm.html
#### Possible fixes ####
* {igt@gem_ctx_isolation@preservation-s3@vcs0}:
- shard-kbl: [DMESG-WARN][37] ([i915#180]) -> [PASS][38] +4 similar issues
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl7/igt@gem_ctx_isolation@preservation-s3@vcs0.html
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl4/igt@gem_ctx_isolation@preservation-s3@vcs0.html
* igt@gem_exec_params@invalid-bsd-ring:
- shard-iclb: [SKIP][39] ([fdo#109276]) -> [PASS][40]
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-iclb8/igt@gem_exec_params@invalid-bsd-ring.html
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-iclb2/igt@gem_exec_params@invalid-bsd-ring.html
* igt@i915_pm_rpm@cursor:
- shard-tglb: [SKIP][41] ([i915#1316] / [i915#579]) -> [PASS][42] +1 similar issue
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-tglb1/igt@i915_pm_rpm@cursor.html
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-tglb7/igt@i915_pm_rpm@cursor.html
- shard-hsw: [SKIP][43] ([fdo#109271]) -> [PASS][44]
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-hsw7/igt@i915_pm_rpm@cursor.html
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-hsw1/igt@i915_pm_rpm@cursor.html
- shard-glk: [SKIP][45] ([fdo#109271]) -> [PASS][46]
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-glk9/igt@i915_pm_rpm@cursor.html
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-glk1/igt@i915_pm_rpm@cursor.html
* igt@i915_pm_rpm@modeset-lpsp-stress-no-wait:
- shard-iclb: [SKIP][47] ([i915#1316] / [i915#579]) -> [PASS][48] +1 similar issue
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-iclb6/igt@i915_pm_rpm@modeset-lpsp-stress-no-wait.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-iclb7/igt@i915_pm_rpm@modeset-lpsp-stress-no-wait.html
* igt@kms_busy@basic@flip:
- shard-hsw: [INCOMPLETE][49] ([i915#61]) -> [PASS][50] +1 similar issue
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-hsw1/igt@kms_busy@basic@flip.html
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-hsw8/igt@kms_busy@basic@flip.html
* igt@kms_cursor_crc@pipe-a-cursor-128x42-sliding:
- shard-apl: [FAIL][51] ([i915#54] / [i915#95]) -> [PASS][52]
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl8/igt@kms_cursor_crc@pipe-a-cursor-128x42-sliding.html
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl2/igt@kms_cursor_crc@pipe-a-cursor-128x42-sliding.html
* igt@kms_cursor_crc@pipe-a-cursor-64x21-offscreen:
- shard-kbl: [FAIL][53] ([i915#54] / [i915#93] / [i915#95]) -> [PASS][54] +1 similar issue
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl3/igt@kms_cursor_crc@pipe-a-cursor-64x21-offscreen.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl6/igt@kms_cursor_crc@pipe-a-cursor-64x21-offscreen.html
* igt@kms_cursor_crc@pipe-c-cursor-128x128-sliding:
- shard-kbl: [FAIL][55] ([i915#54]) -> [PASS][56]
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl1/igt@kms_cursor_crc@pipe-c-cursor-128x128-sliding.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl7/igt@kms_cursor_crc@pipe-c-cursor-128x128-sliding.html
* igt@kms_cursor_legacy@flip-vs-cursor-crc-legacy:
- shard-kbl: [FAIL][57] ([i915#1566] / [i915#93] / [i915#95]) -> [PASS][58]
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl7/igt@kms_cursor_legacy@flip-vs-cursor-crc-legacy.html
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl3/igt@kms_cursor_legacy@flip-vs-cursor-crc-legacy.html
* igt@kms_draw_crc@draw-method-rgb565-blt-ytiled:
- shard-glk: [FAIL][59] ([i915#52] / [i915#54]) -> [PASS][60] +4 similar issues
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-glk2/igt@kms_draw_crc@draw-method-rgb565-blt-ytiled.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-glk7/igt@kms_draw_crc@draw-method-rgb565-blt-ytiled.html
* igt@kms_draw_crc@draw-method-rgb565-pwrite-untiled:
- shard-glk: [FAIL][61] ([i915#177] / [i915#52] / [i915#54]) -> [PASS][62]
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-glk4/igt@kms_draw_crc@draw-method-rgb565-pwrite-untiled.html
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-glk4/igt@kms_draw_crc@draw-method-rgb565-pwrite-untiled.html
* igt@kms_draw_crc@draw-method-xrgb8888-mmap-gtt-untiled:
- shard-apl: [FAIL][63] ([i915#52] / [i915#54] / [i915#95]) -> [PASS][64]
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl3/igt@kms_draw_crc@draw-method-xrgb8888-mmap-gtt-untiled.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl3/igt@kms_draw_crc@draw-method-xrgb8888-mmap-gtt-untiled.html
- shard-kbl: [FAIL][65] ([i915#177] / [i915#52] / [i915#54] / [i915#93] / [i915#95]) -> [PASS][66]
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl1/igt@kms_draw_crc@draw-method-xrgb8888-mmap-gtt-untiled.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl1/igt@kms_draw_crc@draw-method-xrgb8888-mmap-gtt-untiled.html
* igt@kms_fbcon_fbt@fbc-suspend:
- shard-kbl: [DMESG-WARN][67] ([i915#180] / [i915#93] / [i915#95]) -> [PASS][68]
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl7/igt@kms_fbcon_fbt@fbc-suspend.html
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl3/igt@kms_fbcon_fbt@fbc-suspend.html
* igt@kms_flip@flip-vs-expired-vblank-interruptible:
- shard-glk: [FAIL][69] ([i915#79]) -> [PASS][70]
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-glk2/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-glk8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
* igt@kms_flip@flip-vs-suspend-interruptible:
- shard-apl: [DMESG-WARN][71] ([i915#180]) -> [PASS][72] +1 similar issue
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl6/igt@kms_flip@flip-vs-suspend-interruptible.html
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl2/igt@kms_flip@flip-vs-suspend-interruptible.html
- shard-snb: [INCOMPLETE][73] ([i915#82]) -> [PASS][74]
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-snb1/igt@kms_flip@flip-vs-suspend-interruptible.html
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-snb7/igt@kms_flip@flip-vs-suspend-interruptible.html
* igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render:
- shard-kbl: [FAIL][75] ([i915#49]) -> [PASS][76]
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl7/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render.html
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl4/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render.html
- shard-apl: [FAIL][77] ([i915#49]) -> [PASS][78]
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render.html
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render.html
- shard-glk: [FAIL][79] ([i915#49]) -> [PASS][80]
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-glk3/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render.html
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-glk8/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render.html
* igt@kms_plane_lowres@pipe-a-tiling-x:
- shard-glk: [FAIL][81] ([i915#899]) -> [PASS][82]
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-glk4/igt@kms_plane_lowres@pipe-a-tiling-x.html
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-glk5/igt@kms_plane_lowres@pipe-a-tiling-x.html
* igt@kms_psr@psr2_cursor_plane_move:
- shard-iclb: [SKIP][83] ([fdo#109441]) -> [PASS][84] +1 similar issue
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-iclb1/igt@kms_psr@psr2_cursor_plane_move.html
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html
* {igt@perf@blocking-parameterized}:
- shard-kbl: [FAIL][85] ([i915#1542]) -> [PASS][86]
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl4/igt@perf@blocking-parameterized.html
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl3/igt@perf@blocking-parameterized.html
- shard-hsw: [FAIL][87] ([i915#1542]) -> [PASS][88]
[87]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-hsw6/igt@perf@blocking-parameterized.html
[88]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-hsw7/igt@perf@blocking-parameterized.html
* igt@perf@gen12-mi-rpc:
- shard-tglb: [FAIL][89] ([i915#1085]) -> [PASS][90]
[89]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-tglb1/igt@perf@gen12-mi-rpc.html
[90]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-tglb7/igt@perf@gen12-mi-rpc.html
#### Warnings ####
* igt@i915_pm_dc@dc6-dpms:
- shard-tglb: [SKIP][91] ([i915#468]) -> [FAIL][92] ([i915#454])
[91]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-tglb2/igt@i915_pm_dc@dc6-dpms.html
[92]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-tglb5/igt@i915_pm_dc@dc6-dpms.html
* igt@kms_content_protection@uevent:
- shard-kbl: [FAIL][93] ([i915#357] / [i915#93] / [i915#95]) -> [FAIL][94] ([i915#357])
[93]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl6/igt@kms_content_protection@uevent.html
[94]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl7/igt@kms_content_protection@uevent.html
- shard-apl: [FAIL][95] ([i915#357] / [i915#95]) -> [FAIL][96] ([i915#357])
[95]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl8/igt@kms_content_protection@uevent.html
[96]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl7/igt@kms_content_protection@uevent.html
* igt@kms_plane_alpha_blend@pipe-b-constant-alpha-max:
- shard-apl: [FAIL][97] ([fdo#108145] / [i915#265]) -> [FAIL][98] ([fdo#108145] / [i915#265] / [i915#95])
[97]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-apl3/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-max.html
[98]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-apl3/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-max.html
- shard-kbl: [FAIL][99] ([fdo#108145] / [i915#265]) -> [FAIL][100] ([fdo#108145] / [i915#265] / [i915#93] / [i915#95])
[99]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5572/shard-kbl1/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-max.html
[100]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/shard-kbl6/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-max.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
[fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
[fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
[fdo#112015]: https://bugs.freedesktop.org/show_bug.cgi?id=112015
[i915#1085]: https://gitlab.freedesktop.org/drm/intel/issues/1085
[i915#1316]: https://gitlab.freedesktop.org/drm/intel/issues/1316
[i915#1459]: https://gitlab.freedesktop.org/drm/intel/issues/1459
[i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
[i915#1566]: https://gitlab.freedesktop.org/drm/intel/issues/1566
[i915#177]: https://gitlab.freedesktop.org/drm/intel/issues/177
[i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
[i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
[i915#357]: https://gitlab.freedesktop.org/drm/intel/issues/357
[i915#433]: https://gitlab.freedesktop.org/drm/intel/issues/433
[i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454
[i915#468]: https://gitlab.freedesktop.org/drm/intel/issues/468
[i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
[i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
[i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
[i915#579]: https://gitlab.freedesktop.org/drm/intel/issues/579
[i915#61]: https://gitlab.freedesktop.org/drm/intel/issues/61
[i915#70]: https://gitlab.freedesktop.org/drm/intel/issues/70
[i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
[i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82
[i915#899]: https://gitlab.freedesktop.org/drm/intel/issues/899
[i915#93]: https://gitlab.freedesktop.org/drm/intel/issues/93
[i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95
Participating hosts (8 -> 8)
------------------------------
No changes in participating hosts
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5572 -> IGTPW_4409
CI-20190529: 20190529
CI_DRM_8259: 450fc86b62651336f9b5fde79c068df7b4c95aa4 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_4409: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/index.html
IGT_5572: 6c124b5c8501d900966c033ac86c3dc55c16a2da @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4409/index.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available
2020-04-06 9:25 [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Arjun Melkaveri
` (2 preceding siblings ...)
2020-04-06 15:27 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
@ 2020-04-06 15:29 ` Chris Wilson
2020-04-06 15:44 ` Melkaveri, Arjun
3 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2020-04-06 15:29 UTC (permalink / raw)
To: arjun.melkaveri, igt-dev
Quoting Arjun Melkaveri (2020-04-06 10:25:49)
> Replaced the legacy for_each_engine* defines with the ones
> implemented in the gem_engine_topology library.
>
> Used gem_context_clone_with_engines
> to make sure that engine index was potentially created
> based on a default context with engine map configured.
>
> Added gem_reopen_driver and gem_context_copy_engines
> to transfer the engine map from parent fd default
> context.
>
> V2:
> Added Legacy engine coverage for sync_ring and sync_all.
>
> Cc: Dec Katarzyna <katarzyna.dec@intel.com>
> Cc: Ursulin Tvrtko <tvrtko.ursulin@intel.com>
> Signed-off-by: sai gowtham <sai.gowtham.ch@intel.com>
> Signed-off-by: Arjun Melkaveri <arjun.melkaveri@intel.com>
> ---
> tests/i915/gem_sync.c | 566 +++++++++++++++++++++++++-----------------
> 1 file changed, 343 insertions(+), 223 deletions(-)
>
> diff --git a/tests/i915/gem_sync.c b/tests/i915/gem_sync.c
> index 2ef55ecc..8efa0668 100644
> --- a/tests/i915/gem_sync.c
> +++ b/tests/i915/gem_sync.c
> @@ -79,52 +79,56 @@ out:
> }
>
> static void
> -sync_ring(int fd, unsigned ring, int num_children, int timeout)
> +sync_ring(int fd, const struct intel_execution_engine2 *e,
> + int num_children, int timeout)
> {
ALL_ENGINES is compatible with the engine index interface. That will
greatly reduce the churn and eyesore.
> + const struct intel_execution_engine2 *e2;
> unsigned engines[16];
> const char *names[16];
> int num_engines = 0;
>
> - if (ring == ALL_ENGINES) {
> - for_each_physical_engine(e, fd) {
> - names[num_engines] = e->name;
> - engines[num_engines++] = eb_ring(e);
> + if (!e) {
> + __for_each_physical_engine(fd, e2) {
> + names[num_engines] = e2->name;
> + engines[num_engines++] = e2->flags;
> if (num_engines == ARRAY_SIZE(engines))
> break;
> }
>
> num_children *= num_engines;
> } else {
> - gem_require_ring(fd, ring);
> names[num_engines] = NULL;
> - engines[num_engines++] = ring;
> + engines[num_engines++] = e->flags;
> }
>
> intel_detect_and_clear_missed_interrupts(fd);
> igt_fork(child, num_children) {
> + int i915;
> const uint32_t bbe = MI_BATCH_BUFFER_END;
> struct drm_i915_gem_exec_object2 object;
> struct drm_i915_gem_execbuffer2 execbuf;
> double start, elapsed;
> unsigned long cycles;
>
> + i915 = gem_reopen_driver(fd);
> + gem_context_copy_engines(fd, 0, i915, 0);
Please do randomly, completely and utterly change the test to do
something else than was originally intended.
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available
2020-04-06 15:29 ` [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Chris Wilson
@ 2020-04-06 15:44 ` Melkaveri, Arjun
0 siblings, 0 replies; 8+ messages in thread
From: Melkaveri, Arjun @ 2020-04-06 15:44 UTC (permalink / raw)
To: Chris Wilson, igt-dev@lists.freedesktop.org
-----Original Message-----
From: Chris Wilson <chris@chris-wilson.co.uk>
Sent: Monday, April 6, 2020 9:00 PM
To: Melkaveri, Arjun <arjun.melkaveri@intel.com>; igt-dev@lists.freedesktop.org
Subject: Re: [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available
Quoting Arjun Melkaveri (2020-04-06 10:25:49)
> Replaced the legacy for_each_engine* defines with the ones implemented
> in the gem_engine_topology library.
>
> Used gem_context_clone_with_engines
> to make sure that engine index was potentially created based on a
> default context with engine map configured.
>
> Added gem_reopen_driver and gem_context_copy_engines to transfer the
> engine map from parent fd default context.
>
> V2:
> Added Legacy engine coverage for sync_ring and sync_all.
>
> Cc: Dec Katarzyna <katarzyna.dec@intel.com>
> Cc: Ursulin Tvrtko <tvrtko.ursulin@intel.com>
> Signed-off-by: sai gowtham <sai.gowtham.ch@intel.com>
> Signed-off-by: Arjun Melkaveri <arjun.melkaveri@intel.com>
> ---
> tests/i915/gem_sync.c | 566
> +++++++++++++++++++++++++-----------------
> 1 file changed, 343 insertions(+), 223 deletions(-)
>
> diff --git a/tests/i915/gem_sync.c b/tests/i915/gem_sync.c index
> 2ef55ecc..8efa0668 100644
> --- a/tests/i915/gem_sync.c
> +++ b/tests/i915/gem_sync.c
> @@ -79,52 +79,56 @@ out:
> }
>
> static void
> -sync_ring(int fd, unsigned ring, int num_children, int timeout)
> +sync_ring(int fd, const struct intel_execution_engine2 *e,
> + int num_children, int timeout)
> {
ALL_ENGINES is compatible with the engine index interface. That will greatly reduce the churn and eyesore.
> + const struct intel_execution_engine2 *e2;
> unsigned engines[16];
> const char *names[16];
> int num_engines = 0;
>
> - if (ring == ALL_ENGINES) {
> - for_each_physical_engine(e, fd) {
> - names[num_engines] = e->name;
> - engines[num_engines++] = eb_ring(e);
> + if (!e) {
> + __for_each_physical_engine(fd, e2) {
> + names[num_engines] = e2->name;
> + engines[num_engines++] = e2->flags;
> if (num_engines == ARRAY_SIZE(engines))
> break;
> }
>
> num_children *= num_engines;
> } else {
> - gem_require_ring(fd, ring);
> names[num_engines] = NULL;
> - engines[num_engines++] = ring;
> + engines[num_engines++] = e->flags;
> }
>
> intel_detect_and_clear_missed_interrupts(fd);
> igt_fork(child, num_children) {
> + int i915;
> const uint32_t bbe = MI_BATCH_BUFFER_END;
> struct drm_i915_gem_exec_object2 object;
> struct drm_i915_gem_execbuffer2 execbuf;
> double start, elapsed;
> unsigned long cycles;
>
> + i915 = gem_reopen_driver(fd);
> + gem_context_copy_engines(fd, 0, i915, 0);
Please do randomly, completely and utterly change the test to do something else than was originally intended.
-Chris
Apologies — as this was an inherited patch, I did not check it.
Will revert to the original code and remove gem_reopen_driver and gem_context_copy_engines.
In line with a comment I received on another change, gem_reopen_driver
shouldn't be used and we can pass the FD directly.
I will also add back ALL_ENGINES.
Thanks
Arjun M
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2020-04-06 15:44 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-06 9:25 [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Arjun Melkaveri
2020-04-06 11:25 ` [igt-dev] ✗ GitLab.Pipeline: failure for tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available (rev3) Patchwork
2020-04-06 11:33 ` [igt-dev] ✓ Fi.CI.BAT: success " Patchwork
2020-04-06 15:27 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
2020-04-06 15:29 ` [igt-dev] [PATCH] [PATCH i-g-t][V2]tests/i915/gem_sync.c :Added __for_each_physical_engine to utilize all available Chris Wilson
2020-04-06 15:44 ` Melkaveri, Arjun
-- strict thread matches above, loose matches on Subject: below --
2020-04-06 9:12 Arjun Melkaveri
2020-04-06 9:19 ` Melkaveri, Arjun
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox