public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH igt 1/2] igt: Add test case for EXEC_OBJECT_ASYNC
@ 2016-08-30 10:00 Chris Wilson
  2016-08-30 10:00 ` [PATCH igt 2/2] igt: Add exerciser for execbuf fence-out <-> fence-in Chris Wilson
  2016-08-31 13:07 ` [PATCH igt 1/2] igt: Add test case for EXEC_OBJECT_ASYNC Joonas Lahtinen
  0 siblings, 2 replies; 4+ messages in thread
From: Chris Wilson @ 2016-08-30 10:00 UTC (permalink / raw)
  To: intel-gfx

The intention behind EXEC_OBJECT_ASYNC is to instruct the kernel to
ignore implicit fences on the object but still maintain them for the GEM
API. The user is expected to provide explicit fencing to maintain
correct ordering of rendering.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/Makefile.sources |   1 +
 tests/gem_exec_async.c | 218 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 219 insertions(+)
 create mode 100644 tests/gem_exec_async.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 6cdfea3..4255fda 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -31,6 +31,7 @@ TESTS_progs_M = \
 	gem_evict_alignment \
 	gem_evict_everything \
 	gem_exec_alignment \
+	gem_exec_async \
 	gem_exec_bad_domains \
 	gem_exec_basic \
 	gem_exec_create \
diff --git a/tests/gem_exec_async.c b/tests/gem_exec_async.c
new file mode 100644
index 0000000..31c390f
--- /dev/null
+++ b/tests/gem_exec_async.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+
+#define LOCAL_OBJECT_ASYNC (1 << 6)
+#define LOCAL_PARAM_HAS_EXEC_ASYNC 41
+
+IGT_TEST_DESCRIPTION("Check that we can issue concurrent writes across the engines.");
+
+/* Write @value into @target at byte @offset from @ring, marking @target
+ * with EXEC_OBJECT_ASYNC so the kernel skips its implicit fence on the
+ * object; ordering becomes this test's responsibility.
+ */
+static void store_dword(int fd, unsigned ring,
+			uint32_t target, uint32_t offset, uint32_t value)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	uint32_t batch[16];
+	int i;
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = (uintptr_t)obj;
+	execbuf.buffer_count = 2;
+	execbuf.flags = ring;
+	/* Pre-SNB, MI_STORE_DWORD_IMM requires a secure (privileged) batch */
+	if (gen < 6)
+		execbuf.flags |= I915_EXEC_SECURE;
+
+	memset(obj, 0, sizeof(obj));
+	obj[0].handle = target;
+	obj[0].flags = LOCAL_OBJECT_ASYNC; /* the flag under test */
+	obj[1].handle = gem_create(fd, 4096);
+
+	/* Relocate the store address to @target + @offset */
+	memset(&reloc, 0, sizeof(reloc));
+	reloc.target_handle = obj[0].handle;
+	reloc.presumed_offset = 0;
+	reloc.offset = sizeof(uint32_t);
+	reloc.delta = offset;
+	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+	reloc.write_domain = I915_GEM_DOMAIN_INSTRUCTION;
+	obj[1].relocs_ptr = (uintptr_t)&reloc;
+	obj[1].relocation_count = 1;
+
+	/* Gen-specific MI_STORE_DWORD_IMM operand layout: gen8+ takes a
+	 * two-dword address, gen4-7 an extra zero dword (shifting the
+	 * reloc), earlier gens a shorter encoding.
+	 */
+	i = 0;
+	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+	if (gen >= 8) {
+		batch[++i] = offset;
+		batch[++i] = 0;
+	} else if (gen >= 4) {
+		batch[++i] = 0;
+		batch[++i] = offset;
+		reloc.offset += sizeof(uint32_t);
+	} else {
+		batch[i]--;
+		batch[++i] = offset;
+	}
+	batch[++i] = value;
+	batch[++i] = MI_BATCH_BUFFER_END;
+	gem_write(fd, obj[1].handle, 0, batch, sizeof(batch));
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, obj[1].handle);
+}
+
+/* Hold a write hazard on a scratch object with a non-terminating batch
+ * on @ring, then issue ASYNC stores into that scratch from every other
+ * engine. If EXEC_OBJECT_ASYNC works, the stores are not serialised
+ * behind the spinner; if it blocks, we hang the GPU (caught by the
+ * hang detector).
+ */
+static void one(int fd, unsigned ring, uint32_t flags)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	struct drm_i915_gem_exec_object2 obj[2];
+#define SCRATCH 0
+#define BATCH 1
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	uint32_t *batch;
+	int i;
+
+	/* On the target ring, create a looping batch that marks
+	 * the scratch for write. Then on the other rings try and
+	 * write into that target. If it blocks we hang the GPU...
+	 */
+
+	memset(obj, 0, sizeof(obj));
+	obj[SCRATCH].handle = gem_create(fd, 4096);
+
+	obj[BATCH].handle = gem_create(fd, 4096);
+	obj[BATCH].relocs_ptr = (uintptr_t)&reloc;
+	obj[BATCH].relocation_count = 1;
+
+	memset(&reloc, 0, sizeof(reloc));
+	reloc.target_handle = obj[BATCH].handle; /* recurse */
+	reloc.presumed_offset = 0;
+	reloc.offset = sizeof(uint32_t);
+	reloc.delta = 0;
+	reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
+	reloc.write_domain = 0;
+
+	batch = gem_mmap__wc(fd, obj[BATCH].handle, 0, 4096, PROT_WRITE);
+	gem_set_domain(fd, obj[BATCH].handle,
+			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+
+	/* MI_BATCH_BUFFER_START jumping back to itself: spins until we
+	 * overwrite the first dword with MI_BATCH_BUFFER_END below.
+	 * Operand/flag layout differs per gen.
+	 */
+	i = 0;
+	batch[i] = MI_BATCH_BUFFER_START;
+	if (gen >= 8) {
+		batch[i] |= 1 << 8 | 1;
+		batch[++i] = 0;
+		batch[++i] = 0;
+	} else if (gen >= 6) {
+		batch[i] |= 1 << 8;
+		batch[++i] = 0;
+	} else {
+		batch[i] |= 2 << 6;
+		batch[++i] = 0;
+		if (gen < 4) {
+			batch[i] |= 1;
+			reloc.delta = 1;
+		}
+	}
+	i++;
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = (uintptr_t)obj;
+	execbuf.buffer_count = 2;
+	execbuf.flags = ring | flags;
+	igt_require(__gem_execbuf(fd, &execbuf) == 0);
+	gem_close(fd, obj[BATCH].handle);
+
+	/* One ASYNC store per foreign engine; each writes its index. */
+	i = 0;
+	for (const struct intel_execution_engine *e = intel_execution_engines;
+	     e->name; e++) {
+		if (e->exec_id == 0 || e->exec_id == ring)
+			continue;
+
+		/* NOTE(review): SNB BSD is skipped — presumably it cannot
+		 * execute our store_dword batch; confirm against gem_exec_store.
+		 */
+		if (e->exec_id == I915_EXEC_BSD && gen == 6)
+			continue;
+
+		if (!gem_has_ring(fd, e->exec_id | e->flags))
+			continue;
+
+		store_dword(fd, e->exec_id | e->flags,
+			    obj[SCRATCH].handle, 4*i, i);
+		i++;
+	}
+
+	/* Terminate the spinner before waiting for the stores. */
+	*batch = MI_BATCH_BUFFER_END;
+	__sync_synchronize();
+	munmap(batch, 4096);
+
+	batch = gem_mmap__wc(fd, obj[SCRATCH].handle, 0, 4096, PROT_READ);
+	/* The kernel only tracks the last *submitted* write (but all reads),
+	 * so to ensure *all* rings are flushed, we flush all reads even
+	 * though we only need read access for ourselves.
+	 */
+	gem_set_domain(fd, obj[SCRATCH].handle,
+		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	gem_close(fd, obj[SCRATCH].handle);
+	while (i--)
+		igt_assert_eq_u32(batch[i], i);
+	munmap(batch, 4096);
+}
+
+/* Probe the kernel for EXEC_OBJECT_ASYNC support via GETPARAM;
+ * returns false on old kernels where the ioctl rejects the param
+ * and @async stays at its -1 sentinel.
+ */
+static bool has_async_execbuf(int fd)
+{
+	drm_i915_getparam_t gp;
+	int async = -1;
+
+	/* Zero the struct first (consistent with gem_has_exec_fence in
+	 * gem_exec_fence.c) so no field is passed uninitialized.
+	 */
+	memset(&gp, 0, sizeof(gp));
+	gp.param = LOCAL_PARAM_HAS_EXEC_ASYNC;
+	gp.value = &async;
+	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+	return async > 0;
+}
+
+igt_main
+{
+	const struct intel_execution_engine *e;
+	int fd = -1;
+
+	igt_skip_on_simulation();
+
+	igt_fixture {
+		fd = drm_open_driver_master(DRIVER_INTEL);
+		gem_require_mmap_wc(fd);
+		igt_require(has_async_execbuf(fd));
+		/* Inside the fixture: otherwise this runs during subtest
+		 * enumeration too, with fd still -1.
+		 */
+		igt_fork_hang_detector(fd);
+	}
+
+	for (e = intel_execution_engines; e->name; e++) {
+		/* default exec-id is purely symbolic */
+		if (e->exec_id == 0)
+			continue;
+
+		igt_subtest_f("concurrent-writes-%s", e->name)
+			one(fd, e->exec_id, e->flags);
+	}
+
+	igt_fixture {
+		igt_stop_hang_detector();
+		close(fd);
+	}
+}
-- 
2.9.3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH igt 2/2] igt: Add exerciser for execbuf fence-out <-> fence-in
  2016-08-30 10:00 [PATCH igt 1/2] igt: Add test case for EXEC_OBJECT_ASYNC Chris Wilson
@ 2016-08-30 10:00 ` Chris Wilson
  2016-09-01 13:20   ` Joonas Lahtinen
  2016-08-31 13:07 ` [PATCH igt 1/2] igt: Add test case for EXEC_OBJECT_ASYNC Joonas Lahtinen
  1 sibling, 1 reply; 4+ messages in thread
From: Chris Wilson @ 2016-08-30 10:00 UTC (permalink / raw)
  To: intel-gfx

When execbuf2 supports explicit fencing with sync_file in/out fences
(via a fence-fd), we can control execution via the fence.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/Makefile.sources |   1 +
 tests/gem_exec_fence.c | 366 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 367 insertions(+)
 create mode 100644 tests/gem_exec_fence.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 4255fda..ea28dcd 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -36,6 +36,7 @@ TESTS_progs_M = \
 	gem_exec_basic \
 	gem_exec_create \
 	gem_exec_faulting_reloc \
+	gem_exec_fence \
 	gem_exec_flush \
 	gem_exec_gttfill \
 	gem_exec_nop \
diff --git a/tests/gem_exec_fence.c b/tests/gem_exec_fence.c
new file mode 100644
index 0000000..bbf603c
--- /dev/null
+++ b/tests/gem_exec_fence.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+
+#include <sys/ioctl.h>
+#include <sys/poll.h>
+
+IGT_TEST_DESCRIPTION("Check that execbuf waits for explicit fences");
+
+#define LOCAL_PARAM_HAS_EXEC_FENCE 42
+#define LOCAL_EXEC_FENCE_IN (1 << 16)
+#define LOCAL_EXEC_FENCE_OUT (1 << 17)
+#define LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR       DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+
+/* Queue a store of @offset_value into dword index @offset_value of
+ * @target on @ring, supplying @fence (a sync_file fd) as an explicit
+ * fence-in: the store must not execute before @fence signals.
+ */
+static void store(int fd, unsigned ring, int fence, uint32_t target, unsigned offset_value)
+{
+	const int SCRATCH = 0;
+	const int BATCH = 1;
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	uint32_t batch[16];
+	int i;
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = (uintptr_t)obj;
+	execbuf.buffer_count = 2;
+	execbuf.flags = ring | LOCAL_EXEC_FENCE_IN;
+	execbuf.rsvd2 = fence; /* in-fence fd is passed through rsvd2 */
+	/* Pre-SNB, MI_STORE_DWORD_IMM requires a secure (privileged) batch */
+	if (gen < 6)
+		execbuf.flags |= I915_EXEC_SECURE;
+
+	memset(obj, 0, sizeof(obj));
+	obj[SCRATCH].handle = target;
+
+	obj[BATCH].handle = gem_create(fd, 4096);
+	obj[BATCH].relocs_ptr = (uintptr_t)&reloc;
+	obj[BATCH].relocation_count = 1;
+	memset(&reloc, 0, sizeof(reloc));
+
+	/* Gen-specific MI_STORE_DWORD_IMM operand layout; the reloc patches
+	 * the store address to @target + 4*@offset_value. presumed_offset=-1
+	 * forces the kernel to perform the relocation.
+	 */
+	i = 0;
+	reloc.target_handle = obj[SCRATCH].handle;
+	reloc.presumed_offset = -1;
+	reloc.offset = sizeof(uint32_t) * (i + 1);
+	reloc.delta = sizeof(uint32_t) * offset_value;
+	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+	reloc.write_domain = I915_GEM_DOMAIN_INSTRUCTION;
+	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+	if (gen >= 8) {
+		batch[++i] = reloc.delta;
+		batch[++i] = 0;
+	} else if (gen >= 4) {
+		batch[++i] = 0;
+		batch[++i] = reloc.delta;
+		reloc.offset += sizeof(uint32_t);
+	} else {
+		batch[i]--;
+		batch[++i] = reloc.delta;
+	}
+	batch[++i] = offset_value;
+	batch[++i] = MI_BATCH_BUFFER_END;
+	gem_write(fd, obj[BATCH].handle, 0, batch, sizeof(batch));
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, obj[BATCH].handle);
+}
+
+/* Invoke the read-write (_WR) execbuf2 ioctl so the kernel can return
+ * an out-fence in rsvd2; returns 0 on success or -errno on failure,
+ * always leaving errno cleared.
+ */
+static int __gem_execbuf_wr(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
+{
+	int ret;
+
+	ret = igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf) ? -errno : 0;
+	errno = 0;
+	return ret;
+}
+
+/* Asserting variant: any failure of the _WR execbuf aborts the test. */
+static void gem_execbuf_wr(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
+{
+	int err = __gem_execbuf_wr(fd, execbuf);
+
+	igt_assert_eq(err, 0);
+}
+
+/* A sync_file fd polls readable once signaled; with a zero timeout,
+ * poll() returning 0 means the fence is still busy.
+ */
+static bool fence_busy(int fence)
+{
+	struct pollfd pd;
+
+	memset(&pd, 0, sizeof(pd));
+	pd.fd = fence;
+	pd.events = POLLIN;
+
+	return poll(&pd, 1, 0) == 0;
+}
+
+#define HANG 0x1
+#define NONBLOCK 0x2
+
+/* Submit a self-spinning batch with LOCAL_EXEC_FENCE_OUT and verify the
+ * returned sync_file reports busy exactly while the batch runs: until we
+ * terminate it, or (with HANG) until hangcheck kills it.
+ */
+static void test_fence_busy(int fd, unsigned ring, unsigned flags)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	struct drm_i915_gem_exec_object2 obj;
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct timespec tv;
+	uint32_t *batch;
+	int fence, i, timeout;
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = (uintptr_t)&obj;
+	execbuf.buffer_count = 1;
+	execbuf.flags = ring | LOCAL_EXEC_FENCE_OUT;
+
+	memset(&obj, 0, sizeof(obj));
+	obj.handle = gem_create(fd, 4096);
+
+	obj.relocs_ptr = (uintptr_t)&reloc;
+	obj.relocation_count = 1;
+	memset(&reloc, 0, sizeof(reloc));
+
+	batch = gem_mmap__wc(fd, obj.handle, 0, 4096, PROT_WRITE);
+	gem_set_domain(fd, obj.handle,
+			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+
+	reloc.target_handle = obj.handle; /* recurse */
+	reloc.presumed_offset = 0;
+	reloc.offset = sizeof(uint32_t);
+	reloc.delta = 0;
+	reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
+	reloc.write_domain = 0;
+
+	/* Self-referencing MI_BATCH_BUFFER_START: spins until the first
+	 * dword is rewritten as MI_BATCH_BUFFER_END below.
+	 */
+	i = 0;
+	batch[i] = MI_BATCH_BUFFER_START;
+	if (gen >= 8) {
+		batch[i] |= 1 << 8 | 1;
+		batch[++i] = 0;
+		batch[++i] = 0;
+	} else if (gen >= 6) {
+		batch[i] |= 1 << 8;
+		batch[++i] = 0;
+	} else {
+		batch[i] |= 2 << 6;
+		batch[++i] = 0;
+		if (gen < 4) {
+			batch[i] |= 1;
+			reloc.delta = 1;
+		}
+	}
+	i++;
+
+	execbuf.rsvd2 = -1;
+	gem_execbuf_wr(fd, &execbuf);
+	fence = execbuf.rsvd2 >> 32; /* out-fence fd is the upper 32 bits */
+	igt_assert(fence != -1);
+
+	/* While the batch spins, both the bo and the fence must be busy. */
+	igt_assert(gem_bo_busy(fd, obj.handle));
+	igt_assert(fence_busy(fence));
+
+	/* Generous 120s for the HANG case: we wait for hangcheck recovery. */
+	timeout = 120;
+	if ((flags & HANG) == 0) {
+		*batch = MI_BATCH_BUFFER_END;
+		__sync_synchronize();
+		timeout = 1;
+	}
+	munmap(batch, 4096);
+
+	memset(&tv, 0, sizeof(tv));
+	while (fence_busy(fence))
+		igt_assert(igt_seconds_elapsed(&tv) < timeout);
+
+	igt_assert(!gem_bo_busy(fd, obj.handle));
+
+	close(fence);
+	gem_close(fd, obj.handle);
+}
+
+/* Submit a self-spinning batch with a fence-out on @ring, then queue a
+ * store on every engine gated on that fence as a fence-in. Verify no
+ * store lands before the spinner terminates, and that all land after
+ * (or none at all if we leave it to HANG).
+ */
+static void test_fence_wait(int fd, unsigned ring, unsigned flags)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	struct drm_i915_gem_exec_object2 obj;
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	uint32_t scratch = gem_create(fd, 4096);
+	uint32_t *batch, *out;
+	unsigned engine;
+	int fence, i;
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = (uintptr_t)&obj;
+	execbuf.buffer_count = 1;
+	execbuf.flags = ring | LOCAL_EXEC_FENCE_OUT;
+
+	memset(&obj, 0, sizeof(obj));
+	obj.handle = gem_create(fd, 4096);
+
+	obj.relocs_ptr = (uintptr_t)&reloc;
+	obj.relocation_count = 1;
+	memset(&reloc, 0, sizeof(reloc));
+
+	out = gem_mmap__wc(fd, scratch, 0, 4096, PROT_WRITE);
+	/* Fix: flush the object we just mapped (was obj.handle, a
+	 * copy-paste slip; the batch bo is flushed below after its map).
+	 */
+	gem_set_domain(fd, scratch,
+			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+
+	batch = gem_mmap__wc(fd, obj.handle, 0, 4096, PROT_WRITE);
+	gem_set_domain(fd, obj.handle,
+			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+
+	reloc.target_handle = obj.handle; /* recurse */
+	reloc.presumed_offset = 0;
+	reloc.offset = sizeof(uint32_t);
+	reloc.delta = 0;
+	reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
+	reloc.write_domain = 0;
+
+	/* Self-referencing MI_BATCH_BUFFER_START: spins until rewritten. */
+	i = 0;
+	batch[i] = MI_BATCH_BUFFER_START;
+	if (gen >= 8) {
+		batch[i] |= 1 << 8 | 1;
+		batch[++i] = 0;
+		batch[++i] = 0;
+	} else if (gen >= 6) {
+		batch[i] |= 1 << 8;
+		batch[++i] = 0;
+	} else {
+		batch[i] |= 2 << 6;
+		batch[++i] = 0;
+		if (gen < 4) {
+			batch[i] |= 1;
+			reloc.delta = 1;
+		}
+	}
+	i++;
+
+	execbuf.rsvd2 = -1;
+	gem_execbuf_wr(fd, &execbuf);
+	gem_close(fd, obj.handle);
+	fence = execbuf.rsvd2 >> 32; /* out-fence fd in the upper 32 bits */
+	igt_assert(fence != -1);
+
+	/* One fence-gated store per engine, each to its own dword. With
+	 * NONBLOCK we call synchronously (execbuf must queue, not block);
+	 * otherwise fork so a blocking execbuf cannot stall the parent.
+	 */
+	i = 0;
+	for_each_engine(fd, engine) {
+		if (flags & NONBLOCK) {
+			store(fd, engine, fence, scratch, i);
+		} else {
+			igt_fork(child, 1)
+				store(fd, engine, fence, scratch, i);
+		}
+		i++; /* fix: was missing, leaving both checks below no-ops */
+	}
+	close(fence);
+
+	sleep(1);
+
+	/* Check for invalidly completing the task early */
+	for (int n = 0; n < i; n++)
+		igt_assert_eq_u32(out[n], 0);
+
+	if ((flags & HANG) == 0) {
+		*batch = MI_BATCH_BUFFER_END;
+		__sync_synchronize();
+	}
+	munmap(batch, 4096);
+
+	igt_waitchildren();
+
+	/* After the fence signals every store must have landed (with HANG,
+	 * the reset cancels them and the scratch stays zero).
+	 */
+	gem_set_domain(fd, scratch, I915_GEM_DOMAIN_GTT, 0);
+	while (i--)
+		igt_assert_eq_u32(out[i], flags & HANG ? 0 : i);
+	munmap(out, 4096);
+	gem_close(fd, scratch);
+}
+
+/* Placeholder: atomic/page-flip paths cannot consume an execbuf
+ * fence-in yet, so this subtest always skips.
+ */
+static void test_fence_flip(int i915)
+{
+	igt_skip_on_f(1, "no fence-in for atomic flips\n");
+}
+
+/* Probe the kernel for execbuf fence-in/out support via GETPARAM;
+ * on old kernels the ioctl fails and the -1 sentinel yields false.
+ */
+static bool gem_has_exec_fence(int fd)
+{
+	struct drm_i915_getparam gp = { .param = LOCAL_PARAM_HAS_EXEC_FENCE };
+	int result = -1;
+
+	gp.value = &result;
+	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+	return result > 0;
+}
+
+igt_main
+{
+	const struct intel_execution_engine *e;
+	bool can_hang = false;
+	int i915 = -1;
+
+	igt_skip_on_simulation();
+
+	igt_fixture {
+		struct local_i915_gem_context_param param;
+
+		i915 = drm_open_driver_master(DRIVER_INTEL);
+		igt_require(gem_has_exec_fence(i915));
+		gem_require_mmap_wc(i915);
+
+		/* Zero the context ban period so the hang subtests can leave
+		 * a spinning batch to be reset without getting us banned;
+		 * if the kernel refuses, those subtests skip via can_hang.
+		 */
+		memset(&param, 0, sizeof(param));
+		param.param = LOCAL_CONTEXT_PARAM_BAN_PERIOD;
+		param.value = 0;
+		can_hang =  __gem_context_set_param(i915, &param) == 0;
+	}
+
+	for (e = intel_execution_engines; e->name; e++) {
+		/* exec_id == 0 is the symbolic default engine: its busy/wait
+		 * variants are promoted to "basic-" smoke tests.
+		 */
+		igt_subtest_f("%sbusy-%s",
+			      e->exec_id == 0 ? "basic-" : "",
+			      e->name) {
+			gem_quiescent_gpu(i915);
+			test_fence_busy(i915, e->exec_id | e->flags, 0);
+		}
+
+		igt_subtest_f("%swait-%s",
+			      e->exec_id == 0 ? "basic-" : "",
+			      e->name) {
+			gem_quiescent_gpu(i915);
+			test_fence_wait(i915, e->exec_id | e->flags, 0);
+		}
+
+		igt_subtest_f("nb-wait-%s", e->name) {
+			igt_require(can_hang);
+			gem_quiescent_gpu(i915);
+			test_fence_wait(i915, e->exec_id | e->flags, NONBLOCK);
+		}
+
+		igt_subtest_f("busy-hang-%s", e->name) {
+			igt_require(can_hang);
+			gem_quiescent_gpu(i915);
+			test_fence_busy(i915, e->exec_id | e->flags, HANG);
+		}
+		igt_subtest_f("wait-hang-%s", e->name) {
+			igt_require(can_hang);
+			gem_quiescent_gpu(i915);
+			test_fence_wait(i915, e->exec_id | e->flags, HANG);
+		}
+	}
+
+	igt_subtest("flip") {
+		gem_quiescent_gpu(i915);
+		test_fence_flip(i915);
+	}
+
+	igt_fixture {
+		close(i915);
+	}
+}
-- 
2.9.3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH igt 1/2] igt: Add test case for EXEC_OBJECT_ASYNC
  2016-08-30 10:00 [PATCH igt 1/2] igt: Add test case for EXEC_OBJECT_ASYNC Chris Wilson
  2016-08-30 10:00 ` [PATCH igt 2/2] igt: Add exerciser for execbuf fence-out <-> fence-in Chris Wilson
@ 2016-08-31 13:07 ` Joonas Lahtinen
  1 sibling, 0 replies; 4+ messages in thread
From: Joonas Lahtinen @ 2016-08-31 13:07 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ti, 2016-08-30 at 11:00 +0100, Chris Wilson wrote:
> +static void store_dword(int fd, unsigned ring,
> +			uint32_t target, uint32_t offset, uint32_t value)
> +{

I'd prefer a library function (as you obviously copied this from
gem_exec_store.c), as we have more similar functions (with more or less
features) in other tests. Some of them are not even using hardcoded
magic numbers.

Adding as TODO for future.

Other than that,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Should this not be a "basic" test?

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH igt 2/2] igt: Add exerciser for execbuf fence-out <-> fence-in
  2016-08-30 10:00 ` [PATCH igt 2/2] igt: Add exerciser for execbuf fence-out <-> fence-in Chris Wilson
@ 2016-09-01 13:20   ` Joonas Lahtinen
  0 siblings, 0 replies; 4+ messages in thread
From: Joonas Lahtinen @ 2016-09-01 13:20 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2016-09-01 13:20 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-08-30 10:00 [PATCH igt 1/2] igt: Add test case for EXEC_OBJECT_ASYNC Chris Wilson
2016-08-30 10:00 ` [PATCH igt 2/2] igt: Add exerciser for execbuf fence-out <-> fence-in Chris Wilson
2016-09-01 13:20   ` Joonas Lahtinen
2016-08-31 13:07 ` [PATCH igt 1/2] igt: Add test case for EXEC_OBJECT_ASYNC Joonas Lahtinen

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox