public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
* Some unreviewed selftests
@ 2017-02-13 12:06 Chris Wilson
  2017-02-13 12:06 ` [PATCH 1/3] drm/i915: Live testing for context execution Chris Wilson
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Chris Wilson @ 2017-02-13 12:06 UTC (permalink / raw)
  To: intel-gfx

Just the live context testing to go...
-Chris

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 1/3] drm/i915: Live testing for context execution
  2017-02-13 12:06 Some unreviewed selftests Chris Wilson
@ 2017-02-13 12:06 ` Chris Wilson
  2017-02-13 14:20   ` Joonas Lahtinen
  2017-02-13 12:06 ` [PATCH 2/3] drm/i915: Extract aliasing ppgtt setup Chris Wilson
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2017-02-13 12:06 UTC (permalink / raw)
  To: intel-gfx

Check that we can create and execute within a context.

v2: Write one set of dwords through each context/engine to exercise more
contexts within the same time period.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_context.c            |   1 +
 drivers/gpu/drm/i915/selftests/i915_gem_context.c  | 400 +++++++++++++++++++++
 .../gpu/drm/i915/selftests/i915_live_selftests.h   |   1 +
 3 files changed, 402 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_context.c

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d27c4050b4c5..c73bf02870d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -1202,4 +1202,5 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_context.c"
+#include "selftests/i915_gem_context.c"
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
new file mode 100644
index 000000000000..364578e4e449
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "huge_gem_object.h"
+
+#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
+
+static struct i915_vma *
+gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
+{
+	struct drm_i915_gem_object *obj;
+	const int gen = INTEL_GEN(vma->vm->i915);
+	unsigned long n;
+	u32 *cmd;
+	int err;
+
+	GEM_BUG_ON(!igt_can_mi_store_dword_imm(vma->vm->i915));
+
+	n = (4*count + 1)*sizeof(u32);
+	obj = i915_gem_object_create_internal(vma->vm->i915,
+					      round_up(n, PAGE_SIZE));
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+	offset += vma->node.start;
+
+	for (n = 0; n < count; n++) {
+		if (gen >= 8) {
+			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
+			*cmd++ = lower_32_bits(offset);
+			*cmd++ = upper_32_bits(offset);
+			*cmd++ = value;
+		} else if (gen >= 4) {
+			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+				(gen < 6 ? 1 << 22 : 0);
+			*cmd++ = 0;
+			*cmd++ = offset;
+			*cmd++ = value;
+		} else {
+			*cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+			*cmd++ = offset;
+			*cmd++ = value;
+		}
+		offset += PAGE_SIZE;
+	}
+	*cmd = MI_BATCH_BUFFER_END;
+	wmb();
+	i915_gem_object_unpin_map(obj);
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (err)
+		goto err;
+
+	vma = i915_vma_instance(obj, vma->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err;
+
+	return vma;
+
+err:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
+static unsigned long real_page_count(struct drm_i915_gem_object *obj)
+{
+	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
+}
+
+static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
+{
+	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
+}
+
+static int gpu_fill(struct drm_i915_gem_object *obj,
+		    struct i915_gem_context *ctx,
+		    struct intel_engine_cs *engine,
+		    unsigned int dw)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_address_space *vm =
+		ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	struct drm_i915_gem_request *rq;
+	struct i915_vma *vma;
+	struct i915_vma *batch;
+	unsigned int flags;
+	int err;
+
+	GEM_BUG_ON(obj->base.size > vm->total);
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (err)
+		return err;
+
+	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
+	if (err)
+		return err;
+
+	/* Within the GTT the huge objects maps every page onto
+	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
+	 * We set the nth dword within the page using the nth
+	 * mapping via the GTT - this should exercise the GTT mapping
+	 * whilst checking that each context provides a unique view
+	 * into the object.
+	 */
+	batch = gpu_fill_dw(vma,
+			    (dw * real_page_count(obj)) << PAGE_SHIFT |
+			    (dw * sizeof(u32)),
+			    real_page_count(obj),
+			    dw);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto err_vma;
+	}
+
+	rq = i915_gem_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_batch;
+	}
+
+	err = engine->emit_flush(rq, EMIT_INVALIDATE);
+	if (err)
+		goto err_request;
+
+	err = i915_switch_context(rq);
+	if (err)
+		goto err_request;
+
+	flags = 0;
+	if (INTEL_GEN(vm->i915) <= 5)
+		flags |= I915_DISPATCH_SECURE;
+
+	err = engine->emit_bb_start(rq,
+				    batch->node.start, batch->node.size,
+				    flags);
+	if (err)
+		goto err_request;
+
+	i915_vma_move_to_active(batch, rq, 0);
+	i915_gem_object_set_active_reference(batch->obj);
+	i915_vma_unpin(batch);
+	i915_vma_close(batch);
+
+	i915_vma_move_to_active(vma, rq, 0);
+	i915_vma_unpin(vma);
+
+	reservation_object_lock(obj->resv, NULL);
+	reservation_object_add_excl_fence(obj->resv, &rq->fence);
+	reservation_object_unlock(obj->resv);
+
+	__i915_add_request(rq, true);
+
+	return 0;
+
+err_request:
+	__i915_add_request(rq, false);
+err_batch:
+	i915_vma_unpin(batch);
+err_vma:
+	i915_vma_unpin(vma);
+	return err;
+}
+
+static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
+{
+	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
+	unsigned int n, m, need_flush;
+	int err;
+
+	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
+	if (err)
+		return err;
+
+	for (n = 0; n < real_page_count(obj); n++) {
+		u32 *map;
+
+		map = kmap_atomic(i915_gem_object_get_page(obj, n));
+		for (m = 0; m < DW_PER_PAGE; m++)
+			map[m] = value;
+		if (!has_llc)
+			drm_clflush_virt_range(map, PAGE_SIZE);
+		kunmap_atomic(map);
+	}
+
+	i915_gem_obj_finish_shmem_access(obj);
+	obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+	obj->base.write_domain = 0;
+	return 0;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
+{
+	unsigned int n, m, needs_flush;
+	int err;
+
+	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+	if (err)
+		return err;
+
+	for (n = 0; !err && n < real_page_count(obj); n++) {
+		u32 *map;
+
+		map = kmap_atomic(i915_gem_object_get_page(obj, n));
+		if (needs_flush & CLFLUSH_BEFORE)
+			drm_clflush_virt_range(map, PAGE_SIZE);
+
+		for (m = 0; !err && m < max; m++) {
+			if (map[m] != m) {
+				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
+				       n, m, map[m], m);
+				err = -EINVAL;
+			}
+		}
+
+		for (; !err && m < DW_PER_PAGE; m++) {
+			if (map[m] != 0xdeadbeef) {
+				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
+				       n, m, map[m], 0xdeadbeef);
+				err = -EINVAL;
+			}
+		}
+
+		kunmap_atomic(map);
+	}
+
+	i915_gem_obj_finish_shmem_access(obj);
+	return err;
+}
+
+static struct drm_i915_gem_object *
+create_test_object(struct i915_gem_context *ctx,
+		   struct drm_file *file,
+		   struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm =
+		ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+	u64 size;
+	u32 handle;
+	int err;
+
+	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
+	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
+
+	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+	if (IS_ERR(obj))
+		return obj;
+
+	/* tie the handle to the drm_file for easy reaping */
+	err = drm_gem_handle_create(file, &obj->base, &handle);
+	i915_gem_object_put(obj);
+	if (err)
+		return ERR_PTR(err);
+
+	err = cpu_fill(obj, 0xdeadbeef);
+	if (err) {
+		pr_err("Failed to fill object with cpu, err=%d\n",
+		       err);
+		return ERR_PTR(err);
+	}
+
+	list_add_tail(&obj->st_link, objects);
+	return obj;
+}
+
+static unsigned long max_dwords(struct drm_i915_gem_object *obj)
+{
+	unsigned long npages = fake_page_count(obj);
+
+	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
+	return npages / DW_PER_PAGE;
+}
+
+static int igt_ctx_exec(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_file *file = mock_file(i915);
+	struct drm_i915_gem_object *obj;
+	IGT_TIMEOUT(end_time);
+	LIST_HEAD(objects);
+	unsigned long ncontexts, ndwords, dw;
+	int err;
+
+	/* Create a few different contexts (with different mm) and write
+	 * through each ctx/mm using the GPU making sure those writes end
+	 * up in the expected pages of our obj.
+	 */
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	ncontexts = 0;
+	ndwords = 0;
+	dw = 0;
+	while (!time_after(jiffies, end_time)) {
+		struct intel_engine_cs *engine;
+		struct i915_gem_context *ctx;
+		unsigned int id;
+
+		ctx = i915_gem_create_context(i915, file->driver_priv);
+		if (IS_ERR(ctx)) {
+			err = PTR_ERR(ctx);
+			goto out_unlock;
+		}
+
+		for_each_engine(engine, i915, id) {
+			if (dw == 0) {
+				obj = create_test_object(ctx, file, &objects);
+				if (IS_ERR(obj)) {
+					err = PTR_ERR(obj);
+					goto out_unlock;
+				}
+			}
+
+			err = gpu_fill(obj, ctx, engine, dw);
+			if (err) {
+				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %d], err=%d\n",
+				       ndwords, dw, max_dwords(obj),
+				       engine->name, ctx->hw_id, !!ctx->ppgtt,
+				       err);
+				goto out_unlock;
+			}
+
+			if (++dw == max_dwords(obj))
+				dw = 0;
+			ndwords++;
+		}
+		ncontexts++;
+	}
+	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
+		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
+
+	dw = 0;
+	list_for_each_entry(obj, &objects, st_link) {
+		unsigned int rem =
+			min_t(unsigned int, ndwords - dw, max_dwords(obj));
+
+		err = cpu_check(obj, rem);
+		if (err)
+			break;
+
+		dw += rem;
+	}
+
+out_unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	mock_file_free(i915, file);
+	return err;
+}
+
+int i915_gem_context_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_ctx_exec),
+	};
+
+	return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d6c869c5b54f..eedd7901ce61 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -15,3 +15,4 @@ selftest(objects, i915_gem_object_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
 selftest(coherency, i915_gem_coherency_live_selftests)
 selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(contexts, i915_gem_context_live_selftests)
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 2/3] drm/i915: Extract aliasing ppgtt setup
  2017-02-13 12:06 Some unreviewed selftests Chris Wilson
  2017-02-13 12:06 ` [PATCH 1/3] drm/i915: Live testing for context execution Chris Wilson
@ 2017-02-13 12:06 ` Chris Wilson
  2017-02-13 12:06 ` [PATCH 3/3] drm/i915: Force an aliasing_ppgtt test for context execution Chris Wilson
  2017-02-13 12:10 ` [PATCH] drm/i915: Add unit tests for the breadcrumb rbtree, wakeups Chris Wilson
  3 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2017-02-13 12:06 UTC (permalink / raw)
  To: intel-gfx

In order to force testing of the aliasing ppgtt, extract its
initialisation function.

v2: Also extract the cleanup function for symmetry.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 90 +++++++++++++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |  3 ++
 2 files changed, 59 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f8ac69380a0f..eebbffdb9a0b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2738,6 +2738,59 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
 		*end -= I915_GTT_PAGE_SIZE;
 }
 
+int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
+{
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_hw_ppgtt *ppgtt;
+	int err;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return -ENOMEM;
+
+	err = __hw_ppgtt_init(ppgtt, i915);
+	if (err)
+		goto err_ppgtt;
+
+	if (ppgtt->base.allocate_va_range) {
+		err = ppgtt->base.allocate_va_range(&ppgtt->base,
+						    0, ppgtt->base.total);
+		if (err)
+			goto err_ppgtt_cleanup;
+	}
+
+	ppgtt->base.clear_range(&ppgtt->base,
+				ppgtt->base.start,
+				ppgtt->base.total);
+
+	i915->mm.aliasing_ppgtt = ppgtt;
+	WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+	ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+
+	return 0;
+
+err_ppgtt_cleanup:
+	ppgtt->base.cleanup(&ppgtt->base);
+err_ppgtt:
+	kfree(ppgtt);
+	return err;
+}
+
+void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
+{
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_hw_ppgtt *ppgtt;
+
+	ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
+	if (!ppgtt)
+		return;
+
+	ppgtt->base.cleanup(&ppgtt->base);
+	kfree(ppgtt);
+
+	ggtt->base.bind_vma = ggtt_bind_vma;
+}
+
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 {
 	/* Let GEM Manage all of the aperture.
@@ -2751,7 +2804,6 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	 */
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned long hole_start, hole_end;
-	struct i915_hw_ppgtt *ppgtt;
 	struct drm_mm_node *entry;
 	int ret;
 
@@ -2780,38 +2832,13 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
 
 	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
-		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-		if (!ppgtt) {
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		ret = __hw_ppgtt_init(ppgtt, dev_priv);
+		ret = i915_gem_init_aliasing_ppgtt(dev_priv);
 		if (ret)
-			goto err_ppgtt;
-
-		if (ppgtt->base.allocate_va_range) {
-			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
-							    ppgtt->base.total);
-			if (ret)
-				goto err_ppgtt_cleanup;
-		}
-
-		ppgtt->base.clear_range(&ppgtt->base,
-					ppgtt->base.start,
-					ppgtt->base.total);
-
-		dev_priv->mm.aliasing_ppgtt = ppgtt;
-		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
-		ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+			goto err;
 	}
 
 	return 0;
 
-err_ppgtt_cleanup:
-	ppgtt->base.cleanup(&ppgtt->base);
-err_ppgtt:
-	kfree(ppgtt);
 err:
 	drm_mm_remove_node(&ggtt->error_capture);
 	return ret;
@@ -2834,12 +2861,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 		WARN_ON(i915_vma_unbind(vma));
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	if (dev_priv->mm.aliasing_ppgtt) {
-		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-		ppgtt->base.cleanup(&ppgtt->base);
-		kfree(ppgtt);
-	}
-
+	i915_gem_fini_aliasing_ppgtt(dev_priv);
 	i915_gem_cleanup_stolen(&dev_priv->drm);
 
 	if (drm_mm_node_allocated(&ggtt->error_capture))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 7e678ce5a9c7..fe922059a412 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -531,6 +531,9 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
 	return (vm->total - 1) >> 32;
 }
 
+int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
+void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
+
 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 3/3] drm/i915: Force an aliasing_ppgtt test for context execution
  2017-02-13 12:06 Some unreviewed selftests Chris Wilson
  2017-02-13 12:06 ` [PATCH 1/3] drm/i915: Live testing for context execution Chris Wilson
  2017-02-13 12:06 ` [PATCH 2/3] drm/i915: Extract aliasing ppgtt setup Chris Wilson
@ 2017-02-13 12:06 ` Chris Wilson
  2017-02-13 13:06   ` Joonas Lahtinen
  2017-02-13 12:10 ` [PATCH] drm/i915: Add unit tests for the breadcrumb rbtree, wakeups Chris Wilson
  3 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2017-02-13 12:06 UTC (permalink / raw)
  To: intel-gfx

Ensure that we minimally exercise the aliasing_ppgtt, even on a
full-ppgtt, by allocating one and similarly creating a context to use
it.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/i915_gem_context.c | 61 +++++++++++++++++++++--
 1 file changed, 58 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 364578e4e449..b1c3d328d208 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -321,6 +321,7 @@ static int igt_ctx_exec(void *arg)
 	IGT_TIMEOUT(end_time);
 	LIST_HEAD(objects);
 	unsigned long ncontexts, ndwords, dw;
+	bool first_shared_gtt = true;
 	int err;
 
 	/* Create a few different contexts (with different mm) and write
@@ -338,7 +339,12 @@ static int igt_ctx_exec(void *arg)
 		struct i915_gem_context *ctx;
 		unsigned int id;
 
-		ctx = i915_gem_create_context(i915, file->driver_priv);
+		if (first_shared_gtt) {
+			ctx = __create_hw_context(i915, file->driver_priv);
+			first_shared_gtt = false;
+		} else {
+			ctx = i915_gem_create_context(i915, file->driver_priv);
+		}
 		if (IS_ERR(ctx)) {
 			err = PTR_ERR(ctx);
 			goto out_unlock;
@@ -390,11 +396,60 @@ static int igt_ctx_exec(void *arg)
 	return err;
 }
 
-int i915_gem_context_live_selftests(struct drm_i915_private *i915)
+static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
+{
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	err = i915_gem_init_aliasing_ppgtt(i915);
+	if (err)
+		return err;
+
+	list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
+		struct i915_vma *vma;
+
+		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		if (IS_ERR(vma))
+			continue;
+
+		vma->flags &= ~I915_VMA_LOCAL_BIND;
+	}
+
+	return 0;
+}
+
+static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
+{
+	i915_gem_fini_aliasing_ppgtt(i915);
+}
+
+int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_ctx_exec),
 	};
+	bool fake_alias = false;
+	int err;
+
+	/* Install a fake aliasing gtt for exercise */
+	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		err = fake_aliasing_ppgtt_enable(dev_priv);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+		if (err)
+			return err;
+
+		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
+		fake_alias = true;
+	}
+
+	err = i915_subtests(tests, dev_priv);
 
-	return i915_subtests(tests, i915);
+	if (fake_alias) {
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		fake_aliasing_ppgtt_disable(dev_priv);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+	}
+
+	return err;
 }
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH] drm/i915: Add unit tests for the breadcrumb rbtree, wakeups
  2017-02-13 12:06 Some unreviewed selftests Chris Wilson
                   ` (2 preceding siblings ...)
  2017-02-13 12:06 ` [PATCH 3/3] drm/i915: Force an aliasing_ppgtt test for context execution Chris Wilson
@ 2017-02-13 12:10 ` Chris Wilson
  2017-02-13 14:39   ` Tvrtko Ursulin
  3 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2017-02-13 12:10 UTC (permalink / raw)
  To: intel-gfx

Third retroactive test, make sure that the seqno waiters are woken.

v2: Smattering of comments, rearrange code
v3: Fix IDLE assert to avoid startup/sleep races

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 201 +++++++++++++++++++++
 1 file changed, 201 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 32a27e56c353..d1b99b565500 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -259,11 +259,212 @@ static int igt_insert_complete(void *arg)
 	return err;
 }
 
+struct igt_wakeup {
+	struct task_struct *tsk;
+	atomic_t *ready, *set, *done;
+	struct intel_engine_cs *engine;
+	unsigned long flags;
+#define STOP 0
+#define IDLE 1
+	wait_queue_head_t *wq;
+	u32 seqno;
+};
+
+static int wait_atomic(atomic_t *p)
+{
+	schedule();
+	return 0;
+}
+
+static int wait_atomic_timeout(atomic_t *p)
+{
+	return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
+}
+
+static bool wait_for_ready(struct igt_wakeup *w)
+{
+	DEFINE_WAIT(ready);
+
+	set_bit(IDLE, &w->flags);
+	if (atomic_dec_and_test(w->done))
+		wake_up_atomic_t(w->done);
+
+	if (test_bit(STOP, &w->flags))
+		goto out;
+
+	for (;;) {
+		prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE);
+		if (atomic_read(w->ready) == 0)
+			break;
+
+		schedule();
+	}
+	finish_wait(w->wq, &ready);
+
+out:
+	clear_bit(IDLE, &w->flags);
+	if (atomic_dec_and_test(w->set))
+		wake_up_atomic_t(w->set);
+
+	return !test_bit(STOP, &w->flags);
+}
+
+static int igt_wakeup_thread(void *arg)
+{
+	struct igt_wakeup *w = arg;
+	struct intel_wait wait;
+
+	while (wait_for_ready(w)) {
+		GEM_BUG_ON(kthread_should_stop());
+
+		intel_wait_init(&wait, w->seqno);
+		intel_engine_add_wait(w->engine, &wait);
+		for (;;) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (i915_seqno_passed(intel_engine_get_seqno(w->engine),
+					      w->seqno))
+				break;
+
+			if (test_bit(STOP, &w->flags)) /* emergency escape */
+				break;
+
+			schedule();
+		}
+		intel_engine_remove_wait(w->engine, &wait);
+		__set_current_state(TASK_RUNNING);
+	}
+
+	return 0;
+}
+
+static void igt_wake_all_sync(atomic_t *ready,
+			      atomic_t *set,
+			      atomic_t *done,
+			      wait_queue_head_t *wq,
+			      int count)
+{
+	atomic_set(set, count);
+	atomic_set(ready, 0);
+	wake_up_all(wq);
+
+	wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
+	atomic_set(ready, count);
+	atomic_set(done, count);
+}
+
+static int igt_wakeup(void *arg)
+{
+	I915_RND_STATE(prng);
+	const int state = TASK_UNINTERRUPTIBLE;
+	struct intel_engine_cs *engine = arg;
+	struct igt_wakeup *waiters;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	const int count = 4096;
+	const u32 max_seqno = count / 4;
+	atomic_t ready, set, done;
+	int err = -ENOMEM;
+	int n, step;
+
+	mock_engine_reset(engine);
+
+	waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+	if (!waiters)
+		goto out_engines;
+
+	/* Create a large number of threads, each waiting on a random seqno.
+	 * Multiple waiters will be waiting for the same seqno.
+	 */
+	atomic_set(&ready, count);
+	for (n = 0; n < count; n++) {
+		waiters[n].wq = &wq;
+		waiters[n].ready = &ready;
+		waiters[n].set = &set;
+		waiters[n].done = &done;
+		waiters[n].engine = engine;
+		waiters[n].flags = BIT(IDLE);
+
+		waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
+					     "i915/igt:%d", n);
+		if (IS_ERR(waiters[n].tsk))
+			goto out_waiters;
+
+		get_task_struct(waiters[n].tsk);
+	}
+
+	for (step = 1; step <= max_seqno; step <<= 1) {
+		u32 seqno;
+
+		/* The waiter threads start paused as we assign them a random
+		 * seqno and reset the engine. Once the engine is reset,
+		 * we signal that the threads may begin their wait upon their
+		 * seqno.
+		 */
+		for (n = 0; n < count; n++) {
+			GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
+			waiters[n].seqno =
+				1 + prandom_u32_state(&prng) % max_seqno;
+		}
+		mock_seqno_advance(engine, 0);
+		igt_wake_all_sync(&ready, &set, &done, &wq, count);
+
+		/* Simulate the GPU doing chunks of work, with one or more
+		 * seqno appearing to finish at the same time. A random number
+		 * of threads will be waiting upon the update and hopefully be
+		 * woken.
+		 */
+		for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
+			usleep_range(50, 500);
+			mock_seqno_advance(engine, seqno);
+		}
+		GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);
+
+		/* With the seqno now beyond any of the waiting threads, they
+		 * should all be woken, see that they are complete and signal
+		 * that they are ready for the next test. We wait until all
+		 * threads are complete and waiting for us (i.e. not a seqno).
+		 */
+		err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
+		if (err) {
+			pr_err("Timed out waiting for %d remaining waiters\n",
+			       atomic_read(&done));
+			break;
+		}
+
+		err = check_rbtree_empty(engine);
+		if (err)
+			break;
+	}
+
+out_waiters:
+	for (n = 0; n < count; n++) {
+		if (IS_ERR(waiters[n].tsk))
+			break;
+
+		set_bit(STOP, &waiters[n].flags);
+	}
+	mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
+	igt_wake_all_sync(&ready, &set, &done, &wq, n);
+
+	for (n = 0; n < count; n++) {
+		if (IS_ERR(waiters[n].tsk))
+			break;
+
+		kthread_stop(waiters[n].tsk);
+		put_task_struct(waiters[n].tsk);
+	}
+
+	drm_free_large(waiters);
+out_engines:
+	mock_engine_flush(engine);
+	return err;
+}
+
 int intel_breadcrumbs_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_random_insert_remove),
 		SUBTEST(igt_insert_complete),
+		SUBTEST(igt_wakeup),
 	};
 	struct intel_engine_cs *engine;
 	int err;
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH 3/3] drm/i915: Force an aliasing_ppgtt test for context execution
  2017-02-13 12:06 ` [PATCH 3/3] drm/i915: Force an aliasing_ppgtt test for context execution Chris Wilson
@ 2017-02-13 13:06   ` Joonas Lahtinen
  0 siblings, 0 replies; 8+ messages in thread
From: Joonas Lahtinen @ 2017-02-13 13:06 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2017-02-13 at 12:06 +0000, Chris Wilson wrote:
> Ensure that we minimally exercise the aliasing_ppgtt, even on a
> full-ppgtt, by allocating one and similarly creating a context to use
> it.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 1/3] drm/i915: Live testing for context execution
  2017-02-13 12:06 ` [PATCH 1/3] drm/i915: Live testing for context execution Chris Wilson
@ 2017-02-13 14:20   ` Joonas Lahtinen
  0 siblings, 0 replies; 8+ messages in thread
From: Joonas Lahtinen @ 2017-02-13 14:20 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2017-02-13 at 12:06 +0000, Chris Wilson wrote:
> Check we can create and execution within a context.
> 
> v2: Write one set of dwords through each context/engine to exercise more
> contexts within the same time period.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

<SNIP>

> +static struct i915_vma *
> +gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
> +{
> +	struct drm_i915_gem_object *obj;
> +	const int gen = INTEL_GEN(vma->vm->i915);
> +	unsigned long n;
> +	u32 *cmd;
> +	int err;
> +
> +	GEM_BUG_ON(!igt_can_mi_store_dword_imm(vma->vm->i915));
> +
> +	n = (4*count + 1)*sizeof(u32);
> +	obj = i915_gem_object_create_internal(vma->vm->i915,
> +					      round_up(n, PAGE_SIZE));

Reuse of variable "n" purpose later.

> +static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
> +{
> +	unsigned int n, m, needs_flush;
> +	int err;
> +
> +	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
> +	if (err)
> +		return err;
> +
> +	for (n = 0; !err && n < real_page_count(obj); n++) {
> +		u32 *map;
> +
> +		map = kmap_atomic(i915_gem_object_get_page(obj, n));
> +		if (needs_flush & CLFLUSH_BEFORE)
> +			drm_clflush_virt_range(map, PAGE_SIZE);
> +
> +		for (m = 0; !err && m < max; m++) {
> +			if (map[m] != m) {
> +				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
> +				       n, m, map[m], m);
> +				err = -EINVAL;
> +			}
> +		}

Use breaks or gotos to avoid cluttering the loop conditions.

<SNIP>

> +				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %d], err=%d\n",
> +				       ndwords, dw, max_dwords(obj),
> +				       engine->name, ctx->hw_id, !!ctx->ppgtt,

yesno(!!ctx->ppgtt)

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/i915: Add unit tests for the breadcrumb rbtree, wakeups
  2017-02-13 12:10 ` [PATCH] drm/i915: Add unit tests for the breadcrumb rbtree, wakeups Chris Wilson
@ 2017-02-13 14:39   ` Tvrtko Ursulin
  0 siblings, 0 replies; 8+ messages in thread
From: Tvrtko Ursulin @ 2017-02-13 14:39 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 13/02/2017 12:10, Chris Wilson wrote:
> Third retroactive test, make sure that the seqno waiters are woken.
>
> v2: Smattering of comments, rearrange code
> v3: Fix IDLE assert to avoid startup/sleep races

This addresses the last remaining issue I had.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 201 +++++++++++++++++++++
>  1 file changed, 201 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
> index 32a27e56c353..d1b99b565500 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
> @@ -259,11 +259,212 @@ static int igt_insert_complete(void *arg)
>  	return err;
>  }
>
> +struct igt_wakeup {
> +	struct task_struct *tsk;
> +	atomic_t *ready, *set, *done;
> +	struct intel_engine_cs *engine;
> +	unsigned long flags;
> +#define STOP 0
> +#define IDLE 1
> +	wait_queue_head_t *wq;
> +	u32 seqno;
> +};
> +
> +static int wait_atomic(atomic_t *p)
> +{
> +	schedule();
> +	return 0;
> +}
> +
> +static int wait_atomic_timeout(atomic_t *p)
> +{
> +	return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
> +}
> +
> +static bool wait_for_ready(struct igt_wakeup *w)
> +{
> +	DEFINE_WAIT(ready);
> +
> +	set_bit(IDLE, &w->flags);
> +	if (atomic_dec_and_test(w->done))
> +		wake_up_atomic_t(w->done);
> +
> +	if (test_bit(STOP, &w->flags))
> +		goto out;
> +
> +	for (;;) {
> +		prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE);
> +		if (atomic_read(w->ready) == 0)
> +			break;
> +
> +		schedule();
> +	}
> +	finish_wait(w->wq, &ready);
> +
> +out:
> +	clear_bit(IDLE, &w->flags);
> +	if (atomic_dec_and_test(w->set))
> +		wake_up_atomic_t(w->set);
> +
> +	return !test_bit(STOP, &w->flags);
> +}
> +
> +static int igt_wakeup_thread(void *arg)
> +{
> +	struct igt_wakeup *w = arg;
> +	struct intel_wait wait;
> +
> +	while (wait_for_ready(w)) {
> +		GEM_BUG_ON(kthread_should_stop());
> +
> +		intel_wait_init(&wait, w->seqno);
> +		intel_engine_add_wait(w->engine, &wait);
> +		for (;;) {
> +			set_current_state(TASK_UNINTERRUPTIBLE);
> +			if (i915_seqno_passed(intel_engine_get_seqno(w->engine),
> +					      w->seqno))
> +				break;
> +
> +			if (test_bit(STOP, &w->flags)) /* emergency escape */
> +				break;
> +
> +			schedule();
> +		}
> +		intel_engine_remove_wait(w->engine, &wait);
> +		__set_current_state(TASK_RUNNING);
> +	}
> +
> +	return 0;
> +}
> +
> +static void igt_wake_all_sync(atomic_t *ready,
> +			      atomic_t *set,
> +			      atomic_t *done,
> +			      wait_queue_head_t *wq,
> +			      int count)
> +{
> +	atomic_set(set, count);
> +	atomic_set(ready, 0);
> +	wake_up_all(wq);
> +
> +	wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
> +	atomic_set(ready, count);
> +	atomic_set(done, count);
> +}
> +
> +static int igt_wakeup(void *arg)
> +{
> +	I915_RND_STATE(prng);
> +	const int state = TASK_UNINTERRUPTIBLE;
> +	struct intel_engine_cs *engine = arg;
> +	struct igt_wakeup *waiters;
> +	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
> +	const int count = 4096;
> +	const u32 max_seqno = count / 4;
> +	atomic_t ready, set, done;
> +	int err = -ENOMEM;
> +	int n, step;
> +
> +	mock_engine_reset(engine);
> +
> +	waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
> +	if (!waiters)
> +		goto out_engines;
> +
> +	/* Create a large number of threads, each waiting on a random seqno.
> +	 * Multiple waiters will be waiting for the same seqno.
> +	 */
> +	atomic_set(&ready, count);
> +	for (n = 0; n < count; n++) {
> +		waiters[n].wq = &wq;
> +		waiters[n].ready = &ready;
> +		waiters[n].set = &set;
> +		waiters[n].done = &done;
> +		waiters[n].engine = engine;
> +		waiters[n].flags = BIT(IDLE);
> +
> +		waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
> +					     "i915/igt:%d", n);
> +		if (IS_ERR(waiters[n].tsk))
> +			goto out_waiters;
> +
> +		get_task_struct(waiters[n].tsk);
> +	}
> +
> +	for (step = 1; step <= max_seqno; step <<= 1) {
> +		u32 seqno;
> +
> +		/* The waiter threads start paused as we assign them a random
> +		 * seqno and reset the engine. Once the engine is reset,
> +		 * we signal that the threads may begin their wait upon their
> +		 * seqno.
> +		 */
> +		for (n = 0; n < count; n++) {
> +			GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
> +			waiters[n].seqno =
> +				1 + prandom_u32_state(&prng) % max_seqno;
> +		}
> +		mock_seqno_advance(engine, 0);
> +		igt_wake_all_sync(&ready, &set, &done, &wq, count);
> +
> +		/* Simulate the GPU doing chunks of work, with one or more
> +		 * seqno appearing to finish at the same time. A random number
> +		 * of threads will be waiting upon the update and hopefully be
> +		 * woken.
> +		 */
> +		for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
> +			usleep_range(50, 500);
> +			mock_seqno_advance(engine, seqno);
> +		}
> +		GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);
> +
> +		/* With the seqno now beyond any of the waiting threads, they
> +		 * should all be woken, see that they are complete and signal
> +		 * that they are ready for the next test. We wait until all
> +		 * threads are complete and waiting for us (i.e. not a seqno).
> +		 */
> +		err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
> +		if (err) {
> +			pr_err("Timed out waiting for %d remaining waiters\n",
> +			       atomic_read(&done));
> +			break;
> +		}
> +
> +		err = check_rbtree_empty(engine);
> +		if (err)
> +			break;
> +	}
> +
> +out_waiters:
> +	for (n = 0; n < count; n++) {
> +		if (IS_ERR(waiters[n].tsk))
> +			break;
> +
> +		set_bit(STOP, &waiters[n].flags);
> +	}
> +	mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
> +	igt_wake_all_sync(&ready, &set, &done, &wq, n);
> +
> +	for (n = 0; n < count; n++) {
> +		if (IS_ERR(waiters[n].tsk))
> +			break;
> +
> +		kthread_stop(waiters[n].tsk);
> +		put_task_struct(waiters[n].tsk);
> +	}
> +
> +	drm_free_large(waiters);
> +out_engines:
> +	mock_engine_flush(engine);
> +	return err;
> +}
> +
>  int intel_breadcrumbs_mock_selftests(void)
>  {
>  	static const struct i915_subtest tests[] = {
>  		SUBTEST(igt_random_insert_remove),
>  		SUBTEST(igt_insert_complete),
> +		SUBTEST(igt_wakeup),
>  	};
>  	struct intel_engine_cs *engine;
>  	int err;
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2017-02-13 14:39 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-02-13 12:06 Some unreviewed selftests Chris Wilson
2017-02-13 12:06 ` [PATCH 1/3] drm/i915: Live testing for context execution Chris Wilson
2017-02-13 14:20   ` Joonas Lahtinen
2017-02-13 12:06 ` [PATCH 2/3] drm/i915: Extract aliasing ppgtt setup Chris Wilson
2017-02-13 12:06 ` [PATCH 3/3] drm/i915: Force an aliasing_ppgtt test for context execution Chris Wilson
2017-02-13 13:06   ` Joonas Lahtinen
2017-02-13 12:10 ` [PATCH] drm/i915: Add unit tests for the breadcrumb rbtree, wakeups Chris Wilson
2017-02-13 14:39   ` Tvrtko Ursulin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox