* [PATCH] drm/i915/selftests: Add coverage of mocs registers
@ 2019-10-18 12:06 Chris Wilson
2019-10-18 13:51 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Add coverage of mocs registers (rev5) Patchwork
` (2 more replies)
0 siblings, 3 replies; 10+ messages in thread
From: Chris Wilson @ 2019-10-18 12:06 UTC (permalink / raw)
To: intel-gfx
Probe the mocs registers for new contexts and across GPU resets. Similar
to intel_workarounds, we have tables of what register values we expect
to see, so verify that user contexts are affected by them. In the
future, we should add tests similar to intel_sseu to cover dynamic
reconfigurations.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
drivers/gpu/drm/i915/gt/intel_mocs.c | 4 +
drivers/gpu/drm/i915/gt/selftest_mocs.c | 431 ++++++++++++++++++
.../drm/i915/selftests/i915_live_selftests.h | 1 +
3 files changed, 436 insertions(+)
create mode 100644 drivers/gpu/drm/i915/gt/selftest_mocs.c
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 5bac3966906b..f5a239640553 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -490,3 +490,7 @@ void intel_mocs_init(struct intel_gt *gt)
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
intel_mocs_init_global(gt);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000000..94c1c638621b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,431 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ int err;
+
+ if (!get_mocs_settings(gt->i915, &arg->table))
+ return -EINVAL;
+
+ arg->ctx = kernel_context(gt->i915);
+ if (!arg->ctx)
+ return -ENOMEM;
+
+ arg->scratch = create_scratch(gt);
+ if (IS_ERR(arg->scratch)) {
+ err = PTR_ERR(arg->scratch);
+ goto err_ctx;
+ }
+
+ arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_unpin_and_release(&arg->scratch, 0);
+err_ctx:
+ kernel_context_close(arg->ctx);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+ kernel_context_close(arg->ctx);
+}
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ struct i915_vma *vma, int *offset)
+{
+ unsigned int i;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4 * table->n_entries);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0; i < table->n_entries; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ?
+ GEN12_GLOBAL_MOCS(i) :
+ mocs_register(rq->engine, i));
+ *cs++ = i915_ggtt_offset(vma) + i * sizeof(u32) + *offset;
+ *cs++ = 0;
+ }
+
+ intel_ring_advance(rq, cs);
+ *offset += i * sizeof(u32);
+
+ return 0;
+}
+
+static int read_l3cc_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ struct i915_vma *vma, int *offset)
+{
+ unsigned int i;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4 * table->n_entries / 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* XXX can't read the MCR range 0xb00 directly */
+ for (i = 0; i < table->n_entries / 2; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+ *cs++ = i915_ggtt_offset(vma) + i * sizeof(u32) + *offset;
+ *cs++ = 0;
+ }
+
+ intel_ring_advance(rq, cs);
+ *offset += i * sizeof(u32);
+
+ return 0;
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ const u32 *vaddr, int *offset)
+{
+ unsigned int i;
+ u32 expect;
+
+ for (i = 0; i < table->size; i++) {
+ if (HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ expect = table->table[i].control_value;
+ else
+ expect = get_entry_control(table, i);
+ if (vaddr[*offset] != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[*offset], expect);
+ return -EINVAL;
+ }
+ ++*offset;
+ }
+
+ /* All remaining entries are default */
+ if (HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ expect = table->table[0].control_value;
+ else
+ expect = table->table[I915_MOCS_PTE].control_value;
+ for (; i < table->n_entries; i++) {
+ if (vaddr[*offset] != expect) {
+ pr_err("%s: Invalid MOCS[%d*] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[*offset], expect);
+ return -EINVAL;
+ }
+ ++*offset;
+ }
+
+ return 0;
+}
+
+static int check_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ const u32 *vaddr, int *offset)
+{
+ u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value;
+ unsigned int i;
+ u32 expect;
+
+ if (1) { /* XXX skip MCR read back */
+ *offset += table->n_entries / 2;
+ return 0;
+ }
+
+ for (i = 0; i < table->size / 2; i++) {
+ u16 low = get_entry_l3cc(table, 2 * i);
+ u16 high = get_entry_l3cc(table, 2 * i + 1);
+
+ expect = l3cc_combine(table, low, high);
+ if (vaddr[*offset] != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[*offset], expect);
+ return -EINVAL;
+ }
+ ++*offset;
+ }
+
+ /* Odd table size - 1 left over */
+ if (table->size & 1) {
+ u16 low = get_entry_l3cc(table, 2 * i);
+
+ expect = l3cc_combine(table, low, unused_value);
+ if (vaddr[*offset] != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[*offset], expect);
+ return -EINVAL;
+ }
+ ++*offset;
+ i++;
+ }
+
+ /* All remaining entries are also unused */
+ for (; i < table->n_entries / 2; i++) {
+ expect = l3cc_combine(table, unused_value, unused_value);
+ if (vaddr[*offset] != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[*offset], expect);
+ return -EINVAL;
+ }
+ ++*offset;
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_vma *vma = arg->scratch;
+ struct i915_request *rq;
+ int offset;
+ int err;
+
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+
+ offset = 0;
+ if (!err)
+ err = read_mocs_table(rq, &arg->table, vma, &offset);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = read_l3cc_table(rq, &arg->table, vma, &offset);
+
+ err = request_add_sync(rq, err);
+ if (err)
+ return err;
+
+ offset = 0;
+ if (!err)
+ err = check_mocs_table(ce->engine, &arg->table,
+ arg->vaddr, &offset);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = check_l3cc_table(ce->engine, &arg->table,
+ arg->vaddr, &offset);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = check_mocs_engine(&mocs, ce);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0)
+ err = intel_engine_reset(ce->engine, reason);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+}
+
+static int __live_mocs_reset(struct live_mocs *mocs,
+ struct intel_context *ce)
+{
+ int err;
+
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err = 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ intel_engine_pm_get(engine);
+ err = __live_mocs_reset(&mocs, ce);
+ intel_engine_pm_put(engine);
+
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 6daf6599ec79..1a6abcffce81 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
--
2.23.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Add coverage of mocs registers (rev5)
2019-10-18 12:06 [PATCH] drm/i915/selftests: Add coverage of mocs registers Chris Wilson
@ 2019-10-18 13:51 ` Patchwork
2019-10-18 14:09 ` [PATCH] drm/i915/selftests: Add coverage of mocs registers Kumar Valsan, Prathap
2019-10-18 14:16 ` ✗ Fi.CI.BAT: failure for drm/i915/selftests: Add coverage of mocs registers (rev5) Patchwork
2 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2019-10-18 13:51 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/i915/selftests: Add coverage of mocs registers (rev5)
URL : https://patchwork.freedesktop.org/series/68135/
State : warning
== Summary ==
$ dim checkpatch origin/drm-tip
221d3a170a43 drm/i915/selftests: Add coverage of mocs registers
-:29: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#29:
new file mode 100644
-:34: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#34: FILE: drivers/gpu/drm/i915/gt/selftest_mocs.c:1:
+/*
-:35: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#35: FILE: drivers/gpu/drm/i915/gt/selftest_mocs.c:2:
+ * SPDX-License-Identifier: MIT
total: 0 errors, 3 warnings, 0 checks, 445 lines checked
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] drm/i915/selftests: Add coverage of mocs registers
2019-10-18 12:06 [PATCH] drm/i915/selftests: Add coverage of mocs registers Chris Wilson
2019-10-18 13:51 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Add coverage of mocs registers (rev5) Patchwork
@ 2019-10-18 14:09 ` Kumar Valsan, Prathap
2019-10-18 14:16 ` ✗ Fi.CI.BAT: failure for drm/i915/selftests: Add coverage of mocs registers (rev5) Patchwork
2 siblings, 0 replies; 10+ messages in thread
From: Kumar Valsan, Prathap @ 2019-10-18 14:09 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
On Fri, Oct 18, 2019 at 01:06:39PM +0100, Chris Wilson wrote:
> Probe the mocs registers for new contexts and across GPU resets. Similar
> to intel_workarounds, we have tables of what register values we expect
> to see, so verify that user contexts are affected by them. In the
> future, we should add tests similar to intel_sseu to cover dynamic
> reconfigurations.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
> +static int check_l3cc_table(struct intel_engine_cs *engine,
> + const struct drm_i915_mocs_table *table,
> + const u32 *vaddr, int *offset)
> +{
> + u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value;
> + unsigned int i;
> + u32 expect;
> +
> + if (1) { /* XXX skip MCR read back */
> + *offset += table->n_entries / 2;
> + return 0;
> + }
Not checking l3cc table?
> +
> + for (i = 0; i < table->size / 2; i++) {
> + u16 low = get_entry_l3cc(table, 2 * i);
> + u16 high = get_entry_l3cc(table, 2 * i + 1);
> +
> + expect = l3cc_combine(table, low, high);
> + if (vaddr[*offset] != expect) {
> + pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
> + engine->name, i, vaddr[*offset], expect);
> + return -EINVAL;
> + }
> + ++*offset;
> + }
> +
> + /* Odd table size - 1 left over */
> + if (table->size & 1) {
> + u16 low = get_entry_l3cc(table, 2 * i);
> +
> + expect = l3cc_combine(table, low, unused_value);
> + if (vaddr[*offset] != expect) {
> + pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
> + engine->name, i, vaddr[*offset], expect);
> + return -EINVAL;
> + }
> + ++*offset;
> + i++;
> + }
> +
> + /* All remaining entries are also unused */
> + for (; i < table->n_entries / 2; i++) {
> + expect = l3cc_combine(table, unused_value, unused_value);
> + if (vaddr[*offset] != expect) {
> + pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
> + engine->name, i, vaddr[*offset], expect);
> + return -EINVAL;
> + }
> + ++*offset;
> + }
> +
> + return 0;
> +}
> +
> +static int check_mocs_engine(struct live_mocs *arg,
> + struct intel_context *ce)
> +{
> + struct i915_vma *vma = arg->scratch;
> + struct i915_request *rq;
> + int offset;
> + int err;
> +
> + memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
> +
> + rq = intel_context_create_request(ce);
> + if (IS_ERR(rq))
> + return PTR_ERR(rq);
> +
> + i915_vma_lock(vma);
> + err = i915_request_await_object(rq, vma->obj, true);
> + if (!err)
> + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
> + i915_vma_unlock(vma);
> +
> + offset = 0;
> + if (!err)
> + err = read_mocs_table(rq, &arg->table, vma, &offset);
> + if (!err && ce->engine->class == RENDER_CLASS)
> + err = read_l3cc_table(rq, &arg->table, vma, &offset);
> +
> + err = request_add_sync(rq, err);
> + if (err)
> + return err;
> +
> + offset = 0;
> + if (!err)
> + err = check_mocs_table(ce->engine, &arg->table,
> + arg->vaddr, &offset);
> + if (!err && ce->engine->class == RENDER_CLASS)
> + err = check_l3cc_table(ce->engine, &arg->table,
> + arg->vaddr, &offset);
> + if (err)
> + return err;
> +
> + return 0;
> +}
> +
> +static int live_mocs_clean(void *arg)
> +{
> + struct intel_gt *gt = arg;
> + struct intel_engine_cs *engine;
> + enum intel_engine_id id;
> + struct live_mocs mocs;
> + int err;
> +
> + err = live_mocs_init(&mocs, gt);
> + if (err)
> + return err;
> +
> + for_each_engine(engine, gt, id) {
> + struct intel_context *ce;
> +
> + ce = intel_context_create(engine->kernel_context->gem_context,
> + engine);
> + if (IS_ERR(ce)) {
> + err = PTR_ERR(ce);
> + break;
> + }
> +
> + err = check_mocs_engine(&mocs, ce);
> + intel_context_put(ce);
Need a _get() to pair with _put()?
> + if (err)
> + break;
> + }
> +
> + live_mocs_fini(&mocs);
> +
> + return err;
> +}
> +
[snip]
> diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> index 6daf6599ec79..1a6abcffce81 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> @@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests)
> selftest(gt_timelines, intel_timeline_live_selftests)
> selftest(gt_contexts, intel_context_live_selftests)
> selftest(gt_lrc, intel_lrc_live_selftests)
> +selftest(gt_mocs, intel_mocs_live_selftests)
> selftest(gt_pm, intel_gt_pm_live_selftests)
> selftest(requests, i915_request_live_selftests)
> selftest(active, i915_active_live_selftests)
Regards,
Prathap
> --
> 2.23.0
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread

* ✗ Fi.CI.BAT: failure for drm/i915/selftests: Add coverage of mocs registers (rev5)
2019-10-18 12:06 [PATCH] drm/i915/selftests: Add coverage of mocs registers Chris Wilson
2019-10-18 13:51 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Add coverage of mocs registers (rev5) Patchwork
2019-10-18 14:09 ` [PATCH] drm/i915/selftests: Add coverage of mocs registers Kumar Valsan, Prathap
@ 2019-10-18 14:16 ` Patchwork
2 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2019-10-18 14:16 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/i915/selftests: Add coverage of mocs registers (rev5)
URL : https://patchwork.freedesktop.org/series/68135/
State : failure
== Summary ==
CI Bug Log - changes from CI_DRM_7128 -> Patchwork_14882
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with Patchwork_14882 absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in Patchwork_14882, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/index.html
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in Patchwork_14882:
### IGT changes ###
#### Possible regressions ####
* igt@runner@aborted:
- fi-bxt-dsi: NOTRUN -> [FAIL][1]
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-bxt-dsi/igt@runner@aborted.html
New tests
---------
New tests have been introduced between CI_DRM_7128 and Patchwork_14882:
### New IGT tests (1) ###
* igt@i915_selftest@live_gt_mocs:
- Statuses : 43 pass(s)
- Exec time: [0.40, 2.44] s
Known issues
------------
Here are the changes found in Patchwork_14882 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_ctx_create@basic-files:
- fi-cml-u2: [PASS][2] -> [INCOMPLETE][3] ([fdo#110566])
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-cml-u2/igt@gem_ctx_create@basic-files.html
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-cml-u2/igt@gem_ctx_create@basic-files.html
* igt@gem_mmap_gtt@basic-short:
- fi-icl-u3: [PASS][4] -> [DMESG-WARN][5] ([fdo#107724])
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-icl-u3/igt@gem_mmap_gtt@basic-short.html
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-icl-u3/igt@gem_mmap_gtt@basic-short.html
* igt@i915_selftest@live_execlists:
- fi-bxt-dsi: [PASS][6] -> [INCOMPLETE][7] ([fdo#103927])
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-bxt-dsi/igt@i915_selftest@live_execlists.html
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-bxt-dsi/igt@i915_selftest@live_execlists.html
* igt@kms_chamelium@hdmi-hpd-fast:
- fi-icl-u2: [PASS][8] -> [FAIL][9] ([fdo#109483])
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-icl-u2/igt@kms_chamelium@hdmi-hpd-fast.html
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-icl-u2/igt@kms_chamelium@hdmi-hpd-fast.html
#### Possible fixes ####
* igt@gem_exec_suspend@basic:
- {fi-icl-u4}: [FAIL][10] ([fdo#111699]) -> [PASS][11]
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-icl-u4/igt@gem_exec_suspend@basic.html
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-icl-u4/igt@gem_exec_suspend@basic.html
* igt@i915_selftest@live_execlists:
- fi-whl-u: [INCOMPLETE][12] ([fdo#112065]) -> [PASS][13]
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-whl-u/igt@i915_selftest@live_execlists.html
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-whl-u/igt@i915_selftest@live_execlists.html
- fi-skl-6260u: [INCOMPLETE][14] ([fdo#111934]) -> [PASS][15]
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-skl-6260u/igt@i915_selftest@live_execlists.html
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-skl-6260u/igt@i915_selftest@live_execlists.html
- fi-skl-lmem: [INCOMPLETE][16] ([fdo#111934]) -> [PASS][17]
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-skl-lmem/igt@i915_selftest@live_execlists.html
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-skl-lmem/igt@i915_selftest@live_execlists.html
* igt@i915_selftest@live_gem_contexts:
- fi-cfl-8109u: [DMESG-FAIL][18] ([fdo#112050 ]) -> [PASS][19]
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-cfl-8109u/igt@i915_selftest@live_gem_contexts.html
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-cfl-8109u/igt@i915_selftest@live_gem_contexts.html
* igt@i915_selftest@live_gtt:
- {fi-tgl-u2}: [INCOMPLETE][20] ([fdo#111833]) -> [PASS][21]
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-tgl-u2/igt@i915_selftest@live_gtt.html
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-tgl-u2/igt@i915_selftest@live_gtt.html
* igt@i915_selftest@live_sanitycheck:
- fi-icl-u3: [DMESG-WARN][22] ([fdo#107724]) -> [PASS][23] +1 similar issue
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html
* igt@kms_chamelium@hdmi-hpd-fast:
- fi-kbl-7500u: [FAIL][24] ([fdo#111407]) -> [PASS][25]
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7128/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
[fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
[fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
[fdo#108569]: https://bugs.freedesktop.org/show_bug.cgi?id=108569
[fdo#109483]: https://bugs.freedesktop.org/show_bug.cgi?id=109483
[fdo#110566]: https://bugs.freedesktop.org/show_bug.cgi?id=110566
[fdo#111144]: https://bugs.freedesktop.org/show_bug.cgi?id=111144
[fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
[fdo#111600]: https://bugs.freedesktop.org/show_bug.cgi?id=111600
[fdo#111678]: https://bugs.freedesktop.org/show_bug.cgi?id=111678
[fdo#111699]: https://bugs.freedesktop.org/show_bug.cgi?id=111699
[fdo#111833]: https://bugs.freedesktop.org/show_bug.cgi?id=111833
[fdo#111934]: https://bugs.freedesktop.org/show_bug.cgi?id=111934
[fdo#112050 ]: https://bugs.freedesktop.org/show_bug.cgi?id=112050
[fdo#112065]: https://bugs.freedesktop.org/show_bug.cgi?id=112065
Participating hosts (52 -> 45)
------------------------------
Additional (1): fi-pnv-d510
Missing (8): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus fi-snb-2600
Build changes
-------------
* CI: CI-20190529 -> None
* Linux: CI_DRM_7128 -> Patchwork_14882
CI-20190529: 20190529
CI_DRM_7128: 8b9127d9e8ad36b96096fb3358a1edb34eda96ba @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_5232: bb5735423eaf6fdbf6b2f94ef0b8520e74eab993 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_14882: 221d3a170a4313754b20e6fbbc2c18bdf15f2685 @ git://anongit.freedesktop.org/gfx-ci/linux
== Linux commits ==
221d3a170a43 drm/i915/selftests: Add coverage of mocs registers
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14882/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH 4/4] drm/i915/selftests: Add coverage of mocs registers
@ 2019-10-22 11:51 Chris Wilson
2019-10-22 11:57 ` [PATCH] " Chris Wilson
0 siblings, 1 reply; 10+ messages in thread
From: Chris Wilson @ 2019-10-22 11:51 UTC (permalink / raw)
To: intel-gfx
Probe the mocs registers for new contexts and across GPU resets. Similar
to intel_workarounds, we have tables of what register values we expect
to see, so verify that user contexts are affected by them. In the
future, we should add tests similar to intel_sseu to cover dynamic
reconfigurations.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
drivers/gpu/drm/i915/gt/intel_mocs.c | 4 +
drivers/gpu/drm/i915/gt/selftest_mocs.c | 405 ++++++++++++++++++
.../drm/i915/selftests/i915_live_selftests.h | 1 +
3 files changed, 410 insertions(+)
create mode 100644 drivers/gpu/drm/i915/gt/selftest_mocs.c
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 445ec025bda0..06dba7ff294e 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -448,3 +448,7 @@ void intel_mocs_init(struct intel_gt *gt)
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
init_global_mocs(gt);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000000..f4dfb36f450d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,405 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ int err;
+
+ if (!get_mocs_settings(gt->i915, &arg->table))
+ return -EINVAL;
+
+ arg->scratch = create_scratch(gt);
+ if (IS_ERR(arg->scratch))
+ return PTR_ERR(arg->scratch);
+
+ arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_unpin_and_release(&arg->scratch, 0);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+}
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ uint32_t *offset)
+{
+ unsigned int i;
+ u32 addr;
+ u32 *cs;
+
+ GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
+
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
+ addr = 0x4000;
+ else
+ addr = mocs_register(rq->engine);
+
+ cs = intel_ring_begin(rq, 4 * table->n_entries);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0; i < table->n_entries; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = *offset;
+ *cs++ = 0;
+
+ addr += sizeof(u32);
+ *offset += sizeof(u32);
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int read_l3cc_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ uint32_t *offset)
+{
+ u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+ unsigned int i;
+ u32 *cs;
+
+ GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
+
+ cs = intel_ring_begin(rq, (table->n_entries + 1) / 2 * 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
+ for (i = 0; i < (table->n_entries + 1) / 2; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = *offset;
+ *cs++ = 0;
+
+ addr += sizeof(u32);
+ *offset += sizeof(u32);
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ uint32_t **vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for_each_mocs(expect, table, i) {
+ if (**vaddr != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ }
+
+ return 0;
+}
+
+static int check_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ uint32_t **vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for_each_l3cc(expect, table, i) {
+ if (**vaddr != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_vma *vma = arg->scratch;
+ struct i915_request *rq;
+ u32 offset;
+ u32 *vaddr;
+ int err;
+
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+
+ offset = i915_ggtt_offset(vma);
+ if (!err)
+ err = read_mocs_table(rq, &arg->table, &offset);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = read_l3cc_table(rq, &arg->table, &offset);
+ offset -= i915_ggtt_offset(vma);
+ GEM_BUG_ON(offset > PAGE_SIZE);
+
+ err = request_add_sync(rq, err);
+ if (err)
+ return err;
+
+ vaddr = arg->vaddr;
+ if (!err)
+ err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(arg->vaddr + offset != vaddr);
+ return 0;
+}
+
+static int live_mocs_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ err = check_mocs_engine(&mocs, engine->kernel_context);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = check_mocs_engine(&mocs, ce);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0)
+ err = intel_engine_reset(ce->engine, reason);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+}
+
+static int __live_mocs_reset(struct live_mocs *mocs,
+ struct intel_context *ce)
+{
+ int err;
+
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err = 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ intel_engine_pm_get(engine);
+ err = __live_mocs_reset(&mocs, ce);
+ intel_engine_pm_put(engine);
+
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_kernel),
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 00a063730bc3..c23d06bca09e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(gt_heartbeat, intel_heartbeat_live_selftests)
selftest(requests, i915_request_live_selftests)
--
2.24.0.rc0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related	[flat|nested] 10+ messages in thread
* [PATCH] drm/i915/selftests: Add coverage of mocs registers
2019-10-22 11:51 [PATCH 4/4] drm/i915/selftests: Add coverage of mocs registers Chris Wilson
@ 2019-10-22 11:57 ` Chris Wilson
2019-10-23 21:03 ` Kumar Valsan, Prathap
0 siblings, 1 reply; 10+ messages in thread
From: Chris Wilson @ 2019-10-22 11:57 UTC (permalink / raw)
To: intel-gfx
Probe the mocs registers for new contexts and across GPU resets. Similar
to intel_workarounds, we have tables of what register values we expect
to see, so verify that user contexts are affected by them. In the
future, we should add tests similar to intel_sseu to cover dynamic
reconfigurations.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
drivers/gpu/drm/i915/gt/intel_mocs.c | 4 +
drivers/gpu/drm/i915/gt/selftest_mocs.c | 393 ++++++++++++++++++
.../drm/i915/selftests/i915_live_selftests.h | 1 +
3 files changed, 398 insertions(+)
create mode 100644 drivers/gpu/drm/i915/gt/selftest_mocs.c
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 445ec025bda0..06dba7ff294e 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -448,3 +448,7 @@ void intel_mocs_init(struct intel_gt *gt)
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
init_global_mocs(gt);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000000..ca9679c3ee68
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,393 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ int err;
+
+ if (!get_mocs_settings(gt->i915, &arg->table))
+ return -EINVAL;
+
+ arg->scratch = create_scratch(gt);
+ if (IS_ERR(arg->scratch))
+ return PTR_ERR(arg->scratch);
+
+ arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_unpin_and_release(&arg->scratch, 0);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+}
+
+static int read_regs(struct i915_request *rq,
+ u32 addr, unsigned int count,
+ uint32_t *offset)
+{
+ unsigned int i;
+ u32 *cs;
+
+ GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0; i < count; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = *offset;
+ *cs++ = 0;
+
+ addr += sizeof(u32);
+ *offset += sizeof(u32);
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ uint32_t *offset)
+{
+ u32 addr;
+
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
+ addr = 0x4000;
+ else
+ addr = mocs_register(rq->engine);
+
+ return read_regs(rq, addr, table->n_entries, offset);
+}
+
+static int read_l3cc_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ uint32_t *offset)
+{
+ /* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
+ u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+
+ return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ uint32_t **vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for_each_mocs(expect, table, i) {
+ if (**vaddr != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ }
+
+ return 0;
+}
+
+static int check_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ uint32_t **vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for_each_l3cc(expect, table, i) {
+ if (**vaddr != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_vma *vma = arg->scratch;
+ struct i915_request *rq;
+ u32 offset;
+ u32 *vaddr;
+ int err;
+
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+
+ offset = i915_ggtt_offset(vma);
+ if (!err)
+ err = read_mocs_table(rq, &arg->table, &offset);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = read_l3cc_table(rq, &arg->table, &offset);
+ offset -= i915_ggtt_offset(vma);
+ GEM_BUG_ON(offset > PAGE_SIZE);
+
+ err = request_add_sync(rq, err);
+ if (err)
+ return err;
+
+ vaddr = arg->vaddr;
+ if (!err)
+ err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(arg->vaddr + offset != vaddr);
+ return 0;
+}
+
+static int live_mocs_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ err = check_mocs_engine(&mocs, engine->kernel_context);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = check_mocs_engine(&mocs, ce);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0)
+ err = intel_engine_reset(ce->engine, reason);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+}
+
+static int __live_mocs_reset(struct live_mocs *mocs,
+ struct intel_context *ce)
+{
+ int err;
+
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err = 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ intel_engine_pm_get(engine);
+ err = __live_mocs_reset(&mocs, ce);
+ intel_engine_pm_put(engine);
+
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_kernel),
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 00a063730bc3..c23d06bca09e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(gt_heartbeat, intel_heartbeat_live_selftests)
selftest(requests, i915_request_live_selftests)
--
2.24.0.rc0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related	[flat|nested] 10+ messages in thread
* Re: [PATCH] drm/i915/selftests: Add coverage of mocs registers
2019-10-22 11:57 ` [PATCH] " Chris Wilson
@ 2019-10-23 21:03 ` Kumar Valsan, Prathap
2019-10-24 7:13 ` Chris Wilson
0 siblings, 1 reply; 10+ messages in thread
From: Kumar Valsan, Prathap @ 2019-10-23 21:03 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
On Tue, Oct 22, 2019 at 12:57:05PM +0100, Chris Wilson wrote:
> Probe the mocs registers for new contexts and across GPU resets. Similar
> to intel_workarounds, we have tables of what register values we expect
> to see, so verify that user contexts are affected by them. In the
> future, we should add tests similar to intel_sseu to cover dynamic
> reconfigurations.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
s/for_each_engine/for_each_uabi_engine ?
Otherwise
Reviewed-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> ---
> drivers/gpu/drm/i915/gt/intel_mocs.c | 4 +
> drivers/gpu/drm/i915/gt/selftest_mocs.c | 393 ++++++++++++++++++
> .../drm/i915/selftests/i915_live_selftests.h | 1 +
> 3 files changed, 398 insertions(+)
> create mode 100644 drivers/gpu/drm/i915/gt/selftest_mocs.c
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
> index 445ec025bda0..06dba7ff294e 100644
> --- a/drivers/gpu/drm/i915/gt/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
> @@ -448,3 +448,7 @@ void intel_mocs_init(struct intel_gt *gt)
> if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
> init_global_mocs(gt);
> }
> +
> +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> +#include "selftest_mocs.c"
> +#endif
> diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
> new file mode 100644
> index 000000000000..ca9679c3ee68
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
> @@ -0,0 +1,393 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include "gt/intel_engine_pm.h"
> +#include "i915_selftest.h"
> +
> +#include "gem/selftests/mock_context.h"
> +#include "selftests/igt_reset.h"
> +#include "selftests/igt_spinner.h"
> +
> +struct live_mocs {
> + struct drm_i915_mocs_table table;
> + struct i915_vma *scratch;
> + void *vaddr;
> +};
> +
> +static int request_add_sync(struct i915_request *rq, int err)
> +{
> + i915_request_get(rq);
> + i915_request_add(rq);
> + if (i915_request_wait(rq, 0, HZ / 5) < 0)
> + err = -ETIME;
> + i915_request_put(rq);
> +
> + return err;
> +}
> +
> +static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
> +{
> + int err = 0;
> +
> + i915_request_get(rq);
> + i915_request_add(rq);
> + if (spin && !igt_wait_for_spinner(spin, rq))
> + err = -ETIME;
> + i915_request_put(rq);
> +
> + return err;
> +}
> +
> +static struct i915_vma *create_scratch(struct intel_gt *gt)
> +{
> + struct drm_i915_gem_object *obj;
> + struct i915_vma *vma;
> + int err;
> +
> + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
> + if (IS_ERR(obj))
> + return ERR_CAST(obj);
> +
> + i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
> +
> + vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
> + if (IS_ERR(vma)) {
> + i915_gem_object_put(obj);
> + return vma;
> + }
> +
> + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
> + if (err) {
> + i915_gem_object_put(obj);
> + return ERR_PTR(err);
> + }
> +
> + return vma;
> +}
> +
> +static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
> +{
> + int err;
> +
> + if (!get_mocs_settings(gt->i915, &arg->table))
> + return -EINVAL;
> +
> + arg->scratch = create_scratch(gt);
> + if (IS_ERR(arg->scratch))
> + return PTR_ERR(arg->scratch);
> +
> + arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
> + if (IS_ERR(arg->vaddr)) {
> + err = PTR_ERR(arg->vaddr);
> + goto err_scratch;
> + }
> +
> + return 0;
> +
> +err_scratch:
> + i915_vma_unpin_and_release(&arg->scratch, 0);
> + return err;
> +}
> +
> +static void live_mocs_fini(struct live_mocs *arg)
> +{
> + i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
> +}
> +
> +static int read_regs(struct i915_request *rq,
> + u32 addr, unsigned int count,
> + uint32_t *offset)
> +{
> + unsigned int i;
> + u32 *cs;
> +
> + GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
> +
> + cs = intel_ring_begin(rq, 4 * count);
> + if (IS_ERR(cs))
> + return PTR_ERR(cs);
> +
> + for (i = 0; i < count; i++) {
> + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
> + *cs++ = addr;
> + *cs++ = *offset;
> + *cs++ = 0;
> +
> + addr += sizeof(u32);
> + *offset += sizeof(u32);
> + }
> +
> + intel_ring_advance(rq, cs);
> +
> + return 0;
> +}
> +
> +static int read_mocs_table(struct i915_request *rq,
> + const struct drm_i915_mocs_table *table,
> + uint32_t *offset)
> +{
> + u32 addr;
> +
> + if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
> + addr = 0x4000;
> + else
> + addr = mocs_register(rq->engine);
> +
> + return read_regs(rq, addr, table->n_entries, offset);
> +}
> +
> +static int read_l3cc_table(struct i915_request *rq,
> + const struct drm_i915_mocs_table *table,
> + uint32_t *offset)
> +{
> + /* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
> + u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
> +
> + return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
> +}
> +
> +static int check_mocs_table(struct intel_engine_cs *engine,
> + const struct drm_i915_mocs_table *table,
> + uint32_t **vaddr)
> +{
> + unsigned int i;
> + u32 expect;
> +
> + for_each_mocs(expect, table, i) {
> + if (**vaddr != expect) {
> + pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
> + engine->name, i, **vaddr, expect);
> + return -EINVAL;
> + }
> + ++*vaddr;
> + }
> +
> + return 0;
> +}
> +
> +static int check_l3cc_table(struct intel_engine_cs *engine,
> + const struct drm_i915_mocs_table *table,
> + uint32_t **vaddr)
> +{
> + unsigned int i;
> + u32 expect;
> +
> + for_each_l3cc(expect, table, i) {
> + if (**vaddr != expect) {
> + pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
> + engine->name, i, **vaddr, expect);
> + return -EINVAL;
> + }
> + ++*vaddr;
> + }
> +
> + return 0;
> +}
> +
> +static int check_mocs_engine(struct live_mocs *arg,
> + struct intel_context *ce)
> +{
> + struct i915_vma *vma = arg->scratch;
> + struct i915_request *rq;
> + u32 offset;
> + u32 *vaddr;
> + int err;
> +
> + memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
> +
> + rq = intel_context_create_request(ce);
> + if (IS_ERR(rq))
> + return PTR_ERR(rq);
> +
> + i915_vma_lock(vma);
> + err = i915_request_await_object(rq, vma->obj, true);
> + if (!err)
> + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
> + i915_vma_unlock(vma);
> +
> + offset = i915_ggtt_offset(vma);
> + if (!err)
> + err = read_mocs_table(rq, &arg->table, &offset);
> + if (!err && ce->engine->class == RENDER_CLASS)
> + err = read_l3cc_table(rq, &arg->table, &offset);
> + offset -= i915_ggtt_offset(vma);
> + GEM_BUG_ON(offset > PAGE_SIZE);
> +
> + err = request_add_sync(rq, err);
> + if (err)
> + return err;
> +
> + vaddr = arg->vaddr;
> + if (!err)
> + err = check_mocs_table(ce->engine, &arg->table, &vaddr);
> + if (!err && ce->engine->class == RENDER_CLASS)
> + err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
> + if (err)
> + return err;
> +
> + GEM_BUG_ON(arg->vaddr + offset != vaddr);
> + return 0;
> +}
> +
> +static int live_mocs_kernel(void *arg)
> +{
> + struct intel_gt *gt = arg;
> + struct intel_engine_cs *engine;
> + enum intel_engine_id id;
> + struct live_mocs mocs;
> + int err;
> +
> + err = live_mocs_init(&mocs, gt);
> + if (err)
> + return err;
> +
> + for_each_engine(engine, gt, id) {
> + err = check_mocs_engine(&mocs, engine->kernel_context);
> + if (err)
> + break;
> + }
> +
> + live_mocs_fini(&mocs);
> + return err;
> +}
> +
> +static int live_mocs_clean(void *arg)
> +{
> + struct intel_gt *gt = arg;
> + struct intel_engine_cs *engine;
> + enum intel_engine_id id;
> + struct live_mocs mocs;
> + int err;
> +
> + err = live_mocs_init(&mocs, gt);
> + if (err)
> + return err;
> +
> + for_each_engine(engine, gt, id) {
> + struct intel_context *ce;
> +
> + ce = intel_context_create(engine->kernel_context->gem_context,
> + engine);
> + if (IS_ERR(ce)) {
> + err = PTR_ERR(ce);
> + break;
> + }
> +
> + err = check_mocs_engine(&mocs, ce);
> + intel_context_put(ce);
> + if (err)
> + break;
> + }
> +
> + live_mocs_fini(&mocs);
> + return err;
> +}
> +
> +static int active_engine_reset(struct intel_context *ce,
> + const char *reason)
> +{
> + struct igt_spinner spin;
> + struct i915_request *rq;
> + int err;
> +
> + err = igt_spinner_init(&spin, ce->engine->gt);
> + if (err)
> + return err;
> +
> + rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
> + if (IS_ERR(rq)) {
> + igt_spinner_fini(&spin);
> + return PTR_ERR(rq);
> + }
> +
> + err = request_add_spin(rq, &spin);
> + if (err == 0)
> + err = intel_engine_reset(ce->engine, reason);
> +
> + igt_spinner_end(&spin);
> + igt_spinner_fini(&spin);
> +
> + return err;
> +}
> +
> +static int __live_mocs_reset(struct live_mocs *mocs,
> + struct intel_context *ce)
> +{
> + int err;
> +
> + err = intel_engine_reset(ce->engine, "mocs");
> + if (err)
> + return err;
> +
> + err = check_mocs_engine(mocs, ce);
> + if (err)
> + return err;
> +
> + err = active_engine_reset(ce, "mocs");
> + if (err)
> + return err;
> +
> + err = check_mocs_engine(mocs, ce);
> + if (err)
> + return err;
> +
> + return 0;
> +}
> +
> +static int live_mocs_reset(void *arg)
> +{
> + struct intel_gt *gt = arg;
> + struct intel_engine_cs *engine;
> + enum intel_engine_id id;
> + struct live_mocs mocs;
> + int err = 0;
> +
> + if (!intel_has_reset_engine(gt))
> + return 0;
> +
> + err = live_mocs_init(&mocs, gt);
> + if (err)
> + return err;
> +
> + igt_global_reset_lock(gt);
> + for_each_engine(engine, gt, id) {
> + struct intel_context *ce;
> +
> + ce = intel_context_create(engine->kernel_context->gem_context,
> + engine);
> + if (IS_ERR(ce)) {
> + err = PTR_ERR(ce);
> + break;
> + }
> +
> + intel_engine_pm_get(engine);
> + err = __live_mocs_reset(&mocs, ce);
> + intel_engine_pm_put(engine);
> +
> + intel_context_put(ce);
> + if (err)
> + break;
> + }
> + igt_global_reset_unlock(gt);
> +
> + live_mocs_fini(&mocs);
> + return err;
> +}
> +
> +int intel_mocs_live_selftests(struct drm_i915_private *i915)
> +{
> + static const struct i915_subtest tests[] = {
> + SUBTEST(live_mocs_kernel),
> + SUBTEST(live_mocs_clean),
> + SUBTEST(live_mocs_reset),
> + };
> + struct drm_i915_mocs_table table;
> +
> + if (!get_mocs_settings(i915, &table))
> + return 0;
> +
> + return intel_gt_live_subtests(tests, &i915->gt);
> +}
> diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> index 00a063730bc3..c23d06bca09e 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> @@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests)
> selftest(gt_timelines, intel_timeline_live_selftests)
> selftest(gt_contexts, intel_context_live_selftests)
> selftest(gt_lrc, intel_lrc_live_selftests)
> +selftest(gt_mocs, intel_mocs_live_selftests)
> selftest(gt_pm, intel_gt_pm_live_selftests)
> selftest(gt_heartbeat, intel_heartbeat_live_selftests)
> selftest(requests, i915_request_live_selftests)
> --
> 2.24.0.rc0
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply	[flat|nested] 10+ messages in thread
* Re: [PATCH] drm/i915/selftests: Add coverage of mocs registers
2019-10-23 21:03 ` Kumar Valsan, Prathap
@ 2019-10-24 7:13 ` Chris Wilson
2019-10-24 17:01 ` Kumar Valsan, Prathap
0 siblings, 1 reply; 10+ messages in thread
From: Chris Wilson @ 2019-10-24 7:13 UTC (permalink / raw)
To: Kumar Valsan, Prathap; +Cc: intel-gfx
Quoting Kumar Valsan, Prathap (2019-10-23 22:03:40)
> On Tue, Oct 22, 2019 at 12:57:05PM +0100, Chris Wilson wrote:
> > Probe the mocs registers for new contexts and across GPU resets. Similar
> > to intel_workarounds, we have tables of what register values we expect
> > to see, so verify that user contexts are affected by them. In the
> > future, we should add tests similar to intel_sseu to cover dynamic
> > reconfigurations.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
>
> s/for_each_engine/for_each_uabi_engine ?
No, we are inside the gt compartment, so we only operate within our
little enclosure. Think parallelism...
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] drm/i915/selftests: Add coverage of mocs registers
2019-10-24 7:13 ` Chris Wilson
@ 2019-10-24 17:01 ` Kumar Valsan, Prathap
0 siblings, 0 replies; 10+ messages in thread
From: Kumar Valsan, Prathap @ 2019-10-24 17:01 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
On Thu, Oct 24, 2019 at 08:13:29AM +0100, Chris Wilson wrote:
> Quoting Kumar Valsan, Prathap (2019-10-23 22:03:40)
> > On Tue, Oct 22, 2019 at 12:57:05PM +0100, Chris Wilson wrote:
> > > Probe the mocs registers for new contexts and across GPU resets. Similar
> > > to intel_workarounds, we have tables of what register values we expect
> > > to see, so verify that user contexts are affected by them. In the
> > > future, we should add tests similar to intel_sseu to cover dynamic
> > > reconfigurations.
> > >
> > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> > > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> >
> > s/for_each_engine/for_each_uabi_engine ?
>
> No, we are inside the gt compartment, so we only operate within our
> little enclosure. Think parallelism...
Ok. Thanks.
> -Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH] drm/i915/selftests: Add coverage of mocs registers
@ 2019-10-17 8:01 Chris Wilson
2019-10-17 9:27 ` Chris Wilson
0 siblings, 1 reply; 10+ messages in thread
From: Chris Wilson @ 2019-10-17 8:01 UTC (permalink / raw)
To: intel-gfx
Probe the mocs registers for new contexts and across GPU resets. Similar
to intel_workarounds, we have tables of what register values we expect
to see, so verify that user contexts are affected by them. In the
future, we should add tests similar to intel_sseu to cover dynamic
reconfigurations.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
---
drivers/gpu/drm/i915/gt/intel_mocs.c | 4 +
drivers/gpu/drm/i915/gt/selftest_mocs.c | 309 ++++++++++++++++++
.../drm/i915/selftests/i915_live_selftests.h | 1 +
3 files changed, 314 insertions(+)
create mode 100644 drivers/gpu/drm/i915/gt/selftest_mocs.c
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 5bac3966906b..f5a239640553 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -490,3 +490,7 @@ void intel_mocs_init(struct intel_gt *gt)
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
intel_mocs_init_global(gt);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000000..0aac3b1ab846
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,309 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ int err;
+
+ if (!get_mocs_settings(gt->i915, &arg->table))
+ return -EINVAL;
+
+ arg->ctx = kernel_context(gt->i915);
+ if (!arg->ctx)
+ return -ENOMEM;
+
+ arg->scratch = create_scratch(gt);
+ if (IS_ERR(arg->scratch)) {
+ err = PTR_ERR(arg->scratch);
+ goto err_ctx;
+ }
+
+ arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_put(arg->scratch);
+err_ctx:
+ kernel_context_close(arg->ctx);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+ kernel_context_close(arg->ctx);
+}
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ struct i915_vma *vma)
+{
+ unsigned int i;
+ int err;
+ u32 *cs;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * table->n_entries);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0; i < table->n_entries; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ?
+ GEN12_GLOBAL_MOCS(i) :
+ mocs_register(rq->engine, i));
+ *cs++ = i915_ggtt_offset(vma) + i * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ const u32 *vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for (i = 0; i < table->size; i++) {
+ if (HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ expect = table->table[i].control_value;
+ else
+ expect = get_entry_control(table, i);
+
+ if (vaddr[i] != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[i], expect);
+ return -EINVAL;
+ }
+ }
+
+ /* All remaining entries are default */
+ if (HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ expect = table->table[0].control_value;
+ else
+ expect = table->table[I915_MOCS_PTE].control_value;
+ for (; i < table->n_entries; i++) {
+ if (vaddr[i] != expect) {
+ pr_err("%s: Invalid MOCS[%d*] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[i], expect);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err;
+
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE/sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = read_mocs_table(rq, &arg->table, arg->scratch);
+
+ err = request_add_sync(rq, err);
+ if(err)
+ return err;
+
+ return check_mocs_table(ce->engine, &arg->table, arg->vaddr);
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ struct live_mocs mocs;
+ int err;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ /* XXX for_each_engine(gt) once we can create raw intel_context */
+ for_each_gem_engine(ce, i915_gem_context_engines(mocs.ctx), it) {
+ err = check_mocs_engine(&mocs, ce);
+ if (err)
+ break;
+ }
+ i915_gem_context_unlock_engines(mocs.ctx);
+
+ live_mocs_fini(&mocs);
+
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0)
+ err = intel_engine_reset(ce->engine, reason);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+
+}
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ intel_wakeref_t wakeref;
+ struct live_mocs mocs;
+ int err = 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ for_each_gem_engine(ce, i915_gem_context_engines(mocs.ctx), it) {
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ break;
+
+ err = check_mocs_engine(&mocs, ce);
+ if (err)
+ break;
+
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ break;
+
+ err = check_mocs_engine(&mocs, ce);
+ if (err)
+ break;
+ }
+ i915_gem_context_unlock_engines(mocs.ctx);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 6daf6599ec79..1a6abcffce81 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
--
2.23.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH] drm/i915/selftests: Add coverage of mocs registers
2019-10-17 8:01 Chris Wilson
@ 2019-10-17 9:27 ` Chris Wilson
0 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2019-10-17 9:27 UTC (permalink / raw)
To: intel-gfx
Probe the mocs registers for new contexts and across GPU resets. Similar
to intel_workarounds, we have tables of what register values we expect
to see, so verify that user contexts are affected by them. In the
future, we should add tests similar to intel_sseu to cover dynamic
reconfigurations.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
---
Helps to lock the engines.
---
drivers/gpu/drm/i915/gt/intel_mocs.c | 4 +
drivers/gpu/drm/i915/gt/selftest_mocs.c | 309 ++++++++++++++++++
.../drm/i915/selftests/i915_live_selftests.h | 1 +
3 files changed, 314 insertions(+)
create mode 100644 drivers/gpu/drm/i915/gt/selftest_mocs.c
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 5bac3966906b..f5a239640553 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -490,3 +490,7 @@ void intel_mocs_init(struct intel_gt *gt)
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
intel_mocs_init_global(gt);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000000..46ae9976570f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,309 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ int err;
+
+ if (!get_mocs_settings(gt->i915, &arg->table))
+ return -EINVAL;
+
+ arg->ctx = kernel_context(gt->i915);
+ if (!arg->ctx)
+ return -ENOMEM;
+
+ arg->scratch = create_scratch(gt);
+ if (IS_ERR(arg->scratch)) {
+ err = PTR_ERR(arg->scratch);
+ goto err_ctx;
+ }
+
+ arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_unpin_and_release(&arg->scratch, 0);
+err_ctx:
+ kernel_context_close(arg->ctx);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+ kernel_context_close(arg->ctx);
+}
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ struct i915_vma *vma)
+{
+ unsigned int i;
+ int err;
+ u32 *cs;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * table->n_entries);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0; i < table->n_entries; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ?
+ GEN12_GLOBAL_MOCS(i) :
+ mocs_register(rq->engine, i));
+ *cs++ = i915_ggtt_offset(vma) + i * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ const u32 *vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for (i = 0; i < table->size; i++) {
+ if (HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ expect = table->table[i].control_value;
+ else
+ expect = get_entry_control(table, i);
+
+ if (vaddr[i] != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[i], expect);
+ return -EINVAL;
+ }
+ }
+
+ /* All remaining entries are default */
+ if (HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ expect = table->table[0].control_value;
+ else
+ expect = table->table[I915_MOCS_PTE].control_value;
+ for (; i < table->n_entries; i++) {
+ if (vaddr[i] != expect) {
+ pr_err("%s: Invalid MOCS[%d*] entry, found %08x, expected %08x\n",
+ engine->name, i, vaddr[i], expect);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err;
+
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE/sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = read_mocs_table(rq, &arg->table, arg->scratch);
+
+ err = request_add_sync(rq, err);
+ if(err)
+ return err;
+
+ return check_mocs_table(ce->engine, &arg->table, arg->vaddr);
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ struct live_mocs mocs;
+ int err;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ /* XXX for_each_engine(gt) once we can create raw intel_context */
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(mocs.ctx), it) {
+ err = check_mocs_engine(&mocs, ce);
+ if (err)
+ break;
+ }
+ i915_gem_context_unlock_engines(mocs.ctx);
+
+ live_mocs_fini(&mocs);
+
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0)
+ err = intel_engine_reset(ce->engine, reason);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+
+}
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ intel_wakeref_t wakeref;
+ struct live_mocs mocs;
+ int err = 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(mocs.ctx), it) {
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ break;
+
+ err = check_mocs_engine(&mocs, ce);
+ if (err)
+ break;
+
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ break;
+
+ err = check_mocs_engine(&mocs, ce);
+ if (err)
+ break;
+ }
+ i915_gem_context_unlock_engines(mocs.ctx);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 6daf6599ec79..1a6abcffce81 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
--
2.23.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
end of thread, other threads:[~2019-10-24 16:44 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-18 12:06 [PATCH] drm/i915/selftests: Add coverage of mocs registers Chris Wilson
2019-10-18 13:51 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Add coverage of mocs registers (rev5) Patchwork
2019-10-18 14:09 ` [PATCH] drm/i915/selftests: Add coverage of mocs registers Kumar Valsan, Prathap
2019-10-18 14:16 ` ✗ Fi.CI.BAT: failure for drm/i915/selftests: Add coverage of mocs registers (rev5) Patchwork
-- strict thread matches above, loose matches on Subject: below --
2019-10-22 11:51 [PATCH 4/4] drm/i915/selftests: Add coverage of mocs registers Chris Wilson
2019-10-22 11:57 ` [PATCH] " Chris Wilson
2019-10-23 21:03 ` Kumar Valsan, Prathap
2019-10-24 7:13 ` Chris Wilson
2019-10-24 17:01 ` Kumar Valsan, Prathap
2019-10-17 8:01 Chris Wilson
2019-10-17 9:27 ` Chris Wilson
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).