* [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates
@ 2023-08-18 19:42 Andi Shyti
2023-08-18 19:42 ` [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner Andi Shyti
` (3 more replies)
0 siblings, 4 replies; 8+ messages in thread
From: Andi Shyti @ 2023-08-18 19:42 UTC (permalink / raw)
To: Jonathan Cavitt; +Cc: intel-gfx
From: Nirmoy Das <nirmoy.das@intel.com>
Create a separate blitter context if a platform requires
GGTT updates using the MI_UPDATE_GTT blitter command.
A subsequent patch will introduce methods to update
the GGTT using this blitter context and the MI_UPDATE_GTT
blitter command.
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
---
drivers/gpu/drm/i915/gt/intel_engine.h | 4 ++
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 44 +++++++++++++++++++-
drivers/gpu/drm/i915/gt/intel_engine_types.h | 3 ++
drivers/gpu/drm/i915/gt/intel_gtt.c | 4 ++
drivers/gpu/drm/i915/gt/intel_gtt.h | 2 +
5 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index b58c30ac8ef02..ee36db2fdaa7c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
+#define I915_GEM_HWS_GGTT_BLIT 0x46
+#define I915_GEM_HWS_GGTT_BLIT_ADDR (I915_GEM_HWS_GGTT_BLIT * sizeof(u32))
#define I915_GEM_HWS_PXP 0x60
#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
#define I915_GEM_HWS_GSC 0x62
@@ -356,4 +358,6 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value);
u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value);
u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value);
+void intel_engine_blitter_context_set_ready(struct intel_gt *gt, bool ready);
+bool intel_engine_blitter_context_ready(struct intel_gt *gt);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ee15486fed0da..9871ee5ab754b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -27,6 +27,7 @@
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
+#include "intel_gtt.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_reset.h"
@@ -1419,6 +1420,34 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce)
intel_context_put(ce);
}
+void intel_engine_blitter_context_set_ready(struct intel_gt *gt, bool ready)
+{
+ struct intel_engine_cs *engine = gt->engine[BCS0];
+
+ if (engine && engine->blitter_context)
+ atomic_set(&engine->blitter_context_ready, ready ? 1 : 0);
+}
+
+bool intel_engine_blitter_context_ready(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine = gt->engine[BCS0];
+
+ if (engine)
+ return atomic_read(&engine->blitter_context_ready) == 1;
+
+ return false;
+}
+
+static struct intel_context *
+create_ggtt_blitter_context(struct intel_engine_cs *engine)
+{
+ static struct lock_class_key kernel;
+
+ /* MI_UPDATE_GTT can insert up to 512 PTE entries so get a bigger ring */
+ return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
+ I915_GEM_HWS_GGTT_BLIT_ADDR,
+ &kernel, "ggtt_blitter_context");
+}
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
@@ -1442,7 +1471,7 @@ create_kernel_context(struct intel_engine_cs *engine)
*/
static int engine_init_common(struct intel_engine_cs *engine)
{
- struct intel_context *ce;
+ struct intel_context *ce, *bce = NULL;
int ret;
engine->set_default_submission(engine);
@@ -1458,6 +1487,15 @@ static int engine_init_common(struct intel_engine_cs *engine)
ce = create_kernel_context(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ /*
+ * Create a separate pinned context for GGTT updates using the blitter,
+ * if the platform requires such a service.
+ */
+ if (i915_ggtt_require_blitter(engine->i915) && engine->id == BCS0) {
+ bce = create_ggtt_blitter_context(engine);
+ if (IS_ERR(bce))
+ return PTR_ERR(bce);
+ }
ret = measure_breadcrumb_dw(ce);
if (ret < 0)
@@ -1465,6 +1503,7 @@ static int engine_init_common(struct intel_engine_cs *engine)
engine->emit_fini_breadcrumb_dw = ret;
engine->kernel_context = ce;
+ engine->blitter_context = bce;
return 0;
@@ -1537,6 +1576,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->kernel_context)
intel_engine_destroy_pinned_context(engine->kernel_context);
+ if (engine->blitter_context)
+ intel_engine_destroy_pinned_context(engine->blitter_context);
+
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
cleanup_status_page(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e99a6fa03d453..62095c0d8783d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -415,6 +415,9 @@ struct intel_engine_cs {
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
+ struct intel_context *blitter_context; /* pinned, only for BCS0 */
+ /* tracks whether the GGTT blitter context is ready for use */
+ atomic_t blitter_context_ready;
/**
* pinned_contexts_list: List of pinned contexts. This list is only
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 13944a14ea2d1..9c77c97670fea 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -21,6 +21,10 @@
#include "intel_gt_regs.h"
#include "intel_gtt.h"
+bool i915_ggtt_require_blitter(struct drm_i915_private *i915)
+{
+ return IS_METEORLAKE(i915);
+}
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 4d6296cdbcfdd..9710eb031fb2c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -688,4 +688,6 @@ static inline struct sgt_dma {
return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
+bool i915_ggtt_require_blitter(struct drm_i915_private *i915);
+
#endif
--
2.40.1
^ permalink raw reply related [flat|nested] 8+ messages in thread* [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner 2023-08-18 19:42 [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates Andi Shyti @ 2023-08-18 19:42 ` Andi Shyti 2023-08-18 19:42 ` [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter Andi Shyti ` (2 subsequent siblings) 3 siblings, 0 replies; 8+ messages in thread From: Andi Shyti @ 2023-08-18 19:42 UTC (permalink / raw) To: Jonathan Cavitt; +Cc: intel-gfx From: Jonathan Cavitt <jonathan.cavitt@intel.com> The hangcheck live selftest contains duplicate declarations of some functions that already exist in igt_spinner.c, such as the creation and deconstruction of a spinning batch buffer (spinner) that hangs an engine. It's undesireable to have such code duplicated, as the requirements for the spinner may change with hardware updates, necessitating both execution paths be updated. To avoid this, have the hangcheck live selftest use the declaration from igt_spinner. This eliminates the need for the declarations in the selftest itself, as well as the associated local helper structures, so we can erase those. Suggested-by: Matt Roper <matthew.d.roper@intel.com> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com> --- Hi, I'm resending this patch in order to be sure that CI doesn't complain. 
Andi drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 457 ++++++------------- drivers/gpu/drm/i915/selftests/igt_spinner.c | 15 +- drivers/gpu/drm/i915/selftests/igt_spinner.h | 9 + 3 files changed, 155 insertions(+), 326 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 0dd4d00ee894e..36376a4ade8e4 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -29,281 +29,40 @@ #define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */ -struct hang { - struct intel_gt *gt; - struct drm_i915_gem_object *hws; - struct drm_i915_gem_object *obj; - struct i915_gem_context *ctx; - u32 *seqno; - u32 *batch; -}; - -static int hang_init(struct hang *h, struct intel_gt *gt) -{ - void *vaddr; - int err; - - memset(h, 0, sizeof(*h)); - h->gt = gt; - - h->ctx = kernel_context(gt->i915, NULL); - if (IS_ERR(h->ctx)) - return PTR_ERR(h->ctx); - - GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); - - h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); - if (IS_ERR(h->hws)) { - err = PTR_ERR(h->hws); - goto err_ctx; - } - - h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); - if (IS_ERR(h->obj)) { - err = PTR_ERR(h->obj); - goto err_hws; - } - - i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC); - vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_obj; - } - h->seqno = memset(vaddr, 0xff, PAGE_SIZE); - - vaddr = i915_gem_object_pin_map_unlocked(h->obj, - intel_gt_coherent_map_type(gt, h->obj, false)); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_unpin_hws; - } - h->batch = vaddr; - - return 0; - -err_unpin_hws: - i915_gem_object_unpin_map(h->hws); -err_obj: - i915_gem_object_put(h->obj); -err_hws: - i915_gem_object_put(h->hws); -err_ctx: - kernel_context_close(h->ctx); - return err; -} - -static u64 hws_address(const struct 
i915_vma *hws, - const struct i915_request *rq) -{ - return i915_vma_offset(hws) + - offset_in_page(sizeof(u32) * rq->fence.context); -} - -static struct i915_request * -hang_create_request(struct hang *h, struct intel_engine_cs *engine) -{ - struct intel_gt *gt = h->gt; - struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx); - struct drm_i915_gem_object *obj; - struct i915_request *rq = NULL; - struct i915_vma *hws, *vma; - unsigned int flags; - void *vaddr; - u32 *batch; - int err; - - obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); - if (IS_ERR(obj)) { - i915_vm_put(vm); - return ERR_CAST(obj); - } - - vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false)); - if (IS_ERR(vaddr)) { - i915_gem_object_put(obj); - i915_vm_put(vm); - return ERR_CAST(vaddr); - } - - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); - - h->obj = obj; - h->batch = vaddr; - - vma = i915_vma_instance(h->obj, vm, NULL); - if (IS_ERR(vma)) { - i915_vm_put(vm); - return ERR_CAST(vma); - } - - hws = i915_vma_instance(h->hws, vm, NULL); - if (IS_ERR(hws)) { - i915_vm_put(vm); - return ERR_CAST(hws); - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) { - i915_vm_put(vm); - return ERR_PTR(err); - } - - err = i915_vma_pin(hws, 0, 0, PIN_USER); - if (err) - goto unpin_vma; - - rq = igt_request_alloc(h->ctx, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto unpin_hws; - } - - err = igt_vma_move_to_active_unlocked(vma, rq, 0); - if (err) - goto cancel_rq; - - err = igt_vma_move_to_active_unlocked(hws, rq, 0); - if (err) - goto cancel_rq; - - batch = h->batch; - if (GRAPHICS_VER(gt->i915) >= 8) { - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = upper_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_NOOP; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_NOOP; - *batch++ = MI_BATCH_BUFFER_START | 1 << 8 
| 1; - *batch++ = lower_32_bits(i915_vma_offset(vma)); - *batch++ = upper_32_bits(i915_vma_offset(vma)); - } else if (GRAPHICS_VER(gt->i915) >= 6) { - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = 0; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_NOOP; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_NOOP; - *batch++ = MI_BATCH_BUFFER_START | 1 << 8; - *batch++ = lower_32_bits(i915_vma_offset(vma)); - } else if (GRAPHICS_VER(gt->i915) >= 4) { - *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *batch++ = 0; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_NOOP; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_NOOP; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6; - *batch++ = lower_32_bits(i915_vma_offset(vma)); - } else { - *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_NOOP; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_NOOP; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6; - *batch++ = lower_32_bits(i915_vma_offset(vma)); - } - *batch++ = MI_BATCH_BUFFER_END; /* not reached */ - intel_gt_chipset_flush(engine->gt); - - if (rq->engine->emit_init_breadcrumb) { - err = rq->engine->emit_init_breadcrumb(rq); - if (err) - goto cancel_rq; - } - - flags = 0; - if (GRAPHICS_VER(gt->i915) <= 5) - flags |= I915_DISPATCH_SECURE; - - err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags); - -cancel_rq: - if (err) { - i915_request_set_error_once(rq, err); - i915_request_add(rq); - } -unpin_hws: - i915_vma_unpin(hws); -unpin_vma: - i915_vma_unpin(vma); - i915_vm_put(vm); - return err ? 
ERR_PTR(err) : rq; -} - -static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) -{ - return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]); -} - -static void hang_fini(struct hang *h) -{ - *h->batch = MI_BATCH_BUFFER_END; - intel_gt_chipset_flush(h->gt); - - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); - - i915_gem_object_unpin_map(h->hws); - i915_gem_object_put(h->hws); - - kernel_context_close(h->ctx); - - igt_flush_test(h->gt->i915); -} - -static bool wait_until_running(struct hang *h, struct i915_request *rq) -{ - return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), - rq->fence.seqno), - 10) && - wait_for(i915_seqno_passed(hws_seqno(h, rq), - rq->fence.seqno), - 1000)); -} - static int igt_hang_sanitycheck(void *arg) { struct intel_gt *gt = arg; struct i915_request *rq; struct intel_engine_cs *engine; enum intel_engine_id id; - struct hang h; + struct igt_spinner spin; int err; /* Basic check that we can execute our hanging batch */ - err = hang_init(&h, gt); + err = igt_spinner_init(&spin, gt); if (err) return err; for_each_engine(engine, gt, id) { struct intel_wedge_me w; + struct intel_context *ce; long timeout; if (!intel_engine_can_store_dword(engine)) continue; - rq = hang_create_request(&h, engine); + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("Failed to create context for %s, err=%d\n", + engine->name, err); + goto fini; + } + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); + if (IS_ERR(rq)) { err = PTR_ERR(rq); pr_err("Failed to create request for %s, err=%d\n", @@ -312,10 +71,7 @@ static int igt_hang_sanitycheck(void *arg) } i915_request_get(rq); - - *h.batch = MI_BATCH_BUFFER_END; - intel_gt_chipset_flush(engine->gt); - + igt_spinner_end(&spin); i915_request_add(rq); timeout = 0; @@ -336,7 +92,7 @@ static int igt_hang_sanitycheck(void *arg) } fini: - hang_fini(&h); + igt_spinner_fini(&spin); return err; } @@ -686,7 
+442,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; - struct hang h; + struct igt_spinner spin; int err = 0; /* Check that we can issue an engine reset on an idle engine (no-op) */ @@ -695,7 +451,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) return 0; if (active) { - err = hang_init(&h, gt); + err = igt_spinner_init(&spin, gt); if (err) return err; } @@ -739,7 +495,17 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) } if (active) { - rq = hang_create_request(&h, engine); + struct intel_context *ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("[%s] Create context failed: %d!\n", + engine->name, err); + goto restore; + } + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); + if (IS_ERR(rq)) { err = PTR_ERR(rq); pr_err("[%s] Create hang request failed: %d!\n", @@ -750,11 +516,11 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) i915_request_get(rq); i915_request_add(rq); - if (!wait_until_running(&h, rq)) { + if (!igt_wait_for_spinner(&spin, rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); + __func__, rq->fence.seqno, hws_seqno(&spin, rq)); intel_engine_dump(engine, &p, "%s\n", engine->name); @@ -835,7 +601,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) } if (active) - hang_fini(&h); + igt_spinner_fini(&spin); return err; } @@ -967,7 +733,7 @@ static int __igt_reset_engines(struct intel_gt *gt, struct intel_engine_cs *engine, *other; struct active_engine *threads; enum intel_engine_id id, tmp; - struct hang h; + struct igt_spinner spin; int err = 0; /* Check that issuing a reset on one engine does not interfere @@ -978,12 +744,9 @@ static int __igt_reset_engines(struct intel_gt 
*gt, return 0; if (flags & TEST_ACTIVE) { - err = hang_init(&h, gt); + err = igt_spinner_init(&spin, gt); if (err) return err; - - if (flags & TEST_PRIORITY) - h.ctx->sched.priority = 1024; } threads = kmalloc_array(I915_NUM_ENGINES, sizeof(*threads), GFP_KERNEL); @@ -1057,7 +820,20 @@ static int __igt_reset_engines(struct intel_gt *gt, } if (flags & TEST_ACTIVE) { - rq = hang_create_request(&h, engine); + struct intel_context *ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("[%s] Create context failed: %d!\n", + engine->name, err); + goto restore; + } + + if (flags && TEST_PRIORITY) + ce->gem_context->sched.priority = 1024; + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); + if (IS_ERR(rq)) { err = PTR_ERR(rq); pr_err("[%s] Create hang request failed: %d!\n", @@ -1068,11 +844,11 @@ static int __igt_reset_engines(struct intel_gt *gt, i915_request_get(rq); i915_request_add(rq); - if (!wait_until_running(&h, rq)) { + if (!igt_wait_for_spinner(&spin, rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); + __func__, rq->fence.seqno, hws_seqno(&spin, rq)); intel_engine_dump(engine, &p, "%s\n", engine->name); @@ -1240,7 +1016,7 @@ static int __igt_reset_engines(struct intel_gt *gt, err = -EIO; if (flags & TEST_ACTIVE) - hang_fini(&h); + igt_spinner_fini(&spin); return err; } @@ -1299,7 +1075,8 @@ static int igt_reset_wait(void *arg) struct intel_engine_cs *engine; struct i915_request *rq; unsigned int reset_count; - struct hang h; + struct igt_spinner spin; + struct intel_context *ce; long timeout; int err; @@ -1312,13 +1089,22 @@ static int igt_reset_wait(void *arg) igt_global_reset_lock(gt); - err = hang_init(&h, gt); + err = igt_spinner_init(&spin, gt); if (err) { - pr_err("[%s] Hang init failed: %d!\n", engine->name, err); + pr_err("[%s] Spinner init failed: %d!\n", engine->name, err); 
goto unlock; } - rq = hang_create_request(&h, engine); + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("[%s] Create context failed: %d!\n", engine->name, err); + goto fini; + } + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); + if (IS_ERR(rq)) { err = PTR_ERR(rq); pr_err("[%s] Create hang request failed: %d!\n", engine->name, err); @@ -1328,11 +1114,11 @@ static int igt_reset_wait(void *arg) i915_request_get(rq); i915_request_add(rq); - if (!wait_until_running(&h, rq)) { + if (!igt_wait_for_spinner(&spin, rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); + __func__, rq->fence.seqno, hws_seqno(&spin, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); intel_gt_set_wedged(gt); @@ -1360,7 +1146,7 @@ static int igt_reset_wait(void *arg) out_rq: i915_request_put(rq); fini: - hang_fini(&h); + igt_spinner_fini(&spin); unlock: igt_global_reset_unlock(gt); @@ -1433,7 +1219,8 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, struct task_struct *tsk = NULL; struct i915_request *rq; struct evict_vma arg; - struct hang h; + struct igt_spinner spin; + struct intel_context *ce; unsigned int pin_flags; int err; @@ -1447,9 +1234,9 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, /* Check that we can recover an unbind stuck on a hanging request */ - err = hang_init(&h, gt); + err = igt_spinner_init(&spin, gt); if (err) { - pr_err("[%s] Hang init failed: %d!\n", engine->name, err); + pr_err("[%s] Spinner init failed: %d!\n", engine->name, err); return err; } @@ -1475,7 +1262,16 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, goto out_obj; } - rq = hang_create_request(&h, engine); + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("[%s] Create context failed: %d!\n", engine->name, err); + goto out_obj; + } + + rq = 
igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); + if (IS_ERR(rq)) { err = PTR_ERR(rq); pr_err("[%s] Create hang request failed: %d!\n", engine->name, err); @@ -1517,11 +1313,11 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, if (err) goto out_rq; - if (!wait_until_running(&h, rq)) { + if (!igt_wait_for_spinner(&spin, rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); + __func__, rq->fence.seqno, hws_seqno(&spin, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); intel_gt_set_wedged(gt); @@ -1571,7 +1367,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, out_obj: i915_gem_object_put(obj); fini: - hang_fini(&h); + igt_spinner_fini(&spin); if (intel_gt_is_wedged(gt)) return -EIO; @@ -1638,20 +1434,21 @@ static int igt_reset_queue(void *arg) struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; - struct hang h; + struct igt_spinner spin; int err; /* Check that we replay pending requests following a hang */ igt_global_reset_lock(gt); - err = hang_init(&h, gt); + err = igt_spinner_init(&spin, gt); if (err) goto unlock; for_each_engine(engine, gt, id) { struct intel_selftest_saved_policy saved; struct i915_request *prev; + struct intel_context *ce; IGT_TIMEOUT(end_time); unsigned int count; bool using_guc = intel_engine_uses_guc(engine); @@ -1668,7 +1465,16 @@ static int igt_reset_queue(void *arg) } } - prev = hang_create_request(&h, engine); + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("[%s] Create 'prev' context failed: %d!\n", engine->name, err); + goto restore; + } + + prev = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); + if (IS_ERR(prev)) { err = PTR_ERR(prev); pr_err("[%s] Create 'prev' hang request failed: %d!\n", engine->name, err); @@ -1682,8 +1488,17 @@ static int 
igt_reset_queue(void *arg) do { struct i915_request *rq; unsigned int reset_count; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("[%s] Create context failed: %d!\n", engine->name, err); + goto restore; + } + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); - rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); pr_err("[%s] Create hang request failed: %d!\n", engine->name, err); @@ -1715,12 +1530,12 @@ static int igt_reset_queue(void *arg) goto restore; } - if (!wait_until_running(&h, prev)) { + if (!igt_wait_for_spinner(&spin, prev)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, - prev->fence.seqno, hws_seqno(&h, prev)); + prev->fence.seqno, hws_seqno(&spin, prev)); intel_engine_dump(engine, &p, "%s\n", engine->name); @@ -1768,9 +1583,7 @@ static int igt_reset_queue(void *arg) pr_info("%s: Completed %d queued resets\n", engine->name, count); - *h.batch = MI_BATCH_BUFFER_END; - intel_gt_chipset_flush(engine->gt); - + igt_spinner_end(&spin); i915_request_put(prev); restore: @@ -1794,7 +1607,7 @@ static int igt_reset_queue(void *arg) } fini: - hang_fini(&h); + igt_spinner_fini(&spin); unlock: igt_global_reset_unlock(gt); @@ -1809,7 +1622,8 @@ static int igt_handle_error(void *arg) struct intel_gt *gt = arg; struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; - struct hang h; + struct igt_spinner spin; + struct intel_context *ce; struct i915_request *rq; struct i915_gpu_coredump *error; int err; @@ -1824,13 +1638,22 @@ static int igt_handle_error(void *arg) if (!engine || !intel_engine_can_store_dword(engine)) return 0; - err = hang_init(&h, gt); + err = igt_spinner_init(&spin, gt); if (err) { - pr_err("[%s] Hang init failed: %d!\n", engine->name, err); + pr_err("[%s] Spinner init failed: %d!\n", engine->name, err); return err; } - rq = 
hang_create_request(&h, engine); + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("[%s] Create context failed: %d!\n", engine->name, err); + goto err_fini; + } + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); + if (IS_ERR(rq)) { err = PTR_ERR(rq); pr_err("[%s] Create hang request failed: %d!\n", engine->name, err); @@ -1840,11 +1663,11 @@ static int igt_handle_error(void *arg) i915_request_get(rq); i915_request_add(rq); - if (!wait_until_running(&h, rq)) { + if (!igt_wait_for_spinner(&spin, rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); + __func__, rq->fence.seqno, hws_seqno(&spin, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); intel_gt_set_wedged(gt); @@ -1869,7 +1692,7 @@ static int igt_handle_error(void *arg) err_request: i915_request_put(rq); err_fini: - hang_fini(&h); + igt_spinner_fini(&spin); return err; } @@ -1910,20 +1733,30 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine, const struct igt_atomic_section *p) { struct i915_request *rq; - struct hang h; + struct igt_spinner spin; + struct intel_context *ce; int err; err = __igt_atomic_reset_engine(engine, p, "idle"); if (err) return err; - err = hang_init(&h, engine->gt); + err = igt_spinner_init(&spin, engine->gt); if (err) { pr_err("[%s] Hang init failed: %d!\n", engine->name, err); return err; } - rq = hang_create_request(&h, engine); + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + pr_err("[%s] Create context failed: %d!\n", engine->name, err); + goto out; + } + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); + if (IS_ERR(rq)) { err = PTR_ERR(rq); pr_err("[%s] Create hang request failed: %d!\n", engine->name, err); @@ -1933,12 +1766,12 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine, 
i915_request_get(rq); i915_request_add(rq); - if (wait_until_running(&h, rq)) { + if (igt_wait_for_spinner(&spin, rq)) { err = __igt_atomic_reset_engine(engine, p, "active"); } else { pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, - rq->fence.seqno, hws_seqno(&h, rq)); + rq->fence.seqno, hws_seqno(&spin, rq)); intel_gt_set_wedged(engine->gt); err = -EIO; } @@ -1954,7 +1787,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine, i915_request_put(rq); out: - hang_fini(&h); + igt_spinner_fini(&spin); return err; } diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 8c3e1f20e5a15..fc4f33076ec7b 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -108,15 +108,10 @@ int igt_spinner_pin(struct igt_spinner *spin, return 0; } -static unsigned int seqno_offset(u64 fence) -{ - return offset_in_page(sizeof(u32) * fence); -} - static u64 hws_address(const struct i915_vma *hws, const struct i915_request *rq) { - return i915_vma_offset(hws) + seqno_offset(rq->fence.context); + return i915_vma_offset(hws) + offset_in_page(sizeof(u32) * rq->fence.context); } struct i915_request * @@ -216,14 +211,6 @@ igt_spinner_create_request(struct igt_spinner *spin, return err ? 
ERR_PTR(err) : rq; } -static u32 -hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) -{ - u32 *seqno = spin->seqno + seqno_offset(rq->fence.context); - - return READ_ONCE(*seqno); -} - void igt_spinner_end(struct igt_spinner *spin) { if (!spin->batch) diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h index fbe5b1625b05e..faff1008999a5 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.h +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h @@ -40,4 +40,13 @@ void igt_spinner_end(struct igt_spinner *spin); bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq); +static inline u32 +hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) +{ + u32 *seqno = spin->seqno + offset_in_page(sizeof(u32) * rq->fence.context); + + return READ_ONCE(*seqno); +} + + #endif -- 2.40.1 ^ permalink raw reply related [flat|nested] 8+ messages in thread
* [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter 2023-08-18 19:42 [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates Andi Shyti 2023-08-18 19:42 ` [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner Andi Shyti @ 2023-08-18 19:42 ` Andi Shyti 2023-08-19 22:49 ` Andi Shyti 2023-08-18 19:42 ` [Intel-gfx] [PATCH 3/3] drm/i915: Enable GGTT blitting in MTL Andi Shyti 2023-08-18 20:05 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner (rev2) Patchwork 3 siblings, 1 reply; 8+ messages in thread From: Andi Shyti @ 2023-08-18 19:42 UTC (permalink / raw) To: Jonathan Cavitt; +Cc: intel-gfx From: Nirmoy Das <nirmoy.das@intel.com> Implement GGTT update method with blitter command, MI_UPDATE_GTT and install those handlers if a platform requires that. Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> --- drivers/gpu/drm/i915/gt/intel_ggtt.c | 189 +++++++++++++++++++++++++++ 1 file changed, 189 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index dd0ed941441aa..715f24de2cfa6 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -15,12 +15,16 @@ #include "display/intel_display.h" #include "gem/i915_gem_lmem.h" +#include "intel_context.h" #include "intel_ggtt_gmch.h" +#include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_regs.h" #include "intel_pci_config.h" +#include "intel_ring.h" #include "i915_drv.h" #include "i915_pci.h" +#include "i915_request.h" #include "i915_scatterlist.h" #include "i915_utils.h" #include "i915_vgpu.h" @@ -252,6 +256,95 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, return pte; } +static bool should_update_ggtt_with_blit(struct i915_ggtt *ggtt) +{ + struct intel_gt *gt = ggtt->vm.gt; + + return intel_engine_blitter_context_ready(gt); +} + +static bool gen8_ggtt_blit_fixed_pte(struct i915_ggtt 
*ggtt, u32 offset, + u32 num_entries, const gen8_pte_t pte) +{ + struct intel_gt *gt = ggtt->vm.gt; + struct i915_sched_attr attr = {}; + struct i915_request *rq; + struct intel_context *ce; + bool wakeref; + u32 *cs; + + if (!num_entries) + return true; + /* + * If the GT is not awake already at this stage then fallback + * to pci based GGTT update otherwise __intel_wakeref_get_first() + * would conflict with fs_reclaim trying to allocate memory while + * doing rpm_resume(). + */ + wakeref = intel_gt_pm_get_if_awake(gt); + if (!wakeref) + return false; + + ce = gt->engine[BCS0]->blitter_context; + if (!ce) { + drm_dbg(&ggtt->vm.i915->drm, "Failed to get blitter context\n"); + goto err_print; + } + + while (num_entries) { + /* MI_UPDATE_GTT can update 512 entries in a single command */ + u32 n_ptes = min_t(u32, 512, num_entries); + + mutex_lock(&ce->timeline->mutex); + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC); + intel_context_exit(ce); + if (IS_ERR(rq)) { + drm_dbg(&ggtt->vm.i915->drm, + "Failed to get blitter request\n"); + goto err_unlock; + } + + cs = intel_ring_begin(rq, 2 * n_ptes + 2); + if (IS_ERR(cs)) { + drm_dbg(&ggtt->vm.i915->drm, + "Failed to begin ring for GGTT blitter\n"); + goto err_rq; + } + + *cs++ = MI_UPDATE_GTT | (2 * n_ptes); + *cs++ = offset << 12; + memset64((u64 *)cs, pte, n_ptes); + cs += n_ptes * 2; + intel_ring_advance(rq, cs); + + i915_request_get(rq); + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + + mutex_unlock(&ce->timeline->mutex); + /* This will break if the request is complete or after engine reset */ + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + + num_entries -= n_ptes; + } + + ggtt->invalidate(ggtt); + intel_gt_pm_put_async(gt); + return true; + +err_rq: + i915_request_put(rq); +err_unlock: + mutex_unlock(&ce->timeline->mutex); +err_print: + drm_warn_once(&ggtt->vm.i915->drm, + "GGTT update failed with blitter\n"); + intel_gt_pm_put(gt); + 
return false; +} + static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); @@ -272,6 +365,22 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static void gen8_ggtt_insert_page_blit(struct i915_address_space *vm, + dma_addr_t addr, u64 offset, + unsigned int pat_index, u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t pte; + + pte = ggtt->vm.pte_encode(addr, pat_index, flags); + if (should_update_ggtt_with_blit(i915_vm_to_ggtt(vm)) && + gen8_ggtt_blit_fixed_pte(ggtt, offset, 1, pte)) + return; + + gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags); + ggtt->invalidate(ggtt); +} + static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, unsigned int pat_index, @@ -311,6 +420,52 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static void __gen8_ggtt_insert_entries_blit(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + gen8_pte_t pte_encode; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter iter; + dma_addr_t addr; + u64 start, end; + + pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); + start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; + end = start + vma_res->guard / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_blit_fixed_pte(ggtt, start, end - start, vm->scratch[0]->encode)) + goto err; + start = end; + + end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; + /* TODO: MI_UPDATE_GTT can update 511 entries in a single command. 
*/ + for_each_sgt_daddr(addr, iter, vma_res->bi.pages) { + if (!gen8_ggtt_blit_fixed_pte(ggtt, start++, 1, pte_encode | addr)) + goto err; + } + + if (!gen8_ggtt_blit_fixed_pte(ggtt, start, end - start, vm->scratch[0]->encode)) + goto err; + + return; + +err: + drm_dbg(&ggtt->vm.i915->drm, "falling back to gen8_ggtt_insert_entries\n"); + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); +} + +static void gen8_ggtt_insert_entries_blit(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + if (!should_update_ggtt_with_blit(i915_vm_to_ggtt(vm))) { + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); + return; + } + + __gen8_ggtt_insert_entries_blit(vm, vma_res, pat_index, flags); +} + static void gen8_ggtt_clear_range(struct i915_address_space *vm, u64 start, u64 length) { @@ -332,6 +487,34 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, gen8_set_pte(&gtt_base[i], scratch_pte); } +static void gen8_ggtt_scratch_range_blit(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; + gen8_pte_t __iomem *gtt_base = + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + if (should_update_ggtt_with_blit(ggtt) && + gen8_ggtt_blit_fixed_pte(ggtt, first_entry, + num_entries, scratch_pte)) + return; + + for (i = 0; i < num_entries; i++) + gen8_set_pte(&gtt_base[i], scratch_pte); + + ggtt->invalidate(ggtt); +} + static void gen6_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, @@ -997,6 +1180,12 @@ static int 
gen8_gmch_probe(struct i915_ggtt *ggtt) I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; } + if (i915_ggtt_require_blitter(i915)) { + ggtt->vm.scratch_range = gen8_ggtt_scratch_range_blit; + ggtt->vm.insert_page = gen8_ggtt_insert_page_blit; + ggtt->vm.insert_entries = gen8_ggtt_insert_entries_blit; + } + if (intel_uc_wants_guc(&ggtt->vm.gt->uc)) ggtt->invalidate = guc_ggtt_invalidate; else -- 2.40.1 ^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter 2023-08-18 19:42 ` [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter Andi Shyti @ 2023-08-19 22:49 ` Andi Shyti 0 siblings, 0 replies; 8+ messages in thread From: Andi Shyti @ 2023-08-19 22:49 UTC (permalink / raw) To: Andi Shyti; +Cc: intel-gfx, Jonathan Cavitt ops... sorry... too many patches sent... please, ignore! Andi On Fri, Aug 18, 2023 at 09:42:48PM +0200, Andi Shyti wrote: > From: Nirmoy Das <nirmoy.das@intel.com> > > Implement GGTT update method with blitter command, MI_UPDATE_GTT > and install those handlers if a platform requires that. > > Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> > --- > drivers/gpu/drm/i915/gt/intel_ggtt.c | 189 +++++++++++++++++++++++++++ > 1 file changed, 189 insertions(+) > > diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c > index dd0ed941441aa..715f24de2cfa6 100644 > --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c > +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c > @@ -15,12 +15,16 @@ > #include "display/intel_display.h" > #include "gem/i915_gem_lmem.h" > > +#include "intel_context.h" > #include "intel_ggtt_gmch.h" > +#include "intel_gpu_commands.h" > #include "intel_gt.h" > #include "intel_gt_regs.h" > #include "intel_pci_config.h" > +#include "intel_ring.h" > #include "i915_drv.h" > #include "i915_pci.h" > +#include "i915_request.h" > #include "i915_scatterlist.h" > #include "i915_utils.h" > #include "i915_vgpu.h" > @@ -252,6 +256,95 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, > return pte; > } > > +static bool should_update_ggtt_with_blit(struct i915_ggtt *ggtt) > +{ > + struct intel_gt *gt = ggtt->vm.gt; > + > + return intel_engine_blitter_context_ready(gt); > +} > + > +static bool gen8_ggtt_blit_fixed_pte(struct i915_ggtt *ggtt, u32 offset, > + u32 num_entries, const gen8_pte_t pte) > +{ > + struct intel_gt *gt = ggtt->vm.gt; > + struct i915_sched_attr attr = {}; > + struct 
i915_request *rq; > + struct intel_context *ce; > + bool wakeref; > + u32 *cs; > + > + if (!num_entries) > + return true; > + /* > + * If the GT is not awake already at this stage then fallback > + * to pci based GGTT update otherwise __intel_wakeref_get_first() > + * would conflict with fs_reclaim trying to allocate memory while > + * doing rpm_resume(). > + */ > + wakeref = intel_gt_pm_get_if_awake(gt); > + if (!wakeref) > + return false; > + > + ce = gt->engine[BCS0]->blitter_context; > + if (!ce) { > + drm_dbg(&ggtt->vm.i915->drm, "Failed to get blitter context\n"); > + goto err_print; > + } > + > + while (num_entries) { > + /* MI_UPDATE_GTT can update 512 entries in a single command */ > + u32 n_ptes = min_t(u32, 512, num_entries); > + > + mutex_lock(&ce->timeline->mutex); > + intel_context_enter(ce); > + rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC); > + intel_context_exit(ce); > + if (IS_ERR(rq)) { > + drm_dbg(&ggtt->vm.i915->drm, > + "Failed to get blitter request\n"); > + goto err_unlock; > + } > + > + cs = intel_ring_begin(rq, 2 * n_ptes + 2); > + if (IS_ERR(cs)) { > + drm_dbg(&ggtt->vm.i915->drm, > + "Failed to begin ring for GGTT blitter\n"); > + goto err_rq; > + } > + > + *cs++ = MI_UPDATE_GTT | (2 * n_ptes); > + *cs++ = offset << 12; > + memset64((u64 *)cs, pte, n_ptes); > + cs += n_ptes * 2; > + intel_ring_advance(rq, cs); > + > + i915_request_get(rq); > + __i915_request_commit(rq); > + __i915_request_queue(rq, &attr); > + > + mutex_unlock(&ce->timeline->mutex); > + /* This will break if the request is complete or after engine reset */ > + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); > + i915_request_put(rq); > + > + num_entries -= n_ptes; > + } > + > + ggtt->invalidate(ggtt); > + intel_gt_pm_put_async(gt); > + return true; > + > +err_rq: > + i915_request_put(rq); > +err_unlock: > + mutex_unlock(&ce->timeline->mutex); > +err_print: > + drm_warn_once(&ggtt->vm.i915->drm, > + "GGTT update failed with blitter\n"); > + intel_gt_pm_put(gt); > 
+ return false; > +} > + > static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) > { > writeq(pte, addr); > @@ -272,6 +365,22 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, > ggtt->invalidate(ggtt); > } > > +static void gen8_ggtt_insert_page_blit(struct i915_address_space *vm, > + dma_addr_t addr, u64 offset, > + unsigned int pat_index, u32 flags) > +{ > + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); > + gen8_pte_t pte; > + > + pte = ggtt->vm.pte_encode(addr, pat_index, flags); > + if (should_update_ggtt_with_blit(i915_vm_to_ggtt(vm)) && > + gen8_ggtt_blit_fixed_pte(ggtt, offset, 1, pte)) > + return; > + > + gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags); > + ggtt->invalidate(ggtt); > +} > + > static void gen8_ggtt_insert_entries(struct i915_address_space *vm, > struct i915_vma_resource *vma_res, > unsigned int pat_index, > @@ -311,6 +420,52 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, > ggtt->invalidate(ggtt); > } > > +static void __gen8_ggtt_insert_entries_blit(struct i915_address_space *vm, > + struct i915_vma_resource *vma_res, > + unsigned int pat_index, u32 flags) > +{ > + gen8_pte_t pte_encode; > + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); > + struct sgt_iter iter; > + dma_addr_t addr; > + u64 start, end; > + > + pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); > + start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; > + end = start + vma_res->guard / I915_GTT_PAGE_SIZE; > + if (!gen8_ggtt_blit_fixed_pte(ggtt, start, end - start, vm->scratch[0]->encode)) > + goto err; > + start = end; > + > + end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; > + /* TODO: MI_UPDATE_GTT can update 511 entries in a single command. 
*/ > + for_each_sgt_daddr(addr, iter, vma_res->bi.pages) { > + if (!gen8_ggtt_blit_fixed_pte(ggtt, start++, 1, pte_encode | addr)) > + goto err; > + } > + > + if (!gen8_ggtt_blit_fixed_pte(ggtt, start, end - start, vm->scratch[0]->encode)) > + goto err; > + > + return; > + > +err: > + drm_dbg(&ggtt->vm.i915->drm, "falling back to gen8_ggtt_insert_entries\n"); > + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); > +} > + > +static void gen8_ggtt_insert_entries_blit(struct i915_address_space *vm, > + struct i915_vma_resource *vma_res, > + unsigned int pat_index, u32 flags) > +{ > + if (!should_update_ggtt_with_blit(i915_vm_to_ggtt(vm))) { > + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); > + return; > + } > + > + __gen8_ggtt_insert_entries_blit(vm, vma_res, pat_index, flags); > +} > + > static void gen8_ggtt_clear_range(struct i915_address_space *vm, > u64 start, u64 length) > { > @@ -332,6 +487,34 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, > gen8_set_pte(&gtt_base[i], scratch_pte); > } > > +static void gen8_ggtt_scratch_range_blit(struct i915_address_space *vm, > + u64 start, u64 length) > +{ > + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); > + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; > + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; > + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; > + gen8_pte_t __iomem *gtt_base = > + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; > + const int max_entries = ggtt_total_entries(ggtt) - first_entry; > + int i; > + > + if (WARN(num_entries > max_entries, > + "First entry = %d; Num entries = %d (max=%d)\n", > + first_entry, num_entries, max_entries)) > + num_entries = max_entries; > + > + if (should_update_ggtt_with_blit(ggtt) && > + gen8_ggtt_blit_fixed_pte(ggtt, first_entry, > + num_entries, scratch_pte)) > + return; > + > + for (i = 0; i < num_entries; i++) > + gen8_set_pte(&gtt_base[i], scratch_pte); > + > + ggtt->invalidate(ggtt); > +} > + > static void 
gen6_ggtt_insert_page(struct i915_address_space *vm, > dma_addr_t addr, > u64 offset, > @@ -997,6 +1180,12 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) > I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; > } > > + if (i915_ggtt_require_blitter(i915)) { > + ggtt->vm.scratch_range = gen8_ggtt_scratch_range_blit; > + ggtt->vm.insert_page = gen8_ggtt_insert_page_blit; > + ggtt->vm.insert_entries = gen8_ggtt_insert_entries_blit; > + } > + > if (intel_uc_wants_guc(&ggtt->vm.gt->uc)) > ggtt->invalidate = guc_ggtt_invalidate; > else > -- > 2.40.1 ^ permalink raw reply [flat|nested] 8+ messages in thread
* [Intel-gfx] [PATCH 3/3] drm/i915: Enable GGTT blitting in MTL 2023-08-18 19:42 [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates Andi Shyti 2023-08-18 19:42 ` [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner Andi Shyti 2023-08-18 19:42 ` [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter Andi Shyti @ 2023-08-18 19:42 ` Andi Shyti 2023-08-18 20:05 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner (rev2) Patchwork 3 siblings, 0 replies; 8+ messages in thread From: Andi Shyti @ 2023-08-18 19:42 UTC (permalink / raw) To: Jonathan Cavitt; +Cc: intel-gfx From: Nirmoy Das <nirmoy.das@intel.com> MTL can hang because of a HW bug while parallel reading/writing from/to LMEM/GTTMMADR BAR so try to reduce GGTT update related pci transactions with blitter command as recommended for Wa_22018444074. To issue blitter commands, the driver must be primed to receive requests. Maintain blitter-based GGTT update disablement until driver probing completes. Moreover, implement a temporary disablement of blitter prior to entering suspend, followed by re-enablement post-resume. This is acceptable as those transition periods are mostly single threaded. v2: Disable GGTT blitter prior to runtime suspend and re-enable after runtime resume. 
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> Signed-off-by: Oak Zeng <oak.zeng@intel.com> --- drivers/gpu/drm/i915/i915_driver.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index ec4d26b3c17cc..a893a5e8496ef 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -812,6 +812,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i915_welcome_messages(i915); i915->do_release = true; + intel_engine_blitter_context_set_ready(to_gt(i915), true); return 0; @@ -852,6 +853,7 @@ void i915_driver_remove(struct drm_i915_private *i915) { intel_wakeref_t wakeref; + intel_engine_blitter_context_set_ready(to_gt(i915), false); wakeref = intel_runtime_pm_get(&i915->runtime_pm); i915_driver_unregister(i915); @@ -1074,6 +1076,8 @@ static int i915_drm_suspend(struct drm_device *dev) struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); pci_power_t opregion_target_state; + intel_engine_blitter_context_set_ready(to_gt(dev_priv), false); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); /* We do a lot of poking in a lot of registers, make sure they work @@ -1261,6 +1265,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_gvt_resume(dev_priv); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + intel_engine_blitter_context_set_ready(to_gt(dev_priv), true); return 0; } @@ -1512,6 +1517,7 @@ static int intel_runtime_suspend(struct device *kdev) if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; + intel_engine_blitter_context_set_ready(to_gt(dev_priv), false); drm_dbg(&dev_priv->drm, "Suspending device\n"); disable_rpm_wakeref_asserts(rpm); @@ -1666,6 +1672,8 @@ static int intel_runtime_resume(struct device *kdev) else drm_dbg(&dev_priv->drm, "Device resumed\n"); + intel_engine_blitter_context_set_ready(to_gt(dev_priv), true); + return ret; } -- 2.40.1 ^ permalink raw reply related [flat|nested] 
8+ messages in thread
* [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner (rev2) 2023-08-18 19:42 [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates Andi Shyti ` (2 preceding siblings ...) 2023-08-18 19:42 ` [Intel-gfx] [PATCH 3/3] drm/i915: Enable GGTT blitting in MTL Andi Shyti @ 2023-08-18 20:05 ` Patchwork 3 siblings, 0 replies; 8+ messages in thread From: Patchwork @ 2023-08-18 20:05 UTC (permalink / raw) To: Andi Shyti; +Cc: intel-gfx == Series Details == Series: series starting with [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner (rev2) URL : https://patchwork.freedesktop.org/series/122651/ State : failure == Summary == Error: make failed CALL scripts/checksyscalls.sh DESCEND objtool INSTALL libsubcmd_headers CC [M] drivers/gpu/drm/i915/i915_driver.o drivers/gpu/drm/i915/i915_driver.c: In function ‘i915_driver_probe’: drivers/gpu/drm/i915/i915_driver.c:818:2: error: implicit declaration of function ‘intel_engine_blitter_context_set_ready’ [-Werror=implicit-function-declaration] 818 | intel_engine_blitter_context_set_ready(to_gt(i915), true); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cc1: all warnings being treated as errors make[6]: *** [scripts/Makefile.build:243: drivers/gpu/drm/i915/i915_driver.o] Error 1 make[5]: *** [scripts/Makefile.build:480: drivers/gpu/drm/i915] Error 2 make[4]: *** [scripts/Makefile.build:480: drivers/gpu/drm] Error 2 make[3]: *** [scripts/Makefile.build:480: drivers/gpu] Error 2 make[2]: *** [scripts/Makefile.build:480: drivers] Error 2 make[1]: *** [/home/kbuild2/kernel/Makefile:2032: .] Error 2 make: *** [Makefile:234: __sub-make] Error 2 Build failed, no error log produced ^ permalink raw reply [flat|nested] 8+ messages in thread
* [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates @ 2023-08-22 15:28 Oak Zeng 2023-08-22 15:28 ` [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter Oak Zeng 0 siblings, 1 reply; 8+ messages in thread From: Oak Zeng @ 2023-08-22 15:28 UTC (permalink / raw) To: intel-gfx; +Cc: andi.shyti, chris.p.wilson, nirmoy.das From: Nirmoy Das <nirmoy.das@intel.com> Create a separate blitter context if a platform requires GGTT updates using MI_UPDATE_GTT blitter command. Subsequent patch will introduce methods to update GGTT using this blitter context and MI_UPDATE_GTT blitter command. v2: Fix a typo in comment. (Oak) Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> Signed-off-by: Oak Zeng <oak.zeng@intel.com> --- drivers/gpu/drm/i915/gt/intel_engine.h | 4 ++ drivers/gpu/drm/i915/gt/intel_engine_cs.c | 44 +++++++++++++++++++- drivers/gpu/drm/i915/gt/intel_engine_types.h | 3 ++ drivers/gpu/drm/i915/gt/intel_gtt.c | 4 ++ drivers/gpu/drm/i915/gt/intel_gtt.h | 2 + 5 files changed, 56 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index b58c30ac8ef0..ee36db2fdaa7 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_GEM_HWS_SEQNO 0x40 #define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32)) #define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32)) +#define I915_GEM_HWS_GGTT_BLIT 0x46 +#define I915_GEM_HWS_GGTT_BLIT_ADDR (I915_GEM_HWS_GGTT_BLIT * sizeof(u32)) #define I915_GEM_HWS_PXP 0x60 #define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32)) #define I915_GEM_HWS_GSC 0x62 @@ -356,4 +358,6 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value); u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value); u64 intel_clamp_timeslice_duration_ms(struct 
intel_engine_cs *engine, u64 value); +void intel_engine_blitter_context_set_ready(struct intel_gt *gt, bool ready); +bool intel_engine_blitter_context_ready(struct intel_gt *gt); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index dfb69fc977a0..d8c492a507a4 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -27,6 +27,7 @@ #include "intel_gt_mcr.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" +#include "intel_gtt.h" #include "intel_lrc.h" #include "intel_lrc_reg.h" #include "intel_reset.h" @@ -1419,6 +1420,34 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce) intel_context_put(ce); } +void intel_engine_blitter_context_set_ready(struct intel_gt *gt, bool ready) +{ + struct intel_engine_cs *engine = gt->engine[BCS0]; + + if (engine && engine->blitter_context) + atomic_set(&engine->blitter_context_ready, ready ? 1 : 0); +} + +bool intel_engine_blitter_context_ready(struct intel_gt *gt) +{ + struct intel_engine_cs *engine = gt->engine[BCS0]; + + if (engine) + return atomic_read(&engine->blitter_context_ready) == 1; + + return false; +} + +static struct intel_context * +create_ggtt_blitter_context(struct intel_engine_cs *engine) +{ + static struct lock_class_key kernel; + + /* MI_UPDATE_GTT can insert up to 512 PTE entries so get a bigger ring */ + return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K, + I915_GEM_HWS_GGTT_BLIT_ADDR, + &kernel, "ggtt_blitter_context"); +} static struct intel_context * create_kernel_context(struct intel_engine_cs *engine) { @@ -1442,7 +1471,7 @@ create_kernel_context(struct intel_engine_cs *engine) */ static int engine_init_common(struct intel_engine_cs *engine) { - struct intel_context *ce; + struct intel_context *ce, *bce = NULL; int ret; engine->set_default_submission(engine); @@ -1458,6 +1487,15 @@ static int engine_init_common(struct 
intel_engine_cs *engine) ce = create_kernel_context(engine); if (IS_ERR(ce)) return PTR_ERR(ce); + /* + * Create a separate pinned context for GGTT update using blitter + * if a platform require such service. + */ + if (i915_ggtt_require_blitter(engine->i915) && engine->id == BCS0) { + bce = create_ggtt_blitter_context(engine); + if (IS_ERR(bce)) + return PTR_ERR(bce); + } ret = measure_breadcrumb_dw(ce); if (ret < 0) @@ -1465,6 +1503,7 @@ static int engine_init_common(struct intel_engine_cs *engine) engine->emit_fini_breadcrumb_dw = ret; engine->kernel_context = ce; + engine->blitter_context = bce; return 0; @@ -1537,6 +1576,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->kernel_context) intel_engine_destroy_pinned_context(engine->kernel_context); + if (engine->blitter_context) + intel_engine_destroy_pinned_context(engine->blitter_context); + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); cleanup_status_page(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index e99a6fa03d45..62095c0d8783 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -415,6 +415,9 @@ struct intel_engine_cs { struct llist_head barrier_tasks; struct intel_context *kernel_context; /* pinned */ + struct intel_context *blitter_context; /* pinned, only for BCS0 */ + /* mark the blitter engine's availability status */ + atomic_t blitter_context_ready; /** * pinned_contexts_list: List of pinned contexts. 
This list is only diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 13944a14ea2d..9c77c97670fe 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -21,6 +21,10 @@ #include "intel_gt_regs.h" #include "intel_gtt.h" +bool i915_ggtt_require_blitter(struct drm_i915_private *i915) +{ + return IS_METEORLAKE(i915); +} static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 4d6296cdbcfd..9710eb031fb2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -688,4 +688,6 @@ static inline struct sgt_dma { return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) }; } +bool i915_ggtt_require_blitter(struct drm_i915_private *i915); + #endif -- 2.26.3 ^ permalink raw reply related [flat|nested] 8+ messages in thread
* [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter 2023-08-22 15:28 [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates Oak Zeng @ 2023-08-22 15:28 ` Oak Zeng 0 siblings, 0 replies; 8+ messages in thread From: Oak Zeng @ 2023-08-22 15:28 UTC (permalink / raw) To: intel-gfx; +Cc: andi.shyti, chris.p.wilson, nirmoy.das From: Nirmoy Das <nirmoy.das@intel.com> Implement GGTT update method with blitter command, MI_UPDATE_GTT and install those handlers if a platform requires that. v2: Make sure we hold the GT wakeref and Blitter engine wakeref before we call mutex_lock/intel_context_enter below. When GT/engine are not awake, the intel_context_enter calls into some runtime pm function which can end up with kmalloc/fs_reclaim. But trigger fs_reclaim holding a mutex lock is not allowed because shrinker can also try to hold the same mutex lock. It is a circular lock. So hold the GT/blitter engine wakeref before calling mutex_lock, to fix the circular lock. 
(Oak) Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> Signed-off-by: Oak Zeng <oak.zeng@intel.com> --- drivers/gpu/drm/i915/gt/intel_ggtt.c | 198 +++++++++++++++++++++++++++ 1 file changed, 198 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index dd0ed941441a..d445cb015257 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -15,18 +15,23 @@ #include "display/intel_display.h" #include "gem/i915_gem_lmem.h" +#include "intel_context.h" #include "intel_ggtt_gmch.h" +#include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_regs.h" #include "intel_pci_config.h" +#include "intel_ring.h" #include "i915_drv.h" #include "i915_pci.h" +#include "i915_request.h" #include "i915_scatterlist.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "intel_gtt.h" #include "gen8_ppgtt.h" +#include "intel_engine_pm.h" static void i915_ggtt_color_adjust(const struct drm_mm_node *node, unsigned long color, @@ -252,6 +257,103 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, return pte; } +static bool should_update_ggtt_with_blit(struct i915_ggtt *ggtt) +{ + struct intel_gt *gt = ggtt->vm.gt; + + return intel_engine_blitter_context_ready(gt); +} + +static bool gen8_ggtt_blit_fixed_pte(struct i915_ggtt *ggtt, u32 offset, + u32 num_entries, const gen8_pte_t pte) +{ + struct intel_gt *gt = ggtt->vm.gt; + struct i915_sched_attr attr = {}; + struct i915_request *rq; + struct intel_context *ce; + bool wakeref; + u32 *cs; + + if (!num_entries) + return true; + + ce = gt->engine[BCS0]->blitter_context; + if (!ce) { + drm_dbg(&ggtt->vm.i915->drm, "Failed to get blitter context\n"); + return false; + } + + /* + * If the GT is not awake already at this stage then fallback + * to pci based GGTT update otherwise __intel_wakeref_get_first() + * would conflict with fs_reclaim trying to allocate memory while + * doing rpm_resume(). 
+ */ + wakeref = intel_gt_pm_get_if_awake(gt); + if (!wakeref) { + drm_dbg(&ggtt->vm.i915->drm, "GT is not awake, fallback to CPU GGTT update\n"); + return false; + } + + if (!intel_engine_pm_get_if_awake(gt->engine[BCS0])) { + drm_dbg(&ggtt->vm.i915->drm, "Blitter engine is not awake, fallback to CPU GGTT update\n"); + goto err_gt_pm_put; + } + + while (num_entries) { + /* MI_UPDATE_GTT can update 512 entries in a single command */ + u32 n_ptes = min_t(u32, 512, num_entries); + + mutex_lock(&ce->timeline->mutex); + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC); + intel_context_exit(ce); + if (IS_ERR(rq)) { + drm_dbg(&ggtt->vm.i915->drm, + "Failed to get blitter request\n"); + goto err_unlock; + } + + cs = intel_ring_begin(rq, 2 * n_ptes + 2); + if (IS_ERR(cs)) { + drm_dbg(&ggtt->vm.i915->drm, + "Failed to begin ring for GGTT blitter\n"); + goto err_rq; + } + + *cs++ = MI_UPDATE_GTT | (2 * n_ptes); + *cs++ = offset << 12; + memset64((u64 *)cs, pte, n_ptes); + cs += n_ptes * 2; + intel_ring_advance(rq, cs); + + i915_request_get(rq); + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + + mutex_unlock(&ce->timeline->mutex); + /* This will break if the request is complete or after engine reset */ + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + + num_entries -= n_ptes; + } + + ggtt->invalidate(ggtt); + intel_engine_pm_put(gt->engine[BCS0]); + intel_gt_pm_put(gt); + return true; + +err_rq: + i915_request_put(rq); +err_unlock: + mutex_unlock(&ce->timeline->mutex); + intel_engine_pm_put(gt->engine[BCS0]); +err_gt_pm_put: + intel_gt_pm_put(gt); + return false; +} + static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); @@ -272,6 +374,22 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static void gen8_ggtt_insert_page_blit(struct i915_address_space *vm, + dma_addr_t addr, u64 offset, + unsigned int pat_index, u32 flags) +{ + 
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t pte; + + pte = ggtt->vm.pte_encode(addr, pat_index, flags); + if (should_update_ggtt_with_blit(i915_vm_to_ggtt(vm)) && + gen8_ggtt_blit_fixed_pte(ggtt, offset, 1, pte)) + return; + + gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags); + ggtt->invalidate(ggtt); +} + static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, unsigned int pat_index, @@ -311,6 +429,52 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static void __gen8_ggtt_insert_entries_blit(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + gen8_pte_t pte_encode; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter iter; + dma_addr_t addr; + u64 start, end; + + pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); + start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; + end = start + vma_res->guard / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_blit_fixed_pte(ggtt, start, end - start, vm->scratch[0]->encode)) + goto err; + start = end; + + end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; + /* TODO: MI_UPDATE_GTT can update 511 entries in a single command. 
*/ + for_each_sgt_daddr(addr, iter, vma_res->bi.pages) { + if (!gen8_ggtt_blit_fixed_pte(ggtt, start++, 1, pte_encode | addr)) + goto err; + } + + if (!gen8_ggtt_blit_fixed_pte(ggtt, start, end - start, vm->scratch[0]->encode)) + goto err; + + return; + +err: + drm_dbg(&ggtt->vm.i915->drm, "falling back to gen8_ggtt_insert_entries\n"); + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); +} + +static void gen8_ggtt_insert_entries_blit(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + if (!should_update_ggtt_with_blit(i915_vm_to_ggtt(vm))) { + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); + return; + } + + __gen8_ggtt_insert_entries_blit(vm, vma_res, pat_index, flags); +} + static void gen8_ggtt_clear_range(struct i915_address_space *vm, u64 start, u64 length) { @@ -332,6 +496,34 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, gen8_set_pte(&gtt_base[i], scratch_pte); } +static void gen8_ggtt_scratch_range_blit(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; + gen8_pte_t __iomem *gtt_base = + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + if (should_update_ggtt_with_blit(ggtt) && + gen8_ggtt_blit_fixed_pte(ggtt, first_entry, + num_entries, scratch_pte)) + return; + + for (i = 0; i < num_entries; i++) + gen8_set_pte(&gtt_base[i], scratch_pte); + + ggtt->invalidate(ggtt); +} + static void gen6_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, u64 offset, @@ -997,6 +1189,12 @@ static int 
gen8_gmch_probe(struct i915_ggtt *ggtt) I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; } + if (i915_ggtt_require_blitter(i915)) { + ggtt->vm.scratch_range = gen8_ggtt_scratch_range_blit; + ggtt->vm.insert_page = gen8_ggtt_insert_page_blit; + ggtt->vm.insert_entries = gen8_ggtt_insert_entries_blit; + } + if (intel_uc_wants_guc(&ggtt->vm.gt->uc)) ggtt->invalidate = guc_ggtt_invalidate; else -- 2.26.3 ^ permalink raw reply related [flat|nested] 8+ messages in thread
* [Intel-gfx] [PATCH 1/3] drm/i915: Create a bind context for GGTT updates @ 2023-08-24 21:03 Oak Zeng 2023-08-24 21:03 ` [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter Oak Zeng 0 siblings, 1 reply; 8+ messages in thread From: Oak Zeng @ 2023-08-24 21:03 UTC (permalink / raw) To: intel-gfx; +Cc: matthew.d.roper, andi.shyti, chris.p.wilson, nirmoy.das From: Nirmoy Das <nirmoy.das@intel.com> Create a separate blitter context if a platform requires GGTT updates using MI_UPDATE_GTT blitter command. Subsequent patch will introduce methods to update GGTT using this blitter context and MI_UPDATE_GTT blitter command. v2: Fix a typo in comment. (Oak) v3: s/blitter_context/bind_context/g (Chris) Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> Signed-off-by: Oak Zeng <oak.zeng@intel.com> --- drivers/gpu/drm/i915/gt/intel_engine.h | 4 ++ drivers/gpu/drm/i915/gt/intel_engine_cs.c | 44 +++++++++++++++++++- drivers/gpu/drm/i915/gt/intel_engine_types.h | 3 ++ drivers/gpu/drm/i915/gt/intel_gtt.c | 4 ++ drivers/gpu/drm/i915/gt/intel_gtt.h | 2 + 5 files changed, 56 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index b58c30ac8ef0..c3b2ac8bad08 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_GEM_HWS_SEQNO 0x40 #define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32)) #define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32)) +#define I915_GEM_HWS_GGTT_BLIT 0x46 +#define I915_GEM_HWS_GGTT_BLIT_ADDR (I915_GEM_HWS_GGTT_BLIT * sizeof(u32)) #define I915_GEM_HWS_PXP 0x60 #define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32)) #define I915_GEM_HWS_GSC 0x62 @@ -356,4 +358,6 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value); u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 
value); u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value); +void intel_engine_bind_context_set_ready(struct intel_gt *gt, bool ready); +bool intel_engine_bind_context_ready(struct intel_gt *gt); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index dfb69fc977a0..d316f9eb914a 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -27,6 +27,7 @@ #include "intel_gt_mcr.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" +#include "intel_gtt.h" #include "intel_lrc.h" #include "intel_lrc_reg.h" #include "intel_reset.h" @@ -1419,6 +1420,34 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce) intel_context_put(ce); } +void intel_engine_bind_context_set_ready(struct intel_gt *gt, bool ready) +{ + struct intel_engine_cs *engine = gt->engine[BCS0]; + + if (engine && engine->bind_context) + atomic_set(&engine->bind_context_ready, ready ? 
1 : 0); +} + +bool intel_engine_bind_context_ready(struct intel_gt *gt) +{ + struct intel_engine_cs *engine = gt->engine[BCS0]; + + if (engine) + return atomic_read(&engine->bind_context_ready) == 1; + + return false; +} + +static struct intel_context * +create_ggtt_bind_context(struct intel_engine_cs *engine) +{ + static struct lock_class_key kernel; + + /* MI_UPDATE_GTT can insert up to 512 PTE entries so get a bigger ring */ + return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K, + I915_GEM_HWS_GGTT_BLIT_ADDR, + &kernel, "ggtt_bind_context"); +} static struct intel_context * create_kernel_context(struct intel_engine_cs *engine) { @@ -1442,7 +1471,7 @@ create_kernel_context(struct intel_engine_cs *engine) */ static int engine_init_common(struct intel_engine_cs *engine) { - struct intel_context *ce; + struct intel_context *ce, *bce = NULL; int ret; engine->set_default_submission(engine); @@ -1458,6 +1487,15 @@ static int engine_init_common(struct intel_engine_cs *engine) ce = create_kernel_context(engine); if (IS_ERR(ce)) return PTR_ERR(ce); + /* + * Create a separate pinned context for GGTT update using blitter + * if a platform require such service. 
+ */ + if (i915_ggtt_require_blitter(engine->i915) && engine->id == BCS0) { + bce = create_ggtt_bind_context(engine); + if (IS_ERR(bce)) + return PTR_ERR(bce); + } ret = measure_breadcrumb_dw(ce); if (ret < 0) @@ -1465,6 +1503,7 @@ static int engine_init_common(struct intel_engine_cs *engine) engine->emit_fini_breadcrumb_dw = ret; engine->kernel_context = ce; + engine->bind_context = bce; return 0; @@ -1537,6 +1576,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->kernel_context) intel_engine_destroy_pinned_context(engine->kernel_context); + if (engine->bind_context) + intel_engine_destroy_pinned_context(engine->bind_context); + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); cleanup_status_page(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index e99a6fa03d45..a1fea76698f3 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -415,6 +415,9 @@ struct intel_engine_cs { struct llist_head barrier_tasks; struct intel_context *kernel_context; /* pinned */ + struct intel_context *bind_context; /* pinned, only for BCS0 */ + /* mark the blitter engine's availability status */ + atomic_t bind_context_ready; /** * pinned_contexts_list: List of pinned contexts. 
This list is only diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 13944a14ea2d..9c77c97670fe 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -21,6 +21,10 @@ #include "intel_gt_regs.h" #include "intel_gtt.h" +bool i915_ggtt_require_blitter(struct drm_i915_private *i915) +{ + return IS_METEORLAKE(i915); +} static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 4d6296cdbcfd..9710eb031fb2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -688,4 +688,6 @@ static inline struct sgt_dma { return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) }; } +bool i915_ggtt_require_blitter(struct drm_i915_private *i915); + #endif -- 2.26.3 ^ permalink raw reply related [flat|nested] 8+ messages in thread
* [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter 2023-08-24 21:03 [Intel-gfx] [PATCH 1/3] drm/i915: Create a bind context for GGTT updates Oak Zeng @ 2023-08-24 21:03 ` Oak Zeng 0 siblings, 0 replies; 8+ messages in thread From: Oak Zeng @ 2023-08-24 21:03 UTC (permalink / raw) To: intel-gfx; +Cc: matthew.d.roper, andi.shyti, chris.p.wilson, nirmoy.das From: Nirmoy Das <nirmoy.das@intel.com> Implement GGTT update method with blitter command, MI_UPDATE_GTT and install those handlers if a platform requires that. v2: Make sure we hold the GT wakeref and Blitter engine wakeref before we call mutex_lock/intel_context_enter below. When GT/engine are not awake, the intel_context_enter calls into some runtime pm function which can end up with kmalloc/fs_reclaim. But trigger fs_reclaim holding a mutex lock is not allowed because shrinker can also try to hold the same mutex lock. It is a circular lock. So hold the GT/blitter engine wakeref before calling mutex_lock, to fix the circular lock. 
(Oak) v3: - Set sched attr priority to _BARRIER to avoid userspace deadlock (Chris) - Don't use drm_dbg (Chris) - Use unconditional engine_pm_get (Chris) - Rename blitter_context to bind_context (Chris) - Handle err_rq correctly in _fixed_pte function (Chris) - Move ggtt invalidate from _fixed_pte function to caller, more efficient (Piotr) Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> Signed-off-by: Oak Zeng <oak.zeng@intel.com> --- drivers/gpu/drm/i915/gt/intel_ggtt.c | 190 +++++++++++++++++++++++++++ 1 file changed, 190 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index dd0ed941441a..d94b25561109 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -15,18 +15,23 @@ #include "display/intel_display.h" #include "gem/i915_gem_lmem.h" +#include "intel_context.h" #include "intel_ggtt_gmch.h" +#include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_regs.h" #include "intel_pci_config.h" +#include "intel_ring.h" #include "i915_drv.h" #include "i915_pci.h" +#include "i915_request.h" #include "i915_scatterlist.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "intel_gtt.h" #include "gen8_ppgtt.h" +#include "intel_engine_pm.h" static void i915_ggtt_color_adjust(const struct drm_mm_node *node, unsigned long color, @@ -252,6 +257,91 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, return pte; } +static bool should_update_ggtt_with_blit(struct i915_ggtt *ggtt) +{ + struct intel_gt *gt = ggtt->vm.gt; + + return intel_engine_bind_context_ready(gt); +} + +static bool gen8_ggtt_blit_fixed_pte(struct i915_ggtt *ggtt, u32 offset, + u32 num_entries, const gen8_pte_t pte) +{ + struct intel_gt *gt = ggtt->vm.gt; + struct i915_sched_attr attr = {.priority = I915_PRIORITY_BARRIER}; + struct i915_request *rq; + struct intel_context *ce; + bool wakeref; + u32 *cs; + + if (!num_entries) + return true; + + ce = gt->engine[BCS0]->bind_context; + GEM_BUG_ON(!ce); + + /* 
+ * If the GT is not awake already at this stage then fallback + * to pci based GGTT update otherwise __intel_wakeref_get_first() + * would conflict with fs_reclaim trying to allocate memory while + * doing rpm_resume(). + */ + wakeref = intel_gt_pm_get_if_awake(gt); + if (!wakeref) { + GT_TRACE(gt, "GT is not awake, fallback to CPU GGTT update\n"); + return false; + } + + intel_engine_pm_get(gt->engine[BCS0]); + + while (num_entries) { + /* MI_UPDATE_GTT can update 512 entries in a single command */ + u32 n_ptes = min_t(u32, 512, num_entries); + + mutex_lock(&ce->timeline->mutex); + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC); + intel_context_exit(ce); + if (IS_ERR(rq)) { + goto err_unlock; + } + + cs = intel_ring_begin(rq, 2 * n_ptes + 2); + if (IS_ERR(cs)) + goto err_rq; + + *cs++ = MI_UPDATE_GTT | (2 * n_ptes); + *cs++ = offset << 12; + memset64((u64 *)cs, pte, n_ptes); + cs += n_ptes * 2; + intel_ring_advance(rq, cs); + + i915_request_get(rq); + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + + mutex_unlock(&ce->timeline->mutex); + /* This will break if the request is complete or after engine reset */ + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + + num_entries -= n_ptes; + } + + intel_engine_pm_put(gt->engine[BCS0]); + intel_gt_pm_put(gt); + return true; + +err_rq: + i915_request_set_error_once(rq, -ENOSPC); + i915_request_add(rq); +err_unlock: + mutex_unlock(&ce->timeline->mutex); + intel_engine_pm_put(gt->engine[BCS0]); + intel_gt_pm_put(gt); + return false; +} + static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); @@ -272,6 +362,24 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static void gen8_ggtt_insert_page_blit(struct i915_address_space *vm, + dma_addr_t addr, u64 offset, + unsigned int pat_index, u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t pte; + + pte = 
ggtt->vm.pte_encode(addr, pat_index, flags); + if (should_update_ggtt_with_blit(i915_vm_to_ggtt(vm)) && + gen8_ggtt_blit_fixed_pte(ggtt, offset, 1, pte)) { + ggtt->invalidate(ggtt); + return; + } + + gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags); + ggtt->invalidate(ggtt); +} + static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, unsigned int pat_index, @@ -311,6 +419,52 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static void __gen8_ggtt_insert_entries_blit(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + gen8_pte_t pte_encode; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter iter; + dma_addr_t addr; + u64 start, end; + + pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); + start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; + end = start + vma_res->guard / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_blit_fixed_pte(ggtt, start, end - start, vm->scratch[0]->encode)) + goto err; + start = end; + + end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; + /* TODO: MI_UPDATE_GTT can update 511 entries in a single command. 
*/ + for_each_sgt_daddr(addr, iter, vma_res->bi.pages) { + if (!gen8_ggtt_blit_fixed_pte(ggtt, start++, 1, pte_encode | addr)) + goto err; + } + + if (!gen8_ggtt_blit_fixed_pte(ggtt, start, end - start, vm->scratch[0]->encode)) + goto err; + + ggtt->invalidate(ggtt); + return; + +err: + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); +} + +static void gen8_ggtt_insert_entries_blit(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + if (!should_update_ggtt_with_blit(i915_vm_to_ggtt(vm))) { + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); + return; + } + + __gen8_ggtt_insert_entries_blit(vm, vma_res, pat_index, flags); +} + static void gen8_ggtt_clear_range(struct i915_address_space *vm, u64 start, u64 length) { @@ -332,6 +486,36 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, gen8_set_pte(&gtt_base[i], scratch_pte); } +static void gen8_ggtt_scratch_range_blit(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; + gen8_pte_t __iomem *gtt_base = + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + if (should_update_ggtt_with_blit(ggtt) && + gen8_ggtt_blit_fixed_pte(ggtt, first_entry, + num_entries, scratch_pte)) { + ggtt->invalidate(ggtt); + return; + } + + for (i = 0; i < num_entries; i++) + gen8_set_pte(&gtt_base[i], scratch_pte); + + ggtt->invalidate(ggtt); +} + static void gen6_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, u64 offset, @@ -997,6 +1181,12 @@ static int gen8_gmch_probe(struct i915_ggtt 
*ggtt) I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; } + if (i915_ggtt_require_blitter(i915)) { + ggtt->vm.scratch_range = gen8_ggtt_scratch_range_blit; + ggtt->vm.insert_page = gen8_ggtt_insert_page_blit; + ggtt->vm.insert_entries = gen8_ggtt_insert_entries_blit; + } + if (intel_uc_wants_guc(&ggtt->vm.gt->uc)) ggtt->invalidate = guc_ggtt_invalidate; else -- 2.26.3 ^ permalink raw reply related [flat|nested] 8+ messages in thread
end of thread, other threads:[~2023-08-24 20:56 UTC | newest] Thread overview: 8+ messages (download: mbox.gz follow: Atom feed -- links below jump to the message on this page -- 2023-08-18 19:42 [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates Andi Shyti 2023-08-18 19:42 ` [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner Andi Shyti 2023-08-18 19:42 ` [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter Andi Shyti 2023-08-19 22:49 ` Andi Shyti 2023-08-18 19:42 ` [Intel-gfx] [PATCH 3/3] drm/i915: Enable GGTT blitting in MTL Andi Shyti 2023-08-18 20:05 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner (rev2) Patchwork -- strict thread matches above, loose matches on Subject: below -- 2023-08-22 15:28 [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates Oak Zeng 2023-08-22 15:28 ` [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter Oak Zeng 2023-08-24 21:03 [Intel-gfx] [PATCH 1/3] drm/i915: Create a bind context for GGTT updates Oak Zeng 2023-08-24 21:03 ` [Intel-gfx] [PATCH 2/3] drm/i915: Implement GGTT update method with blitter Oak Zeng
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox