* [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
@ 2023-10-02 17:24 Jonathan Cavitt
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT Jonathan Cavitt
` (7 more replies)
0 siblings, 8 replies; 21+ messages in thread
From: Jonathan Cavitt @ 2023-10-02 17:24 UTC (permalink / raw)
To: intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, jonathan.cavitt,
chris.p.wilson, nirmoy.das
From: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
The GuC firmware had defined the interface for Translation Look-Aside
Buffer (TLB) invalidation. We should use this interface when
invalidating the engine and GuC TLBs.
Add additional functionality to intel_gt_invalidate_tlb, invalidating
the GuC TLBs and falling back to GT invalidation when the GuC is
disabled.
The invalidation is done by sending a request directly to the GuC
tlb_lookup that invalidates the table. The invalidation is submitted as
a wait request and is performed in the CT event handler. This means we
cannot perform this TLB invalidation path if the CT is not enabled.
If the request isn't fulfilled in two seconds, this would constitute
an error in the invalidation as that would constitute either a lost
request or a severe GuC overload.
With this new invalidation routine, we can perform GuC-based GGTT
invalidations. We should only do this when GuC is enabled and fall
back to the original path when GuC is disabled to prevent concurrent
issuance between GuC and KMD.
Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
Signed-off-by: Bruce Chang <yu.bruce.chang@intel.com>
Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Signed-off-by: Aravind Iddamsetty <aravind.iddamsetty@intel.com>
Signed-off-by: Fei Yang <fei.yang@intel.com>
CC: Andi Shyti <andi.shyti@linux.intel.com>
---
drivers/gpu/drm/i915/gt/intel_ggtt.c | 43 ++--
drivers/gpu/drm/i915/gt/intel_tlb.c | 14 +-
.../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 33 +++
drivers/gpu/drm/i915/gt/uc/intel_guc.h | 22 ++
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 9 +
drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 +
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 212 +++++++++++++++++-
7 files changed, 322 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 4d7d88b92632b..db5644b0146ca 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -206,22 +206,38 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}
-static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_uncore *uncore = gt->uncore;
+ intel_wakeref_t wakeref;
- gen8_ggtt_invalidate(ggtt);
+ with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
+ struct intel_guc *guc = &gt->uc.guc;
- if (GRAPHICS_VER(i915) >= 12) {
- struct intel_gt *gt;
+ intel_guc_invalidate_tlb(guc);
+ }
+}
- list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
- intel_uncore_write_fw(gt->uncore,
- GEN12_GUC_TLB_INV_CR,
- GEN12_GUC_TLB_INV_CR_INVALIDATE);
- } else {
- intel_uncore_write_fw(ggtt->vm.gt->uncore,
- GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_gt *gt;
+
+ if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
+ gen8_ggtt_invalidate(ggtt);
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
+ if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&gt->uc.guc) &&
+ intel_guc_is_ready(&gt->uc.guc)) {
+ guc_ggtt_ct_invalidate(gt);
+ } else if (GRAPHICS_VER(i915) >= 12) {
+ intel_uncore_write(gt->uncore,
+ GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ } else {
+ intel_uncore_write(gt->uncore,
+ GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ }
}
}
@@ -1243,7 +1259,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
}
- if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
+ if (intel_uc_wants_guc(&ggtt->vm.gt->uc) &&
+ intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
ggtt->invalidate = guc_ggtt_invalidate;
else
ggtt->invalidate = gen8_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index 139608c30d978..efe002f14413d 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -12,6 +12,7 @@
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_tlb.h"
+#include "uc/intel_guc.h"
/*
* HW architecture suggest typical invalidation time at 40us,
@@ -131,11 +132,22 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
+ struct intel_guc *guc = &gt->uc.guc;
+
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
goto unlock;
- mmio_invalidate_full(gt);
+ if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
+ if (intel_guc_is_ready(guc))
+ intel_guc_invalidate_tlb_full(guc);
+ } else {
+ /*
+ * Fall back to old path if GuC is disabled.
+ * This is safe because GuC is not enabled and not writing to MMIO.
+ */
+ mmio_invalidate_full(gt);
+ }
write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index f359bef046e0b..9dff8012d5e76 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -138,6 +138,8 @@ enum intel_guc_action {
INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+ INTEL_GUC_ACTION_TLB_INVALIDATION = 0x7000,
+ INTEL_GUC_ACTION_TLB_INVALIDATION_DONE = 0x7001,
INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
@@ -181,4 +183,35 @@ enum intel_guc_state_capture_event_status {
#define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
+#define INTEL_GUC_TLB_INVAL_TYPE_MASK REG_GENMASK(7, 0)
+#define INTEL_GUC_TLB_INVAL_MODE_MASK REG_GENMASK(11, 8)
+#define INTEL_GUC_TLB_INVAL_FLUSH_CACHE REG_BIT(31)
+
+enum intel_guc_tlb_invalidation_type {
+ INTEL_GUC_TLB_INVAL_FULL = 0x0,
+ INTEL_GUC_TLB_INVAL_GUC = 0x3,
+};
+
+/*
+ * 0: Heavy mode of Invalidation:
+ * The pipeline of the engine(s) for which the invalidation is targeted to is
+ * blocked, and all the in-flight transactions are guaranteed to be Globally
+ * Observed before completing the TLB invalidation
+ * 1: Lite mode of Invalidation:
+ * TLBs of the targeted engine(s) are immediately invalidated.
+ * In-flight transactions are NOT guaranteed to be Globally Observed before
+ * completing TLB invalidation.
+ * Light Invalidation Mode is to be used only when
+ * it can be guaranteed (by SW) that the address translations remain invariant
+ * for the in-flight transactions across the TLB invalidation. In other words,
+ * this mode can be used when the TLB invalidation is intended to clear out the
+ * stale cached translations that are no longer in use. Light Invalidation Mode
+ * is much faster than the Heavy Invalidation Mode, as it does not wait for the
+ * in-flight transactions to be GOd.
+ */
+enum intel_guc_tlb_inval_mode {
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY = 0x0,
+ INTEL_GUC_TLB_INVAL_MODE_LITE = 0x1,
+};
+
#endif /* _ABI_GUC_ACTIONS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 6c392bad29c19..5fc5e67f870cc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -79,6 +79,18 @@ struct intel_guc {
*/
atomic_t outstanding_submission_g2h;
+ /** @tlb_lookup: xarray to store all pending TLB invalidation requests */
+ struct xarray tlb_lookup;
+
+ /**
+ * @serial_slot: id to the initial waiter created in tlb_lookup,
+ * which is used only when failed to allocate new waiter.
+ */
+ u32 serial_slot;
+
+ /** @next_seqno: the next id (sequence no.) to allocate. */
+ u32 next_seqno;
+
/** @interrupts: pointers to GuC interrupt-managing functions. */
struct {
bool enabled;
@@ -296,6 +308,11 @@ struct intel_guc {
#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
#define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version)
+struct intel_guc_tlb_wait {
+ struct wait_queue_head wq;
+ u8 status;
+} __aligned(4);
+
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
return container_of(log, struct intel_guc, log);
@@ -417,6 +434,11 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
return intel_uc_fw_is_supported(&guc->fw);
}
+
+int intel_guc_invalidate_tlb_full(struct intel_guc *guc);
+int intel_guc_invalidate_tlb(struct intel_guc *guc);
+int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32 *hxg,
+ u32 size);
static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 6e22af31513a5..4b29a0b814950 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -1186,9 +1186,18 @@ static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *requ
switch (action) {
case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+ case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
g2h_release_space(ct, request->size);
}
+ /* Handle tlb invalidation response in interrupt context */
+ if (action == INTEL_GUC_ACTION_TLB_INVALIDATION_DONE) {
+ int ret = intel_guc_tlb_invalidation_done(ct_to_guc(ct), hxg, request->size);
+
+ ct_free_msg(request);
+ return ret;
+ }
+
spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->requests.incoming);
spin_unlock_irqrestore(&ct->requests.lock, flags);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index b4d56eccfb1f0..01109d15b779b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -22,6 +22,7 @@
/* Payload length only i.e. don't include G2H header length */
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 2
#define G2H_LEN_DW_DEREGISTER_CONTEXT 1
+#define G2H_LEN_DW_INVALIDATE_TLB 1
#define GUC_CONTEXT_DISABLE 0
#define GUC_CONTEXT_ENABLE 1
@@ -498,4 +499,8 @@ enum intel_guc_recv_message {
INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
};
+#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
+ ((intel_guc_ct_enabled(&(guc)->ct)) && \
+ (intel_guc_submission_is_used(guc)) && \
+ (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index ae3495a9c8146..3478fa73180ab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1796,11 +1796,20 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
intel_context_put(parent);
}
+static void wake_up_tlb_invalidate(struct intel_guc_tlb_wait *wait)
+{
+ /* Barrier to ensure the store is observed by the woken thread */
+ smp_store_mb(wait->status, 0);
+ wake_up(&wait->wq);
+}
+
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
{
+ struct intel_guc_tlb_wait *wait;
struct intel_context *ce;
unsigned long index;
unsigned long flags;
+ unsigned long i;
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
@@ -1826,6 +1835,13 @@ void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stall
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
+
+ /*
+ * The full GT reset will have cleared the TLB caches and flushed the
+ * G2H message queue; we can release all the blocked waiters.
+ */
+ xa_for_each(&guc->tlb_lookup, i, wait)
+ wake_up_tlb_invalidate(wait);
}
static void guc_cancel_context_requests(struct intel_context *ce)
@@ -1948,6 +1964,41 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
static void destroyed_worker_func(struct work_struct *w);
static void reset_fail_worker_func(struct work_struct *w);
+static int init_tlb_lookup(struct intel_guc *guc)
+{
+ struct intel_guc_tlb_wait *wait;
+ int err;
+
+ xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
+
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait)
+ return -ENOMEM;
+
+ init_waitqueue_head(&wait->wq);
+ err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait,
+ xa_limit_32b, &guc->next_seqno, GFP_KERNEL);
+ if (err == -ENOMEM) {
+ kfree(wait);
+ return err;
+ }
+
+ return 0;
+}
+
+static void fini_tlb_lookup(struct intel_guc *guc)
+{
+ struct intel_guc_tlb_wait *wait;
+
+ wait = xa_load(&guc->tlb_lookup, guc->serial_slot);
+ if (wait) {
+ GEM_BUG_ON(wait->status);
+ kfree(wait);
+ }
+
+ xa_destroy(&guc->tlb_lookup);
+}
+
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -1966,11 +2017,15 @@ int intel_guc_submission_init(struct intel_guc *guc)
return ret;
}
+ ret = init_tlb_lookup(guc);
+ if (ret)
+ goto destroy_pool;
+
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap) {
ret = -ENOMEM;
- goto destroy_pool;
+ goto destroy_tlb;
}
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
@@ -1979,9 +2034,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
return 0;
+destroy_tlb:
+ fini_tlb_lookup(guc);
destroy_pool:
guc_lrc_desc_pool_destroy_v69(guc);
-
return ret;
}
@@ -1994,6 +2050,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
+ fini_tlb_lookup(guc);
guc->submission_initialized = false;
}
@@ -4624,6 +4681,157 @@ g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
return ce;
}
+static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno)
+{
+ struct intel_guc_tlb_wait *wait;
+ unsigned long flags;
+
+ xa_lock_irqsave(&guc->tlb_lookup, flags);
+ wait = xa_load(&guc->tlb_lookup, seqno);
+
+ /* We received a response after the waiting task did exit with a timeout */
+ if (unlikely(!wait))
+ drm_dbg(&guc_to_gt(guc)->i915->drm,
+ "Stale TLB invalidation response with seqno %d\n", seqno);
+
+ if (wait)
+ wake_up_tlb_invalidate(wait);
+
+ xa_unlock_irqrestore(&guc->tlb_lookup, flags);
+}
+
+int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32 *hxg, u32 size)
+{
+ u32 seqno, hxg_len, len;
+
+ /*
+ * FIXME: these calculations would be better done signed. That
+ * way underflow can be detected as well.
+ */
+ hxg_len = size - GUC_CTB_MSG_MIN_LEN;
+ len = hxg_len - GUC_HXG_MSG_MIN_LEN;
+
+ if (unlikely(len < 1))
+ return -EPROTO;
+
+ seqno = hxg[GUC_HXG_MSG_MIN_LEN];
+ wait_wake_outstanding_tlb_g2h(guc, seqno);
+ return 0;
+}
+
+static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
+{
+ /*
+ * This is equivalent to wait_woken() with the exception that
+ * we do not wake up early if the kthread task has been completed.
+ * As we are called from page reclaim in any task context,
+ * we may be invoked from stopped kthreads, but we *must*
+ * complete the wait from the HW .
+ *
+ * A second problem is that since we are called under reclaim
+ * and wait_woken() inspected the thread state, it makes an invalid
+ * assumption that all PF_KTHREAD tasks have set_kthread_struct()
+ * called upon them, and will trigger a GPF in is_kthread_should_stop().
+ */
+ do {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (wq_entry->flags & WQ_FLAG_WOKEN)
+ break;
+
+ timeout = schedule_timeout(timeout);
+ } while (timeout);
+ __set_current_state(TASK_RUNNING);
+
+ /* See wait_woken() and woken_wake_function() */
+ smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN);
+
+ return timeout;
+}
+
+static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
+{
+ struct intel_guc_tlb_wait _wq, *wq = &_wq;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct intel_gt *gt = guc_to_gt(guc);
+ int err = 0;
+ u32 seqno;
+ u32 action[] = {
+ INTEL_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
+ INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+ };
+ u32 size = ARRAY_SIZE(action);
+
+ if (!intel_guc_ct_enabled(&guc->ct))
+ return -EINVAL;
+
+ init_waitqueue_head(&_wq.wq);
+
+ if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq,
+ xa_limit_32b, &guc->next_seqno,
+ GFP_ATOMIC | __GFP_NOWARN) < 0) {
+ /* Under severe memory pressure? Serialise TLB allocations */
+ xa_lock_irq(&guc->tlb_lookup);
+ wq = xa_load(&guc->tlb_lookup, guc->serial_slot);
+ wait_event_lock_irq(wq->wq,
+ !READ_ONCE(wq->status),
+ guc->tlb_lookup.xa_lock);
+ /*
+ * Update wq->status under lock to ensure only one waiter can
+ * issue the TLB invalidation command using the serial slot at a
+ * time. The condition is set to false before releasing the lock
+ * so that other caller continue to wait until woken up again.
+ */
+ wq->status = 1;
+ xa_unlock_irq(&guc->tlb_lookup);
+
+ seqno = guc->serial_slot;
+ }
+
+ action[1] = seqno;
+
+ add_wait_queue(&wq->wq, &wait);
+
+ err = intel_guc_send_busy_loop(guc, action, size, G2H_LEN_DW_INVALIDATE_TLB, true);
+ if (err)
+ goto out;
+
+ /*
+ * GuC has a timeout of 1ms for a TLB invalidation response from GAM. On a
+ * timeout GuC drops the request and has no mechanism to notify the host about
+ * the timeout. So keep a larger timeout that accounts for this individual
+ * timeout and max number of outstanding invalidation requests that can be
+ * queued in CT buffer.
+ */
+#define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
+ if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
+ gt_err(gt,
+ "TLB invalidation response timed out for seqno %u\n", seqno);
+ err = -ETIME;
+ }
+out:
+ remove_wait_queue(&wq->wq, &wait);
+ if (seqno != guc->serial_slot)
+ xa_erase_irq(&guc->tlb_lookup, seqno);
+
+ return err;
+}
+
+/* Full TLB invalidation */
+int intel_guc_invalidate_tlb_full(struct intel_guc *guc)
+{
+ return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_FULL);
+}
+
+/* GuC TLB Invalidation: Invalidate the TLB's of GuC itself. */
+int intel_guc_invalidate_tlb(struct intel_guc *guc)
+{
+ return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
+}
+
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
const u32 *msg,
u32 len)
--
2.25.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
@ 2023-10-02 17:24 ` Jonathan Cavitt
2023-10-03 10:35 ` Tvrtko Ursulin
2023-10-03 11:50 ` Jani Nikula
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume Jonathan Cavitt
` (6 subsequent siblings)
7 siblings, 2 replies; 21+ messages in thread
From: Jonathan Cavitt @ 2023-10-02 17:24 UTC (permalink / raw)
To: intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, jonathan.cavitt,
chris.p.wilson, nirmoy.das
From: Fei Yang <fei.yang@intel.com>
In case of GT is suspended or wedged, don't allow submission of new TLB
invalidation request and cancel all pending requests. The TLB entries
will be invalidated either during GuC reload or on system resume.
Signed-off-by: Fei Yang <fei.yang@intel.com>
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
CC: John Harrison <john.c.harrison@intel.com>
---
drivers/gpu/drm/i915/gt/uc/intel_guc.h | 1 +
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 35 +++++++++++++++----
drivers/gpu/drm/i915/i915_driver.c | 9 +++++
3 files changed, 39 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 5fc5e67f870cc..0cdc7ca66861c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -536,4 +536,5 @@ void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);
+void wake_up_all_tlb_invalidate(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 3478fa73180ab..2f194cadbe553 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_irq.h"
#include "i915_trace.h"
/**
@@ -1803,13 +1804,20 @@ static void wake_up_tlb_invalidate(struct intel_guc_tlb_wait *wait)
wake_up(&wait->wq);
}
-void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
+void wake_up_all_tlb_invalidate(struct intel_guc *guc)
{
struct intel_guc_tlb_wait *wait;
+ unsigned long i;
+
+ xa_for_each(&guc->tlb_lookup, i, wait)
+ wake_up_tlb_invalidate(wait);
+}
+
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
+{
struct intel_context *ce;
unsigned long index;
unsigned long flags;
- unsigned long i;
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
@@ -1840,8 +1848,7 @@ void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stall
* The full GT reset will have cleared the TLB caches and flushed the
* G2H message queue; we can release all the blocked waiters.
*/
- xa_for_each(&guc->tlb_lookup, i, wait)
- wake_up_tlb_invalidate(wait);
+ wake_up_all_tlb_invalidate(guc);
}
static void guc_cancel_context_requests(struct intel_context *ce)
@@ -1937,6 +1944,12 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
+
+ /*
+ * Wedged GT won't respond to any TLB invalidation request. Simply
+ * release all the blocked waiters.
+ */
+ wake_up_all_tlb_invalidate(guc);
}
void intel_guc_submission_reset_finish(struct intel_guc *guc)
@@ -4748,6 +4761,14 @@ static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
return timeout;
}
+static bool intel_gt_is_enabled(const struct intel_gt *gt)
+{
+ /* Check if GT is wedged or suspended */
+ if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
+ return false;
+ return true;
+}
+
static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
{
struct intel_guc_tlb_wait _wq, *wq = &_wq;
@@ -4765,7 +4786,8 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
};
u32 size = ARRAY_SIZE(action);
- if (!intel_guc_ct_enabled(&guc->ct))
+ if (!intel_guc_ct_enabled(&guc->ct) ||
+ !intel_gt_is_enabled(gt))
return -EINVAL;
init_waitqueue_head(&_wq.wq);
@@ -4807,7 +4829,8 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
* queued in CT buffer.
*/
#define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
- if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
+ if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD) &&
+ intel_gt_is_enabled(gt)) {
gt_err(gt,
"TLB invalidation response timed out for seqno %u\n", seqno);
err = -ETIME;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 78501a83ba109..f5175103ea900 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -72,6 +72,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
+#include "gt/uc/intel_guc.h"
#include "pxp/intel_pxp.h"
#include "pxp/intel_pxp_debugfs.h"
@@ -1092,6 +1093,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_dp_mst_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
+
+ wake_up_all_tlb_invalidate(&to_gt(dev_priv)->uc.guc);
+
intel_hpd_cancel_work(dev_priv);
intel_suspend_encoders(dev_priv);
@@ -1263,6 +1267,11 @@ static int i915_drm_resume(struct drm_device *dev)
intel_gvt_resume(dev_priv);
+ if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&to_gt(dev_priv)->uc.guc)) {
+ intel_guc_invalidate_tlb_full(&to_gt(dev_priv)->uc.guc);
+ intel_guc_invalidate_tlb(&to_gt(dev_priv)->uc.guc);
+ }
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return 0;
--
2.25.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT Jonathan Cavitt
@ 2023-10-02 17:24 ` Jonathan Cavitt
2023-10-03 11:48 ` Jani Nikula
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 4/4] drm/i915/gt: Increase sleep in gt_tlb selftest sanitycheck Jonathan Cavitt
` (5 subsequent siblings)
7 siblings, 1 reply; 21+ messages in thread
From: Jonathan Cavitt @ 2023-10-02 17:24 UTC (permalink / raw)
To: intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, jonathan.cavitt,
chris.p.wilson, nirmoy.das
Consider multi-gt support when cancelling all tlb invalidations on
suspend, and when submitting tlb invalidations on resume.
Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Fei Yang <fei.yang@intel.com>
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
CC: John Harrison <John.C.Harrison@Intel.com>
---
drivers/gpu/drm/i915/i915_driver.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index f5175103ea900..d7655a7b60eda 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -1077,6 +1077,8 @@ static int i915_drm_suspend(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
+ struct intel_gt *gt;
+ int i;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1094,7 +1096,8 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_runtime_pm_disable_interrupts(dev_priv);
- wake_up_all_tlb_invalidate(&to_gt(dev_priv)->uc.guc);
+ for_each_gt(gt, dev_priv, i)
+ wake_up_all_tlb_invalidate(&gt->uc.guc);
intel_hpd_cancel_work(dev_priv);
@@ -1267,9 +1270,11 @@ static int i915_drm_resume(struct drm_device *dev)
intel_gvt_resume(dev_priv);
- if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&to_gt(dev_priv)->uc.guc)) {
- intel_guc_invalidate_tlb_full(&to_gt(dev_priv)->uc.guc);
- intel_guc_invalidate_tlb(&to_gt(dev_priv)->uc.guc);
+ for_each_gt(gt, dev_priv, i) {
+ if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&gt->uc.guc))
+ continue;
+ intel_guc_invalidate_tlb_full(&gt->uc.guc);
+ intel_guc_invalidate_tlb(&gt->uc.guc);
}
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
--
2.25.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [Intel-gfx] [PATCH v3 4/4] drm/i915/gt: Increase sleep in gt_tlb selftest sanitycheck
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT Jonathan Cavitt
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume Jonathan Cavitt
@ 2023-10-02 17:24 ` Jonathan Cavitt
2023-10-02 23:42 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Patchwork
` (4 subsequent siblings)
7 siblings, 0 replies; 21+ messages in thread
From: Jonathan Cavitt @ 2023-10-02 17:24 UTC (permalink / raw)
To: intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, jonathan.cavitt,
chris.p.wilson, nirmoy.das
For the gt_tlb live selftest, increase the timeout from 10 ms to 200 ms.
200 ms should be more than enough time, and 10 ms was too aggressive.
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
---
drivers/gpu/drm/i915/gt/selftest_tlb.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 7e41f69fc818f..46e0a1dbecc8d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -137,7 +137,7 @@ pte_tlbinv(struct intel_context *ce,
i915_request_add(rq);
/* Short sleep to sanitycheck the batch is spinning before we begin */
- msleep(10);
+ msleep(200);
if (va == vb) {
if (!i915_request_completed(rq)) {
pr_err("%s(%s): Semaphore sanitycheck failed %llx, with alignment %llx, using PTE size %x (phys %x, sg %x)\n",
--
2.25.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
` (2 preceding siblings ...)
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 4/4] drm/i915/gt: Increase sleep in gt_tlb selftest sanitycheck Jonathan Cavitt
@ 2023-10-02 23:42 ` Patchwork
2023-10-02 23:42 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
` (3 subsequent siblings)
7 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2023-10-02 23:42 UTC (permalink / raw)
To: Jonathan Cavitt; +Cc: intel-gfx
== Series Details ==
Series: series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
URL : https://patchwork.freedesktop.org/series/124535/
State : warning
== Summary ==
Error: dim checkpatch failed
bedea6a0f331 drm/i915: Define and use GuC and CTB TLB invalidation routines
-:224: ERROR:TRAILING_WHITESPACE: trailing whitespace
#224: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc.h:437:
+^I$
-:271: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'guc' - possible side-effects?
#271: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h:502:
+#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
+ ((intel_guc_ct_enabled(&(guc)->ct)) && \
+ (intel_guc_submission_is_used(guc)) && \
+ (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
-:347: WARNING:AVOID_BUG: Do not crash the kernel unless it is absolutely unavoidable--use WARN_ON_ONCE() plus recovery code (if feasible) instead of BUG() or variants
#347: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:1995:
+ GEM_BUG_ON(wait->status);
total: 1 errors, 1 warnings, 1 checks, 470 lines checked
e481f56de67c drm/i915: No TLB invalidation on wedged or suspended GT
7e3d037b3a01 drm/i915: Perform TLB invalidation on all GTs during suspend/resume
cc0b0930d7f6 drm/i915/gt: Increase sleep in gt_tlb selftest sanitycheck
^ permalink raw reply [flat|nested] 21+ messages in thread
* [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
` (3 preceding siblings ...)
2023-10-02 23:42 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Patchwork
@ 2023-10-02 23:42 ` Patchwork
2023-10-03 0:01 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
` (2 subsequent siblings)
7 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2023-10-02 23:42 UTC (permalink / raw)
To: Jonathan Cavitt; +Cc: intel-gfx
== Series Details ==
Series: series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
URL : https://patchwork.freedesktop.org/series/124535/
State : warning
== Summary ==
Error: dim sparse failed
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.
^ permalink raw reply [flat|nested] 21+ messages in thread
* [Intel-gfx] ✗ Fi.CI.BAT: failure for series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
` (4 preceding siblings ...)
2023-10-02 23:42 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
@ 2023-10-03 0:01 ` Patchwork
2023-10-03 10:28 ` [Intel-gfx] [PATCH v3 1/4] " Tvrtko Ursulin
2023-10-03 11:06 ` Tvrtko Ursulin
7 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2023-10-03 0:01 UTC (permalink / raw)
To: Jonathan Cavitt; +Cc: intel-gfx
[-- Attachment #1: Type: text/plain, Size: 15143 bytes --]
== Series Details ==
Series: series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
URL : https://patchwork.freedesktop.org/series/124535/
State : failure
== Summary ==
CI Bug Log - changes from CI_DRM_13702 -> Patchwork_124535v1
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with Patchwork_124535v1 absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in Patchwork_124535v1, please notify your bug team (lgci.bug.filing@intel.com) to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/index.html
Participating hosts (37 -> 40)
------------------------------
Additional (4): fi-skl-guc fi-hsw-4770 bat-adlm-1 bat-adlp-6
Missing (1): fi-snb-2520m
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in Patchwork_124535v1:
### IGT changes ###
#### Possible regressions ####
* igt@gem_ringfill@basic-all:
- bat-dg1-5: [PASS][1] -> [DMESG-WARN][2] +5 other tests dmesg-warn
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-dg1-5/igt@gem_ringfill@basic-all.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-dg1-5/igt@gem_ringfill@basic-all.html
* igt@i915_selftest@live@guc_hang:
- bat-adlp-9: [PASS][3] -> [ABORT][4]
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-adlp-9/igt@i915_selftest@live@guc_hang.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlp-9/igt@i915_selftest@live@guc_hang.html
- bat-dg2-11: [PASS][5] -> [ABORT][6]
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-dg2-11/igt@i915_selftest@live@guc_hang.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-dg2-11/igt@i915_selftest@live@guc_hang.html
- bat-adlm-1: NOTRUN -> [ABORT][7]
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@i915_selftest@live@guc_hang.html
* igt@i915_selftest@live@reset:
- bat-rpls-1: [PASS][8] -> [TIMEOUT][9]
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-rpls-1/igt@i915_selftest@live@reset.html
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-rpls-1/igt@i915_selftest@live@reset.html
Known issues
------------
Here are the changes found in Patchwork_124535v1 that come from known issues:
### CI changes ###
#### Issues hit ####
* boot:
- fi-bsw-n3050: [PASS][10] -> [FAIL][11] ([i915#8293])
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/fi-bsw-n3050/boot.html
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/fi-bsw-n3050/boot.html
### IGT changes ###
#### Issues hit ####
* igt@debugfs_test@basic-hwmon:
- bat-adlp-6: NOTRUN -> [SKIP][12] ([i915#9318])
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlp-6/igt@debugfs_test@basic-hwmon.html
- bat-adlm-1: NOTRUN -> [SKIP][13] ([i915#3826])
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@debugfs_test@basic-hwmon.html
* igt@fbdev@eof:
- bat-adlm-1: NOTRUN -> [SKIP][14] ([i915#2582]) +3 other tests skip
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@fbdev@eof.html
* igt@fbdev@info:
- bat-adlm-1: NOTRUN -> [SKIP][15] ([i915#1849] / [i915#2582])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@fbdev@info.html
* igt@gem_lmem_swapping@basic:
- fi-skl-guc: NOTRUN -> [SKIP][16] ([fdo#109271] / [i915#4613]) +3 other tests skip
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/fi-skl-guc/igt@gem_lmem_swapping@basic.html
* igt@gem_lmem_swapping@parallel-random-engines:
- bat-adlm-1: NOTRUN -> [SKIP][17] ([i915#4613]) +3 other tests skip
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@gem_lmem_swapping@parallel-random-engines.html
* igt@gem_tiled_pread_basic:
- bat-adlp-6: NOTRUN -> [SKIP][18] ([i915#3282])
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlp-6/igt@gem_tiled_pread_basic.html
- bat-adlm-1: NOTRUN -> [SKIP][19] ([i915#3282])
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@gem_tiled_pread_basic.html
* igt@i915_pm_rps@basic-api:
- bat-adlm-1: NOTRUN -> [SKIP][20] ([i915#6621])
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@i915_pm_rps@basic-api.html
* igt@i915_selftest@live@gt_heartbeat:
- fi-apl-guc: [PASS][21] -> [DMESG-FAIL][22] ([i915#5334])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/fi-apl-guc/igt@i915_selftest@live@gt_heartbeat.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/fi-apl-guc/igt@i915_selftest@live@gt_heartbeat.html
* igt@i915_suspend@basic-s2idle-without-i915:
- bat-rpls-1: [PASS][23] -> [WARN][24] ([i915#8747])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-rpls-1/igt@i915_suspend@basic-s2idle-without-i915.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-rpls-1/igt@i915_suspend@basic-s2idle-without-i915.html
* igt@i915_suspend@basic-s3-without-i915:
- bat-mtlp-6: NOTRUN -> [SKIP][25] ([i915#6645])
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-mtlp-6/igt@i915_suspend@basic-s3-without-i915.html
* igt@kms_addfb_basic@addfb25-y-tiled-small-legacy:
- fi-hsw-4770: NOTRUN -> [SKIP][26] ([fdo#109271]) +13 other tests skip
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/fi-hsw-4770/igt@kms_addfb_basic@addfb25-y-tiled-small-legacy.html
* igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy:
- bat-adlp-6: NOTRUN -> [SKIP][27] ([i915#4103] / [i915#5608]) +1 other test skip
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlp-6/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html
* igt@kms_cursor_legacy@basic-flip-after-cursor-varying-size:
- bat-adlm-1: NOTRUN -> [SKIP][28] ([i915#1845]) +16 other tests skip
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@kms_cursor_legacy@basic-flip-after-cursor-varying-size.html
* igt@kms_dsc@dsc-basic:
- fi-skl-guc: NOTRUN -> [SKIP][29] ([fdo#109271]) +12 other tests skip
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/fi-skl-guc/igt@kms_dsc@dsc-basic.html
- bat-adlp-6: NOTRUN -> [SKIP][30] ([i915#3555] / [i915#3840])
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlp-6/igt@kms_dsc@dsc-basic.html
* igt@kms_flip@basic-plain-flip:
- bat-adlm-1: NOTRUN -> [SKIP][31] ([i915#3637]) +3 other tests skip
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@kms_flip@basic-plain-flip.html
* igt@kms_force_connector_basic@force-load-detect:
- bat-adlm-1: NOTRUN -> [SKIP][32] ([fdo#109285])
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@kms_force_connector_basic@force-load-detect.html
- bat-adlp-6: NOTRUN -> [SKIP][33] ([fdo#109285])
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlp-6/igt@kms_force_connector_basic@force-load-detect.html
* igt@kms_frontbuffer_tracking@basic:
- bat-adlm-1: NOTRUN -> [SKIP][34] ([i915#1849])
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@kms_frontbuffer_tracking@basic.html
* igt@kms_hdmi_inject@inject-audio:
- fi-skl-guc: NOTRUN -> [FAIL][35] ([IGT#3])
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/fi-skl-guc/igt@kms_hdmi_inject@inject-audio.html
* igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-c-dp-5:
- bat-adlp-11: [PASS][36] -> [ABORT][37] ([i915#8668])
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-adlp-11/igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-c-dp-5.html
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlp-11/igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-c-dp-5.html
* igt@kms_pipe_crc_basic@suspend-read-crc:
- bat-mtlp-6: NOTRUN -> [SKIP][38] ([i915#1845] / [i915#4078])
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-mtlp-6/igt@kms_pipe_crc_basic@suspend-read-crc.html
* igt@kms_pipe_crc_basic@suspend-read-crc@pipe-c-vga-1:
- fi-hsw-4770: NOTRUN -> [DMESG-WARN][39] ([i915#8841]) +6 other tests dmesg-warn
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/fi-hsw-4770/igt@kms_pipe_crc_basic@suspend-read-crc@pipe-c-vga-1.html
* igt@kms_psr@cursor_plane_move:
- bat-adlm-1: NOTRUN -> [SKIP][40] ([i915#1072]) +3 other tests skip
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@kms_psr@cursor_plane_move.html
* igt@kms_psr@sprite_plane_onoff:
- fi-hsw-4770: NOTRUN -> [SKIP][41] ([fdo#109271] / [i915#1072]) +3 other tests skip
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/fi-hsw-4770/igt@kms_psr@sprite_plane_onoff.html
* igt@kms_setmode@basic-clone-single-crtc:
- bat-adlm-1: NOTRUN -> [SKIP][42] ([i915#3555])
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@kms_setmode@basic-clone-single-crtc.html
* igt@prime_vgem@basic-fence-flip:
- bat-adlm-1: NOTRUN -> [SKIP][43] ([i915#1845] / [i915#3708])
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@prime_vgem@basic-fence-flip.html
* igt@prime_vgem@basic-write:
- bat-adlm-1: NOTRUN -> [SKIP][44] ([i915#3708]) +2 other tests skip
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlm-1/igt@prime_vgem@basic-write.html
#### Possible fixes ####
* igt@gem_exec_suspend@basic-s0@lmem0:
- bat-dg2-9: [INCOMPLETE][45] ([i915#9275]) -> [PASS][46]
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-dg2-9/igt@gem_exec_suspend@basic-s0@lmem0.html
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-dg2-9/igt@gem_exec_suspend@basic-s0@lmem0.html
* igt@gem_exec_suspend@basic-s3@smem:
- bat-mtlp-8: [ABORT][47] ([i915#9262]) -> [PASS][48]
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-mtlp-8/igt@gem_exec_suspend@basic-s3@smem.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-mtlp-8/igt@gem_exec_suspend@basic-s3@smem.html
* igt@i915_selftest@live@gem_migrate:
- bat-mtlp-6: [INCOMPLETE][49] -> [PASS][50]
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-mtlp-6/igt@i915_selftest@live@gem_migrate.html
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-mtlp-6/igt@i915_selftest@live@gem_migrate.html
* igt@i915_selftest@live@hugepages:
- bat-mtlp-8: [DMESG-WARN][51] ([i915#8962]) -> [PASS][52]
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-mtlp-8/igt@i915_selftest@live@hugepages.html
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-mtlp-8/igt@i915_selftest@live@hugepages.html
* igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-d-dp-5:
- bat-adlp-11: [ABORT][53] ([i915#8668]) -> [PASS][54]
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13702/bat-adlp-11/igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-d-dp-5.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/bat-adlp-11/igt@kms_pipe_crc_basic@read-crc-frame-sequence@pipe-d-dp-5.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[IGT#3]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/3
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109285]: https://bugs.freedesktop.org/show_bug.cgi?id=109285
[i915#1072]: https://gitlab.freedesktop.org/drm/intel/issues/1072
[i915#1845]: https://gitlab.freedesktop.org/drm/intel/issues/1845
[i915#1849]: https://gitlab.freedesktop.org/drm/intel/issues/1849
[i915#2582]: https://gitlab.freedesktop.org/drm/intel/issues/2582
[i915#3282]: https://gitlab.freedesktop.org/drm/intel/issues/3282
[i915#3555]: https://gitlab.freedesktop.org/drm/intel/issues/3555
[i915#3637]: https://gitlab.freedesktop.org/drm/intel/issues/3637
[i915#3708]: https://gitlab.freedesktop.org/drm/intel/issues/3708
[i915#3826]: https://gitlab.freedesktop.org/drm/intel/issues/3826
[i915#3840]: https://gitlab.freedesktop.org/drm/intel/issues/3840
[i915#4078]: https://gitlab.freedesktop.org/drm/intel/issues/4078
[i915#4103]: https://gitlab.freedesktop.org/drm/intel/issues/4103
[i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
[i915#5334]: https://gitlab.freedesktop.org/drm/intel/issues/5334
[i915#5354]: https://gitlab.freedesktop.org/drm/intel/issues/5354
[i915#5608]: https://gitlab.freedesktop.org/drm/intel/issues/5608
[i915#6621]: https://gitlab.freedesktop.org/drm/intel/issues/6621
[i915#6645]: https://gitlab.freedesktop.org/drm/intel/issues/6645
[i915#7952]: https://gitlab.freedesktop.org/drm/intel/issues/7952
[i915#8293]: https://gitlab.freedesktop.org/drm/intel/issues/8293
[i915#8668]: https://gitlab.freedesktop.org/drm/intel/issues/8668
[i915#8747]: https://gitlab.freedesktop.org/drm/intel/issues/8747
[i915#8841]: https://gitlab.freedesktop.org/drm/intel/issues/8841
[i915#8962]: https://gitlab.freedesktop.org/drm/intel/issues/8962
[i915#9262]: https://gitlab.freedesktop.org/drm/intel/issues/9262
[i915#9275]: https://gitlab.freedesktop.org/drm/intel/issues/9275
[i915#9318]: https://gitlab.freedesktop.org/drm/intel/issues/9318
Build changes
-------------
* Linux: CI_DRM_13702 -> Patchwork_124535v1
CI-20190529: 20190529
CI_DRM_13702: 712ba44bed40a31304163b0cf67c455224e2e4a9 @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_7508: f366406b05ca6b3d16eaa734a91e0833bd159f54 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
Patchwork_124535v1: 712ba44bed40a31304163b0cf67c455224e2e4a9 @ git://anongit.freedesktop.org/gfx-ci/linux
### Linux commits
de0732836a45 drm/i915/gt: Increase sleep in gt_tlb selftest sanitycheck
c121eb11449a drm/i915: Perform TLB invalidation on all GTs during suspend/resume
35b0d13600d7 drm/i915: No TLB invalidation on wedged or suspended GT
b32f46274856 drm/i915: Define and use GuC and CTB TLB invalidation routines
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_124535v1/index.html
[-- Attachment #2: Type: text/html, Size: 17609 bytes --]
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
` (5 preceding siblings ...)
2023-10-03 0:01 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
@ 2023-10-03 10:28 ` Tvrtko Ursulin
2023-10-03 16:41 ` Andi Shyti
2023-10-03 20:23 ` John Harrison
2023-10-03 11:06 ` Tvrtko Ursulin
7 siblings, 2 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2023-10-03 10:28 UTC (permalink / raw)
To: Jonathan Cavitt, intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, chris.p.wilson,
nirmoy.das
On 02/10/2023 18:24, Jonathan Cavitt wrote:
> From: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
>
> The GuC firmware had defined the interface for Translation Look-Aside
> Buffer (TLB) invalidation. We should use this interface when
> invalidating the engine and GuC TLBs.
> Add additional functionality to intel_gt_invalidate_tlb, invalidating
> the GuC TLBs and falling back to GT invalidation when the GuC is
> disabled.
> The invalidation is done by sending a request directly to the GuC
> tlb_lookup that invalidates the table. The invalidation is submitted as
> a wait request and is performed in the CT event handler. This means we
> cannot perform this TLB invalidation path if the CT is not enabled.
> If the request isn't fulfilled in two seconds, this would constitute
> an error in the invalidation as that would constitute either a lost
> request or a severe GuC overload.
>
> With this new invalidation routine, we can perform GuC-based GGTT
> invalidations. We should only do this when GuC is enabled and fall
> back to the original path when GuC is disabled to prevent concurrent
> issuance between GuC and KMD.
>
> Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> Signed-off-by: Bruce Chang <yu.bruce.chang@intel.com>
> Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
> Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
> Signed-off-by: Aravind Iddamsetty <aravind.iddamsetty@intel.com>
> Signed-off-by: Fei Yang <fei.yang@intel.com>
> CC: Andi Shyti <andi.shyti@linux.intel.com>
> ---
> drivers/gpu/drm/i915/gt/intel_ggtt.c | 43 ++--
> drivers/gpu/drm/i915/gt/intel_tlb.c | 14 +-
> .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 33 +++
> drivers/gpu/drm/i915/gt/uc/intel_guc.h | 22 ++
> drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 9 +
> drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 +
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 212 +++++++++++++++++-
> 7 files changed, 322 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> index 4d7d88b92632b..db5644b0146ca 100644
> --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> @@ -206,22 +206,38 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
> intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
> }
>
> -static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
> +static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
> {
> - struct drm_i915_private *i915 = ggtt->vm.i915;
> + struct intel_uncore *uncore = gt->uncore;
> + intel_wakeref_t wakeref;
>
> - gen8_ggtt_invalidate(ggtt);
> + with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
> + struct intel_guc *guc = >->uc.guc;
>
> - if (GRAPHICS_VER(i915) >= 12) {
> - struct intel_gt *gt;
> + intel_guc_invalidate_tlb(guc);
> + }
> +}
>
> - list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
> - intel_uncore_write_fw(gt->uncore,
> - GEN12_GUC_TLB_INV_CR,
> - GEN12_GUC_TLB_INV_CR_INVALIDATE);
> - } else {
> - intel_uncore_write_fw(ggtt->vm.gt->uncore,
> - GEN8_GTCR, GEN8_GTCR_INVALIDATE);
> +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
> +{
> + struct drm_i915_private *i915 = ggtt->vm.i915;
> + struct intel_gt *gt;
> +
> + if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
> + gen8_ggtt_invalidate(ggtt);
> +
> + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(>->uc.guc) &&
> + intel_guc_is_ready(>->uc.guc)) {
The condition here expands to a relatively heavy one:
+#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
+ ((intel_guc_ct_enabled(&(guc)->ct)) && \
+ (intel_guc_submission_is_used(guc)) && \
+ (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
&&
static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}
intel_guc_ct_enabled is even duplicated.
Is there scope to consolidate the parts which are platform invariant, or even runtime invariant, or at least guaranteed not to transition back and forth but one way only?
In other words, if we know during init we will want it, mark it as a flag in intel_guc or somewhere, and then at runtime check only those conditions which can transition back and forth due to driver flows.
I am not saying this is performance sensitive, but in terms of elegance, readability and self-documentation the proposed version looks a bit sub-optimal to me.
> + guc_ggtt_ct_invalidate(gt);
> + } else if (GRAPHICS_VER(i915) >= 12) {
> + intel_uncore_write(gt->uncore,
> + GEN12_GUC_TLB_INV_CR,
> + GEN12_GUC_TLB_INV_CR_INVALIDATE);
> + } else {
> + intel_uncore_write(gt->uncore,
> + GEN8_GTCR, GEN8_GTCR_INVALIDATE);
> + }
> }
> }
>
> @@ -1243,7 +1259,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
> ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
> }
>
> - if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
> + if (intel_uc_wants_guc(&ggtt->vm.gt->uc) &&
> + intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
Is 2nd condition perhaps a superset of the 1st?
> ggtt->invalidate = guc_ggtt_invalidate;
> else
> ggtt->invalidate = gen8_ggtt_invalidate;
> diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
> index 139608c30d978..efe002f14413d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_tlb.c
> +++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
> @@ -12,6 +12,7 @@
> #include "intel_gt_print.h"
> #include "intel_gt_regs.h"
> #include "intel_tlb.h"
> +#include "uc/intel_guc.h"
>
> /*
> * HW architecture suggest typical invalidation time at 40us,
> @@ -131,11 +132,22 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
> return;
>
> with_intel_gt_pm_if_awake(gt, wakeref) {
> + struct intel_guc *guc = >->uc.guc;
> +
> mutex_lock(>->tlb.invalidate_lock);
> if (tlb_seqno_passed(gt, seqno))
> goto unlock;
>
> - mmio_invalidate_full(gt);
> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
> + if (intel_guc_is_ready(guc))
> + intel_guc_invalidate_tlb_full(guc);
> + } else {
> + /*
> + * Fall back to old path if GuC is disabled.
> + * This is safe because GuC is not enabled and not writing to MMIO.
> + */
Is it safe for intel_guc_is_ready() to transition from false to true during GuC init? Is there no way for some path to start issuing invalidations as that is happening?
> + mmio_invalidate_full(gt);
> + }
>
> write_seqcount_invalidate(>->tlb.seqno);
> unlock:
> diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> index f359bef046e0b..9dff8012d5e76 100644
> --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> @@ -138,6 +138,8 @@ enum intel_guc_action {
> INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
> INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
> INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
> + INTEL_GUC_ACTION_TLB_INVALIDATION = 0x7000,
> + INTEL_GUC_ACTION_TLB_INVALIDATION_DONE = 0x7001,
> INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
> INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
> INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
> @@ -181,4 +183,35 @@ enum intel_guc_state_capture_event_status {
>
> #define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
>
> +#define INTEL_GUC_TLB_INVAL_TYPE_MASK REG_GENMASK(7, 0)
> +#define INTEL_GUC_TLB_INVAL_MODE_MASK REG_GENMASK(11, 8)
> +#define INTEL_GUC_TLB_INVAL_FLUSH_CACHE REG_BIT(31)
> +
> +enum intel_guc_tlb_invalidation_type {
> + INTEL_GUC_TLB_INVAL_FULL = 0x0,
> + INTEL_GUC_TLB_INVAL_GUC = 0x3,
> +};
> +
> +/*
> + * 0: Heavy mode of Invalidation:
> + * The pipeline of the engine(s) for which the invalidation is targeted to is
> + * blocked, and all the in-flight transactions are guaranteed to be Globally
> + * Observed before completing the TLB invalidation
> + * 1: Lite mode of Invalidation:
> + * TLBs of the targeted engine(s) are immediately invalidated.
> + * In-flight transactions are NOT guaranteed to be Globally Observed before
> + * completing TLB invalidation.
> + * Light Invalidation Mode is to be used only when
> + * it can be guaranteed (by SW) that the address translations remain invariant
> + * for the in-flight transactions across the TLB invalidation. In other words,
> + * this mode can be used when the TLB invalidation is intended to clear out the
> + * stale cached translations that are no longer in use. Light Invalidation Mode
> + * is much faster than the Heavy Invalidation Mode, as it does not wait for the
> + * in-flight transactions to be GOd.
> + */
> +enum intel_guc_tlb_inval_mode {
> + INTEL_GUC_TLB_INVAL_MODE_HEAVY = 0x0,
> + INTEL_GUC_TLB_INVAL_MODE_LITE = 0x1,
> +};
> +
> #endif /* _ABI_GUC_ACTIONS_ABI_H */
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> index 6c392bad29c19..5fc5e67f870cc 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> @@ -79,6 +79,18 @@ struct intel_guc {
> */
> atomic_t outstanding_submission_g2h;
>
> + /** @tlb_lookup: xarray to store all pending TLB invalidation requests */
> + struct xarray tlb_lookup;
> +
> + /**
> + * @serial_slot: id to the initial waiter created in tlb_lookup,
> + * which is used only when failed to allocate new waiter.
> + */
> + u32 serial_slot;
> +
> + /** @next_seqno: the next id (sequence no.) to allocate. */
> + u32 next_seqno;
> +
> /** @interrupts: pointers to GuC interrupt-managing functions. */
> struct {
> bool enabled;
> @@ -296,6 +308,11 @@ struct intel_guc {
> #define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
> #define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version)
>
> +struct intel_guc_tlb_wait {
> + struct wait_queue_head wq;
> + u8 status;
> +} __aligned(4);
Put a comment here please stating why it needs to be aligned.
> +
> static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
> {
> return container_of(log, struct intel_guc, log);
> @@ -417,6 +434,11 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc)
> {
> return intel_uc_fw_is_supported(&guc->fw);
> }
> +
> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc);
> +int intel_guc_invalidate_tlb(struct intel_guc *guc);
> +int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32 *hxg,
> + u32 size);
>
> static inline bool intel_guc_is_wanted(struct intel_guc *guc)
> {
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> index 6e22af31513a5..4b29a0b814950 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> @@ -1186,9 +1186,18 @@ static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *requ
> switch (action) {
> case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
> case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
> + case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
> g2h_release_space(ct, request->size);
> }
>
> + /* Handle tlb invalidation response in interrupt context */
> + if (action == INTEL_GUC_ACTION_TLB_INVALIDATION_DONE) {
> + int ret = intel_guc_tlb_invalidation_done(ct_to_guc(ct), hxg, request->size);
> +
> + ct_free_msg(request);
> + return ret;
> + }
1)
Can the comment say why it is important to handle these in a special path instead of re-using the existing worker/list?
2)
Could it instead of duplicating some h2g logic in intel_guc_tlb_invalidation_done call ct_process_request, and so handle all actions in a centralized place?
> +
> spin_lock_irqsave(&ct->requests.lock, flags);
> list_add_tail(&request->link, &ct->requests.incoming);
> spin_unlock_irqrestore(&ct->requests.lock, flags);
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> index b4d56eccfb1f0..01109d15b779b 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> @@ -22,6 +22,7 @@
> /* Payload length only i.e. don't include G2H header length */
> #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 2
> #define G2H_LEN_DW_DEREGISTER_CONTEXT 1
> +#define G2H_LEN_DW_INVALIDATE_TLB 1
>
> #define GUC_CONTEXT_DISABLE 0
> #define GUC_CONTEXT_ENABLE 1
> @@ -498,4 +499,8 @@ enum intel_guc_recv_message {
> INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
> };
>
> +#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
> + ((intel_guc_ct_enabled(&(guc)->ct)) && \
> + (intel_guc_submission_is_used(guc)) && \
> + (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
> #endif
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index ae3495a9c8146..3478fa73180ab 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -1796,11 +1796,20 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
> intel_context_put(parent);
> }
>
> +static void wake_up_tlb_invalidate(struct intel_guc_tlb_wait *wait)
> +{
> + /* Barrier to ensure the store is observed by the woken thread */
> + smp_store_mb(wait->status, 0);
Is the memory barrier required, given the main caller is from a spinlocked section?
> + wake_up(&wait->wq);
> +}
> +
> void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
> {
> + struct intel_guc_tlb_wait *wait;
> struct intel_context *ce;
> unsigned long index;
> unsigned long flags;
> + unsigned long i;
>
> if (unlikely(!guc_submission_initialized(guc))) {
> /* Reset called during driver load? GuC not yet initialised! */
> @@ -1826,6 +1835,13 @@ void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stall
>
> /* GuC is blown away, drop all references to contexts */
> xa_destroy(&guc->context_lookup);
> +
> + /*
> + * The full GT reset will have cleared the TLB caches and flushed the
> + * G2H message queue; we can release all the blocked waiters.
> + */
> + xa_for_each(&guc->tlb_lookup, i, wait)
> + wake_up_tlb_invalidate(wait);
> }
>
> static void guc_cancel_context_requests(struct intel_context *ce)
> @@ -1948,6 +1964,41 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
> static void destroyed_worker_func(struct work_struct *w);
> static void reset_fail_worker_func(struct work_struct *w);
>
> +static int init_tlb_lookup(struct intel_guc *guc)
> +{
> + struct intel_guc_tlb_wait *wait;
> + int err;
> +
> + xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
> +
> + wait = kzalloc(sizeof(*wait), GFP_KERNEL);
> + if (!wait)
> + return -ENOMEM;
> +
> + init_waitqueue_head(&wait->wq);
> + err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait,
> + xa_limit_32b, &guc->next_seqno, GFP_KERNEL);
What is this id, allocated during init, used for, and when does it get freed?
> + if (err == -ENOMEM) {
> + kfree(wait);
> + return err;
> + }
> +
> + return 0;
> +}
> +
> +static void fini_tlb_lookup(struct intel_guc *guc)
> +{
> + struct intel_guc_tlb_wait *wait;
> +
> + wait = xa_load(&guc->tlb_lookup, guc->serial_slot);
> + if (wait) {
> + GEM_BUG_ON(wait->status);
> + kfree(wait);
> + }
> +
> + xa_destroy(&guc->tlb_lookup);
> +}
> +
> /*
> * Set up the memory resources to be shared with the GuC (via the GGTT)
> * at firmware loading time.
> @@ -1966,11 +2017,15 @@ int intel_guc_submission_init(struct intel_guc *guc)
> return ret;
> }
>
> + ret = init_tlb_lookup(guc);
> + if (ret)
> + goto destroy_pool;
> +
> guc->submission_state.guc_ids_bitmap =
> bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> if (!guc->submission_state.guc_ids_bitmap) {
> ret = -ENOMEM;
> - goto destroy_pool;
> + goto destroy_tlb;
> }
>
> guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> @@ -1979,9 +2034,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
>
> return 0;
>
> +destroy_tlb:
> + fini_tlb_lookup(guc);
> destroy_pool:
> guc_lrc_desc_pool_destroy_v69(guc);
> -
> return ret;
> }
>
> @@ -1994,6 +2050,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> guc_lrc_desc_pool_destroy_v69(guc);
> i915_sched_engine_put(guc->sched_engine);
> bitmap_free(guc->submission_state.guc_ids_bitmap);
> + fini_tlb_lookup(guc);
> guc->submission_initialized = false;
> }
>
> @@ -4624,6 +4681,157 @@ g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
> return ce;
> }
>
> +static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno)
> +{
> + struct intel_guc_tlb_wait *wait;
> + unsigned long flags;
> +
> + xa_lock_irqsave(&guc->tlb_lookup, flags);
> + wait = xa_load(&guc->tlb_lookup, seqno);
> +
> + /* We received a response after the waiting task did exit with a timeout */
> + if (unlikely(!wait))
> + drm_dbg(&guc_to_gt(guc)->i915->drm,
> + "Stale TLB invalidation response with seqno %d\n", seqno);
> +
> + if (wait)
> + wake_up_tlb_invalidate(wait);
> +
> + xa_unlock_irqrestore(&guc->tlb_lookup, flags);
> +}
> +
> +int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32 *hxg, u32 size)
> +{
> + u32 seqno, hxg_len, len;
> +
> + /*
> + * FIXME: these calculations would be better done signed. That
> + * way underflow can be detected as well.
> + */
When can this FIXME be addressed?
> + hxg_len = size - GUC_CTB_MSG_MIN_LEN;
> + len = hxg_len - GUC_HXG_MSG_MIN_LEN;
> +
> + if (unlikely(len < 1))
> + return -EPROTO;
> +
> + seqno = hxg[GUC_HXG_MSG_MIN_LEN];
> + wait_wake_outstanding_tlb_g2h(guc, seqno);
> + return 0;
> +}
> +
> +static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
> +{
> + /*
> + * This is equivalent to wait_woken() with the exception that
> + * we do not wake up early if the kthread task has been completed.
> + * As we are called from page reclaim in any task context,
> + * we may be invoked from stopped kthreads, but we *must*
> + * complete the wait from the HW .
> + *
> + * A second problem is that since we are called under reclaim
> + * and wait_woken() inspected the thread state, it makes an invalid
> + * assumption that all PF_KTHREAD tasks have set_kthread_struct()
> + * called upon them, and will trigger a GPF in is_kthread_should_stop().
> + */
> + do {
> + set_current_state(TASK_UNINTERRUPTIBLE);
> + if (wq_entry->flags & WQ_FLAG_WOKEN)
> + break;
> +
> + timeout = schedule_timeout(timeout);
> + } while (timeout);
> + __set_current_state(TASK_RUNNING);
> +
> + /* See wait_woken() and woken_wake_function() */
> + smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN);
> +
> + return timeout;
> +}
> +
> +static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
> +{
> + struct intel_guc_tlb_wait _wq, *wq = &_wq;
> + DEFINE_WAIT_FUNC(wait, woken_wake_function);
> + struct intel_gt *gt = guc_to_gt(guc);
> + int err = 0;
Looks like err does not need to be initialized.
> + u32 seqno;
> + u32 action[] = {
> + INTEL_GUC_ACTION_TLB_INVALIDATION,
> + 0,
> + REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
> + REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
> + INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
> + INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
> + };
> + u32 size = ARRAY_SIZE(action);
> +
> + if (!intel_guc_ct_enabled(&guc->ct))
> + return -EINVAL;
> +
> + init_waitqueue_head(&_wq.wq);
> +
> + if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq,
> + xa_limit_32b, &guc->next_seqno,
> + GFP_ATOMIC | __GFP_NOWARN) < 0) {
> + /* Under severe memory pressure? Serialise TLB allocations */
> + xa_lock_irq(&guc->tlb_lookup);
> + wq = xa_load(&guc->tlb_lookup, guc->serial_slot);
> + wait_event_lock_irq(wq->wq,
> + !READ_ONCE(wq->status),
> + guc->tlb_lookup.xa_lock);
> + /*
> + * Update wq->status under lock to ensure only one waiter can
> + * issue the TLB invalidation command using the serial slot at a
> + * time. The condition is set to false before releasing the lock
> + * so that other caller continue to wait until woken up again.
> + */
> + wq->status = 1;
> + xa_unlock_irq(&guc->tlb_lookup);
> +
> + seqno = guc->serial_slot;
> + }
> +
> + action[1] = seqno;
> +
> + add_wait_queue(&wq->wq, &wait);
> +
> + err = intel_guc_send_busy_loop(guc, action, size, G2H_LEN_DW_INVALIDATE_TLB, true);
Does the busy-looping version have to be used from here? It looks like the lock has been dropped by this point, so the function could otherwise sleep.
> + if (err)
> + goto out;
> +
> + /*
> + * GuC has a timeout of 1ms for a TLB invalidation response from GAM. On a
> + * timeout GuC drops the request and has no mechanism to notify the host about
> + * the timeout. So keep a larger timeout that accounts for this individual
> + * timeout and max number of outstanding invalidation requests that can be
> + * queued in CT buffer.
> + */
> +#define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
Is it possible to express the magic 2 seconds with some expressions involving the CT buffer size, multiplied by timeout per request, as the comment alludes?
> + if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
> + gt_err(gt,
> + "TLB invalidation response timed out for seqno %u\n", seqno);
> + err = -ETIME;
> + }
> +out:
> + remove_wait_queue(&wq->wq, &wait);
> + if (seqno != guc->serial_slot)
> + xa_erase_irq(&guc->tlb_lookup, seqno);
> +
> + return err;
> +}
> +
> +/* Full TLB invalidation */
> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc)
> +{
> + return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_FULL);
> +}
> +
> +/* GuC TLB Invalidation: Invalidate the TLB's of GuC itself. */
> +int intel_guc_invalidate_tlb(struct intel_guc *guc)
> +{
> + return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
> +}
> +
> int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
> const u32 *msg,
> u32 len)
Regards,
Tvrtko
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT Jonathan Cavitt
@ 2023-10-03 10:35 ` Tvrtko Ursulin
2023-10-03 11:50 ` Jani Nikula
1 sibling, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2023-10-03 10:35 UTC (permalink / raw)
To: Jonathan Cavitt, intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, chris.p.wilson,
nirmoy.das
On 02/10/2023 18:24, Jonathan Cavitt wrote:
> From: Fei Yang <fei.yang@intel.com>
>
> In case of GT is suspended or wedged, don't allow submission of new TLB
> invalidation request and cancel all pending requests. The TLB entries
> will be invalidated either during GuC reload or on system resume.
>
> Signed-off-by: Fei Yang <fei.yang@intel.com>
> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
> CC: John Harrison <john.c.harrison@intel.com>
> ---
> drivers/gpu/drm/i915/gt/uc/intel_guc.h | 1 +
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 35 +++++++++++++++----
> drivers/gpu/drm/i915/i915_driver.c | 9 +++++
> 3 files changed, 39 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> index 5fc5e67f870cc..0cdc7ca66861c 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> @@ -536,4 +536,5 @@ void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
>
> int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);
>
> +void wake_up_all_tlb_invalidate(struct intel_guc *guc);
> #endif
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 3478fa73180ab..2f194cadbe553 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -32,6 +32,7 @@
>
> #include "i915_drv.h"
> #include "i915_reg.h"
> +#include "i915_irq.h"
> #include "i915_trace.h"
>
> /**
> @@ -1803,13 +1804,20 @@ static void wake_up_tlb_invalidate(struct intel_guc_tlb_wait *wait)
> wake_up(&wait->wq);
> }
>
> -void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
> +void wake_up_all_tlb_invalidate(struct intel_guc *guc)
> {
> struct intel_guc_tlb_wait *wait;
> + unsigned long i;
> +
> + xa_for_each(&guc->tlb_lookup, i, wait)
> + wake_up_tlb_invalidate(wait);
> +}
> +
> +void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
> +{
> struct intel_context *ce;
> unsigned long index;
> unsigned long flags;
> - unsigned long i;
>
> if (unlikely(!guc_submission_initialized(guc))) {
> /* Reset called during driver load? GuC not yet initialised! */
> @@ -1840,8 +1848,7 @@ void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stall
> * The full GT reset will have cleared the TLB caches and flushed the
> * G2H message queue; we can release all the blocked waiters.
> */
> - xa_for_each(&guc->tlb_lookup, i, wait)
> - wake_up_tlb_invalidate(wait);
> + wake_up_all_tlb_invalidate(guc);
> }
>
> static void guc_cancel_context_requests(struct intel_context *ce)
> @@ -1937,6 +1944,12 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
>
> /* GuC is blown away, drop all references to contexts */
> xa_destroy(&guc->context_lookup);
> +
> + /*
> + * Wedged GT won't respond to any TLB invalidation request. Simply
> + * release all the blocked waiters.
> + */
> + wake_up_all_tlb_invalidate(guc);
> }
>
> void intel_guc_submission_reset_finish(struct intel_guc *guc)
> @@ -4748,6 +4761,14 @@ static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
> return timeout;
> }
>
> +static bool intel_gt_is_enabled(const struct intel_gt *gt)
> +{
> + /* Check if GT is wedged or suspended */
> + if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
> + return false;
> + return true;
> +}
Name still sucks but at least it is now hidden, okay.
> +
> static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
> {
> struct intel_guc_tlb_wait _wq, *wq = &_wq;
> @@ -4765,7 +4786,8 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
> };
> u32 size = ARRAY_SIZE(action);
>
> - if (!intel_guc_ct_enabled(&guc->ct))
> + if (!intel_guc_ct_enabled(&guc->ct) ||
> + !intel_gt_is_enabled(gt))
> return -EINVAL;
>
> init_waitqueue_head(&_wq.wq);
> @@ -4807,7 +4829,8 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
> * queued in CT buffer.
> */
> #define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
> - if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
> + if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD) &&
> + intel_gt_is_enabled(gt)) {
Order of conditions is okay? Makes sense to first wait and only then
check if "gt is enabled"?
> gt_err(gt,
> "TLB invalidation response timed out for seqno %u\n", seqno);
> err = -ETIME;
> diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
> index 78501a83ba109..f5175103ea900 100644
> --- a/drivers/gpu/drm/i915/i915_driver.c
> +++ b/drivers/gpu/drm/i915/i915_driver.c
> @@ -72,6 +72,7 @@
> #include "gt/intel_gt.h"
> #include "gt/intel_gt_pm.h"
> #include "gt/intel_rc6.h"
> +#include "gt/uc/intel_guc.h"
>
> #include "pxp/intel_pxp.h"
> #include "pxp/intel_pxp_debugfs.h"
> @@ -1092,6 +1093,9 @@ static int i915_drm_suspend(struct drm_device *dev)
> intel_dp_mst_suspend(dev_priv);
>
> intel_runtime_pm_disable_interrupts(dev_priv);
> +
> + wake_up_all_tlb_invalidate(&to_gt(dev_priv)->uc.guc);
> +
> intel_hpd_cancel_work(dev_priv);
>
> intel_suspend_encoders(dev_priv);
> @@ -1263,6 +1267,11 @@ static int i915_drm_resume(struct drm_device *dev)
>
> intel_gvt_resume(dev_priv);
>
> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&to_gt(dev_priv)->uc.guc)) {
> + intel_guc_invalidate_tlb_full(&to_gt(dev_priv)->uc.guc);
> + intel_guc_invalidate_tlb(&to_gt(dev_priv)->uc.guc);
> + }
> +
> enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
>
> return 0;
The suspend/resume bits feel like they should be moved into
intel_gt_suspend|resume_* in order to have proper component organisation.
If that works, those functions likely already have for_each_gt loops, so
patch 3/4 could perhaps be redundant too.
Regards,
Tvrtko
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
` (6 preceding siblings ...)
2023-10-03 10:28 ` [Intel-gfx] [PATCH v3 1/4] " Tvrtko Ursulin
@ 2023-10-03 11:06 ` Tvrtko Ursulin
7 siblings, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2023-10-03 11:06 UTC (permalink / raw)
To: Jonathan Cavitt, intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, chris.p.wilson,
nirmoy.das
Some more comments..
On 02/10/2023 18:24, Jonathan Cavitt wrote:
> From: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
>
> The GuC firmware had defined the interface for Translation Look-Aside
> Buffer (TLB) invalidation. We should use this interface when
> invalidating the engine and GuC TLBs.
> Add additional functionality to intel_gt_invalidate_tlb, invalidating
> the GuC TLBs and falling back to GT invalidation when the GuC is
> disabled.
> The invalidation is done by sending a request directly to the GuC
> tlb_lookup that invalidates the table. The invalidation is submitted as
> a wait request and is performed in the CT event handler. This means we
> cannot perform this TLB invalidation path if the CT is not enabled.
> If the request isn't fulfilled in two seconds, this would constitute
> an error in the invalidation as that would constitute either a lost
> request or a severe GuC overload.
>
> With this new invalidation routine, we can perform GuC-based GGTT
> invalidations. We should only do this when GuC is enabled and fall
> back to the original path when GuC is disabled to prevent concurrent
> issuance between GuC and KMD.
I think the commit message should be improved to describe the mechanism
implemented for waiting and serializing, since it is not really
straightforward. It needs to explain why the mechanism is needed and what
it gains us.
For instance, the existing MMIO invalidation handles one request at a
time, so what does the scheme implemented here, which allows multiple,
get us, when they all still have to wait until the G2H response is
received anyway?
More comments inline below.
> Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> Signed-off-by: Bruce Chang <yu.bruce.chang@intel.com>
> Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
> Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
> Signed-off-by: Aravind Iddamsetty <aravind.iddamsetty@intel.com>
> Signed-off-by: Fei Yang <fei.yang@intel.com>
> CC: Andi Shyti <andi.shyti@linux.intel.com>
> ---
> drivers/gpu/drm/i915/gt/intel_ggtt.c | 43 ++--
> drivers/gpu/drm/i915/gt/intel_tlb.c | 14 +-
> .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 33 +++
> drivers/gpu/drm/i915/gt/uc/intel_guc.h | 22 ++
> drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 9 +
> drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 +
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 212 +++++++++++++++++-
> 7 files changed, 322 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> index 4d7d88b92632b..db5644b0146ca 100644
> --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> @@ -206,22 +206,38 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
> intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
> }
>
> -static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
> +static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
> {
> - struct drm_i915_private *i915 = ggtt->vm.i915;
> + struct intel_uncore *uncore = gt->uncore;
> + intel_wakeref_t wakeref;
>
> - gen8_ggtt_invalidate(ggtt);
> + with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
> + struct intel_guc *guc = >->uc.guc;
>
> - if (GRAPHICS_VER(i915) >= 12) {
> - struct intel_gt *gt;
> + intel_guc_invalidate_tlb(guc);
> + }
> +}
>
> - list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
> - intel_uncore_write_fw(gt->uncore,
> - GEN12_GUC_TLB_INV_CR,
> - GEN12_GUC_TLB_INV_CR_INVALIDATE);
> - } else {
> - intel_uncore_write_fw(ggtt->vm.gt->uncore,
> - GEN8_GTCR, GEN8_GTCR_INVALIDATE);
> +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
> +{
> + struct drm_i915_private *i915 = ggtt->vm.i915;
> + struct intel_gt *gt;
> +
> + if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
> + gen8_ggtt_invalidate(ggtt);
> +
> + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(>->uc.guc) &&
> + intel_guc_is_ready(>->uc.guc)) {
> + guc_ggtt_ct_invalidate(gt);
> + } else if (GRAPHICS_VER(i915) >= 12) {
> + intel_uncore_write(gt->uncore,
> + GEN12_GUC_TLB_INV_CR,
> + GEN12_GUC_TLB_INV_CR_INVALIDATE);
> + } else {
> + intel_uncore_write(gt->uncore,
> + GEN8_GTCR, GEN8_GTCR_INVALIDATE);
> + }
> }
> }
>
> @@ -1243,7 +1259,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
> ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
> }
>
> - if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
> + if (intel_uc_wants_guc(&ggtt->vm.gt->uc) &&
> + intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
> ggtt->invalidate = guc_ggtt_invalidate;
> else
> ggtt->invalidate = gen8_ggtt_invalidate;
> diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
> index 139608c30d978..efe002f14413d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_tlb.c
> +++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
> @@ -12,6 +12,7 @@
> #include "intel_gt_print.h"
> #include "intel_gt_regs.h"
> #include "intel_tlb.h"
> +#include "uc/intel_guc.h"
>
> /*
> * HW architecture suggest typical invalidation time at 40us,
> @@ -131,11 +132,22 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
> return;
>
> with_intel_gt_pm_if_awake(gt, wakeref) {
> + struct intel_guc *guc = >->uc.guc;
> +
> mutex_lock(>->tlb.invalidate_lock);
> if (tlb_seqno_passed(gt, seqno))
> goto unlock;
>
> - mmio_invalidate_full(gt);
> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
> + if (intel_guc_is_ready(guc))
> + intel_guc_invalidate_tlb_full(guc);
> + } else {
> + /*
> + * Fall back to old path if GuC is disabled.
> + * This is safe because GuC is not enabled and not writing to MMIO.
> + */
> + mmio_invalidate_full(gt);
> + }
>
> write_seqcount_invalidate(>->tlb.seqno);
> unlock:
> diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> index f359bef046e0b..9dff8012d5e76 100644
> --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> @@ -138,6 +138,8 @@ enum intel_guc_action {
> INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
> INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
> INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
> + INTEL_GUC_ACTION_TLB_INVALIDATION = 0x7000,
> + INTEL_GUC_ACTION_TLB_INVALIDATION_DONE = 0x7001,
> INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
> INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
> INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
> @@ -181,4 +183,35 @@ enum intel_guc_state_capture_event_status {
>
> #define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
>
> +#define INTEL_GUC_TLB_INVAL_TYPE_MASK REG_GENMASK(7, 0)
> +#define INTEL_GUC_TLB_INVAL_MODE_MASK REG_GENMASK(11, 8)
> +#define INTEL_GUC_TLB_INVAL_FLUSH_CACHE REG_BIT(31)
> +
> +enum intel_guc_tlb_invalidation_type {
> + INTEL_GUC_TLB_INVAL_FULL = 0x0,
> + INTEL_GUC_TLB_INVAL_GUC = 0x3,
> +};
> +
> +/*
> + * 0: Heavy mode of Invalidation:
> + * The pipeline of the engine(s) for which the invalidation is targeted to is
> + * blocked, and all the in-flight transactions are guaranteed to be Globally
> + * Observed before completing the TLB invalidation
> + * 1: Lite mode of Invalidation:
> + * TLBs of the targeted engine(s) are immediately invalidated.
> + * In-flight transactions are NOT guaranteed to be Globally Observed before
> + * completing TLB invalidation.
> + * Light Invalidation Mode is to be used only when
> + * it can be guaranteed (by SW) that the address translations remain invariant
> + * for the in-flight transactions across the TLB invalidation. In other words,
> + * this mode can be used when the TLB invalidation is intended to clear out the
> + * stale cached translations that are no longer in use. Light Invalidation Mode
> + * is much faster than the Heavy Invalidation Mode, as it does not wait for the
> + * in-flight transactions to be GOd.
> + */
> +enum intel_guc_tlb_inval_mode {
> + INTEL_GUC_TLB_INVAL_MODE_HEAVY = 0x0,
> + INTEL_GUC_TLB_INVAL_MODE_LITE = 0x1,
> +};
> +
> #endif /* _ABI_GUC_ACTIONS_ABI_H */
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> index 6c392bad29c19..5fc5e67f870cc 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> @@ -79,6 +79,18 @@ struct intel_guc {
> */
> atomic_t outstanding_submission_g2h;
>
> + /** @tlb_lookup: xarray to store all pending TLB invalidation requests */
> + struct xarray tlb_lookup;
> +
> + /**
> + * @serial_slot: id to the initial waiter created in tlb_lookup,
> + * which is used only when failed to allocate new waiter.
> + */
> + u32 serial_slot;
> +
> + /** @next_seqno: the next id (sequence no.) to allocate. */
> + u32 next_seqno;
> +
> /** @interrupts: pointers to GuC interrupt-managing functions. */
> struct {
> bool enabled;
> @@ -296,6 +308,11 @@ struct intel_guc {
> #define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
> #define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version)
>
> +struct intel_guc_tlb_wait {
> + struct wait_queue_head wq;
> + u8 status;
Does this need to be u8?
Would something like 'busy' be a more self-documenting name?
> +} __aligned(4);
> +
> static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
> {
> return container_of(log, struct intel_guc, log);
> @@ -417,6 +434,11 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc)
> {
> return intel_uc_fw_is_supported(&guc->fw);
> }
> +
> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc);
> +int intel_guc_invalidate_tlb(struct intel_guc *guc);
> +int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32 *hxg,
> + u32 size);
>
> static inline bool intel_guc_is_wanted(struct intel_guc *guc)
> {
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> index 6e22af31513a5..4b29a0b814950 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> @@ -1186,9 +1186,18 @@ static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *requ
> switch (action) {
> case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
> case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
> + case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
> g2h_release_space(ct, request->size);
> }
>
> + /* Handle tlb invalidation response in interrupt context */
> + if (action == INTEL_GUC_ACTION_TLB_INVALIDATION_DONE) {
> + int ret = intel_guc_tlb_invalidation_done(ct_to_guc(ct), hxg, request->size);
> +
> + ct_free_msg(request);
> + return ret;
> + }
> +
> spin_lock_irqsave(&ct->requests.lock, flags);
> list_add_tail(&request->link, &ct->requests.incoming);
> spin_unlock_irqrestore(&ct->requests.lock, flags);
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> index b4d56eccfb1f0..01109d15b779b 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> @@ -22,6 +22,7 @@
> /* Payload length only i.e. don't include G2H header length */
> #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 2
> #define G2H_LEN_DW_DEREGISTER_CONTEXT 1
> +#define G2H_LEN_DW_INVALIDATE_TLB 1
>
> #define GUC_CONTEXT_DISABLE 0
> #define GUC_CONTEXT_ENABLE 1
> @@ -498,4 +499,8 @@ enum intel_guc_recv_message {
> INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
> };
>
> +#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
> + ((intel_guc_ct_enabled(&(guc)->ct)) && \
> + (intel_guc_submission_is_used(guc)) && \
> + (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
> #endif
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index ae3495a9c8146..3478fa73180ab 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -1796,11 +1796,20 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
> intel_context_put(parent);
> }
>
> +static void wake_up_tlb_invalidate(struct intel_guc_tlb_wait *wait)
> +{
> + /* Barrier to ensure the store is observed by the woken thread */
> + smp_store_mb(wait->status, 0);
> + wake_up(&wait->wq);
> +}
> +
> void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
> {
> + struct intel_guc_tlb_wait *wait;
> struct intel_context *ce;
> unsigned long index;
> unsigned long flags;
> + unsigned long i;
>
> if (unlikely(!guc_submission_initialized(guc))) {
> /* Reset called during driver load? GuC not yet initialised! */
> @@ -1826,6 +1835,13 @@ void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stall
>
> /* GuC is blown away, drop all references to contexts */
> xa_destroy(&guc->context_lookup);
> +
> + /*
> + * The full GT reset will have cleared the TLB caches and flushed the
> + * G2H message queue; we can release all the blocked waiters.
> + */
> + xa_for_each(&guc->tlb_lookup, i, wait)
> + wake_up_tlb_invalidate(wait);
> }
>
> static void guc_cancel_context_requests(struct intel_context *ce)
> @@ -1948,6 +1964,41 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
> static void destroyed_worker_func(struct work_struct *w);
> static void reset_fail_worker_func(struct work_struct *w);
>
> +static int init_tlb_lookup(struct intel_guc *guc)
> +{
> + struct intel_guc_tlb_wait *wait;
> + int err;
> +
> + xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
> +
> + wait = kzalloc(sizeof(*wait), GFP_KERNEL);
> + if (!wait)
> + return -ENOMEM;
> +
> + init_waitqueue_head(&wait->wq);
> + err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait,
> + xa_limit_32b, &guc->next_seqno, GFP_KERNEL);
> + if (err == -ENOMEM) {
> + kfree(wait);
> + return err;
> + }
> +
> + return 0;
> +}
> +
> +static void fini_tlb_lookup(struct intel_guc *guc)
> +{
> + struct intel_guc_tlb_wait *wait;
> +
> + wait = xa_load(&guc->tlb_lookup, guc->serial_slot);
> + if (wait) {
> + GEM_BUG_ON(wait->status);
> + kfree(wait);
> + }
> +
> + xa_destroy(&guc->tlb_lookup);
> +}
> +
> /*
> * Set up the memory resources to be shared with the GuC (via the GGTT)
> * at firmware loading time.
> @@ -1966,11 +2017,15 @@ int intel_guc_submission_init(struct intel_guc *guc)
> return ret;
> }
>
> + ret = init_tlb_lookup(guc);
> + if (ret)
> + goto destroy_pool;
> +
> guc->submission_state.guc_ids_bitmap =
> bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> if (!guc->submission_state.guc_ids_bitmap) {
> ret = -ENOMEM;
> - goto destroy_pool;
> + goto destroy_tlb;
> }
>
> guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> @@ -1979,9 +2034,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
>
> return 0;
>
> +destroy_tlb:
> + fini_tlb_lookup(guc);
> destroy_pool:
> guc_lrc_desc_pool_destroy_v69(guc);
> -
> return ret;
> }
>
> @@ -1994,6 +2050,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> guc_lrc_desc_pool_destroy_v69(guc);
> i915_sched_engine_put(guc->sched_engine);
> bitmap_free(guc->submission_state.guc_ids_bitmap);
> + fini_tlb_lookup(guc);
> guc->submission_initialized = false;
> }
>
> @@ -4624,6 +4681,157 @@ g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
> return ce;
> }
>
> +static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno)
> +{
> + struct intel_guc_tlb_wait *wait;
> + unsigned long flags;
> +
> + xa_lock_irqsave(&guc->tlb_lookup, flags);
> + wait = xa_load(&guc->tlb_lookup, seqno);
> +
> + /* We received a response after the waiting task did exit with a timeout */
> + if (unlikely(!wait))
> + drm_dbg(&guc_to_gt(guc)->i915->drm,
> + "Stale TLB invalidation response with seqno %d\n", seqno);
> +
> + if (wait)
> + wake_up_tlb_invalidate(wait);
What is wrong with if-else? Probably just invert it to positive first.
And I am also not sure unlikely annotation is warranted given all the
humongous costs involved in the whole operation.
> +
> + xa_unlock_irqrestore(&guc->tlb_lookup, flags);
> +}
> +
> +int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32 *hxg, u32 size)
> +{
> + u32 seqno, hxg_len, len;
> +
> + /*
> + * FIXME: these calculations would be better done signed. That
> + * way underflow can be detected as well.
> + */
> + hxg_len = size - GUC_CTB_MSG_MIN_LEN;
> + len = hxg_len - GUC_HXG_MSG_MIN_LEN;
> +
> + if (unlikely(len < 1))
> + return -EPROTO;
> +
> + seqno = hxg[GUC_HXG_MSG_MIN_LEN];
> + wait_wake_outstanding_tlb_g2h(guc, seqno);
> + return 0;
> +}
> +
> +static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
> +{
> + /*
> + * This is equivalent to wait_woken() with the exception that
> + * we do not wake up early if the kthread task has been completed.
> + * As we are called from page reclaim in any task context,
> + * we may be invoked from stopped kthreads, but we *must*
> + * complete the wait from the HW .
> + *
> + * A second problem is that since we are called under reclaim
> + * and wait_woken() inspected the thread state, it makes an invalid
> + * assumption that all PF_KTHREAD tasks have set_kthread_struct()
> + * called upon them, and will trigger a GPF in is_kthread_should_stop().
> + */
> + do {
> + set_current_state(TASK_UNINTERRUPTIBLE);
> + if (wq_entry->flags & WQ_FLAG_WOKEN)
> + break;
> +
> + timeout = schedule_timeout(timeout);
> + } while (timeout);
> + __set_current_state(TASK_RUNNING);
> +
> + /* See wait_woken() and woken_wake_function() */
> + smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN);
> +
> + return timeout;
> +}
> +
> +static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
> +{
> + struct intel_guc_tlb_wait _wq, *wq = &_wq;
> + DEFINE_WAIT_FUNC(wait, woken_wake_function);
> + struct intel_gt *gt = guc_to_gt(guc);
> + int err = 0;
> + u32 seqno;
> + u32 action[] = {
> + INTEL_GUC_ACTION_TLB_INVALIDATION,
> + 0,
> + REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
> + REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
> + INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
> + INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
> + };
> + u32 size = ARRAY_SIZE(action);
> +
> + if (!intel_guc_ct_enabled(&guc->ct))
> + return -EINVAL;
> +
> + init_waitqueue_head(&_wq.wq);
> +
> + if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq,
> + xa_limit_32b, &guc->next_seqno,
> + GFP_ATOMIC | __GFP_NOWARN) < 0) {
> + /* Under severe memory pressure? Serialise TLB allocations */
> + xa_lock_irq(&guc->tlb_lookup);
> + wq = xa_load(&guc->tlb_lookup, guc->serial_slot);
> + wait_event_lock_irq(wq->wq,
> + !READ_ONCE(wq->status),
> + guc->tlb_lookup.xa_lock);
> + /*
> + * Update wq->status under lock to ensure only one waiter can
> + * issue the TLB invalidation command using the serial slot at a
> + * time. The condition is set to false before releasing the lock
> + * so that other caller continue to wait until woken up again.
"set to false" refers to the wake up path? Given how below is setting it
to true.
Regards,
Tvrtko
> + */
> + wq->status = 1;
> + xa_unlock_irq(&guc->tlb_lookup);
> +
> + seqno = guc->serial_slot;
> + }
> +
> + action[1] = seqno;
> +
> + add_wait_queue(&wq->wq, &wait);
> +
> + err = intel_guc_send_busy_loop(guc, action, size, G2H_LEN_DW_INVALIDATE_TLB, true);
> + if (err)
> + goto out;
> +
> + /*
> + * GuC has a timeout of 1ms for a TLB invalidation response from GAM. On a
> + * timeout GuC drops the request and has no mechanism to notify the host about
> + * the timeout. So keep a larger timeout that accounts for this individual
> + * timeout and max number of outstanding invalidation requests that can be
> + * queued in CT buffer.
> + */
> +#define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
> + if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
> + gt_err(gt,
> + "TLB invalidation response timed out for seqno %u\n", seqno);
> + err = -ETIME;
> + }
> +out:
> + remove_wait_queue(&wq->wq, &wait);
> + if (seqno != guc->serial_slot)
> + xa_erase_irq(&guc->tlb_lookup, seqno);
> +
> + return err;
> +}
> +
> +/* Full TLB invalidation */
> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc)
> +{
> + return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_FULL);
> +}
> +
> +/* GuC TLB Invalidation: Invalidate the TLB's of GuC itself. */
> +int intel_guc_invalidate_tlb(struct intel_guc *guc)
> +{
> + return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
> +}
> +
> int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
> const u32 *msg,
> u32 len)
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume Jonathan Cavitt
@ 2023-10-03 11:48 ` Jani Nikula
2023-10-03 15:59 ` Andi Shyti
0 siblings, 1 reply; 21+ messages in thread
From: Jani Nikula @ 2023-10-03 11:48 UTC (permalink / raw)
To: Jonathan Cavitt, intel-gfx
Cc: janusz.krzysztofik, andi.shyti, chris.p.wilson, jonathan.cavitt,
matthew.d.roper, nirmoy.das
On Mon, 02 Oct 2023, Jonathan Cavitt <jonathan.cavitt@intel.com> wrote:
> Consider multi-gt support when cancelling all tlb invalidations on
> suspend, and when submitting tlb invalidations on resume.
>
> Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> Signed-off-by: Fei Yang <fei.yang@intel.com>
> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
> CC: John Harrison <John.C.Harrison@Intel.com>
I guess I'm wondering why the top level suspend hook needs to iterate
gts instead of some lower level thing. We should aim to reduce
gem/gt/display details from the top level.
BR,
Jani.
> ---
> drivers/gpu/drm/i915/i915_driver.c | 13 +++++++++----
> 1 file changed, 9 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
> index f5175103ea900..d7655a7b60eda 100644
> --- a/drivers/gpu/drm/i915/i915_driver.c
> +++ b/drivers/gpu/drm/i915/i915_driver.c
> @@ -1077,6 +1077,8 @@ static int i915_drm_suspend(struct drm_device *dev)
> struct drm_i915_private *dev_priv = to_i915(dev);
> struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
> pci_power_t opregion_target_state;
> + struct intel_gt *gt;
> + int i;
>
> disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
>
> @@ -1094,7 +1096,8 @@ static int i915_drm_suspend(struct drm_device *dev)
>
> intel_runtime_pm_disable_interrupts(dev_priv);
>
> - wake_up_all_tlb_invalidate(&to_gt(dev_priv)->uc.guc);
> + for_each_gt(gt, dev_priv, i)
> + wake_up_all_tlb_invalidate(>->uc.guc);
>
> intel_hpd_cancel_work(dev_priv);
>
> @@ -1267,9 +1270,11 @@ static int i915_drm_resume(struct drm_device *dev)
>
> intel_gvt_resume(dev_priv);
>
> - if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&to_gt(dev_priv)->uc.guc)) {
> - intel_guc_invalidate_tlb_full(&to_gt(dev_priv)->uc.guc);
> - intel_guc_invalidate_tlb(&to_gt(dev_priv)->uc.guc);
> + for_each_gt(gt, dev_priv, i) {
> + if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION(>->uc.guc))
> + continue;
> + intel_guc_invalidate_tlb_full(>->uc.guc);
> + intel_guc_invalidate_tlb(>->uc.guc);
> }
>
> enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
--
Jani Nikula, Intel
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT Jonathan Cavitt
2023-10-03 10:35 ` Tvrtko Ursulin
@ 2023-10-03 11:50 ` Jani Nikula
1 sibling, 0 replies; 21+ messages in thread
From: Jani Nikula @ 2023-10-03 11:50 UTC (permalink / raw)
To: Jonathan Cavitt, intel-gfx
Cc: janusz.krzysztofik, andi.shyti, chris.p.wilson, jonathan.cavitt,
matthew.d.roper, nirmoy.das
On Mon, 02 Oct 2023, Jonathan Cavitt <jonathan.cavitt@intel.com> wrote:
> From: Fei Yang <fei.yang@intel.com>
>
> In case of GT is suspended or wedged, don't allow submission of new TLB
> invalidation request and cancel all pending requests. The TLB entries
> will be invalidated either during GuC reload or on system resume.
>
> Signed-off-by: Fei Yang <fei.yang@intel.com>
> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
> CC: John Harrison <john.c.harrison@intel.com>
> ---
> drivers/gpu/drm/i915/gt/uc/intel_guc.h | 1 +
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 35 +++++++++++++++----
> drivers/gpu/drm/i915/i915_driver.c | 9 +++++
> 3 files changed, 39 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> index 5fc5e67f870cc..0cdc7ca66861c 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> @@ -536,4 +536,5 @@ void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
>
> int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);
>
> +void wake_up_all_tlb_invalidate(struct intel_guc *guc);
> #endif
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 3478fa73180ab..2f194cadbe553 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -32,6 +32,7 @@
>
> #include "i915_drv.h"
> #include "i915_reg.h"
> +#include "i915_irq.h"
> #include "i915_trace.h"
>
> /**
> @@ -1803,13 +1804,20 @@ static void wake_up_tlb_invalidate(struct intel_guc_tlb_wait *wait)
> wake_up(&wait->wq);
> }
>
> -void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
> +void wake_up_all_tlb_invalidate(struct intel_guc *guc)
> {
> struct intel_guc_tlb_wait *wait;
> + unsigned long i;
> +
> + xa_for_each(&guc->tlb_lookup, i, wait)
> + wake_up_tlb_invalidate(wait);
> +}
> +
> +void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
> +{
> struct intel_context *ce;
> unsigned long index;
> unsigned long flags;
> - unsigned long i;
>
> if (unlikely(!guc_submission_initialized(guc))) {
> /* Reset called during driver load? GuC not yet initialised! */
> @@ -1840,8 +1848,7 @@ void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stall
> * The full GT reset will have cleared the TLB caches and flushed the
> * G2H message queue; we can release all the blocked waiters.
> */
> - xa_for_each(&guc->tlb_lookup, i, wait)
> - wake_up_tlb_invalidate(wait);
> + wake_up_all_tlb_invalidate(guc);
> }
>
> static void guc_cancel_context_requests(struct intel_context *ce)
> @@ -1937,6 +1944,12 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
>
> /* GuC is blown away, drop all references to contexts */
> xa_destroy(&guc->context_lookup);
> +
> + /*
> + * Wedged GT won't respond to any TLB invalidation request. Simply
> + * release all the blocked waiters.
> + */
> + wake_up_all_tlb_invalidate(guc);
> }
>
> void intel_guc_submission_reset_finish(struct intel_guc *guc)
> @@ -4748,6 +4761,14 @@ static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
> return timeout;
> }
>
> +static bool intel_gt_is_enabled(const struct intel_gt *gt)
> +{
> + /* Check if GT is wedged or suspended */
> + if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
> + return false;
> + return true;
> +}
> +
> static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
> {
> struct intel_guc_tlb_wait _wq, *wq = &_wq;
> @@ -4765,7 +4786,8 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
> };
> u32 size = ARRAY_SIZE(action);
>
> - if (!intel_guc_ct_enabled(&guc->ct))
> + if (!intel_guc_ct_enabled(&guc->ct) ||
> + !intel_gt_is_enabled(gt))
> return -EINVAL;
>
> init_waitqueue_head(&_wq.wq);
> @@ -4807,7 +4829,8 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
> * queued in CT buffer.
> */
> #define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
> - if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
> + if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD) &&
> + intel_gt_is_enabled(gt)) {
> gt_err(gt,
> "TLB invalidation response timed out for seqno %u\n", seqno);
> err = -ETIME;
> diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
> index 78501a83ba109..f5175103ea900 100644
> --- a/drivers/gpu/drm/i915/i915_driver.c
> +++ b/drivers/gpu/drm/i915/i915_driver.c
> @@ -72,6 +72,7 @@
> #include "gt/intel_gt.h"
> #include "gt/intel_gt_pm.h"
> #include "gt/intel_rc6.h"
> +#include "gt/uc/intel_guc.h"
>
> #include "pxp/intel_pxp.h"
> #include "pxp/intel_pxp_debugfs.h"
> @@ -1092,6 +1093,9 @@ static int i915_drm_suspend(struct drm_device *dev)
> intel_dp_mst_suspend(dev_priv);
>
> intel_runtime_pm_disable_interrupts(dev_priv);
> +
> + wake_up_all_tlb_invalidate(&to_gt(dev_priv)->uc.guc);
Per the name I would assume this is a static function. Not great.
Why does the top level suspend function need to know
&to_gt(dev_priv)->uc.guc? That's low level implementation details that
should be hidden away.
> +
> intel_hpd_cancel_work(dev_priv);
>
> intel_suspend_encoders(dev_priv);
> @@ -1263,6 +1267,11 @@ static int i915_drm_resume(struct drm_device *dev)
>
> intel_gvt_resume(dev_priv);
>
> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&to_gt(dev_priv)->uc.guc)) {
> + intel_guc_invalidate_tlb_full(&to_gt(dev_priv)->uc.guc);
> + intel_guc_invalidate_tlb(&to_gt(dev_priv)->uc.guc);
> + }
Why does the top level resume hook need to check for guc details? Have
one function that does what you need, call it, and check for the
constraints there instead of at the top level.
BR,
Jani.
> +
> enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
>
> return 0;
--
Jani Nikula, Intel
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume
2023-10-03 11:48 ` Jani Nikula
@ 2023-10-03 15:59 ` Andi Shyti
2023-10-03 18:52 ` John Harrison
0 siblings, 1 reply; 21+ messages in thread
From: Andi Shyti @ 2023-10-03 15:59 UTC (permalink / raw)
To: Jani Nikula
Cc: janusz.krzysztofik, chris.p.wilson, intel-gfx, Jonathan Cavitt,
matthew.d.roper, nirmoy.das
Hi Jani,
> > Consider multi-gt support when cancelling all tlb invalidations on
> > suspend, and when submitting tlb invalidations on resume.
> >
> > Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> > Signed-off-by: Fei Yang <fei.yang@intel.com>
> > Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
> > CC: John Harrison <John.C.Harrison@Intel.com>
>
> I guess I'm wondering why the top level suspend hook needs to iterate
> gts instead of some lower level thing. We should aim to reduce
> gem/gt/display details from the top level.
I'm not sure I am understanding the question.
The TLB invalidation details are kept under the GT. But when
suspend is called, then the GT invalidation has to be triggered
by the top levels for each GT. Right?
Thanks,
Andi
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-03 10:28 ` [Intel-gfx] [PATCH v3 1/4] " Tvrtko Ursulin
@ 2023-10-03 16:41 ` Andi Shyti
2023-10-03 20:29 ` John Harrison
2023-10-04 7:54 ` Tvrtko Ursulin
2023-10-03 20:23 ` John Harrison
1 sibling, 2 replies; 21+ messages in thread
From: Andi Shyti @ 2023-10-03 16:41 UTC (permalink / raw)
To: Tvrtko Ursulin
Cc: janusz.krzysztofik, matthew.d.roper, intel-gfx, Jonathan Cavitt,
chris.p.wilson, nirmoy.das
Hi,
[...]
> > +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
> > +{
> > + struct drm_i915_private *i915 = ggtt->vm.i915;
> > + struct intel_gt *gt;
> > +
> > + if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
> > + gen8_ggtt_invalidate(ggtt);
> > +
> > + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
> > + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(>->uc.guc) &&
> > + intel_guc_is_ready(>->uc.guc)) {
>
> The condition here expands to a relatively heavy one:
>
> +#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
> + ((intel_guc_ct_enabled(&(guc)->ct)) && \
> + (intel_guc_submission_is_used(guc)) && \
> + (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
>
>
> &&
>
> static inline bool intel_guc_is_ready(struct intel_guc *guc)
> {
> return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
> }
>
> intel_guc_ct_enabled is even duplicated.
Maybe this is a smaller set?
if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(>->uc.guc) &&
intel_guc_is_fw_running(>->uc.guc))
The last condition includes intel_guc_submission_is_used() from
the macro.
> Is there scope to consolidate the parts which are platform invariant, or even runtime invariant, or at least guaranteed not to transition back and forth but one way only?
>
> In other words, if we know during init we will want it, mark it as a flag in intel_guc or somewhere, and then at runtime do only those conditions which can transition back and forth due driver flows.
>
> I am not saying this is performance sensitive, but in terms of elegance, readability and self-documentation the proposed version looks a bit sub-optimal to me.
Are you suggesting some PCI flag? This actually applies only
to MTL.
> > + guc_ggtt_ct_invalidate(gt);
> > + } else if (GRAPHICS_VER(i915) >= 12) {
> > + intel_uncore_write(gt->uncore,
> > + GEN12_GUC_TLB_INV_CR,
> > + GEN12_GUC_TLB_INV_CR_INVALIDATE);
> > + } else {
> > + intel_uncore_write(gt->uncore,
> > + GEN8_GTCR, GEN8_GTCR_INVALIDATE);
> > + }
[...]
> > - mmio_invalidate_full(gt);
> > + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
> > + if (intel_guc_is_ready(guc))
> > + intel_guc_invalidate_tlb_full(guc);
> > + } else {
> > + /*
> > + * Fall back to old path if GuC is disabled.
> > + * This is safe because GuC is not enabled and not writing to MMIO.
> > + */
>
> It is safe for intel_guc_is_ready() transitioning from false to true during GuC init? No way for some path to start issuing invalidations as that is happening?
>
> > + mmio_invalidate_full(gt);
> > + }
supernitpick: while we are at it, brackets are not required.
Andi
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume
2023-10-03 15:59 ` Andi Shyti
@ 2023-10-03 18:52 ` John Harrison
2023-10-04 7:34 ` Jani Nikula
0 siblings, 1 reply; 21+ messages in thread
From: John Harrison @ 2023-10-03 18:52 UTC (permalink / raw)
To: Andi Shyti, Jani Nikula
Cc: janusz.krzysztofik, chris.p.wilson, intel-gfx, Jonathan Cavitt,
matthew.d.roper, nirmoy.das
On 10/3/2023 08:59, Andi Shyti wrote:
> Hi Jani,
>
>>> Consider multi-gt support when cancelling all tlb invalidations on
>>> suspend, and when submitting tlb invalidations on resume.
>>>
>>> Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>>> Signed-off-by: Fei Yang <fei.yang@intel.com>
>>> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
>>> CC: John Harrison <John.C.Harrison@Intel.com>
>> I guess I'm wondering why the top level suspend hook needs to iterate
>> gts instead of some lower level thing. We should aim to reduce
>> gem/gt/display details from the top level.
> I'm not sure I am understanding the question.
>
> The TLB invalidation details are kept under the GT. But when
> suspend is called, then the GT invalidation has to be triggered
> by the top levels for each GT. Right?
I think Jani's point is that the top level should be:
i915_drm_suspend(...) {
...
intel_tlb_suspend(dev_priv);
}
Then the TLB suspend helper function calls into the GT / UC layers as
appropriate. But none of that internal only detail is exposed at the top
level.
John.
>
> Thanks,
> Andi
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-03 10:28 ` [Intel-gfx] [PATCH v3 1/4] " Tvrtko Ursulin
2023-10-03 16:41 ` Andi Shyti
@ 2023-10-03 20:23 ` John Harrison
2023-10-04 8:12 ` Tvrtko Ursulin
1 sibling, 1 reply; 21+ messages in thread
From: John Harrison @ 2023-10-03 20:23 UTC (permalink / raw)
To: Tvrtko Ursulin, Jonathan Cavitt, intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, chris.p.wilson,
nirmoy.das
On 10/3/2023 03:28, Tvrtko Ursulin wrote:
> On 02/10/2023 18:24, Jonathan Cavitt wrote:
>> From: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
>>
>> The GuC firmware had defined the interface for Translation Look-Aside
>> Buffer (TLB) invalidation. We should use this interface when
>> invalidating the engine and GuC TLBs.
>> Add additional functionality to intel_gt_invalidate_tlb, invalidating
>> the GuC TLBs and falling back to GT invalidation when the GuC is
>> disabled.
>> The invalidation is done by sending a request directly to the GuC
>> tlb_lookup that invalidates the table. The invalidation is submitted as
>> a wait request and is performed in the CT event handler. This means we
>> cannot perform this TLB invalidation path if the CT is not enabled.
>> If the request isn't fulfilled in two seconds, this would constitute
>> an error in the invalidation as that would constitute either a lost
>> request or a severe GuC overload.
>>
>> With this new invalidation routine, we can perform GuC-based GGTT
>> invalidations. We should only do this when GuC is enabled and fall
>> back to the original path when GuC is disabled to prevent concurrent
>> issuance between GuC and KMD.
>>
>> Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
>> Signed-off-by: Bruce Chang <yu.bruce.chang@intel.com>
>> Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
>> Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
>> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
>> Signed-off-by: Aravind Iddamsetty <aravind.iddamsetty@intel.com>
>> Signed-off-by: Fei Yang <fei.yang@intel.com>
>> CC: Andi Shyti <andi.shyti@linux.intel.com>
>> ---
>> drivers/gpu/drm/i915/gt/intel_ggtt.c | 43 ++--
>> drivers/gpu/drm/i915/gt/intel_tlb.c | 14 +-
>> .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 33 +++
>> drivers/gpu/drm/i915/gt/uc/intel_guc.h | 22 ++
>> drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 9 +
>> drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 +
>> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 212 +++++++++++++++++-
>> 7 files changed, 322 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c
>> b/drivers/gpu/drm/i915/gt/intel_ggtt.c
>> index 4d7d88b92632b..db5644b0146ca 100644
>> --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
>> +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
>> @@ -206,22 +206,38 @@ static void gen8_ggtt_invalidate(struct
>> i915_ggtt *ggtt)
>> intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
>> GFX_FLSH_CNTL_EN);
>> }
>> -static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
>> +static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
>> {
>> - struct drm_i915_private *i915 = ggtt->vm.i915;
>> + struct intel_uncore *uncore = gt->uncore;
>> + intel_wakeref_t wakeref;
>> - gen8_ggtt_invalidate(ggtt);
>> + with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
>> + struct intel_guc *guc = >->uc.guc;
>> - if (GRAPHICS_VER(i915) >= 12) {
>> - struct intel_gt *gt;
>> + intel_guc_invalidate_tlb(guc);
>> + }
>> +}
>> - list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
>> - intel_uncore_write_fw(gt->uncore,
>> - GEN12_GUC_TLB_INV_CR,
>> - GEN12_GUC_TLB_INV_CR_INVALIDATE);
>> - } else {
>> - intel_uncore_write_fw(ggtt->vm.gt->uncore,
>> - GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>> +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
>> +{
>> + struct drm_i915_private *i915 = ggtt->vm.i915;
>> + struct intel_gt *gt;
>> +
>> + if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
>> + gen8_ggtt_invalidate(ggtt);
>> +
>> + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
>> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(>->uc.guc) &&
>> + intel_guc_is_ready(>->uc.guc)) {
>
> The condition here expands to a relatively heavy one:
>
> +#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
> + ((intel_guc_ct_enabled(&(guc)->ct)) && \
> + (intel_guc_submission_is_used(guc)) && \
> + (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
>
>
> &&
>
> static inline bool intel_guc_is_ready(struct intel_guc *guc)
> {
> return intel_guc_is_fw_running(guc) &&
> intel_guc_ct_enabled(&guc->ct);
> }
>
> intel_guc_ct_enabled is even duplicated.
>
> Is there scope to consolidate the parts which are platform invariant,
> or even runtime invariant, or at least guaranteed not to transition
> back and forth but one way only?
>
> In other words, if we know during init we will want it, mark it as a
> flag in intel_guc or somewhere, and then at runtime do only those
> conditions which can transition back and forth due driver flows.
>
> I am not saying this is performance sensitive, but in terms of
> elegance, readability and self-documentation the proposed version
> looks a bit sub-optimal to me.
>
>> + guc_ggtt_ct_invalidate(gt);
>> + } else if (GRAPHICS_VER(i915) >= 12) {
>> + intel_uncore_write(gt->uncore,
>> + GEN12_GUC_TLB_INV_CR,
>> + GEN12_GUC_TLB_INV_CR_INVALIDATE);
>> + } else {
>> + intel_uncore_write(gt->uncore,
>> + GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>> + }
>> }
>> }
>> @@ -1243,7 +1259,8 @@ static int gen8_gmch_probe(struct i915_ggtt
>> *ggtt)
>> ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
>> }
>> - if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
>> + if (intel_uc_wants_guc(&ggtt->vm.gt->uc) &&
>> + intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
>
> Is 2nd condition perhaps a superset of the 1st?
>
>> ggtt->invalidate = guc_ggtt_invalidate;
>> else
>> ggtt->invalidate = gen8_ggtt_invalidate;
>> diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c
>> b/drivers/gpu/drm/i915/gt/intel_tlb.c
>> index 139608c30d978..efe002f14413d 100644
>> --- a/drivers/gpu/drm/i915/gt/intel_tlb.c
>> +++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
>> @@ -12,6 +12,7 @@
>> #include "intel_gt_print.h"
>> #include "intel_gt_regs.h"
>> #include "intel_tlb.h"
>> +#include "uc/intel_guc.h"
>> /*
>> * HW architecture suggest typical invalidation time at 40us,
>> @@ -131,11 +132,22 @@ void intel_gt_invalidate_tlb_full(struct
>> intel_gt *gt, u32 seqno)
>> return;
>> with_intel_gt_pm_if_awake(gt, wakeref) {
>> + struct intel_guc *guc = >->uc.guc;
>> +
>> mutex_lock(>->tlb.invalidate_lock);
>> if (tlb_seqno_passed(gt, seqno))
>> goto unlock;
>> - mmio_invalidate_full(gt);
>> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
>> + if (intel_guc_is_ready(guc))
>> + intel_guc_invalidate_tlb_full(guc);
>> + } else {
>> + /*
>> + * Fall back to old path if GuC is disabled.
>> + * This is safe because GuC is not enabled and not
>> writing to MMIO.
>> + */
>
> It is safe for intel_guc_is_ready() transitioning from false to true
> during GuC init? No way for some path to start issuing invalidations
> as that is happening?
Are you concerned that it will take the GuC path too early? Or that it
will take the MMIO path too late?
The guc_is_ready() helper should only return true if we are genuinely
ready to start sending H2G messages. So even if the GuC init sequence is
still in progress in another thread, it should be valid to send GuC
based inval requests from this thread if the test returns true.
I'm not sure about the 'safe' comment which seems to be implying it is
unsafe to use MMIO based invalidations if GuC is active. That is surely
exactly what we are currently doing all the time? Prior to this patch,
all invalidations are MMIO based because that is the only code we have.
So it really should be safe to keep doing that otherwise why didn't we
implement GuC invalidations as part of the initial GuC enablement?
>
>> + mmio_invalidate_full(gt);
>> + }
>> write_seqcount_invalidate(>->tlb.seqno);
>> unlock:
>> diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>> b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>> index f359bef046e0b..9dff8012d5e76 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>> +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>> @@ -138,6 +138,8 @@ enum intel_guc_action {
>> INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
>> INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
>> INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
>> + INTEL_GUC_ACTION_TLB_INVALIDATION = 0x7000,
>> + INTEL_GUC_ACTION_TLB_INVALIDATION_DONE = 0x7001,
>> INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
>> INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
>> INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
>> @@ -181,4 +183,35 @@ enum intel_guc_state_capture_event_status {
>> #define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
>> +#define INTEL_GUC_TLB_INVAL_TYPE_MASK REG_GENMASK(7, 0)
>> +#define INTEL_GUC_TLB_INVAL_MODE_MASK REG_GENMASK(11, 8)
>> +#define INTEL_GUC_TLB_INVAL_FLUSH_CACHE REG_BIT(31)
>> +
>> +enum intel_guc_tlb_invalidation_type {
>> + INTEL_GUC_TLB_INVAL_FULL = 0x0,
>> + INTEL_GUC_TLB_INVAL_GUC = 0x3,
>> +};
>> +
>> +/*
>> + * 0: Heavy mode of Invalidation:
>> + * The pipeline of the engine(s) for which the invalidation is
>> targeted to is
>> + * blocked, and all the in-flight transactions are guaranteed to be
>> Globally
>> + * Observed before completing the TLB invalidation
>> + * 1: Lite mode of Invalidation:
>> + * TLBs of the targeted engine(s) are immediately invalidated.
>> + * In-flight transactions are NOT guaranteed to be Globally Observed
>> before
>> + * completing TLB invalidation.
>> + * Light Invalidation Mode is to be used only when
>> + * it can be guaranteed (by SW) that the address translations remain
>> invariant
>> + * for the in-flight transactions across the TLB invalidation. In
>> other words,
>> + * this mode can be used when the TLB invalidation is intended to
>> clear out the
>> + * stale cached translations that are no longer in use. Light
>> Invalidation Mode
>> + * is much faster than the Heavy Invalidation Mode, as it does not
>> wait for the
>> + * in-flight transactions to be GOd.
>> + */
>> +enum intel_guc_tlb_inval_mode {
>> + INTEL_GUC_TLB_INVAL_MODE_HEAVY = 0x0,
>> + INTEL_GUC_TLB_INVAL_MODE_LITE = 0x1,
>> +};
>> +
>> #endif /* _ABI_GUC_ACTIONS_ABI_H */
>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>> b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>> index 6c392bad29c19..5fc5e67f870cc 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>> @@ -79,6 +79,18 @@ struct intel_guc {
>> */
>> atomic_t outstanding_submission_g2h;
>> + /** @tlb_lookup: xarray to store all pending TLB invalidation
>> requests */
>> + struct xarray tlb_lookup;
>> +
>> + /**
>> + * @serial_slot: id to the initial waiter created in tlb_lookup,
>> + * which is used only when failed to allocate new waiter.
>> + */
>> + u32 serial_slot;
>> +
>> + /** @next_seqno: the next id (sequence no.) to allocate. */
>> + u32 next_seqno;
>> +
>> /** @interrupts: pointers to GuC interrupt-managing functions. */
>> struct {
>> bool enabled;
>> @@ -296,6 +308,11 @@ struct intel_guc {
>> #define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major,
>> (ver).minor, (ver).patch)
>> #define GUC_SUBMIT_VER(guc)
>> MAKE_GUC_VER_STRUCT((guc)->submission_version)
>> +struct intel_guc_tlb_wait {
>> + struct wait_queue_head wq;
>> + u8 status;
>> +} __aligned(4);
>
> Put a comment here please stating why it needs to be aligned.
>
>> +
>> static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
>> {
>> return container_of(log, struct intel_guc, log);
>> @@ -417,6 +434,11 @@ static inline bool intel_guc_is_supported(struct
>> intel_guc *guc)
>> {
>> return intel_uc_fw_is_supported(&guc->fw);
>> }
>> +
>> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc);
>> +int intel_guc_invalidate_tlb(struct intel_guc *guc);
>> +int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32
>> *hxg,
>> + u32 size);
>> static inline bool intel_guc_is_wanted(struct intel_guc *guc)
>> {
>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>> b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>> index 6e22af31513a5..4b29a0b814950 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>> @@ -1186,9 +1186,18 @@ static int ct_handle_event(struct intel_guc_ct
>> *ct, struct ct_incoming_msg *requ
>> switch (action) {
>> case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
>> case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
>> + case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
>> g2h_release_space(ct, request->size);
>> }
>> + /* Handle tlb invalidation response in interrupt context */
>> + if (action == INTEL_GUC_ACTION_TLB_INVALIDATION_DONE) {
>> + int ret = intel_guc_tlb_invalidation_done(ct_to_guc(ct),
>> hxg, request->size);
>> +
>> + ct_free_msg(request);
>> + return ret;
>> + }
>
> 1)
> Can the comment say why it is important to handle these in a special
> path instead of re-using the existing worker/list?
>
> 2)
> Could it instead of duplicating some h2g logic in
> intel_guc_tlb_invalidation_done call ct_process_request, and so handle
> all actions in a centralized place?
Not sure what logic you are seeing duplicated? You mean the calculation
of the message length? That could be moved out into the above CT layer
so only the payload size is passed in to the TLB handler. I'm not seeing
anything else that is duplicated.
But yes, it should be possible to just call ct_process_request directly.
It doesn't look like it does any extra processing that requires it to be
outside of the ISR (as long as it is guaranteed that it never tries to
process anything other than a TLB inval done message that is). So yeah,
that's probably the cleaner solution.
>
>> +
>> spin_lock_irqsave(&ct->requests.lock, flags);
>> list_add_tail(&request->link, &ct->requests.incoming);
>> spin_unlock_irqrestore(&ct->requests.lock, flags);
>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>> b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>> index b4d56eccfb1f0..01109d15b779b 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>> @@ -22,6 +22,7 @@
>> /* Payload length only i.e. don't include G2H header length */
>> #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 2
>> #define G2H_LEN_DW_DEREGISTER_CONTEXT 1
>> +#define G2H_LEN_DW_INVALIDATE_TLB 1
>> #define GUC_CONTEXT_DISABLE 0
>> #define GUC_CONTEXT_ENABLE 1
>> @@ -498,4 +499,8 @@ enum intel_guc_recv_message {
>> INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
>> };
>> +#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
>> + ((intel_guc_ct_enabled(&(guc)->ct)) && \
>> + (intel_guc_submission_is_used(guc)) && \
>> + (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
>> #endif
>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>> b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>> index ae3495a9c8146..3478fa73180ab 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>> @@ -1796,11 +1796,20 @@ static void __guc_reset_context(struct
>> intel_context *ce, intel_engine_mask_t st
>> intel_context_put(parent);
>> }
>> +static void wake_up_tlb_invalidate(struct intel_guc_tlb_wait *wait)
>> +{
>> + /* Barrier to ensure the store is observed by the woken thread */
>> + smp_store_mb(wait->status, 0);
>
> Is the memory barrier required, given the main caller is from a
> spinlocked section?
>
>> + wake_up(&wait->wq);
>> +}
>> +
>> void intel_guc_submission_reset(struct intel_guc *guc,
>> intel_engine_mask_t stalled)
>> {
>> + struct intel_guc_tlb_wait *wait;
>> struct intel_context *ce;
>> unsigned long index;
>> unsigned long flags;
>> + unsigned long i;
>> if (unlikely(!guc_submission_initialized(guc))) {
>> /* Reset called during driver load? GuC not yet
>> initialised! */
>> @@ -1826,6 +1835,13 @@ void intel_guc_submission_reset(struct
>> intel_guc *guc, intel_engine_mask_t stall
>> /* GuC is blown away, drop all references to contexts */
>> xa_destroy(&guc->context_lookup);
>> +
>> + /*
>> + * The full GT reset will have cleared the TLB caches and
>> flushed the
>> + * G2H message queue; we can release all the blocked waiters.
>> + */
>> + xa_for_each(&guc->tlb_lookup, i, wait)
>> + wake_up_tlb_invalidate(wait);
>> }
>> static void guc_cancel_context_requests(struct intel_context *ce)
>> @@ -1948,6 +1964,41 @@ void intel_guc_submission_reset_finish(struct
>> intel_guc *guc)
>> static void destroyed_worker_func(struct work_struct *w);
>> static void reset_fail_worker_func(struct work_struct *w);
>> +static int init_tlb_lookup(struct intel_guc *guc)
>> +{
>> + struct intel_guc_tlb_wait *wait;
>> + int err;
>> +
>> + xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
>> +
>> + wait = kzalloc(sizeof(*wait), GFP_KERNEL);
>> + if (!wait)
>> + return -ENOMEM;
>> +
>> + init_waitqueue_head(&wait->wq);
>> + err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot,
>> wait,
>> + xa_limit_32b, &guc->next_seqno, GFP_KERNEL);
>
> For what is this id, allocated during init, used and when it gets freed?
>
>> + if (err == -ENOMEM) {
>> + kfree(wait);
>> + return err;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static void fini_tlb_lookup(struct intel_guc *guc)
>> +{
>> + struct intel_guc_tlb_wait *wait;
>> +
>> + wait = xa_load(&guc->tlb_lookup, guc->serial_slot);
>> + if (wait) {
>> + GEM_BUG_ON(wait->status);
>> + kfree(wait);
>> + }
>> +
>> + xa_destroy(&guc->tlb_lookup);
>> +}
>> +
>> /*
>> * Set up the memory resources to be shared with the GuC (via the
>> GGTT)
>> * at firmware loading time.
>> @@ -1966,11 +2017,15 @@ int intel_guc_submission_init(struct
>> intel_guc *guc)
>> return ret;
>> }
>> + ret = init_tlb_lookup(guc);
>> + if (ret)
>> + goto destroy_pool;
>> +
>> guc->submission_state.guc_ids_bitmap =
>> bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
>> if (!guc->submission_state.guc_ids_bitmap) {
>> ret = -ENOMEM;
>> - goto destroy_pool;
>> + goto destroy_tlb;
>> }
>> guc->timestamp.ping_delay = (POLL_TIME_CLKS /
>> gt->clock_frequency + 1) * HZ;
>> @@ -1979,9 +2034,10 @@ int intel_guc_submission_init(struct intel_guc
>> *guc)
>> return 0;
>> +destroy_tlb:
>> + fini_tlb_lookup(guc);
>> destroy_pool:
>> guc_lrc_desc_pool_destroy_v69(guc);
>> -
>> return ret;
>> }
>> @@ -1994,6 +2050,7 @@ void intel_guc_submission_fini(struct
>> intel_guc *guc)
>> guc_lrc_desc_pool_destroy_v69(guc);
>> i915_sched_engine_put(guc->sched_engine);
>> bitmap_free(guc->submission_state.guc_ids_bitmap);
>> + fini_tlb_lookup(guc);
>> guc->submission_initialized = false;
>> }
>> @@ -4624,6 +4681,157 @@ g2h_context_lookup(struct intel_guc *guc,
>> u32 ctx_id)
>> return ce;
>> }
>> +static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc,
>> u32 seqno)
>> +{
>> + struct intel_guc_tlb_wait *wait;
>> + unsigned long flags;
>> +
>> + xa_lock_irqsave(&guc->tlb_lookup, flags);
>> + wait = xa_load(&guc->tlb_lookup, seqno);
>> +
>> + /* We received a response after the waiting task did exit with a
>> timeout */
>> + if (unlikely(!wait))
>> + drm_dbg(&guc_to_gt(guc)->i915->drm,
>> + "Stale TLB invalidation response with seqno %d\n", seqno);
>> +
>> + if (wait)
>> + wake_up_tlb_invalidate(wait);
>> +
>> + xa_unlock_irqrestore(&guc->tlb_lookup, flags);
>> +}
>> +
>> +int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32
>> *hxg, u32 size)
>> +{
>> + u32 seqno, hxg_len, len;
>> +
>> + /*
>> + * FIXME: these calculations would be better done signed. That
>> + * way underflow can be detected as well.
>> + */
>
> When can this FIXME be addressed?
>
>> + hxg_len = size - GUC_CTB_MSG_MIN_LEN;
>> + len = hxg_len - GUC_HXG_MSG_MIN_LEN;
>> +
>> + if (unlikely(len < 1))
>> + return -EPROTO;
>> +
>> + seqno = hxg[GUC_HXG_MSG_MIN_LEN];
>> + wait_wake_outstanding_tlb_g2h(guc, seqno);
>> + return 0;
>> +}
>> +
>> +static long must_wait_woken(struct wait_queue_entry *wq_entry, long
>> timeout)
>> +{
>> + /*
>> + * This is equivalent to wait_woken() with the exception that
>> + * we do not wake up early if the kthread task has been completed.
>> + * As we are called from page reclaim in any task context,
>> + * we may be invoked from stopped kthreads, but we *must*
>> + * complete the wait from the HW.
>> + *
>> + * A second problem is that since we are called under reclaim
>> + * and wait_woken() inspected the thread state, it makes an invalid
>> + * assumption that all PF_KTHREAD tasks have set_kthread_struct()
>> + * called upon them, and will trigger a GPF in
>> is_kthread_should_stop().
>> + */
>> + do {
>> + set_current_state(TASK_UNINTERRUPTIBLE);
>> + if (wq_entry->flags & WQ_FLAG_WOKEN)
>> + break;
>> +
>> + timeout = schedule_timeout(timeout);
>> + } while (timeout);
>> + __set_current_state(TASK_RUNNING);
>> +
>> + /* See wait_woken() and woken_wake_function() */
>> + smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN);
>> +
>> + return timeout;
>> +}
>> +
>> +static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
>> +{
>> + struct intel_guc_tlb_wait _wq, *wq = &_wq;
>> + DEFINE_WAIT_FUNC(wait, woken_wake_function);
>> + struct intel_gt *gt = guc_to_gt(guc);
>> + int err = 0;
>
> Looks like err does not need to be initialized.
>
>> + u32 seqno;
>> + u32 action[] = {
>> + INTEL_GUC_ACTION_TLB_INVALIDATION,
>> + 0,
>> + REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
>> + REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
>> + INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
>> + INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
>> + };
>> + u32 size = ARRAY_SIZE(action);
>> +
>> + if (!intel_guc_ct_enabled(&guc->ct))
>> + return -EINVAL;
>> +
>> + init_waitqueue_head(&_wq.wq);
>> +
>> + if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq,
>> + xa_limit_32b, &guc->next_seqno,
>> + GFP_ATOMIC | __GFP_NOWARN) < 0) {
>> + /* Under severe memory pressure? Serialise TLB allocations */
>> + xa_lock_irq(&guc->tlb_lookup);
>> + wq = xa_load(&guc->tlb_lookup, guc->serial_slot);
>> + wait_event_lock_irq(wq->wq,
>> + !READ_ONCE(wq->status),
>> + guc->tlb_lookup.xa_lock);
>> + /*
>> + * Update wq->status under lock to ensure only one waiter can
>> + * issue the TLB invalidation command using the serial slot
>> at a
>> + * time. The condition is set to false before releasing the
>> lock
>> + * so that other caller continue to wait until woken up again.
>> + */
>> + wq->status = 1;
>> + xa_unlock_irq(&guc->tlb_lookup);
>> +
>> + seqno = guc->serial_slot;
>> + }
>> +
>> + action[1] = seqno;
>> +
>> + add_wait_queue(&wq->wq, &wait);
>> +
>> + err = intel_guc_send_busy_loop(guc, action, size,
>> G2H_LEN_DW_INVALIDATE_TLB, true);
>
> Busy looping version has to be used from here? Looks like the lock has
> been dropped and function otherwise can sleep..
If you don't use the auto-looping helper then the loop needs to be done
manually by the caller. The looping is about waiting for space in the
H2G buffer (because it is full of other commands). I assume that the TLB
inval request would want to wait until it can actually be sent rather
than just immediately exit with an -EBUSY error.
>
>> + if (err)
>> + goto out;
>> +
>> + /*
>> + * GuC has a timeout of 1ms for a TLB invalidation response from
>> GAM. On a
>> + * timeout GuC drops the request and has no mechanism to notify
>> the host about
>> + * the timeout. So keep a larger timeout that accounts for this
>> individual
>> + * timeout and max number of outstanding invalidation requests
>> that can be
>> + * queued in CT buffer.
>> + */
>> +#define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
>
> Is it possible to express the magic 2 seconds with some expressions
> involving the CT buffer size, multiplied by timeout per request, as
> the comment alludes?
The correct solution is to implement a
'has_my_request_been_executed_yet()' helper in the CT layer that tests
whether the given H2G call has actually made it to the head of the queue
yet or not. The TLB layer would then loop over that with no time at all
(assuming the call will return an error in the case of a dead GuC, for
example). Only when that returns true would it wait with a timeout,
where that timeout is a few ms as per the hardware spec for a TLB
invalidation.
However, that is a non-trivial update that no-one has the time to
implement :(. And so far, the scale just keeps tipping to the 'add a
bigger timeout' side as the quick hack fix (TLB inval isn't the only CT
client with the problem).
Having said that, if we are going with the quick and dirty timeout, it
would be better to put the timeout define in intel_guc_ct.h. Or make it
a helper function in the same whose code is in _ct.c and which returns a
value of 1s per 4KB of buffer size or something. I don't like the idea
of providing the buffer size to random bits of code outside of the CT
layer. That is exposing internal details to code which should not need
to know.
Likewise, the 'timeout per request' is not something the TLB layer has
any knowledge of. A TLB inval might be guaranteed to be <1ms but the CTB
might be full of much slower requests (context creation/destruction for
example). So you basically have to assume worst case processing time.
Which past experiments have shown to be around 1s for the current buffer
size.
Note also that a fundamental problem here is that GuC doesn't give us a
failed response back in the case of the hardware timeout being hit.
Hence the reason we need a KMD side timeout at all. However, a future
GuC update is promised which will move the timeout inside the GuC and
add a failed notification back to the KMD. Once that is available, we
theoretically don't need any time out at all on the KMD side (assuming
that there is some kind of abort mechanism built in to the GT reset path
for if the GuC itself dies).
John.
>
>> + if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
>> + gt_err(gt,
>> + "TLB invalidation response timed out for seqno %u\n",
>> seqno);
>> + err = -ETIME;
>> + }
>> +out:
>> + remove_wait_queue(&wq->wq, &wait);
>> + if (seqno != guc->serial_slot)
>> + xa_erase_irq(&guc->tlb_lookup, seqno);
>> +
>> + return err;
>> +}
>> +
>> +/* Full TLB invalidation */
>> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc)
>> +{
>> + return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_FULL);
>> +}
>> +
>> +/* GuC TLB Invalidation: Invalidate the TLB's of GuC itself. */
>> +int intel_guc_invalidate_tlb(struct intel_guc *guc)
>> +{
>> + return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
>> +}
>> +
>> int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
>> const u32 *msg,
>> u32 len)
>
> Regards,
>
> Tvrtko
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-03 16:41 ` Andi Shyti
@ 2023-10-03 20:29 ` John Harrison
2023-10-03 21:29 ` Andi Shyti
2023-10-04 7:54 ` Tvrtko Ursulin
1 sibling, 1 reply; 21+ messages in thread
From: John Harrison @ 2023-10-03 20:29 UTC (permalink / raw)
To: Andi Shyti, Tvrtko Ursulin
Cc: janusz.krzysztofik, matthew.d.roper, intel-gfx, Jonathan Cavitt,
chris.p.wilson, nirmoy.das
On 10/3/2023 09:41, Andi Shyti wrote:
> [...]
>
>>> - mmio_invalidate_full(gt);
>>> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
>>> + if (intel_guc_is_ready(guc))
>>> + intel_guc_invalidate_tlb_full(guc);
>>> + } else {
>>> + /*
>>> + * Fall back to old path if GuC is disabled.
>>> + * This is safe because GuC is not enabled and not writing to MMIO.
>>> + */
>> It is safe for intel_guc_is_ready() transitioning from false to true during GuC init? No way for some path to start issuing invalidations as that is happening?
>>
>>> + mmio_invalidate_full(gt);
>>> + }
> supernitpick: as we are at this, brackets are not required.
Braces are required on the first half of the 'if' because it is a double
if and the else applies to the top level not the inner level. And my
understanding of the style guide is that lop-sided bracing is incorrect.
i.e. never have "} else". Plus while it might be syntactically valid to
not have braces around the five line else clause because it is only one
actual code statement, it massively helps readability of the code to
have the braces present.
John.
>
> Andi
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-03 20:29 ` John Harrison
@ 2023-10-03 21:29 ` Andi Shyti
0 siblings, 0 replies; 21+ messages in thread
From: Andi Shyti @ 2023-10-03 21:29 UTC (permalink / raw)
To: John Harrison
Cc: janusz.krzysztofik, matthew.d.roper, intel-gfx, Jonathan Cavitt,
chris.p.wilson, nirmoy.das
Hi John,
> > > > - mmio_invalidate_full(gt);
> > > > + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
> > > > + if (intel_guc_is_ready(guc))
> > > > + intel_guc_invalidate_tlb_full(guc);
> > > > + } else {
> > > > + /*
> > > > + * Fall back to old path if GuC is disabled.
> > > > + * This is safe because GuC is not enabled and not writing to MMIO.
> > > > + */
> > > It is safe for intel_guc_is_ready() transitioning from false to true during GuC init? No way for some path to start issuing invalidations as that is happening?
> > >
> > > > + mmio_invalidate_full(gt);
> > > > + }
> > supernitpick: as we are at this, brackets are not required.
> Braces are required on the first half of the 'if' because it is a double if
> and the else applies to the top level not the inner level. And my
> understanding of the style guide is that lop-sided bracing is incorrect.
> i.e. never have "} else". Plus while it might be syntactically valid to not
> have braces around the five line else clause because it is only one actual
> code statement, it massively helps readability of the code to have the
> braces present.
You are right, the 'else' would connect with the innermost 'if'
and besides gcc complains with a warning like this:
warning: suggest explicit braces to avoid ambiguous ‘else’ [-Wdangling-else]
Thanks,
Andi
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume
2023-10-03 18:52 ` John Harrison
@ 2023-10-04 7:34 ` Jani Nikula
0 siblings, 0 replies; 21+ messages in thread
From: Jani Nikula @ 2023-10-04 7:34 UTC (permalink / raw)
To: John Harrison, Andi Shyti
Cc: janusz.krzysztofik, chris.p.wilson, intel-gfx, Jonathan Cavitt,
matthew.d.roper, nirmoy.das
On Tue, 03 Oct 2023, John Harrison <john.c.harrison@intel.com> wrote:
> On 10/3/2023 08:59, Andi Shyti wrote:
>> Hi Jani,
>>
>>>> Consider multi-gt support when cancelling all tlb invalidations on
>>>> suspend, and when submitting tlb invalidations on resume.
>>>>
>>>> Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>>>> Signed-off-by: Fei Yang <fei.yang@intel.com>
>>>> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
>>>> CC: John Harrison <John.C.Harrison@Intel.com>
>>> I guess I'm wondering why the top level suspend hook needs to iterate
>>> gts instead of some lower level thing. We should aim to reduce
>>> gem/gt/display details from the top level.
>> I'm not sure I am understanding the question.
>>
>> The TLB invalidation details are kept under the GT. But when
>> suspend is called, then the GT invalidation has to be triggered
>> by the top levels for each GT. Right?
> I think Jani's point is that the top level should be:
> i915_drm_suspend(...) {
> ...
> intel_tlb_suspend(dev_priv);
> }
>
> Then the TLB suspend helper function calls into the GT / UC layers as
> appropriate. But none of that internal only detail is exposed at the top
> level.
That's right, thanks for clarifying.
BR,
Jani.
>
> John.
>
>>
>> Thanks,
>> Andi
>
--
Jani Nikula, Intel
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-03 16:41 ` Andi Shyti
2023-10-03 20:29 ` John Harrison
@ 2023-10-04 7:54 ` Tvrtko Ursulin
1 sibling, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2023-10-04 7:54 UTC (permalink / raw)
To: Andi Shyti
Cc: janusz.krzysztofik, matthew.d.roper, intel-gfx, Jonathan Cavitt,
chris.p.wilson, nirmoy.das
On 03/10/2023 17:41, Andi Shyti wrote:
> Hi,
>
> [...]
>
>>> +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
>>> +{
>>> + struct drm_i915_private *i915 = ggtt->vm.i915;
>>> + struct intel_gt *gt;
>>> +
>>> + if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
>>> + gen8_ggtt_invalidate(ggtt);
>>> +
>>> + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
>>> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&gt->uc.guc) &&
>>> + intel_guc_is_ready(&gt->uc.guc)) {
>>
>> The condition here expands to a relatively heavy one:
>>
>> +#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
>> + ((intel_guc_ct_enabled(&(guc)->ct)) && \
>> + (intel_guc_submission_is_used(guc)) && \
>> + (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
>>
>>
>> &&
>>
>> static inline bool intel_guc_is_ready(struct intel_guc *guc)
>> {
>> return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
>> }
>>
>> intel_guc_ct_enabled is even duplicated.
>
> Maybe this is a smaller set?
>
> if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&gt->uc.guc) &&
> intel_guc_is_fw_running(&gt->uc.guc))
>
> The last condition includes intel_guc_submission_is_used() from
> the macro.
>
>> Is there scope to consolidate the parts which are platform invariant, or even runtime invariant, or at least guaranteed not to transition back and forth but one way only?
>>
>> In other words, if we know during init we will want it, mark it as a flag in intel_guc or somewhere, and then at runtime do only those conditions which can transition back and forth due driver flows.
>>
>> I am not saying this is performance sensitive, but in terms of elegance, readability and self-documentation the proposed version looks a bit sub-optimal to me.
>
> Are you suggesting some PCI flag? This is actually applying only
> for MTL.
1)
I didn't specifically have an opinion on whether it should be a device
info flag or in the guc struct or whatever. More knowledge of GuC state
transitions than I have would be required for an informed decision. To
me it just felt it raises the question if it can be simplified by
splitting the invariant from variant and eliminating the redundant. All
upper case macro name was also IMO wrong since we normally use those for
static things.
I'll have a look in the latest version how it looks.
2)
Only for MTL - really? I didn't see the patch do that. Why is that?
Regards,
Tvrtko
>
>>> + guc_ggtt_ct_invalidate(gt);
>>> + } else if (GRAPHICS_VER(i915) >= 12) {
>>> + intel_uncore_write(gt->uncore,
>>> + GEN12_GUC_TLB_INV_CR,
>>> + GEN12_GUC_TLB_INV_CR_INVALIDATE);
>>> + } else {
>>> + intel_uncore_write(gt->uncore,
>>> + GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>>> + }
>
> [...]
>
>>> - mmio_invalidate_full(gt);
>>> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
>>> + if (intel_guc_is_ready(guc))
>>> + intel_guc_invalidate_tlb_full(guc);
>>> + } else {
>>> + /*
>>> + * Fall back to old path if GuC is disabled.
>>> + * This is safe because GuC is not enabled and not writing to MMIO.
>>> + */
>>
>> It is safe for intel_guc_is_ready() transitioning from false to true during GuC init? No way for some path to start issuing invalidations as that is happening?
>>
>>> + mmio_invalidate_full(gt);
>>> + }
>
> supernitpick: as we are at this, brackets are not required.
>
> Andi
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines
2023-10-03 20:23 ` John Harrison
@ 2023-10-04 8:12 ` Tvrtko Ursulin
0 siblings, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2023-10-04 8:12 UTC (permalink / raw)
To: John Harrison, Jonathan Cavitt, intel-gfx
Cc: janusz.krzysztofik, andi.shyti, matthew.d.roper, chris.p.wilson,
nirmoy.das
On 03/10/2023 21:23, John Harrison wrote:
> On 10/3/2023 03:28, Tvrtko Ursulin wrote:
>> On 02/10/2023 18:24, Jonathan Cavitt wrote:
>>> From: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
>>>
>>> The GuC firmware had defined the interface for Translation Look-Aside
>>> Buffer (TLB) invalidation. We should use this interface when
>>> invalidating the engine and GuC TLBs.
>>> Add additional functionality to intel_gt_invalidate_tlb, invalidating
>>> the GuC TLBs and falling back to GT invalidation when the GuC is
>>> disabled.
>>> The invalidation is done by sending a request directly to the GuC
>>> tlb_lookup that invalidates the table. The invalidation is submitted as
>>> a wait request and is performed in the CT event handler. This means we
>>> cannot perform this TLB invalidation path if the CT is not enabled.
>>> If the request isn't fulfilled in two seconds, this would constitute
>>> an error in the invalidation as that would constitute either a lost
>>> request or a severe GuC overload.
>>>
>>> With this new invalidation routine, we can perform GuC-based GGTT
>>> invalidations. We should only do this when GuC is enabled and fall
>>> back to the original path when GuC is disabled to prevent concurrent
>>> issuance between GuC and KMD.
>>>
>>> Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
>>> Signed-off-by: Bruce Chang <yu.bruce.chang@intel.com>
>>> Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
>>> Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
>>> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
>>> Signed-off-by: Aravind Iddamsetty <aravind.iddamsetty@intel.com>
>>> Signed-off-by: Fei Yang <fei.yang@intel.com>
>>> CC: Andi Shyti <andi.shyti@linux.intel.com>
>>> ---
>>> drivers/gpu/drm/i915/gt/intel_ggtt.c | 43 ++--
>>> drivers/gpu/drm/i915/gt/intel_tlb.c | 14 +-
>>> .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 33 +++
>>> drivers/gpu/drm/i915/gt/uc/intel_guc.h | 22 ++
>>> drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 9 +
>>> drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 +
>>> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 212 +++++++++++++++++-
>>> 7 files changed, 322 insertions(+), 16 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c
>>> b/drivers/gpu/drm/i915/gt/intel_ggtt.c
>>> index 4d7d88b92632b..db5644b0146ca 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
>>> @@ -206,22 +206,38 @@ static void gen8_ggtt_invalidate(struct
>>> i915_ggtt *ggtt)
>>> intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
>>> GFX_FLSH_CNTL_EN);
>>> }
>>> -static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
>>> +static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
>>> {
>>> - struct drm_i915_private *i915 = ggtt->vm.i915;
>>> + struct intel_uncore *uncore = gt->uncore;
>>> + intel_wakeref_t wakeref;
>>> - gen8_ggtt_invalidate(ggtt);
>>> + with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
>>> + struct intel_guc *guc = &gt->uc.guc;
>>> - if (GRAPHICS_VER(i915) >= 12) {
>>> - struct intel_gt *gt;
>>> + intel_guc_invalidate_tlb(guc);
>>> + }
>>> +}
>>> - list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
>>> - intel_uncore_write_fw(gt->uncore,
>>> - GEN12_GUC_TLB_INV_CR,
>>> - GEN12_GUC_TLB_INV_CR_INVALIDATE);
>>> - } else {
>>> - intel_uncore_write_fw(ggtt->vm.gt->uncore,
>>> - GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>>> +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
>>> +{
>>> + struct drm_i915_private *i915 = ggtt->vm.i915;
>>> + struct intel_gt *gt;
>>> +
>>> + if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
>>> + gen8_ggtt_invalidate(ggtt);
>>> +
>>> + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
>>> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(&gt->uc.guc) &&
>>> + intel_guc_is_ready(&gt->uc.guc)) {
>>
>> The condition here expands to a relatively heavy one:
>>
>> +#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
>> + ((intel_guc_ct_enabled(&(guc)->ct)) && \
>> + (intel_guc_submission_is_used(guc)) && \
>> + (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
>>
>>
>> &&
>>
>> static inline bool intel_guc_is_ready(struct intel_guc *guc)
>> {
>> return intel_guc_is_fw_running(guc) &&
>> intel_guc_ct_enabled(&guc->ct);
>> }
>>
>> intel_guc_ct_enabled is even duplicated.
>>
>> Is there scope to consolidate the parts which are platform invariant,
>> or even runtime invariant, or at least guaranteed not to transition
>> back and forth but one way only?
>>
>> In other words, if we know during init we will want it, mark it as a
>> flag in intel_guc or somewhere, and then at runtime do only those
>> conditions which can transition back and forth due driver flows.
>>
>> I am not saying this is performance sensitive, but in terms of
>> elegance, readability and self-documentation the proposed version
>> looks a bit sub-optimal to me.
>>
>>> + guc_ggtt_ct_invalidate(gt);
>>> + } else if (GRAPHICS_VER(i915) >= 12) {
>>> + intel_uncore_write(gt->uncore,
>>> + GEN12_GUC_TLB_INV_CR,
>>> + GEN12_GUC_TLB_INV_CR_INVALIDATE);
>>> + } else {
>>> + intel_uncore_write(gt->uncore,
>>> + GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>>> + }
>>> }
>>> }
>>> @@ -1243,7 +1259,8 @@ static int gen8_gmch_probe(struct i915_ggtt
>>> *ggtt)
>>> ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
>>> }
>>> - if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
>>> + if (intel_uc_wants_guc(&ggtt->vm.gt->uc) &&
>>> + intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
>>
>> Is 2nd condition perhaps a superset of the 1st?
>>
>>> ggtt->invalidate = guc_ggtt_invalidate;
>>> else
>>> ggtt->invalidate = gen8_ggtt_invalidate;
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c
>>> b/drivers/gpu/drm/i915/gt/intel_tlb.c
>>> index 139608c30d978..efe002f14413d 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_tlb.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
>>> @@ -12,6 +12,7 @@
>>> #include "intel_gt_print.h"
>>> #include "intel_gt_regs.h"
>>> #include "intel_tlb.h"
>>> +#include "uc/intel_guc.h"
>>> /*
>>> * HW architecture suggest typical invalidation time at 40us,
>>> @@ -131,11 +132,22 @@ void intel_gt_invalidate_tlb_full(struct
>>> intel_gt *gt, u32 seqno)
>>> return;
>>> with_intel_gt_pm_if_awake(gt, wakeref) {
>>> + struct intel_guc *guc = &gt->uc.guc;
>>> +
>>> mutex_lock(&gt->tlb.invalidate_lock);
>>> if (tlb_seqno_passed(gt, seqno))
>>> goto unlock;
>>> - mmio_invalidate_full(gt);
>>> + if (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
>>> + if (intel_guc_is_ready(guc))
>>> + intel_guc_invalidate_tlb_full(guc);
>>> + } else {
>>> + /*
>>> + * Fall back to old path if GuC is disabled.
>>> + * This is safe because GuC is not enabled and not
>>> writing to MMIO.
>>> + */
>>
>> It is safe for intel_guc_is_ready() transitioning from false to true
>> during GuC init? No way for some path to start issuing invalidations
>> as that is happening?
> Are you concerned that it will take the GuC path too early? Or that it
> will take the MMIO path too late?
>
> The guc_is_ready() helper should only return true if we are genuinely
> ready to start sending H2G messages. So even if the GuC init sequence is
> still in progress in another thread, it should be valid to send GuC
> based inval requests from this thread if the test returns true.
>
> I'm not sure about the 'safe' comment which seems to be implying it is
> unsafe to use MMIO based invalidations if GuC is active. That is surely
> exactly what we are currently doing all the time? Prior to this patch,
> all invalidations are MMIO based because that is the only code we have.
> So it really should be safe to keep doing that otherwise why didn't we
> implement GuC invalidations as part of the initial GuC enablement?
Right, not too early, not too late, but interleaved at the wrong time
(insufficient serialization of the condition determining which method to
use).
My concern was that the comment perhaps suggests that if both i915 and
GuC would try to invalidate using MMIO _at the same time_ bad things
could happen.
And since the conditions for INTEL_GUC_SUPPORTS_TLB_INVALIDATION and
intel_guc_is_ready need expert GuC knowledge to untangle, I did not know
what is possible at runtime and what not.
Like early init or reset flows. Can we have i915 submit mmio invalidate
is guc is not read at that point, then guc becomes ready and it submits
some too, while the direct mmio ones are still waiting for acks, and
then the hw times out or something.
>
>
>>
>>> + mmio_invalidate_full(gt);
>>> + }
>>> write_seqcount_invalidate(&gt->tlb.seqno);
>>> unlock:
>>> diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>>> b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>>> index f359bef046e0b..9dff8012d5e76 100644
>>> --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>>> +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>>> @@ -138,6 +138,8 @@ enum intel_guc_action {
>>> INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
>>> INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
>>> INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
>>> + INTEL_GUC_ACTION_TLB_INVALIDATION = 0x7000,
>>> + INTEL_GUC_ACTION_TLB_INVALIDATION_DONE = 0x7001,
>>> INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
>>> INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
>>> INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
>>> @@ -181,4 +183,35 @@ enum intel_guc_state_capture_event_status {
>>> #define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
>>> +#define INTEL_GUC_TLB_INVAL_TYPE_MASK REG_GENMASK(7, 0)
>>> +#define INTEL_GUC_TLB_INVAL_MODE_MASK REG_GENMASK(11, 8)
>>> +#define INTEL_GUC_TLB_INVAL_FLUSH_CACHE REG_BIT(31)
>>> +
>>> +enum intel_guc_tlb_invalidation_type {
>>> + INTEL_GUC_TLB_INVAL_FULL = 0x0,
>>> + INTEL_GUC_TLB_INVAL_GUC = 0x3,
>>> +};
>>> +
>>> +/*
>>> + * 0: Heavy mode of Invalidation:
>>> + * The pipeline of the engine(s) for which the invalidation is
>>> targeted to is
>>> + * blocked, and all the in-flight transactions are guaranteed to be
>>> Globally
>>> + * Observed before completing the TLB invalidation
>>> + * 1: Lite mode of Invalidation:
>>> + * TLBs of the targeted engine(s) are immediately invalidated.
>>> + * In-flight transactions are NOT guaranteed to be Globally Observed
>>> before
>>> + * completing TLB invalidation.
>>> + * Light Invalidation Mode is to be used only when
>>> + * it can be guaranteed (by SW) that the address translations remain
>>> invariant
>>> + * for the in-flight transactions across the TLB invalidation. In
>>> other words,
>>> + * this mode can be used when the TLB invalidation is intended to
>>> clear out the
>>> + * stale cached translations that are no longer in use. Light
>>> Invalidation Mode
>>> + * is much faster than the Heavy Invalidation Mode, as it does not
>>> wait for the
>>> + * in-flight transactions to be GOd.
>>> + */
>>> +enum intel_guc_tlb_inval_mode {
>>> + INTEL_GUC_TLB_INVAL_MODE_HEAVY = 0x0,
>>> + INTEL_GUC_TLB_INVAL_MODE_LITE = 0x1,
>>> +};
>>> +
>>> #endif /* _ABI_GUC_ACTIONS_ABI_H */
>>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>>> b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>>> index 6c392bad29c19..5fc5e67f870cc 100644
>>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>>> @@ -79,6 +79,18 @@ struct intel_guc {
>>> */
>>> atomic_t outstanding_submission_g2h;
>>> + /** @tlb_lookup: xarray to store all pending TLB invalidation
>>> requests */
>>> + struct xarray tlb_lookup;
>>> +
>>> + /**
>>> + * @serial_slot: id to the initial waiter created in tlb_lookup,
>>> + * which is used only when failed to allocate new waiter.
>>> + */
>>> + u32 serial_slot;
>>> +
>>> + /** @next_seqno: the next id (sequence no.) to allocate. */
>>> + u32 next_seqno;
>>> +
>>> /** @interrupts: pointers to GuC interrupt-managing functions. */
>>> struct {
>>> bool enabled;
>>> @@ -296,6 +308,11 @@ struct intel_guc {
>>> #define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major,
>>> (ver).minor, (ver).patch)
>>> #define GUC_SUBMIT_VER(guc)
>>> MAKE_GUC_VER_STRUCT((guc)->submission_version)
>>> +struct intel_guc_tlb_wait {
>>> + struct wait_queue_head wq;
>>> + u8 status;
>>> +} __aligned(4);
>>
>> Put a comment here please stating why it needs to be aligned.
>>
>>> +
>>> static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
>>> {
>>> return container_of(log, struct intel_guc, log);
>>> @@ -417,6 +434,11 @@ static inline bool intel_guc_is_supported(struct
>>> intel_guc *guc)
>>> {
>>> return intel_uc_fw_is_supported(&guc->fw);
>>> }
>>> +
>>> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc);
>>> +int intel_guc_invalidate_tlb(struct intel_guc *guc);
>>> +int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32
>>> *hxg,
>>> + u32 size);
>>> static inline bool intel_guc_is_wanted(struct intel_guc *guc)
>>> {
>>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>>> b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>>> index 6e22af31513a5..4b29a0b814950 100644
>>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>>> @@ -1186,9 +1186,18 @@ static int ct_handle_event(struct intel_guc_ct
>>> *ct, struct ct_incoming_msg *requ
>>> switch (action) {
>>> case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
>>> case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
>>> + case INTEL_GUC_ACTION_TLB_INVALIDATION_DONE:
>>> g2h_release_space(ct, request->size);
>>> }
>>> + /* Handle tlb invalidation response in interrupt context */
>>> + if (action == INTEL_GUC_ACTION_TLB_INVALIDATION_DONE) {
>>> + int ret = intel_guc_tlb_invalidation_done(ct_to_guc(ct),
>>> hxg, request->size);
>>> +
>>> + ct_free_msg(request);
>>> + return ret;
>>> + }
>>
>> 1)
>> Can the comment say why it is important to handle these in a special
>> path instead of re-using the existing worker/list?
>>
>> 2)
>> Could it instead of duplicating some h2g logic in
>> intel_guc_tlb_invalidation_done call ct_process_request, and so handle
>> all actions in a centralized place?
> Not sure what logic you are seeing duplicated? You mean the calculation
> of the message length? That could be moved out into the above CT layer
> so only the payload size is passed in to the TLB handler. I'm not seeing
> anything else that is duplicated.
Yes all that, the need for a new function which handles just that one
message, while there is a function already.
> But yes, it should be possible to just call ct_process_request directly.
> It doesn't look like it does any extra processing that requires it to be
> outside of the ISR (as long as it is guaranteed that it never tries to
> process anything other than a TLB inval done message that is). So yeah,
> that's probably the cleaner solution.
I guess it did not work out since the latest version still has a
separate intel_guc_tlb_invalidation_done?
>>> +
>>> spin_lock_irqsave(&ct->requests.lock, flags);
>>> list_add_tail(&request->link, &ct->requests.incoming);
>>> spin_unlock_irqrestore(&ct->requests.lock, flags);
>>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>>> b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>>> index b4d56eccfb1f0..01109d15b779b 100644
>>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>>> @@ -22,6 +22,7 @@
>>> /* Payload length only i.e. don't include G2H header length */
>>> #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 2
>>> #define G2H_LEN_DW_DEREGISTER_CONTEXT 1
>>> +#define G2H_LEN_DW_INVALIDATE_TLB 1
>>> #define GUC_CONTEXT_DISABLE 0
>>> #define GUC_CONTEXT_ENABLE 1
>>> @@ -498,4 +499,8 @@ enum intel_guc_recv_message {
>>> INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
>>> };
>>> +#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) \
>>> + ((intel_guc_ct_enabled(&(guc)->ct)) && \
>>> + (intel_guc_submission_is_used(guc)) && \
>>> + (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
>>> #endif
>>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>>> b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>>> index ae3495a9c8146..3478fa73180ab 100644
>>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>>> @@ -1796,11 +1796,20 @@ static void __guc_reset_context(struct
>>> intel_context *ce, intel_engine_mask_t st
>>> intel_context_put(parent);
>>> }
>>> +static void wake_up_tlb_invalidate(struct intel_guc_tlb_wait *wait)
>>> +{
>>> + /* Barrier to ensure the store is observed by the woken thread */
>>> + smp_store_mb(wait->status, 0);
>>
>> Is the memory barrier required, given the main caller is from a
>> spinlocked section?
>>
>>> + wake_up(&wait->wq);
>>> +}
>>> +
>>> void intel_guc_submission_reset(struct intel_guc *guc,
>>> intel_engine_mask_t stalled)
>>> {
>>> + struct intel_guc_tlb_wait *wait;
>>> struct intel_context *ce;
>>> unsigned long index;
>>> unsigned long flags;
>>> + unsigned long i;
>>> if (unlikely(!guc_submission_initialized(guc))) {
>>> /* Reset called during driver load? GuC not yet
>>> initialised! */
>>> @@ -1826,6 +1835,13 @@ void intel_guc_submission_reset(struct
>>> intel_guc *guc, intel_engine_mask_t stall
>>> /* GuC is blown away, drop all references to contexts */
>>> xa_destroy(&guc->context_lookup);
>>> +
>>> + /*
>>> + * The full GT reset will have cleared the TLB caches and
>>> flushed the
>>> + * G2H message queue; we can release all the blocked waiters.
>>> + */
>>> + xa_for_each(&guc->tlb_lookup, i, wait)
>>> + wake_up_tlb_invalidate(wait);
>>> }
>>> static void guc_cancel_context_requests(struct intel_context *ce)
>>> @@ -1948,6 +1964,41 @@ void intel_guc_submission_reset_finish(struct
>>> intel_guc *guc)
>>> static void destroyed_worker_func(struct work_struct *w);
>>> static void reset_fail_worker_func(struct work_struct *w);
>>> +static int init_tlb_lookup(struct intel_guc *guc)
>>> +{
>>> + struct intel_guc_tlb_wait *wait;
>>> + int err;
>>> +
>>> + xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
>>> +
>>> + wait = kzalloc(sizeof(*wait), GFP_KERNEL);
>>> + if (!wait)
>>> + return -ENOMEM;
>>> +
>>> + init_waitqueue_head(&wait->wq);
>>> + err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot,
>>> wait,
>>> + xa_limit_32b, &guc->next_seqno, GFP_KERNEL);
>>
>> For what is this id, allocated during init, used and when it gets freed?
>>
>>> + if (err == -ENOMEM) {
>>> + kfree(wait);
>>> + return err;
>>> + }
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +static void fini_tlb_lookup(struct intel_guc *guc)
>>> +{
>>> + struct intel_guc_tlb_wait *wait;
>>> +
>>> + wait = xa_load(&guc->tlb_lookup, guc->serial_slot);
>>> + if (wait) {
>>> + GEM_BUG_ON(wait->status);
>>> + kfree(wait);
>>> + }
>>> +
>>> + xa_destroy(&guc->tlb_lookup);
>>> +}
>>> +
>>> /*
>>> * Set up the memory resources to be shared with the GuC (via the
>>> GGTT)
>>> * at firmware loading time.
>>> @@ -1966,11 +2017,15 @@ int intel_guc_submission_init(struct
>>> intel_guc *guc)
>>> return ret;
>>> }
>>> + ret = init_tlb_lookup(guc);
>>> + if (ret)
>>> + goto destroy_pool;
>>> +
>>> guc->submission_state.guc_ids_bitmap =
>>> bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
>>> if (!guc->submission_state.guc_ids_bitmap) {
>>> ret = -ENOMEM;
>>> - goto destroy_pool;
>>> + goto destroy_tlb;
>>> }
>>> guc->timestamp.ping_delay = (POLL_TIME_CLKS /
>>> gt->clock_frequency + 1) * HZ;
>>> @@ -1979,9 +2034,10 @@ int intel_guc_submission_init(struct intel_guc
>>> *guc)
>>> return 0;
>>> +destroy_tlb:
>>> + fini_tlb_lookup(guc);
>>> destroy_pool:
>>> guc_lrc_desc_pool_destroy_v69(guc);
>>> -
>>> return ret;
>>> }
>>> @@ -1994,6 +2050,7 @@ void intel_guc_submission_fini(struct
>>> intel_guc *guc)
>>> guc_lrc_desc_pool_destroy_v69(guc);
>>> i915_sched_engine_put(guc->sched_engine);
>>> bitmap_free(guc->submission_state.guc_ids_bitmap);
>>> + fini_tlb_lookup(guc);
>>> guc->submission_initialized = false;
>>> }
>>> @@ -4624,6 +4681,157 @@ g2h_context_lookup(struct intel_guc *guc,
>>> u32 ctx_id)
>>> return ce;
>>> }
>>> +static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc,
>>> u32 seqno)
>>> +{
>>> + struct intel_guc_tlb_wait *wait;
>>> + unsigned long flags;
>>> +
>>> + xa_lock_irqsave(&guc->tlb_lookup, flags);
>>> + wait = xa_load(&guc->tlb_lookup, seqno);
>>> +
>>> + /* We received a response after the waiting task did exit with a
>>> timeout */
>>> + if (unlikely(!wait))
>>> + drm_dbg(&guc_to_gt(guc)->i915->drm,
>>> + "Stale TLB invalidation response with seqno %d\n", seqno);
>>> +
>>> + if (wait)
>>> + wake_up_tlb_invalidate(wait);
>>> +
>>> + xa_unlock_irqrestore(&guc->tlb_lookup, flags);
>>> +}
>>> +
>>> +int intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32
>>> *hxg, u32 size)
>>> +{
>>> + u32 seqno, hxg_len, len;
>>> +
>>> + /*
>>> + * FIXME: these calculations would be better done signed. That
>>> + * way underflow can be detected as well.
>>> + */
>>
>> When can this FIXME be addressed?
>>
>>> + hxg_len = size - GUC_CTB_MSG_MIN_LEN;
>>> + len = hxg_len - GUC_HXG_MSG_MIN_LEN;
>>> +
>>> + if (unlikely(len < 1))
>>> + return -EPROTO;
>>> +
>>> + seqno = hxg[GUC_HXG_MSG_MIN_LEN];
>>> + wait_wake_outstanding_tlb_g2h(guc, seqno);
>>> + return 0;
>>> +}
>>> +
>>> +static long must_wait_woken(struct wait_queue_entry *wq_entry, long
>>> timeout)
>>> +{
>>> + /*
>>> + * This is equivalent to wait_woken() with the exception that
>>> + * we do not wake up early if the kthread task has been completed.
>>> + * As we are called from page reclaim in any task context,
>>> + * we may be invoked from stopped kthreads, but we *must*
>>> + * complete the wait from the HW .
>>> + *
>>> + * A second problem is that since we are called under reclaim
>>> + * and wait_woken() inspected the thread state, it makes an invalid
>>> + * assumption that all PF_KTHREAD tasks have set_kthread_struct()
>>> + * called upon them, and will trigger a GPF in
>>> is_kthread_should_stop().
>>> + */
>>> + do {
>>> + set_current_state(TASK_UNINTERRUPTIBLE);
>>> + if (wq_entry->flags & WQ_FLAG_WOKEN)
>>> + break;
>>> +
>>> + timeout = schedule_timeout(timeout);
>>> + } while (timeout);
>>> + __set_current_state(TASK_RUNNING);
>>> +
>>> + /* See wait_woken() and woken_wake_function() */
>>> + smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN);
>>> +
>>> + return timeout;
>>> +}
>>> +
>>> +static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
>>> +{
>>> + struct intel_guc_tlb_wait _wq, *wq = &_wq;
>>> + DEFINE_WAIT_FUNC(wait, woken_wake_function);
>>> + struct intel_gt *gt = guc_to_gt(guc);
>>> + int err = 0;
>>
>> Looks like err does not need to be initialized.
>>
>>> + u32 seqno;
>>> + u32 action[] = {
>>> + INTEL_GUC_ACTION_TLB_INVALIDATION,
>>> + 0,
>>> + REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
>>> + REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
>>> + INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
>>> + INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
>>> + };
>>> + u32 size = ARRAY_SIZE(action);
>>> +
>>> + if (!intel_guc_ct_enabled(&guc->ct))
>>> + return -EINVAL;
>>> +
>>> + init_waitqueue_head(&_wq.wq);
>>> +
>>> + if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq,
>>> + xa_limit_32b, &guc->next_seqno,
>>> + GFP_ATOMIC | __GFP_NOWARN) < 0) {
>>> + /* Under severe memory pressure? Serialise TLB allocations */
>>> + xa_lock_irq(&guc->tlb_lookup);
>>> + wq = xa_load(&guc->tlb_lookup, guc->serial_slot);
>>> + wait_event_lock_irq(wq->wq,
>>> + !READ_ONCE(wq->status),
>>> + guc->tlb_lookup.xa_lock);
>>> + /*
>>> + * Update wq->status under lock to ensure only one waiter can
>>> + * issue the TLB invalidation command using the serial slot
>>> at a
>>> + * time. The condition is set to false before releasing the
>>> lock
>>> + * so that other caller continue to wait until woken up again.
>>> + */
>>> + wq->status = 1;
>>> + xa_unlock_irq(&guc->tlb_lookup);
>>> +
>>> + seqno = guc->serial_slot;
>>> + }
>>> +
>>> + action[1] = seqno;
>>> +
>>> + add_wait_queue(&wq->wq, &wait);
>>> +
>>> + err = intel_guc_send_busy_loop(guc, action, size,
>>> G2H_LEN_DW_INVALIDATE_TLB, true);
>>
>> Busy looping version has to be used from here? Looks like the lock has
>> been dropped and function otherwise can sleep..
> If you don't use the auto-looping helper then the loop needs to be done
> manually by the caller. The looping is about waiting for space in the
> H2G buffer (because it is full of other commands). I assume that the TLB
> inval request would want to wait until it can actually be sent rather
> than just immediately exit with an -EBUSY error.
I meant to use some function which sleeps when waiting for space instead
of busy looping. AFAIR the busy looping one was for calling from
contexts which cannot sleep, no?
>
>>
>>> + if (err)
>>> + goto out;
>>> +
>>> + /*
>>> + * GuC has a timeout of 1ms for a TLB invalidation response from
>>> GAM. On a
>>> + * timeout GuC drops the request and has no mechanism to notify
>>> the host about
>>> + * the timeout. So keep a larger timeout that accounts for this
>>> individual
>>> + * timeout and max number of outstanding invalidation requests
>>> that can be
>>> + * queued in CT buffer.
>>> + */
>>> +#define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
>>
>> Is it possible to express the magic 2 seconds with some expressions
>> involving the CT buffer size, multiplied by timeout per request, as
>> the comment alludes?
> The correct solution is to implement a
> 'has_my_request_been_executed_yet()' helper in the CT layer that tests
> whether the given H2G call has actually made it to the head of the queue
> yet or not. The TLB layer would then loop over that with no timeout at all
> (assuming the call will return an error in the case of a dead GuC, for
> example). Only when that returns true would it wait with a timeout,
> where that timeout is a few ms as per the hardware spec for a TLB
> invalidation.
>
> However, that is a non-trivial update that no-one has the time to
> implement :(. And so far, the scale just keeps tipping to the 'add a
> bigger timeout' side as the quick hack fix (TLB inval isn't the only CT
> client with the problem).
>
> Having said that, if we are going with the quick and dirty timeout, it
> would be better to put the timeout define in intel_guc_ct.h. Or make it
> a helper function in the same whose code is in _ct.c and which returns a
> value of 1s per 4KB of buffer size or something. I don't like the idea
> of providing the buffer size to random bits of code outside of the CT
> layer. That is exposing internal details to code which should not need
> to know.
>
> Likewise, the 'timeout per request' is not something the TLB layer has
> any knowledge of. A TLB inval might be guaranteed to be <1ms but the CTB
> might be full of much slower requests (context creation/destruction for
> example). So you basically have to assume worst case processing time.
> Which past experiments have shown to be around 1s for the current buffer
> size.
Okay, I get it. This also means this part of the comment is wrong:
"""
So keep a larger timeout that accounts for this individual
timeout and max number of outstanding invalidation requests
that can be queued in CT buffer.
"""
Or at least misleading, since the timeout value does not relate only to
the number of outstanding _invalidation requests_.
> Note also that a fundamental problem here is that GuC doesn't give us a
> failed response back in the case of the hardware timeout being hit.
> Hence the reason we need a KMD side timeout at all. However, a future
> GuC update is promised which will move the timeout inside the GuC and
> add a failed notification back to the KMD. Once that is available, we
> theoretically don't need any time out at all on the KMD side (assuming
> that there is some kind of abort mechanism built in to the GT reset path
> for if the GuC itself dies).
Oh dear, sending messages into a black hole is not the best, but shrug,
there is not much we can do whichever way we detect a timeout apart from
opting to stop the universe.
Regards,
Tvrtko
>
> John.
>
>>
>>> + if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
>>> + gt_err(gt,
>>> + "TLB invalidation response timed out for seqno %u\n",
>>> seqno);
>>> + err = -ETIME;
>>> + }
>>> +out:
>>> + remove_wait_queue(&wq->wq, &wait);
>>> + if (seqno != guc->serial_slot)
>>> + xa_erase_irq(&guc->tlb_lookup, seqno);
>>> +
>>> + return err;
>>> +}
>>> +
>>> +/* Full TLB invalidation */
>>> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc)
>>> +{
>>> + return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_FULL);
>>> +}
>>> +
>>> +/* GuC TLB Invalidation: Invalidate the TLB's of GuC itself. */
>>> +int intel_guc_invalidate_tlb(struct intel_guc *guc)
>>> +{
>>> + return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
>>> +}
>>> +
>>> int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
>>> const u32 *msg,
>>> u32 len)
>>
>> Regards,
>>
>> Tvrtko
>
^ permalink raw reply [flat|nested] 21+ messages in thread
end of thread, other threads:[~2023-10-04 8:12 UTC | newest]
Thread overview: 21+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-10-02 17:24 [Intel-gfx] [PATCH v3 1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Jonathan Cavitt
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 2/4] drm/i915: No TLB invalidation on wedged or suspended GT Jonathan Cavitt
2023-10-03 10:35 ` Tvrtko Ursulin
2023-10-03 11:50 ` Jani Nikula
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 3/4] drm/i915: Perform TLB invalidation on all GTs during suspend/resume Jonathan Cavitt
2023-10-03 11:48 ` Jani Nikula
2023-10-03 15:59 ` Andi Shyti
2023-10-03 18:52 ` John Harrison
2023-10-04 7:34 ` Jani Nikula
2023-10-02 17:24 ` [Intel-gfx] [PATCH v3 4/4] drm/i915/gt: Increase sleep in gt_tlb selftest sanitycheck Jonathan Cavitt
2023-10-02 23:42 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [v3,1/4] drm/i915: Define and use GuC and CTB TLB invalidation routines Patchwork
2023-10-02 23:42 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2023-10-03 0:01 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2023-10-03 10:28 ` [Intel-gfx] [PATCH v3 1/4] " Tvrtko Ursulin
2023-10-03 16:41 ` Andi Shyti
2023-10-03 20:29 ` John Harrison
2023-10-03 21:29 ` Andi Shyti
2023-10-04 7:54 ` Tvrtko Ursulin
2023-10-03 20:23 ` John Harrison
2023-10-04 8:12 ` Tvrtko Ursulin
2023-10-03 11:06 ` Tvrtko Ursulin
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox