From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: farah.kassabri@intel.com, michal.wajdeczko@intel.com
Subject: [PATCH 09/11] drm/xe: Add GT TLB invalidation coalescing
Date: Fri, 5 Jul 2024 17:02:50 -0700 [thread overview]
Message-ID: <20240706000252.702044-10-matthew.brost@intel.com> (raw)
In-Reply-To: <20240706000252.702044-1-matthew.brost@intel.com>
It has been shown that too many pending GT TLB invalidations can
overwhelm the hardware (GuC firmware). Add a watermark: if too many GT
TLB invalidations are pending, hold further GT TLB invalidations in the
KMD until the pressure is relieved. While holding GT TLB invalidations,
coalesce the invalidations in the VM (PPGTT) or GGTT structures.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_ggtt.c | 21 +-
drivers/gpu/drm/xe/xe_ggtt_types.h | 5 +
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 256 +++++++++++++++++-
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 10 +-
.../gpu/drm/xe/xe_gt_tlb_invalidation_types.h | 23 ++
drivers/gpu/drm/xe/xe_gt_types.h | 21 +-
drivers/gpu/drm/xe/xe_pt.c | 7 +-
drivers/gpu/drm/xe/xe_vm.c | 19 +-
drivers/gpu/drm/xe/xe_vm_types.h | 3 +
9 files changed, 339 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 0cdbc1296e88..3d4d45aa5e09 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -130,6 +130,12 @@ static void ggtt_fini_early(struct drm_device *drm, void *arg)
mutex_destroy(&ggtt->lock);
drm_mm_takedown(&ggtt->mm);
+
+ xe_gt_tlb_invalidation_coalesce_fini(ggtt->tile->primary_gt,
+ &ggtt->coalesce[PRIMARY_COALESCE_ID]);
+ if (ggtt->tile->media_gt)
+ xe_gt_tlb_invalidation_coalesce_fini(ggtt->tile->media_gt,
+ &ggtt->coalesce[MEDIA_COALESCE_ID]);
}
static void ggtt_fini(struct drm_device *drm, void *arg)
@@ -231,6 +237,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
ggtt->size - xe_wopcm_size(xe));
mutex_init(&ggtt->lock);
+ xe_gt_tlb_invalidation_coalesce_init(&ggtt->coalesce[PRIMARY_COALESCE_ID],
+ XE_GT_TLB_INVALIDATION_COALESCE_GGTT);
+ xe_gt_tlb_invalidation_coalesce_init(&ggtt->coalesce[MEDIA_COALESCE_ID],
+ XE_GT_TLB_INVALIDATION_COALESCE_GGTT);
primelockdep(ggtt);
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
@@ -295,14 +305,15 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
return err;
}
-static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
+static void ggtt_invalidate_gt_tlb(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce)
{
int err;
if (!gt)
return;
- err = xe_gt_tlb_invalidation_ggtt(gt);
+ err = xe_gt_tlb_invalidation_ggtt(gt, coalesce);
if (err)
drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
}
@@ -310,8 +321,10 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
/* Each GT in a tile has its own TLB to cache GGTT lookups */
- ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
- ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
+ ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt,
+ &ggtt->coalesce[PRIMARY_COALESCE_ID]);
+ ggtt_invalidate_gt_tlb(ggtt->tile->media_gt,
+ &ggtt->coalesce[MEDIA_COALESCE_ID]);
}
void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index 2245d88d8f39..56ad0b312d22 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -8,6 +8,7 @@
#include <drm/drm_mm.h>
+#include "xe_gt_tlb_invalidation_types.h"
#include "xe_pt_types.h"
struct xe_bo;
@@ -31,6 +32,10 @@ struct xe_ggtt {
struct drm_mm mm;
+#define PRIMARY_COALESCE_ID 0
+#define MEDIA_COALESCE_ID 1
+ struct xe_gt_tlb_invalidation_coalesce coalesce[MEDIA_COALESCE_ID + 1];
+
/** @access_count: counts GGTT writes */
unsigned int access_count;
};
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index d63ce1da3a92..b85089d27a36 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -62,12 +62,38 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
dma_fence_put(&fence->base);
}
if (!list_empty(&gt->tlb_invalidation.pending_fences))
- queue_delayed_work(system_wq,
+ queue_delayed_work(gt->tlb_invalidation.wq,
&gt->tlb_invalidation.fence_tdr,
tlb_timeout_jiffies(gt));
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}
+static void
+xe_gt_tlb_invalidation_coalesce_issue(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce);
+
+static void xe_gt_tlb_coalesce_work(struct work_struct *work)
+{
+ struct xe_gt *gt = container_of(work, struct xe_gt,
+ tlb_invalidation.coalesce_work);
+ struct xe_gt_tlb_invalidation_coalesce *coalesce, *next;
+
+ mutex_lock(&gt->tlb_invalidation.seqno_lock);
+ list_for_each_entry_safe(coalesce, next,
+ &gt->tlb_invalidation.pending_coalesce,
+ link)
+ xe_gt_tlb_invalidation_coalesce_issue(gt, coalesce);
+ xe_gt_assert(gt, list_empty(&gt->tlb_invalidation.pending_coalesce));
+ mutex_unlock(&gt->tlb_invalidation.seqno_lock);
+}
+
+static void gt_tlb_invalidation_init(struct drm_device *drm, void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ destroy_workqueue(gt->tlb_invalidation.wq);
+}
+
/**
* xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
* @gt: graphics tile
@@ -79,18 +105,32 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
*/
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
+ int err;
+
gt->tlb_invalidation.seqno = 1;
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
+ INIT_LIST_HEAD(&gt->tlb_invalidation.pending_coalesce);
spin_lock_init(&gt->tlb_invalidation.pending_lock);
spin_lock_init(&gt->tlb_invalidation.fence_lock);
INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
xe_gt_tlb_fence_timeout);
+ INIT_WORK(&gt->tlb_invalidation.coalesce_work,
+ xe_gt_tlb_coalesce_work);
/* Execlists not supported */
if (!gt_to_xe(gt)->info.force_execlist)
gt->tlb_invalidation.ops[XE_GT_TLB_INVALIDATION_CLIENT_GUC] =
xe_guc_tlb_invalidation_get_ops(&gt->uc.guc);
+ gt->tlb_invalidation.wq = alloc_ordered_workqueue("gt-tlb-inval-ordered-wq", 0);
+ if (!gt->tlb_invalidation.wq)
+ return -ENOMEM;
+
+ err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm,
+ gt_tlb_invalidation_init, gt);
+ if (err)
+ return err;
+
return drmm_mutex_init(&gt_to_xe(gt)->drm,
&gt->tlb_invalidation.seqno_lock);
}
@@ -109,6 +149,22 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
list_del(&fence->link);
__invalidation_fence_signal(xe, fence);
}
+static void
+xe_gt_tlb_invalidation_coalesce_reset(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce)
+{
+ struct xe_gt_tlb_invalidation_fence *fence, *next;
+
+ lockdep_assert_held(&gt->tlb_invalidation.seqno_lock);
+ lockdep_assert_held(&gt->tlb_invalidation.pending_lock);
+ xe_gt_assert(gt, !list_empty(&coalesce->fence_list));
+
+ list_for_each_entry_safe(fence, next, &coalesce->fence_list,
+ link)
+ invalidation_fence_signal(gt_to_xe(gt), fence);
+
+ list_del_init(&coalesce->link);
+}
/**
* xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
@@ -119,6 +175,7 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
struct xe_gt_tlb_invalidation_fence *fence, *next;
+ struct xe_gt_tlb_invalidation_coalesce *coalesce, *__next;
enum xe_gt_tlb_invalidation_clients client;
int pending_seqno;
@@ -130,6 +187,7 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
mutex_lock(&gt->tlb_invalidation.seqno_lock);
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
+ cancel_work(&gt->tlb_invalidation.coalesce_work);
/*
* We might have various kworkers waiting for TLB flushes to complete
* which are not tracked with an explicit TLB fence, however at this
@@ -145,6 +203,11 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
WRITE_ONCE(gt->tlb_invalidation.seqno_recv[client],
pending_seqno);
+ list_for_each_entry_safe(coalesce, __next,
+ &gt->tlb_invalidation.pending_coalesce,
+ link)
+ xe_gt_tlb_invalidation_coalesce_reset(gt, coalesce);
+
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link)
invalidation_fence_signal(gt_to_xe(gt), fence);
@@ -152,6 +215,35 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
mutex_unlock(&gt->tlb_invalidation.seqno_lock);
}
+static int __tlb_invalidation_seqno_diff(struct xe_gt *gt,
+ enum xe_gt_tlb_invalidation_clients client)
+{
+ int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv[client]);
+ int seqno = gt->tlb_invalidation.seqno;
+
+ lockdep_assert_held(&gt->tlb_invalidation.pending_lock);
+
+ if (seqno >= seqno_recv)
+ return seqno - seqno_recv;
+ else
+ return TLB_INVALIDATION_SEQNO_MAX - seqno_recv + seqno - 1;
+}
+
+static int tlb_invalidation_seqno_diff(struct xe_gt *gt)
+{
+ enum xe_gt_tlb_invalidation_clients client;
+ int diff = 0, tmp;
+
+ lockdep_assert_held(&gt->tlb_invalidation.pending_lock);
+ for (client = 0; client < XE_GT_TLB_INVALIDATION_CLIENT_MAX; ++client) {
+ tmp = __tlb_invalidation_seqno_diff(gt, client);
+ if (tmp > diff)
+ diff = tmp;
+ }
+
+ return diff;
+}
+
static bool __tlb_invalidation_seqno_past(struct xe_gt *gt,
enum xe_gt_tlb_invalidation_clients client,
int seqno)
@@ -222,6 +314,14 @@ static int send_tlb_invalidation_ppgtt(struct xe_gt *gt, u64 start, u64 end,
return 0;
}
+static void xe_gt_tlb_invalidation_seqno_bump(struct xe_gt *gt)
+{
+ gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ if (!gt->tlb_invalidation.seqno)
+ gt->tlb_invalidation.seqno = 1;
+}
+
static void xe_gt_tlb_invalidation_fence_prep(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence)
{
@@ -241,30 +341,114 @@ static void xe_gt_tlb_invalidation_fence_prep(struct xe_gt *gt,
&gt->tlb_invalidation.pending_fences);
if (list_is_singular(&gt->tlb_invalidation.pending_fences))
- queue_delayed_work(system_wq,
+ queue_delayed_work(gt->tlb_invalidation.wq,
&gt->tlb_invalidation.fence_tdr,
tlb_timeout_jiffies(gt));
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
- TLB_INVALIDATION_SEQNO_MAX;
- if (!gt->tlb_invalidation.seqno)
- gt->tlb_invalidation.seqno = 1;
+ xe_gt_tlb_invalidation_seqno_bump(gt);
+}
+
+static void
+xe_gt_tlb_invalidation_coalesce_issue(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce)
+{
+ struct xe_gt_tlb_invalidation_fence *fence;
+ int seqno = gt->tlb_invalidation.seqno;
+
+ lockdep_assert_held(&gt->tlb_invalidation.seqno_lock);
+ xe_gt_assert(gt, !list_empty(&coalesce->fence_list));
+
+ list_for_each_entry(fence, &coalesce->fence_list, link) {
+ fence->seqno = seqno;
+ fence->invalidation_time = ktime_get();
+ trace_xe_gt_tlb_invalidation_fence_send(gt_to_xe(gt), fence);
+ }
+
+ spin_lock_irq(&gt->tlb_invalidation.pending_lock);
+ if (list_empty(&gt->tlb_invalidation.pending_fences))
+ queue_delayed_work(gt->tlb_invalidation.wq,
+ &gt->tlb_invalidation.fence_tdr,
+ tlb_timeout_jiffies(gt));
+ list_splice_tail_init(&coalesce->fence_list,
+ &gt->tlb_invalidation.pending_fences);
+ list_del_init(&coalesce->link);
+ spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
+
+ xe_gt_tlb_invalidation_seqno_bump(gt);
+
+ if (coalesce->type == XE_GT_TLB_INVALIDATION_COALESCE_GGTT)
+ send_tlb_invalidation_ggtt(gt, seqno);
+ else
+ send_tlb_invalidation_ppgtt(gt, coalesce->start, coalesce->end,
+ coalesce->asid, seqno);
+
+ coalesce->start = ULONG_MAX;
+ coalesce->end = 0;
+
+ xe_gt_assert(gt, list_empty(&coalesce->fence_list));
+ xe_gt_assert(gt, list_empty(&coalesce->link));
}
-static int __xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence)
+/* Any more pending TLB invalidations start to hold in KMD */
+#define SEQNO_WATERMARK 16
+
+static int
+xe_gt_tlb_invalidation_coalesce_prep(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce,
+ u64 start, u64 end, u32 asid)
{
- int ret;
+ int ret = -EBUSY;
+
+ lockdep_assert_held(&gt->tlb_invalidation.seqno_lock);
+
+ spin_lock_irq(&gt->tlb_invalidation.pending_lock);
+
+ if (list_empty(&gt->tlb_invalidation.pending_coalesce) &&
+ tlb_invalidation_seqno_diff(gt) < SEQNO_WATERMARK) {
+ ret = 0;
+ goto unlock;
+ }
+
+ if (list_empty(&coalesce->link))
+ list_add_tail(&coalesce->link,
+ &gt->tlb_invalidation.pending_coalesce);
+
+ if (coalesce->start > start)
+ coalesce->start = start;
+ if (coalesce->end < end)
+ coalesce->end = end;
+ coalesce->asid = asid;
+
+ list_add_tail(&fence->link, &coalesce->fence_list);
+
+unlock:
+ spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
+
+ return ret;
+}
+
+static int
+__xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce)
+{
+ int ret = 0;
mutex_lock(&gt->tlb_invalidation.seqno_lock);
+ if (xe_gt_tlb_invalidation_coalesce_prep(gt, fence, coalesce,
+ 0, 0, 0))
+ goto unlock;
+
xe_gt_tlb_invalidation_fence_prep(gt, fence);
ret = send_tlb_invalidation_ggtt(gt, fence->seqno);
if (ret < 0)
invalidation_fence_signal(gt_to_xe(gt), fence);
+unlock:
mutex_unlock(&gt->tlb_invalidation.seqno_lock);
return ret;
@@ -273,13 +457,15 @@ static int __xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt,
/**
* xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
* @gt: graphics tile
+ * @coalesce: coalesce structure
*
* Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
* synchronous.
*
* Return: 0 on success, negative error code on error
*/
-int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
+int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -289,7 +475,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
int ret;
xe_gt_tlb_invalidation_fence_init(gt, &fence);
- ret = __xe_gt_tlb_invalidation_ggtt(gt, &fence);
+ ret = __xe_gt_tlb_invalidation_ggtt(gt, &fence, coalesce);
if (ret < 0)
return ret;
@@ -321,6 +507,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
* @gt: graphics tile
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
+ * @coalesce: coalesce structure
* @start: start address
* @end: end address
* @asid: address space id
@@ -333,21 +520,27 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
*/
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce,
u64 start, u64 end, u32 asid)
{
struct xe_device *xe = gt_to_xe(gt);
- int ret;
+ int ret = 0;
xe_gt_assert(gt, fence);
mutex_lock(&gt->tlb_invalidation.seqno_lock);
+ if (xe_gt_tlb_invalidation_coalesce_prep(gt, fence, coalesce,
+ start, end, asid))
+ goto unlock;
+
xe_gt_tlb_invalidation_fence_prep(gt, fence);
ret = send_tlb_invalidation_ppgtt(gt, start, end, asid, fence->seqno);
if (ret < 0)
invalidation_fence_signal(xe, fence);
+unlock:
mutex_unlock(&gt->tlb_invalidation.seqno_lock);
return ret;
@@ -358,6 +551,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
* @gt: graphics tile
* @fence: invalidation fence which will be signal on TLB invalidation
* completion, can be NULL
+ * @coalesce: coalesce structure
* @vma: VMA to invalidate
*
* Issue a range based TLB invalidation if supported, if not fallback to a full
@@ -368,11 +562,13 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
*/
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce,
struct xe_vma *vma)
{
xe_gt_assert(gt, vma);
- return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
+ return xe_gt_tlb_invalidation_range(gt, fence, coalesce,
+ xe_vma_start(vma),
xe_vma_end(vma),
xe_vma_vm(vma)->usm.asid);
}
@@ -418,8 +614,14 @@ void xe_gt_tlb_invalidation_done_handler(struct xe_gt *gt,
invalidation_fence_signal(xe, fence);
}
+ if ((list_empty(&gt->tlb_invalidation.pending_fences) ||
+ list_is_singular(&gt->tlb_invalidation.pending_fences)) &&
+ !list_empty(&gt->tlb_invalidation.pending_coalesce))
+ queue_work(gt->tlb_invalidation.wq,
+ &gt->tlb_invalidation.coalesce_work);
+
if (!list_empty(&gt->tlb_invalidation.pending_fences))
- mod_delayed_work(system_wq,
+ mod_delayed_work(gt->tlb_invalidation.wq,
&gt->tlb_invalidation.fence_tdr,
tlb_timeout_jiffies(gt));
else
@@ -463,3 +665,29 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
INIT_LIST_HEAD(&fence->link);
dma_fence_get(&fence->base);
}
+
+/**
+ * xe_gt_tlb_invalidation_coalesce_init - Initialize TLB invalidation coalescing
+ * @coalesce: coalescing structure to init
+ * @type: type of coalescing
+ */
+void xe_gt_tlb_invalidation_coalesce_init(struct xe_gt_tlb_invalidation_coalesce *coalesce,
+ enum xe_gt_tlb_invalidation_coalesce_type type)
+{
+ INIT_LIST_HEAD(&coalesce->link);
+ INIT_LIST_HEAD(&coalesce->fence_list);
+ coalesce->start = ULONG_MAX;
+ coalesce->end = 0;
+ coalesce->type = type;
+}
+
+void xe_gt_tlb_invalidation_coalesce_fini(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce)
+{
+ mutex_lock(&gt->tlb_invalidation.seqno_lock);
+ spin_lock_irq(&gt->tlb_invalidation.pending_lock);
+ if (!list_empty(&coalesce->link))
+ xe_gt_tlb_invalidation_coalesce_reset(gt, coalesce);
+ spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
+ mutex_unlock(&gt->tlb_invalidation.seqno_lock);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index df22d9b4d85c..2401aa432714 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -15,12 +15,15 @@ struct xe_vma;
int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce,
struct xe_vma *vma);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce,
u64 start, u64 end, u32 asid);
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
@@ -30,6 +33,11 @@ void xe_gt_tlb_invalidation_done_handler(struct xe_gt *gt,
enum xe_gt_tlb_invalidation_clients client,
int seqno);
+void xe_gt_tlb_invalidation_coalesce_init(struct xe_gt_tlb_invalidation_coalesce *coalesce,
+ enum xe_gt_tlb_invalidation_coalesce_type type);
+void xe_gt_tlb_invalidation_coalesce_fini(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce);
+
static inline void
xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
{
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
index 1208edf7a5a4..46db9f70eaac 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
@@ -38,6 +38,29 @@ struct xe_gt_tlb_invalidation_ops {
u32 asid, int seqno);
};
+enum xe_gt_tlb_invalidation_coalesce_type {
+ XE_GT_TLB_INVALIDATION_COALESCE_GGTT = 0,
+ XE_GT_TLB_INVALIDATION_COALESCE_PPGTT,
+};
+
+/**
+ * struct xe_gt_tlb_invalidation_coalesce - Xe GT TLB invalidation coalesce
+ */
+struct xe_gt_tlb_invalidation_coalesce {
+ /** @link: link into pending coalesce */
+ struct list_head link;
+ /** @fence_list: list of fences to coalesce */
+ struct list_head fence_list;
+ /** @start: start address to coalesce (PPGTT only) */
+ u64 start;
+ /** @end: end address to coalesce (PPGTT only) */
+ u64 end;
+ /** @asid: address space ID (PPGTT only) */
+ u32 asid;
+ /** @type: type of coalesce */
+ enum xe_gt_tlb_invalidation_coalesce_type type;
+};
+
enum xe_gt_tlb_invalidation_clients {
XE_GT_TLB_INVALIDATION_CLIENT_GUC = 0,
XE_GT_TLB_INVALIDATION_CLIENT_MAX,
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 9a2f1e8b74e1..9a411aa6c388 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -193,8 +193,25 @@ struct xe_gt {
*/
struct list_head pending_fences;
/**
- * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
- * and updating @tlb_invalidation.seqno_recv.
+ * @tlb_invalidation.pending_coalesce: list of pending coalesce
+ * TLB invalidations, protected by @tlb_invalidation.seqno_lock
+ * (send) and @tlb_invalidation.pending_lock (send, recv)
+ */
+ struct list_head pending_coalesce;
+ /**
+ * @tlb_invalidation.wq: ordered work queue for TLB invalidations
+ */
+ struct workqueue_struct *wq;
+ /**
+ * @tlb_invalidation.coalesce_work: worker to issue coalesce TLB
+ * invalidations.
+ */
+ struct work_struct coalesce_work;
+ /**
+ * @tlb_invalidation.pending_lock: protects
+ * @tlb_invalidation.pending_fences, updating
+ * @tlb_invalidation.seqno_recv, and
+ * @tlb_invalidation.pending_coalesce
*/
spinlock_t pending_lock;
/**
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 1caa99b22c73..5054c9d8d149 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1308,6 +1308,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
+ struct xe_gt_tlb_invalidation_coalesce *coalesce;
struct xe_gt *gt;
struct dma_fence *fence;
struct dma_fence_cb cb;
@@ -1342,13 +1343,15 @@ static void invalidation_fence_work_func(struct work_struct *w)
struct xe_device *xe = gt_to_xe(ifence->gt);
trace_xe_gt_tlb_invalidation_fence_work_func(xe, &ifence->base);
- xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
+ xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base,
+ ifence->coalesce, ifence->start,
ifence->end, ifence->asid);
}
static void invalidation_fence_init(struct xe_gt *gt,
struct invalidation_fence *ifence,
struct dma_fence *fence,
+ struct xe_gt_tlb_invalidation_coalesce *coalesce,
u64 start, u64 end, u32 asid)
{
int ret;
@@ -1358,6 +1361,7 @@ static void invalidation_fence_init(struct xe_gt *gt,
xe_gt_tlb_invalidation_fence_init(gt, &ifence->base);
ifence->fence = fence;
+ ifence->coalesce = coalesce;
ifence->gt = gt;
ifence->start = start;
ifence->end = end;
@@ -2026,6 +2030,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
/* tlb invalidation must be done before signaling rebind */
if (ifence) {
invalidation_fence_init(tile->primary_gt, ifence, fence,
+ &vm->coalesce[tile->id],
pt_update_ops->start,
pt_update_ops->last, vm->usm.asid);
fence = &ifence->base.base;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 478932fb7718..c5b5c629b154 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1407,8 +1407,11 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
INIT_LIST_HEAD(&vm->preempt.exec_queues);
vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
- for_each_tile(tile, xe, id)
+ for_each_tile(tile, xe, id) {
xe_range_fence_tree_init(&vm->rftree[id]);
+ xe_gt_tlb_invalidation_coalesce_init(&vm->coalesce[id],
+ XE_GT_TLB_INVALIDATION_COALESCE_PPGTT);
+ }
vm->pt_ops = &xelp_pt_ops;
@@ -1525,8 +1528,11 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
err_no_resv:
mutex_destroy(&vm->snap_mutex);
- for_each_tile(tile, xe, id)
+ for_each_tile(tile, xe, id) {
xe_range_fence_tree_fini(&vm->rftree[id]);
+ xe_gt_tlb_invalidation_coalesce_fini(tile->primary_gt,
+ &vm->coalesce[id]);
+ }
kfree(vm);
if (flags & XE_VM_FLAG_LR_MODE)
xe_pm_runtime_put(xe);
@@ -1641,8 +1647,11 @@ void xe_vm_close_and_put(struct xe_vm *vm)
}
mutex_unlock(&xe->usm.lock);
- for_each_tile(tile, xe, id)
+ for_each_tile(tile, xe, id) {
xe_range_fence_tree_fini(&vm->rftree[id]);
+ xe_gt_tlb_invalidation_coalesce_fini(tile->primary_gt,
+ &vm->coalesce[id]);
+ }
xe_vm_put(vm);
}
@@ -3213,7 +3222,9 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
* GTs within the tile
*/
ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
- &fence[id], vma);
+ &fence[id],
+ &xe_vma_vm(vma)->coalesce[id],
+ vma);
if (ret < 0)
goto wait;
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 7f9a303e51d8..e70839af93d0 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -14,6 +14,7 @@
#include <linux/scatterlist.h>
#include "xe_device_types.h"
+#include "xe_gt_tlb_invalidation_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"
@@ -152,6 +153,8 @@ struct xe_vm {
struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];
+ struct xe_gt_tlb_invalidation_coalesce coalesce[XE_MAX_TILES_PER_DEVICE];
+
/**
* @flags: flags for this VM, statically setup a creation time aside
* from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
--
2.34.1
next prev parent reply other threads:[~2024-07-06 0:02 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-07-06 0:02 [PATCH 00/11] Proper GT TLB invalidation layering and new coalescing feature Matthew Brost
2024-07-06 0:02 ` [PATCH 01/11] drm/xe: Add xe_gt_tlb_invalidation_fence_init helper Matthew Brost
2024-07-06 0:02 ` [PATCH 02/11] drm/xe: Drop xe_gt_tlb_invalidation_wait Matthew Brost
2024-07-06 0:02 ` [PATCH 03/11] drm/xe: s/tlb_invalidation.lock/tlb_invalidation.fence_lock Matthew Brost
2024-07-06 0:02 ` [PATCH 04/11] drm/xe: Add tlb_invalidation.seqno_lock Matthew Brost
2024-07-06 0:02 ` [PATCH 05/11] drm/xe: Add xe_gt_tlb_invalidation_done_handler Matthew Brost
2024-07-06 0:02 ` [PATCH 06/11] drm/xe: Add send tlb invalidation helpers Matthew Brost
2024-07-06 0:02 ` [PATCH 07/11] drm/xe: Add xe_guc_tlb_invalidation layer Matthew Brost
2024-07-06 0:02 ` [PATCH 08/11] drm/xe: Add multi-client support for GT TLB invalidations Matthew Brost
2024-07-06 0:02 ` Matthew Brost [this message]
2024-07-06 0:02 ` [PATCH 10/11] drm/xe: Add GT TLB coalesce tracepoints Matthew Brost
2024-07-06 0:02 ` [PATCH 11/11] drm/xe: Add GT TLB invalidation watermark debugfs Matthew Brost
2024-07-06 0:07 ` ✓ CI.Patch_applied: success for Proper GT TLB invalidation layering and new coalescing feature Patchwork
2024-07-06 0:07 ` ✗ CI.checkpatch: warning " Patchwork
2024-07-06 0:08 ` ✓ CI.KUnit: success " Patchwork
2024-07-06 0:20 ` ✓ CI.Build: " Patchwork
2024-07-06 0:22 ` ✗ CI.Hooks: failure " Patchwork
2024-07-06 0:24 ` ✓ CI.checksparse: success " Patchwork
2024-07-06 0:53 ` ✗ CI.BAT: failure " Patchwork
2024-07-06 1:41 ` ✗ CI.FULL: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240706000252.702044-10-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=farah.kassabri@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=michal.wajdeczko@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox