Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/xe: Add per-engine pagefault and reset counts
@ 2025-02-10 19:36 Jonathan Cavitt
  2025-02-10 19:51 ` ✓ CI.Patch_applied: success for " Patchwork
                   ` (14 more replies)
  0 siblings, 15 replies; 20+ messages in thread
From: Jonathan Cavitt @ 2025-02-10 19:36 UTC (permalink / raw)
  To: intel-xe
  Cc: saurabhg.gupta, alex.zuo, jonathan.cavitt,
	niranjana.vishwanathapura, ayaz.siddiqui, tomasz.mistat

Add counters to each engine that track the number of pagefaults and
engine resets that have been triggered on it.  Report these values
during an engine reset.

Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
CC: Tomasz Mistat <tomasz.mistat@intel.com>
CC: Ayaz A Siddiqui <ayaz.siddiqui@intel.com>
CC: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 drivers/gpu/drm/xe/xe_gt_pagefault.c    | 6 ++++++
 drivers/gpu/drm/xe/xe_guc_submit.c      | 9 +++++++--
 drivers/gpu/drm/xe/xe_hw_engine.c       | 3 +++
 drivers/gpu/drm/xe/xe_hw_engine_types.h | 4 ++++
 4 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 46701ca11ce0..04e973b20019 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -130,6 +130,7 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct pagefault *pf,
 {
 	struct xe_vm *vm = xe_vma_vm(vma);
 	struct xe_tile *tile = gt_to_tile(gt);
+	struct xe_hw_engine *hwe = NULL;
 	struct drm_exec exec;
 	struct dma_fence *fence;
 	ktime_t end = 0;
@@ -140,6 +141,11 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct pagefault *pf,
 	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_BYTES, xe_vma_size(vma));
 
 	trace_xe_vma_pagefault(vma);
+
+	hwe = xe_gt_hw_engine(gt, pf->engine_class, pf->engine_instance, false);
+	if (hwe)
+		atomic_inc(&hwe->pagefault_count);
+
 	atomic = access_is_atomic(pf->access_type);
 
 	/* Check if VMA is valid */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 913c74d6e2ae..6f5d74340319 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1972,6 +1972,7 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
 	struct xe_exec_queue *q;
+	struct xe_hw_engine *hwe;
 	u32 guc_id;
 
 	if (unlikely(len < 1))
@@ -1983,8 +1984,12 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
 	if (unlikely(!q))
 		return -EPROTO;
 
-	xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
-		   xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
+	hwe = q->hwe;
+	atomic_inc(&hwe->reset_count);
+
+	xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d, pagefault_count=%u, reset_count=%u",
+		   xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id,
+		   atomic_read(&hwe->pagefault_count), atomic_read(&hwe->reset_count));
 
 	trace_xe_exec_queue_reset(q);
 
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index fc447751fe78..0be6c38fe2a4 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -516,6 +516,9 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
 	hwe->fence_irq = &gt->fence_irq[info->class];
 	hwe->engine_id = id;
 
+	atomic_set(&hwe->pagefault_count, 0);
+	atomic_set(&hwe->reset_count, 0);
+
 	hwe->eclass = &gt->eclass[hwe->class];
 	if (!hwe->eclass->sched_props.job_timeout_ms) {
 		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index e4191a7a2c31..635dc3da6523 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -150,6 +150,10 @@ struct xe_hw_engine {
 	struct xe_oa_unit *oa_unit;
 	/** @hw_engine_group: the group of hw engines this one belongs to */
 	struct xe_hw_engine_group *hw_engine_group;
+	/** @pagefault_count: number of pagefaults associated with this engine */
+	atomic_t pagefault_count;
+	/** @reset_count: number of engine resets associated with this engine */
+	atomic_t reset_count;
 };
 
 enum xe_hw_engine_snapshot_source_id {
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2025-02-12 23:44 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-02-10 19:36 [PATCH] drm/xe: Add per-engine pagefault and reset counts Jonathan Cavitt
2025-02-10 19:51 ` ✓ CI.Patch_applied: success for " Patchwork
2025-02-10 19:51 ` ✓ CI.checkpatch: " Patchwork
2025-02-10 19:52 ` ✓ CI.KUnit: " Patchwork
2025-02-10 20:09 ` ✓ CI.Build: " Patchwork
2025-02-10 20:11 ` ✓ CI.Hooks: " Patchwork
2025-02-10 20:12 ` ✓ CI.checksparse: " Patchwork
2025-02-11  6:38 ` ✓ CI.Patch_applied: success for drm/xe: Add per-engine pagefault and reset counts (rev2) Patchwork
2025-02-11  6:38 ` ✓ CI.checkpatch: " Patchwork
2025-02-11  6:40 ` ✓ CI.KUnit: " Patchwork
2025-02-11  6:56 ` ✓ CI.Build: " Patchwork
2025-02-11  6:58 ` ✗ CI.Hooks: failure " Patchwork
2025-02-11  6:59 ` ✗ CI.checksparse: warning " Patchwork
2025-02-11  7:19 ` ✓ Xe.CI.BAT: success " Patchwork
2025-02-11 15:16 ` ✗ Xe.CI.Full: failure " Patchwork
2025-02-11 20:44 ` [PATCH] drm/xe: Add per-engine pagefault and reset counts Rodrigo Vivi
2025-02-12  5:37   ` Lucas De Marchi
2025-02-12  5:56     ` Matthew Brost
2025-02-12 20:48       ` Lucas De Marchi
2025-02-12 23:45         ` Matthew Brost

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox