Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: francois.dugast@intel.com, thomas.hellstrom@linux.intel.com,
	michal.mrozek@intel.com
Subject: [PATCH 6/6] drm/xe: Add more GT stats around pagefault mode switch flows
Date: Thu, 11 Dec 2025 13:00:32 -0800	[thread overview]
Message-ID: <20251211210032.1520113-7-matthew.brost@intel.com> (raw)
In-Reply-To: <20251211210032.1520113-1-matthew.brost@intel.com>

Add GT stats to measure the time spent switching between pagefault mode
and dma-fence mode. Also add a GT stat to indicate when pagefault
suspend is skipped because the system is idle. These metrics will help
profile pagefault workloads while 3D and display are enabled.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_gt_stats.c        |  6 +++++
 drivers/gpu/drm/xe/xe_gt_stats_types.h  |  3 +++
 drivers/gpu/drm/xe/xe_hw_engine_group.c | 32 +++++++++++++++++++++++++
 3 files changed, 41 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 714045ad9354..fb2904bd0abd 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -68,8 +68,14 @@ static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
 	DEF_STAT_STR(SVM_2M_BIND_US, "svm_2M_bind_us"),
 	DEF_STAT_STR(HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT,
 		     "hw_engine_group_suspend_lr_queue_count"),
+	DEF_STAT_STR(HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT,
+		     "hw_engine_group_skip_lr_queue_count"),
 	DEF_STAT_STR(HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT,
 		     "hw_engine_group_wait_dma_queue_count"),
+	DEF_STAT_STR(HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+		     "hw_engine_group_suspend_lr_queue_us"),
+	DEF_STAT_STR(HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
+		     "hw_engine_group_wait_dma_queue_us"),
 };
 
 /**
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index aada5df421e5..b92d013091d5 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -45,7 +45,10 @@ enum xe_gt_stats_id {
 	XE_GT_STATS_ID_SVM_64K_BIND_US,
 	XE_GT_STATS_ID_SVM_2M_BIND_US,
 	XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT,
+	XE_GT_STATS_ID_HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT,
 	XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT,
+	XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+	XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
 	/* must be the last entry */
 	__XE_GT_STATS_NUM_IDS,
 };
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 35966889c776..8236fdee0901 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -14,6 +14,17 @@
 #include "xe_sync.h"
 #include "xe_vm.h"
 
+static s64 xe_hw_engine_group_stats_ktime_us_delta(ktime_t start)
+{
+	return IS_ENABLED(CONFIG_DEBUG_FS) ?
+		ktime_us_delta(ktime_get(), start) : 0;
+}
+
+static ktime_t xe_hw_engine_group_stats_ktime_get(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
+}
+
 static void
 hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
 {
@@ -200,7 +211,9 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
 {
 	int err, i;
 	struct xe_exec_queue *q;
+	struct xe_gt *gt = NULL;
 	bool need_resume = false;
+	ktime_t start = xe_hw_engine_group_stats_ktime_get();
 
 	lockdep_assert_held_write(&group->mode_sem);
 
@@ -213,6 +226,9 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
 		xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
 
 		idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);
+		if (idle_skip_suspend)
+			xe_gt_stats_incr(q->gt,
+					 XE_GT_STATS_ID_HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT, 1);
 
 		if (!need_resume && !idle_skip_suspend && num_syncs) {
 			for (i = 0; i < num_syncs; ++i)
@@ -221,6 +237,7 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
 
 		need_resume |= !idle_skip_suspend;
 		q->ops->suspend(q);
+		gt = q->gt;
 	}
 
 	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
@@ -232,6 +249,12 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
 			return err;
 	}
 
+	if (gt) {
+		xe_gt_stats_incr(gt,
+				 XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+				 xe_hw_engine_group_stats_ktime_us_delta(start));
+	}
+
 	if (need_resume)
 		xe_hw_engine_group_resume_faulting_lr_jobs(group);
 
@@ -252,7 +275,9 @@ static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group
 {
 	long timeout;
 	struct xe_exec_queue *q;
+	struct xe_gt *gt = NULL;
 	struct dma_fence *fence;
+	ktime_t start = xe_hw_engine_group_stats_ktime_get();
 
 	lockdep_assert_held_write(&group->mode_sem);
 
@@ -264,11 +289,18 @@ static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group
 		fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
 		timeout = dma_fence_wait(fence, false);
 		dma_fence_put(fence);
+		gt = q->gt;
 
 		if (timeout < 0)
 			return -ETIME;
 	}
 
+	if (gt) {
+		xe_gt_stats_incr(gt,
+				 XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
+				 xe_hw_engine_group_stats_ktime_us_delta(start));
+	}
+
 	return 0;
 }
 
-- 
2.34.1


  parent reply	other threads:[~2025-12-11 21:00 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-12-11 21:00 [PATCH 0/6] Fix performance when pagefaults and 3d/display share resources Matthew Brost
2025-12-11 21:00 ` [PATCH 1/6] drm/xe: Adjust long-running workload timeslices to reasonable values Matthew Brost
2025-12-11 21:00 ` [PATCH 2/6] drm/xe: Use usleep_range for accurate long-running workload timeslicing Matthew Brost
2025-12-11 21:00 ` [PATCH 3/6] drm/xe: Add debugfs knobs to control long running " Matthew Brost
2025-12-11 21:00 ` [PATCH 4/6] drm/xe: Skip exec queue schedule toggle if queue is idle during suspend Matthew Brost
2025-12-11 21:00 ` [PATCH 5/6] drm/xe: Wait on in-syncs when switching to dma-fence mode Matthew Brost
2025-12-12  9:22   ` Thomas Hellström
2025-12-12 16:33     ` Matthew Brost
2025-12-12 16:38       ` Matthew Brost
2025-12-12 18:41       ` Thomas Hellström
2025-12-12 20:20         ` Matthew Brost
2025-12-11 21:00 ` Matthew Brost [this message]
2025-12-12 16:07   ` [PATCH 6/6] drm/xe: Add more GT stats around pagefault mode switch flows Francois Dugast
2025-12-12 16:18     ` Matthew Brost
2025-12-11 21:29 ` ✓ CI.KUnit: success for Fix performance when pagefaults and 3d/display share resources Patchwork
2025-12-11 22:34 ` ✗ Xe.CI.BAT: failure " Patchwork
2025-12-12 13:46 ` ✗ Xe.CI.Full: " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251211210032.1520113-7-matthew.brost@intel.com \
    --to=matthew.brost@intel.com \
    --cc=francois.dugast@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=michal.mrozek@intel.com \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox