Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
To: intel-xe@lists.freedesktop.org, niranjana.vishwanathapura@intel.com
Cc: matthew.brost@intel.com, stuart.summers@intel.com
Subject: [PATCH v2 6/9] drm/xe/multi_queue: Capture queue run times for active queues
Date: Fri,  1 May 2026 17:53:39 -0700	[thread overview]
Message-ID: <20260502005332.3135977-17-umesh.nerlige.ramappa@intel.com> (raw)
In-Reply-To: <20260502005332.3135977-11-umesh.nerlige.ramappa@intel.com>

If a queue is currently active on the CS, query the QUEUE TIMESTAMP
register to get an up-to-date value of the runtime. To do so, ensure
that the primary queue is active and then check whether the secondary
queue is executing on the CS.

Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
---
v2:
- Move trace to a separate patch (Stuart)
- Refactor multi queue timestamp logic (Matt/Niranjana)
---
 drivers/gpu/drm/xe/regs/xe_engine_regs.h |   4 +
 drivers/gpu/drm/xe/xe_lrc.c              | 115 +++++++++++++++++++----
 2 files changed, 99 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 1b4a7e9a703d..af6af6f3f5e8 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -170,6 +170,10 @@
 #define   GFX_MSIX_INTERRUPT_ENABLE		REG_BIT(13)
 
 #define RING_CSMQDEBUG(base)			XE_REG((base) + 0x2b0)
+#define   CURRENT_ACTIVE_QUEUE_ID_MASK		REG_GENMASK(7, 0)
+
+#define RING_QUEUE_TIMESTAMP(base)		XE_REG((base) + 0x4c0)
+#define RING_QUEUE_TIMESTAMP_UDW(base)		XE_REG((base) + 0x4c0 + 4)
 
 #define RING_TIMESTAMP(base)			XE_REG((base) + 0x358)
 
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 92419e5058fd..023202be5d52 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -21,6 +21,7 @@
 #include "xe_configfs.h"
 #include "xe_device.h"
 #include "xe_drm_client.h"
+#include "xe_exec_queue.h"
 #include "xe_exec_queue_types.h"
 #include "xe_gt.h"
 #include "xe_gt_clock.h"
@@ -2655,17 +2656,65 @@ static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts)
 	return 0;
 }
 
-/**
- * xe_lrc_timestamp() - Current ctx timestamp
- * @lrc: Pointer to the lrc.
- *
- * Return latest ctx timestamp. With support for active contexts, the
- * calculation may be slightly racy, so follow a read-again logic to ensure that
- * the context is still active before returning the right timestamp.
- *
- * Returns: New ctx timestamp value
- */
-u64 xe_lrc_timestamp(struct xe_lrc *lrc)
+static u64 get_queue_timestamp(struct xe_hw_engine *hwe)
+{
+	return xe_mmio_read64_2x32(&hwe->gt->mmio,
+				   RING_QUEUE_TIMESTAMP(hwe->mmio_base));
+}
+
+static u32 get_queue_id(struct xe_hw_engine *hwe)
+{
+	u32 val = xe_mmio_read32(&hwe->gt->mmio,
+				 RING_CSMQDEBUG(hwe->mmio_base));
+
+	return REG_FIELD_GET(CURRENT_ACTIVE_QUEUE_ID_MASK, val);
+}
+
+static bool context_active(struct xe_lrc *lrc)
+{
+	return xe_lrc_ctx_timestamp(lrc) == CONTEXT_ACTIVE;
+}
+
+static u64 xe_lrc_multi_queue_timestamp(struct xe_lrc *lrc)
+{
+	struct xe_lrc *primary_lrc = lrc->multi_queue.primary_lrc;
+	struct xe_hw_engine *hwe;
+	u64 reg_queue_ts = lrc->queue_timestamp;
+
+	if (IS_SRIOV_VF(lrc_to_xe(lrc)))
+		return xe_lrc_queue_timestamp(lrc);
+
+	if (!primary_lrc || !context_active(primary_lrc))
+		return xe_lrc_queue_timestamp(lrc);
+
+	/* WA BB populates engine id in PPHWSP of primary context only */
+	hwe = engine_id_to_hwe(primary_lrc->gt, xe_lrc_engine_id(primary_lrc));
+	if (!hwe)
+		return xe_lrc_queue_timestamp(lrc);
+
+	if (get_queue_id(hwe) != lrc->multi_queue.pos)
+		return xe_lrc_queue_timestamp(lrc);
+
+	/* queue is active, so store the queue timestamp register */
+	reg_queue_ts = get_queue_timestamp(hwe);
+
+	/* double check queue and primary queue are both still active */
+	if (get_queue_id(hwe) != lrc->multi_queue.pos ||
+	    !context_active(primary_lrc))
+		return xe_lrc_queue_timestamp(lrc);
+
+	return reg_queue_ts;
+}
+
+static u64 xe_lrc_update_multi_queue_timestamp(struct xe_lrc *lrc, u64 *old_ts)
+{
+	*old_ts = lrc->queue_timestamp;
+	lrc->queue_timestamp = xe_lrc_multi_queue_timestamp(lrc);
+
+	return lrc->queue_timestamp;
+}
+
+static u64 xe_lrc_single_queue_timestamp(struct xe_lrc *lrc)
 {
 	u64 lrc_ts, reg_ts, new_ts = lrc->ctx_timestamp;
 	u32 engine_id;
@@ -2697,24 +2746,50 @@ u64 xe_lrc_timestamp(struct xe_lrc *lrc)
 	return new_ts;
 }
 
+static u64 xe_lrc_update_ctx_timestamp(struct xe_lrc *lrc, u64 *old_ts)
+{
+	*old_ts = lrc->ctx_timestamp;
+	lrc->ctx_timestamp = xe_lrc_single_queue_timestamp(lrc);
+
+	trace_xe_lrc_update_timestamp(lrc, *old_ts);
+
+	return lrc->ctx_timestamp;
+}
+
 /**
- * xe_lrc_update_timestamp() - Update ctx timestamp
+ * xe_lrc_timestamp() - Current lrc timestamp
+ * @lrc: Pointer to the lrc.
+ *
+ * Return latest lrc timestamp. With support for active contexts/queues, the
+ * calculation may be slightly racy, so follow a read-again logic to ensure that
+ * the context/queue is still active before returning the right timestamp.
+ *
+ * Returns: New lrc timestamp value
+ */
+u64 xe_lrc_timestamp(struct xe_lrc *lrc)
+{
+	if (xe_lrc_is_multi_queue(lrc))
+		return xe_lrc_multi_queue_timestamp(lrc);
+	else
+		return xe_lrc_single_queue_timestamp(lrc);
+}
+
+/**
+ * xe_lrc_update_timestamp() - Update lrc timestamp
  * @lrc: Pointer to the lrc.
  * @old_ts: Old timestamp value
  *
- * Populate @old_ts current saved ctx timestamp, read new ctx timestamp and
+ * Populate @old_ts with current saved lrc timestamp, read new lrc timestamp and
  * update saved value.
  *
- * Returns: New ctx timestamp value
+ * Returns: New lrc timestamp value
  */
 u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts)
 {
-	*old_ts = lrc->ctx_timestamp;
-	lrc->ctx_timestamp = xe_lrc_timestamp(lrc);
-
-	trace_xe_lrc_update_timestamp(lrc, *old_ts);
-
-	return lrc->ctx_timestamp;
+	if (xe_lrc_is_multi_queue(lrc))
+		return xe_lrc_update_multi_queue_timestamp(lrc, old_ts);
+	else
+		return xe_lrc_update_ctx_timestamp(lrc, old_ts);
 }
 
 /**
-- 
2.43.0


  parent reply	other threads:[~2026-05-02  0:53 UTC|newest]

Thread overview: 27+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-02  0:53 [PATCH v2 0/9] Support run ticks for multi-queue use case Umesh Nerlige Ramappa
2026-05-02  0:53 ` [PATCH v2 1/9] drm/xe/lrc: Use 64 bit ctx timestamp in the LRC snapshot Umesh Nerlige Ramappa
2026-05-04 23:51   ` Niranjana Vishwanathapura
2026-05-02  0:53 ` [PATCH v2 2/9] drm/xe: Add timestamp_ms to " Umesh Nerlige Ramappa
2026-05-04 23:59   ` Niranjana Vishwanathapura
2026-05-05 18:03     ` Umesh Nerlige Ramappa
2026-05-02  0:53 ` [PATCH v2 3/9] drm/xe/multi_queue: Store primary LRC and position info in LRC Umesh Nerlige Ramappa
2026-05-05  3:46   ` Niranjana Vishwanathapura
2026-05-05 18:35     ` Umesh Nerlige Ramappa
2026-05-05 18:45       ` Niranjana Vishwanathapura
2026-05-05 18:51         ` Umesh Nerlige Ramappa
2026-05-02  0:53 ` [PATCH v2 4/9] drm/xe/multi_queue: Add helpers to access CS QUEUE TIMESTAMP from lrc Umesh Nerlige Ramappa
2026-05-05  4:00   ` Niranjana Vishwanathapura
2026-05-02  0:53 ` [PATCH v2 5/9] drm/xe/lrc: Refactor out engine id to hwe conversion Umesh Nerlige Ramappa
2026-05-05  4:16   ` Niranjana Vishwanathapura
2026-05-02  0:53 ` Umesh Nerlige Ramappa [this message]
2026-05-05  4:12   ` [PATCH v2 6/9] drm/xe/multi_queue: Capture queue run times for active queues Niranjana Vishwanathapura
2026-05-05 19:02     ` Umesh Nerlige Ramappa
2026-05-02  0:53 ` [PATCH v2 7/9] drm/xe/multi_queue: Add trace event for the multi queue timestamp Umesh Nerlige Ramappa
2026-05-05  4:19   ` Niranjana Vishwanathapura
2026-05-02  0:53 ` [PATCH v2 8/9] drm/xe/multi_queue: Use QUEUE_TIMESTAMP as job timestamp for multi-queue Umesh Nerlige Ramappa
2026-05-05  4:20   ` Niranjana Vishwanathapura
2026-05-02  0:53 ` [PATCH v2 9/9] drm/xe/multi_queue: Whitelist QUEUE_TIMESTAMP register Umesh Nerlige Ramappa
2026-05-05  4:25   ` Niranjana Vishwanathapura
2026-05-05 17:58     ` Umesh Nerlige Ramappa
2026-05-05 18:34       ` Niranjana Vishwanathapura
2026-05-05 19:06         ` Umesh Nerlige Ramappa

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260502005332.3135977-17-umesh.nerlige.ramappa@intel.com \
    --to=umesh.nerlige.ramappa@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.brost@intel.com \
    --cc=niranjana.vishwanathapura@intel.com \
    --cc=stuart.summers@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox