From: Tvrtko Ursulin <tursulin@ursulin.net>
To: Intel-gfx@lists.freedesktop.org
Subject: [PATCH v5] drm/i915: Handle RC6 counter wrap
Date: Thu,  8 Feb 2018 16:00:36 +0000
Message-ID: <20180208160036.29919-1-tvrtko.ursulin@linux.intel.com>
In-Reply-To: <151810509993.28809.2232183378316076347@mail.alporthouse.com>

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

We can implement limited RC6 counter wrap-around protection under the
assumption that clients will be reading this value more frequently than
the wrap period on a given platform.

With the typical wrap-around period being ~90 minutes, Baytrail being the
exception with a wrap every 13 seconds, this sounds like a reasonable
assumption.

The implementation works by storing a 64-bit software copy of the hardware
RC6 counter, along with the previous HW counter snapshot. This enables us
to detect counter wrap, provided the counter is polled frequently enough,
and to keep the software copy monotonically incrementing.

v2:
 * Missed GEN6_GT_GFX_RC6_LOCKED when considering slot sizing and
   indexing.
 * Fixed off-by-one in wrap-around handling. (Chris Wilson)

v3:
 * Simplify index checking by using unsigned int. (Chris Wilson)
 * Expand the comment to explain why indexing works.

v4:
 * Use __int128 if supported.

v5:
 * Use mul_u64_u32_div. (Chris Wilson)
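   (Context for v4/v5, as a sketch only: once the driver accumulates a
   64-bit extended count, a plain 64-bit 'time_hw * mul' can overflow, so
   the scaling needs a 128-bit intermediate. Roughly what mul_u64_u32_div()
   computes, shown here with __int128 for compilers that support it; the
   kernel helper itself has portable fallbacks:

	#include <stdint.h>

	/* Sketch: (a * mul) / div with a 128-bit intermediate product. */
	static inline uint64_t scale_u64(uint64_t a, uint32_t mul, uint32_t div)
	{
		return (uint64_t)(((unsigned __int128)a * mul) / div);
	}
   )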

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=94852
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> # v3
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |  2 ++
 drivers/gpu/drm/i915/intel_pm.c | 64 ++++++++++++++++++++++++++++++++++-------
 2 files changed, 56 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ad1fc845cd1b..28a2671a26c7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -946,6 +946,8 @@ struct intel_rps {
 
 struct intel_rc6 {
 	bool enabled;
+	u64 prev_hw_residency[4];
+	u64 cur_residency[4];
 };
 
 struct intel_llc_pstate {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 41f26ab46501..b10f67338c0e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9417,15 +9417,16 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
 			     const i915_reg_t reg)
 {
 	u32 lower, upper, tmp;
-	unsigned long flags;
 	int loop = 2;
 
-	/* The register accessed do not need forcewake. We borrow
+	/*
+	 * The registers accessed do not need forcewake. We borrow
 	 * uncore lock to prevent concurrent access to range reg.
 	 */
-	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+	lockdep_assert_held(&dev_priv->uncore.lock);
 
-	/* vlv and chv residency counters are 40 bits in width.
+	/*
+	 * vlv and chv residency counters are 40 bits in width.
 	 * With a control bit, we can choose between upper or lower
 	 * 32bit window into this counter.
 	 *
@@ -9449,29 +9450,49 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
 		upper = I915_READ_FW(reg);
 	} while (upper != tmp && --loop);
 
-	/* Everywhere else we always use VLV_COUNTER_CONTROL with the
+	/*
+	 * Everywhere else we always use VLV_COUNTER_CONTROL with the
 	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
 	 * now.
 	 */
 
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
 	return lower | (u64)upper << 8;
 }
 
 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 			   const i915_reg_t reg)
 {
-	u64 time_hw;
+	u64 time_hw, prev_hw, overflow_hw;
+	unsigned int fw_domains;
+	unsigned long flags;
+	unsigned int i;
 	u32 mul, div;
 
 	if (!HAS_RC6(dev_priv))
 		return 0;
 
+	/*
+	 * Store previous hw counter values for counter wrap-around handling.
+	 *
+	 * There are only four interesting registers and they live next to each
+	 * other, so we can use the relative address, compared to the smallest
+	 * one, as the index into driver storage.
+	 */
+	i = (i915_mmio_reg_offset(reg) -
+	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+	if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
+		return 0;
+
+	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
 	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		mul = 1000000;
 		div = dev_priv->czclk_freq;
+		overflow_hw = BIT_ULL(40);
 		time_hw = vlv_residency_raw(dev_priv, reg);
 	} else {
 		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
@@ -9483,10 +9504,33 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 			div = 1;
 		}
 
-		time_hw = I915_READ(reg);
+		overflow_hw = BIT_ULL(32);
+		time_hw = I915_READ_FW(reg);
 	}
 
-	return DIV_ROUND_UP_ULL(time_hw * mul, div);
+	/*
+	 * Counter wrap handling.
+	 *
+	 * This relies on a sufficient frequency of queries, otherwise the
+	 * counters can still wrap unnoticed.
+	 */
+	prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
+	dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
+
+	/* RC6 delta from last sample. */
+	if (time_hw >= prev_hw)
+		time_hw -= prev_hw;
+	else
+		time_hw += overflow_hw - prev_hw;
+
+	/* Add delta to RC6 extended raw driver copy. */
+	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
+	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
+
+	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+	return mul_u64_u32_div(time_hw, mul, div);
 }
 
 u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
-- 
2.14.1
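
As an aside, the index computation in intel_rc6_residency_ns() relies on
the four residency registers being contiguous in the MMIO space. Below is a
standalone sketch of the arithmetic; the offsets are my reading of
i915_reg.h and should be double-checked against the tree:

#include <stdint.h>
#include <stdio.h>

/* Assumed offsets, per i915_reg.h; verify before relying on them. */
#define GEN6_GT_GFX_RC6_LOCKED	0x138104	/* slot 0 */
#define GEN6_GT_GFX_RC6		0x138108	/* slot 1 */
#define GEN6_GT_GFX_RC6p	0x13810c	/* slot 2 */
#define GEN6_GT_GFX_RC6pp	0x138110	/* slot 3 */

int main(void)
{
	const uint32_t regs[] = {
		GEN6_GT_GFX_RC6_LOCKED, GEN6_GT_GFX_RC6,
		GEN6_GT_GFX_RC6p, GEN6_GT_GFX_RC6pp,
	};

	/* Relative offset over register width gives array indices 0..3. */
	for (unsigned int i = 0; i < 4; i++)
		printf("reg %#x -> index %u\n", (unsigned int)regs[i],
		       (unsigned int)((regs[i] - GEN6_GT_GFX_RC6_LOCKED) /
				      sizeof(uint32_t)));

	return 0;
}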
