public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH] drm/i915: add intel_display_power_enabled_sw for use in atomic ctx
@ 2013-11-27 18:09 Imre Deak
  2013-11-27 18:30 ` Ville Syrjälä
  2013-11-27 20:02 ` [PATCH] drm/i915: add intel_display_power_enabled_sw() " Imre Deak
  0 siblings, 2 replies; 7+ messages in thread
From: Imre Deak @ 2013-11-27 18:09 UTC (permalink / raw)
  To: intel-gfx

At the moment we call intel_display_power_enabled() from
i915_capture_error_state() in IRQ context and then take a mutex. To fix
this, add a new intel_display_power_enabled_sw() which returns the domain
state based on software tracking, as opposed to reading the actual HW
state.

Since we use domain_use_count for this without locking on the reader
side, make sure we increase the counter only after enabling all required
power wells, and decrease it before disabling any of these power wells.

Regression introduced in
commit 1b02383464b4a915627ef3b8fd0ad7f07168c54c
Author: Imre Deak <imre.deak@intel.com>
Date:   Tue Sep 24 16:17:09 2013 +0300

    drm/i915: support for multiple power wells

Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Imre Deak <imre.deak@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h      |  2 --
 drivers/gpu/drm/i915/intel_display.c |  4 ++--
 drivers/gpu/drm/i915/intel_drv.h     |  2 ++
 drivers/gpu/drm/i915/intel_pm.c      | 38 ++++++++++++++++++++++++++++--------
 4 files changed, 34 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 47b8fd1..d17a62a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -966,9 +966,7 @@ struct i915_power_domains {
 	int power_well_count;
 
 	struct mutex lock;
-#if IS_ENABLED(CONFIG_DEBUG_FS)
 	int domain_use_count[POWER_DOMAIN_NUM];
-#endif
 	struct i915_power_well *power_wells;
 };
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fab7d35..5b5d831 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11374,7 +11374,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(i) {
-		if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+		if (!intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i)))
 			continue;
 
 		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
@@ -11410,7 +11410,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 	for (i = 0; i < error->num_transcoders; i++) {
 		enum transcoder cpu_transcoder = transcoders[i];
 
-		if (!intel_display_power_enabled(dev,
+		if (!intel_display_power_enabled_sw(dev,
 				POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
 			continue;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b2d2cc1..fb3cfc5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -830,6 +830,8 @@ int intel_power_domains_init(struct drm_device *dev);
 void intel_power_domains_remove(struct drm_device *dev);
 bool intel_display_power_enabled(struct drm_device *dev,
 				 enum intel_display_power_domain domain);
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+				    enum intel_display_power_domain domain);
 void intel_display_power_get(struct drm_device *dev,
 			     enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a1a54ab..b9a900d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5633,6 +5633,21 @@ static bool hsw_power_well_enabled(struct drm_device *dev,
 		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 }
 
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+				    enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+	bool enabled;
+
+	power_domains = &dev_priv->power_domains;
+
+	enabled = power_domains->domain_use_count[domain];
+	smp_rmb();
+
+	return enabled;
+}
+
 bool intel_display_power_enabled(struct drm_device *dev,
 				 enum intel_display_power_domain domain)
 {
@@ -5753,12 +5768,16 @@ void intel_display_power_get(struct drm_device *dev,
 
 	mutex_lock(&power_domains->lock);
 
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-	power_domains->domain_use_count[domain]++;
-#endif
 	for_each_power_well(i, power_well, BIT(domain), power_domains)
 		__intel_power_well_get(dev, power_well);
 
+	/*
+	 * Don't reorder the adjustment of domain_use_count with whatever
+	 * __intel_power_well_get does.
+	 */
+	smp_wmb();
+	power_domains->domain_use_count[domain]++;
+
 	mutex_unlock(&power_domains->lock);
 }
 
@@ -5774,13 +5793,16 @@ void intel_display_power_put(struct drm_device *dev,
 
 	mutex_lock(&power_domains->lock);
 
-	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
-		__intel_power_well_put(dev, power_well);
-
-#if IS_ENABLED(CONFIG_DEBUG_FS)
 	WARN_ON(!power_domains->domain_use_count[domain]);
 	power_domains->domain_use_count[domain]--;
-#endif
+	/*
+	 * Don't reorder the adjustment of domain_use_count with whatever
+	 * __intel_power_well_put does.
+	 */
+	smp_wmb();
+
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+		__intel_power_well_put(dev, power_well);
 
 	mutex_unlock(&power_domains->lock);
 }
-- 
1.8.4

^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2013-11-28 14:04 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-11-27 18:09 [PATCH] drm/i915: add intel_display_power_enabled_sw for use in atomic ctx Imre Deak
2013-11-27 18:30 ` Ville Syrjälä
2013-11-27 18:38   ` Daniel Vetter
2013-11-27 18:42     ` Imre Deak
2013-11-27 20:02 ` [PATCH] drm/i915: add intel_display_power_enabled_sw() " Imre Deak
2013-11-28 13:50   ` Paulo Zanoni
2013-11-28 14:05     ` Daniel Vetter

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox