From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Cc: Jani Nikula <jani.nikula@intel.com>
Subject: Re: [PATCH 12/46] drm/i915/gem: Track the rpm wakerefs
Date: Wed, 09 Jan 2019 13:16:05 +0200
Message-ID: <87r2dm13pm.fsf@gaia.fi.intel.com>
In-Reply-To: <20190107115509.12523-12-chris@chris-wilson.co.uk>

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Keep track of the temporary rpm wakerefs used for user access to the
> device, so that we can cancel them upon release and clearly identify any
> leaks.
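
For readers following the series: the scheme boils down to get()
handing out a non-zero cookie, put() requiring that same cookie back,
and anything still outstanding at teardown being a leak. A compilable
toy model of just that bookkeeping (userspace sketch with made-up
names, not the actual i915 API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t wakeref_t;

#define MAX_WAKEREFS 8

static wakeref_t outstanding[MAX_WAKEREFS];
static wakeref_t next_cookie = 1; /* 0 is reserved for "no wakeref" */

static wakeref_t wakeref_get(void)
{
	for (int i = 0; i < MAX_WAKEREFS; i++) {
		if (!outstanding[i]) {
			outstanding[i] = next_cookie++;
			return outstanding[i];
		}
	}
	assert(0 && "too many outstanding wakerefs");
	return 0;
}

static void wakeref_put(wakeref_t wakeref)
{
	assert(wakeref != 0); /* 0 means "no wakeref held" */
	for (int i = 0; i < MAX_WAKEREFS; i++) {
		if (outstanding[i] == wakeref) {
			outstanding[i] = 0;
			return;
		}
	}
	assert(0 && "put of an untracked wakeref");
}

static void wakeref_check_leaks(void)
{
	for (int i = 0; i < MAX_WAKEREFS; i++)
		if (outstanding[i])
			fprintf(stderr, "leaked wakeref %ju\n",
				(uintmax_t)outstanding[i]);
}

int main(void)
{
	wakeref_t wf = wakeref_get();
	/* ... user access to the device happens here ... */
	wakeref_put(wf);
	wakeref_check_leaks(); /* prints nothing: no leak */
	return 0;
}

In the series itself the cookie is the intel_wakeref_t returned by
intel_runtime_pm_get() and friends, and the leak check is done by the
wakeref tracking added earlier in the series.
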
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Jani Nikula <jani.nikula@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem.c            | 47 +++++++++++++---------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  5 ++-
>  drivers/gpu/drm/i915/i915_gem_fence_reg.c  |  6 ++-
>  drivers/gpu/drm/i915/i915_gem_gtt.c        | 22 ++++++----
>  drivers/gpu/drm/i915/i915_gem_shrinker.c   | 32 +++++++++------
>  drivers/gpu/drm/i915/intel_engine_cs.c     | 12 ++++--
>  drivers/gpu/drm/i915/intel_uncore.c        |  5 ++-
>  7 files changed, 81 insertions(+), 48 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 27f207cbabd9..e04dadeca879 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -786,6 +786,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
>  
>  void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
>  {
> +	intel_wakeref_t wakeref;
> +
>  	/*
>  	 * No actual flushing is required for the GTT write domain for reads
>  	 * from the GTT domain. Writes to it "immediately" go to main memory
> @@ -812,13 +814,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
>  
>  	i915_gem_chipset_flush(dev_priv);
>  
> -	intel_runtime_pm_get(dev_priv);
> +	wakeref = intel_runtime_pm_get(dev_priv);
>  	spin_lock_irq(&dev_priv->uncore.lock);
>  
>  	POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
>  
>  	spin_unlock_irq(&dev_priv->uncore.lock);
> -	intel_runtime_pm_put_unchecked(dev_priv);
> +	intel_runtime_pm_put(dev_priv, wakeref);
>  }
>  
>  static void
> @@ -1070,6 +1072,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
>  {
>  	struct drm_i915_private *i915 = to_i915(obj->base.dev);
>  	struct i915_ggtt *ggtt = &i915->ggtt;
> +	intel_wakeref_t wakeref;
>  	struct drm_mm_node node;
>  	struct i915_vma *vma;
>  	void __user *user_data;
> @@ -1080,7 +1083,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
>  	if (ret)
>  		return ret;
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
>  				       PIN_MAPPABLE |
>  				       PIN_NONFAULT |
> @@ -1153,7 +1156,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
>  		i915_vma_unpin(vma);
>  	}
>  out_unlock:
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  	mutex_unlock(&i915->drm.struct_mutex);
>  
>  	return ret;
> @@ -1254,6 +1257,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
>  {
>  	struct drm_i915_private *i915 = to_i915(obj->base.dev);
>  	struct i915_ggtt *ggtt = &i915->ggtt;
> +	intel_wakeref_t wakeref;
>  	struct drm_mm_node node;
>  	struct i915_vma *vma;
>  	u64 remain, offset;
> @@ -1272,13 +1276,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
>  		 * This easily dwarfs any performance advantage from
>  		 * using the cache bypass of indirect GGTT access.
>  		 */
> -		if (!intel_runtime_pm_get_if_in_use(i915)) {
> +		wakeref = intel_runtime_pm_get_if_in_use(i915);
> +		if (!wakeref) {
>  			ret = -EFAULT;
>  			goto out_unlock;
>  		}
>  	} else {
>  		/* No backing pages, no fallback, we must force GGTT access */
> -		intel_runtime_pm_get(i915);
> +		wakeref = intel_runtime_pm_get(i915);
>  	}
>  
>  	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
> @@ -1360,7 +1365,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
>  		i915_vma_unpin(vma);
>  	}
>  out_rpm:
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  out_unlock:
>  	mutex_unlock(&i915->drm.struct_mutex);
>  	return ret;
> @@ -1865,6 +1870,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
>  	bool write = area->vm_flags & VM_WRITE;
> +	intel_wakeref_t wakeref;
>  	struct i915_vma *vma;
>  	pgoff_t page_offset;
>  	int ret;
> @@ -1894,7 +1900,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
>  	if (ret)
>  		goto err;
>  
> -	intel_runtime_pm_get(dev_priv);
> +	wakeref = intel_runtime_pm_get(dev_priv);
>  
>  	ret = i915_mutex_lock_interruptible(dev);
>  	if (ret)
> @@ -1972,7 +1978,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
>  err_unlock:
>  	mutex_unlock(&dev->struct_mutex);
>  err_rpm:
> -	intel_runtime_pm_put_unchecked(dev_priv);
> +	intel_runtime_pm_put(dev_priv, wakeref);
>  	i915_gem_object_unpin_pages(obj);
>  err:
>  	switch (ret) {
> @@ -2045,6 +2051,7 @@ void
>  i915_gem_release_mmap(struct drm_i915_gem_object *obj)
>  {
>  	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +	intel_wakeref_t wakeref;
>  
>  	/* Serialisation between user GTT access and our code depends upon
>  	 * revoking the CPU's PTE whilst the mutex is held. The next user
> @@ -2055,7 +2062,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
>  	 * wakeref.
>  	 */
>  	lockdep_assert_held(&i915->drm.struct_mutex);
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  
>  	if (!obj->userfault_count)
>  		goto out;
> @@ -2072,7 +2079,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
>  	wmb();
>  
>  out:
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  }
>  
>  void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
> @@ -4707,8 +4714,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
>  				    struct llist_node *freed)
>  {
>  	struct drm_i915_gem_object *obj, *on;
> +	intel_wakeref_t wakeref;
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	llist_for_each_entry_safe(obj, on, freed, freed) {
>  		struct i915_vma *vma, *vn;
>  
> @@ -4769,7 +4777,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
>  		if (on)
>  			cond_resched();
>  	}
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  }
>  
>  static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
> @@ -4878,11 +4886,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
>  
>  void i915_gem_sanitize(struct drm_i915_private *i915)
>  {
> +	intel_wakeref_t wakeref;
> +
>  	GEM_TRACE("\n");
>  
>  	mutex_lock(&i915->drm.struct_mutex);
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
>  
>  	/*
> @@ -4905,7 +4915,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
>  	intel_engines_sanitize(i915, false);
>  
>  	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  
>  	i915_gem_contexts_lost(i915);
>  	mutex_unlock(&i915->drm.struct_mutex);
> @@ -4913,11 +4923,12 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
>  
>  int i915_gem_suspend(struct drm_i915_private *i915)
>  {
> +	intel_wakeref_t wakeref;
>  	int ret;
>  
>  	GEM_TRACE("\n");
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	intel_suspend_gt_powersave(i915);
>  
>  	mutex_lock(&i915->drm.struct_mutex);
> @@ -4969,12 +4980,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
>  	if (WARN_ON(!intel_engines_are_idle(i915)))
>  		i915_gem_set_wedged(i915); /* no hope, discard everything */
>  
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  	return 0;
>  
>  err_unlock:
>  	mutex_unlock(&i915->drm.struct_mutex);
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  	return ret;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index a52fa42ed8b1..76bb1a89e530 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -2203,6 +2203,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
>  	struct i915_execbuffer eb;
>  	struct dma_fence *in_fence = NULL;
>  	struct sync_file *out_fence = NULL;
> +	intel_wakeref_t wakeref;
>  	int out_fence_fd = -1;
>  	int err;
>  
> @@ -2273,7 +2274,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
>  	 * wakeref that we hold until the GPU has been idle for at least
>  	 * 100ms.
>  	 */
> -	intel_runtime_pm_get(eb.i915);
> +	wakeref = intel_runtime_pm_get(eb.i915);
>  
>  	err = i915_mutex_lock_interruptible(dev);
>  	if (err)
> @@ -2425,7 +2426,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
>  		eb_release_vmas(&eb);
>  	mutex_unlock(&dev->struct_mutex);
>  err_rpm:
> -	intel_runtime_pm_put_unchecked(eb.i915);
> +	intel_runtime_pm_put(eb.i915, wakeref);
>  	i915_gem_context_put(eb.ctx);
>  err_destroy:
>  	eb_destroy(&eb);
> diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
> index 1f72f5047945..e6edcd83450c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
> +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
> @@ -210,6 +210,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
>  static int fence_update(struct drm_i915_fence_reg *fence,
>  			struct i915_vma *vma)
>  {
> +	intel_wakeref_t wakeref;
>  	int ret;
>  
>  	if (vma) {
> @@ -257,9 +258,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
>  	 * If the device is currently powered down, we will defer the write
>  	 * to the runtime resume, see i915_gem_restore_fences().
>  	 */
> -	if (intel_runtime_pm_get_if_in_use(fence->i915)) {
> +	wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
> +	if (wakeref) {
>  		fence_write(fence, vma);
> -		intel_runtime_pm_put_unchecked(fence->i915);
> +		intel_runtime_pm_put(fence->i915, wakeref);
>  	}
>  
>  	if (vma) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 6dac9614f7ba..4bec10286487 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2528,6 +2528,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
>  {
>  	struct drm_i915_private *i915 = vma->vm->i915;
>  	struct drm_i915_gem_object *obj = vma->obj;
> +	intel_wakeref_t wakeref;
>  	u32 pte_flags;
>  
>  	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
> @@ -2535,9 +2536,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
>  	if (i915_gem_object_is_readonly(obj))
>  		pte_flags |= PTE_READ_ONLY;
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  
>  	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
>  
> @@ -2554,10 +2555,11 @@ static int ggtt_bind_vma(struct i915_vma *vma,
>  static void ggtt_unbind_vma(struct i915_vma *vma)
>  {
>  	struct drm_i915_private *i915 = vma->vm->i915;
> +	intel_wakeref_t wakeref;
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  }
>  
>  static int aliasing_gtt_bind_vma(struct i915_vma *vma,
> @@ -2589,9 +2591,11 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
>  	}
>  
>  	if (flags & I915_VMA_GLOBAL_BIND) {
> -		intel_runtime_pm_get(i915);
> +		intel_wakeref_t wakeref;
> +
> +		wakeref = intel_runtime_pm_get(i915);
>  		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
> -		intel_runtime_pm_put_unchecked(i915);
> +		intel_runtime_pm_put(i915, wakeref);
>  	}
>  
>  	return 0;
> @@ -2602,9 +2606,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
>  	struct drm_i915_private *i915 = vma->vm->i915;
>  
>  	if (vma->flags & I915_VMA_GLOBAL_BIND) {
> -		intel_runtime_pm_get(i915);
> +		intel_wakeref_t wakeref;
> +
> +		wakeref = intel_runtime_pm_get(i915);
>  		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
> -		intel_runtime_pm_put_unchecked(i915);
> +		intel_runtime_pm_put(i915, wakeref);
>  	}
>  
>  	if (vma->flags & I915_VMA_LOCAL_BIND) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 16693dd4d019..bc230e43b98f 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -154,6 +154,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
>  		{ &i915->mm.bound_list, I915_SHRINK_BOUND },
>  		{ NULL, 0 },
>  	}, *phase;
> +	intel_wakeref_t wakeref = 0;
>  	unsigned long count = 0;
>  	unsigned long scanned = 0;
>  	bool unlock;
> @@ -183,9 +184,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
>  	 * device just to recover a little memory. If absolutely necessary,
>  	 * we will force the wake during oom-notifier.
>  	 */
> -	if ((flags & I915_SHRINK_BOUND) &&
> -	    !intel_runtime_pm_get_if_in_use(i915))
> -		flags &= ~I915_SHRINK_BOUND;
> +	if (flags & I915_SHRINK_BOUND) {
> +		wakeref = intel_runtime_pm_get_if_in_use(i915);
> +		if (!wakeref)
> +			flags &= ~I915_SHRINK_BOUND;
> +	}
>  
>  	/*
>  	 * As we may completely rewrite the (un)bound list whilst unbinding
> @@ -266,7 +269,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
>  	}
>  
>  	if (flags & I915_SHRINK_BOUND)
> -		intel_runtime_pm_put_unchecked(i915);
> +		intel_runtime_pm_put(i915, wakeref);

This is ok, but it raises a question: do we have a
GEM_BUG_ON(wakeref == 0) in pm_put? Perhaps it is not needed
per se, as the tracking lookup will already find that there is
no ref for 0.
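
To make the question concrete, I mean something along these lines (a
sketch against my reading of the series, with GEM_BUG_ON approximated
by assert() so it compiles standalone; untrack_wakeref() is a made-up
placeholder, not the real put body):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t intel_wakeref_t;

struct drm_i915_private; /* opaque for this sketch */

static void untrack_wakeref(struct drm_i915_private *i915,
			    intel_wakeref_t wakeref)
{
	(void)i915;
	(void)wakeref; /* real code would drop it from the tracking set */
}

static void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
{
	(void)i915; /* real code releases the runtime pm reference */
}

static void intel_runtime_pm_put(struct drm_i915_private *i915,
				 intel_wakeref_t wakeref)
{
	/* the check I am asking about: trap a put of a zero cookie */
	assert(wakeref != 0);
	untrack_wakeref(i915, wakeref);
	intel_runtime_pm_put_unchecked(i915);
}

int main(void)
{
	intel_runtime_pm_put(NULL, 1); /* ok: non-zero cookie */
	/* intel_runtime_pm_put(NULL, 0); would trip the assert */
	return 0;
}
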

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

>  
>  	i915_retire_requests(i915);
>  
> @@ -293,14 +296,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
>   */
>  unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
>  {
> +	intel_wakeref_t wakeref;
>  	unsigned long freed;
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	freed = i915_gem_shrink(i915, -1UL, NULL,
>  				I915_SHRINK_BOUND |
>  				I915_SHRINK_UNBOUND |
>  				I915_SHRINK_ACTIVE);
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  
>  	return freed;
>  }
> @@ -371,14 +375,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
>  					 I915_SHRINK_BOUND |
>  					 I915_SHRINK_UNBOUND);
>  	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
> -		intel_runtime_pm_get(i915);
> +		intel_wakeref_t wakeref;
> +
> +		wakeref = intel_runtime_pm_get(i915);
>  		freed += i915_gem_shrink(i915,
>  					 sc->nr_to_scan - sc->nr_scanned,
>  					 &sc->nr_scanned,
>  					 I915_SHRINK_ACTIVE |
>  					 I915_SHRINK_BOUND |
>  					 I915_SHRINK_UNBOUND);
> -		intel_runtime_pm_put_unchecked(i915);
> +		intel_runtime_pm_put(i915, wakeref);
>  	}
>  
>  	shrinker_unlock(i915, unlock);
> @@ -418,12 +424,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
>  		container_of(nb, struct drm_i915_private, mm.oom_notifier);
>  	struct drm_i915_gem_object *obj;
>  	unsigned long unevictable, bound, unbound, freed_pages;
> +	intel_wakeref_t wakeref;
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	freed_pages = i915_gem_shrink(i915, -1UL, NULL,
>  				      I915_SHRINK_BOUND |
>  				      I915_SHRINK_UNBOUND);
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  
>  	/* Because we may be allocating inside our own driver, we cannot
>  	 * assert that there are no objects with pinned pages that are not
> @@ -461,6 +468,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>  		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
>  	struct i915_vma *vma, *next;
>  	unsigned long freed_pages = 0;
> +	intel_wakeref_t wakeref;
>  	bool unlock;
>  	int ret;
>  
> @@ -474,12 +482,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>  	if (ret)
>  		goto out;
>  
> -	intel_runtime_pm_get(i915);
> +	wakeref = intel_runtime_pm_get(i915);
>  	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
>  				       I915_SHRINK_BOUND |
>  				       I915_SHRINK_UNBOUND |
>  				       I915_SHRINK_VMAPS);
> -	intel_runtime_pm_put_unchecked(i915);
> +	intel_runtime_pm_put(i915, wakeref);
>  
>  	/* We also want to clear any cached iomaps as they wrap vmap */
>  	list_for_each_entry_safe(vma, next,
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 85131166589c..bf4dae2649ab 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -913,10 +913,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
>  static bool ring_is_idle(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
> +	intel_wakeref_t wakeref;
>  	bool idle = true;
>  
>  	/* If the whole device is asleep, the engine must be idle */
> -	if (!intel_runtime_pm_get_if_in_use(dev_priv))
> +	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
> +	if (!wakeref)
>  		return true;
>  
>  	/* First check that no commands are left in the ring */
> @@ -928,7 +930,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
>  	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
>  		idle = false;
>  
> -	intel_runtime_pm_put_unchecked(dev_priv);
> +	intel_runtime_pm_put(dev_priv, wakeref);
>  
>  	return idle;
>  }
> @@ -1425,6 +1427,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  	const struct intel_engine_execlists * const execlists = &engine->execlists;
>  	struct i915_gpu_error * const error = &engine->i915->gpu_error;
>  	struct i915_request *rq, *last;
> +	intel_wakeref_t wakeref;
>  	unsigned long flags;
>  	struct rb_node *rb;
>  	int count;
> @@ -1483,9 +1486,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  
>  	rcu_read_unlock();
>  
> -	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
> +	wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
> +	if (wakeref) {
>  		intel_engine_print_registers(engine, m);
> -		intel_runtime_pm_put_unchecked(engine->i915);
> +		intel_runtime_pm_put(engine->i915, wakeref);
>  	} else {
>  		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
>  	}
> diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
> index 8d4c76ac0e7d..d494d92da02c 100644
> --- a/drivers/gpu/drm/i915/intel_uncore.c
> +++ b/drivers/gpu/drm/i915/intel_uncore.c
> @@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct drm_i915_reg_read *reg = data;
>  	struct reg_whitelist const *entry;
> +	intel_wakeref_t wakeref;
>  	unsigned int flags;
>  	int remain;
>  	int ret = 0;
> @@ -1695,7 +1696,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
>  
>  	flags = reg->offset & (entry->size - 1);
>  
> -	intel_runtime_pm_get(dev_priv);
> +	wakeref = intel_runtime_pm_get(dev_priv);
>  	if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
>  		reg->val = I915_READ64_2x32(entry->offset_ldw,
>  					    entry->offset_udw);
> @@ -1709,7 +1710,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
>  		reg->val = I915_READ8(entry->offset_ldw);
>  	else
>  		ret = -EINVAL;
> -	intel_runtime_pm_put_unchecked(dev_priv);
> +	intel_runtime_pm_put(dev_priv, wakeref);
>  
>  	return ret;
>  }
> -- 
> 2.20.1
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
