public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 2/3] drm/i915/gt: Mark context->active_count as protected by timeline->mutex
Date: Fri, 16 Aug 2019 14:48:31 +0300	[thread overview]
Message-ID: <87sgq15o1c.fsf@gaia.fi.intel.com> (raw)
In-Reply-To: <20190816092424.31386-2-chris@chris-wilson.co.uk>

Chris Wilson <chris@chris-wilson.co.uk> writes:

> We use timeline->mutex to protect modifications to
> context->active_count, and the associated enable/disable callbacks.
> Due to complications with the engine-pm barrier there is a path where we
> used a "superlock" to provide serialised protection and so could not
> unconditionally assert with lockdep that it was always held. However,
> we can mark the mutex as taken (noting that we may be nested underneath
> ourselves) which means we can be reassured the right timeline->mutex is
> always treated as held and let lockdep roam free.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/gt/intel_context.h       |  3 +++
>  drivers/gpu/drm/i915/gt/intel_context_types.h |  2 +-
>  drivers/gpu/drm/i915/gt/intel_engine_pm.c     | 14 ++++++++++++++
>  drivers/gpu/drm/i915/gt/intel_timeline.c      |  4 ++++
>  drivers/gpu/drm/i915/i915_request.c           |  3 ++-
>  5 files changed, 24 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> index 053a1307ecb4..dd742ac2fbdb 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> @@ -89,17 +89,20 @@ void intel_context_exit_engine(struct intel_context *ce);
>  
>  static inline void intel_context_enter(struct intel_context *ce)
>  {
> +	lockdep_assert_held(&ce->timeline->mutex);
>  	if (!ce->active_count++)
>  		ce->ops->enter(ce);
>  }
>  
>  static inline void intel_context_mark_active(struct intel_context *ce)
>  {
> +	lockdep_assert_held(&ce->timeline->mutex);
>  	++ce->active_count;
>  }
>  
>  static inline void intel_context_exit(struct intel_context *ce)
>  {
> +	lockdep_assert_held(&ce->timeline->mutex);
>  	GEM_BUG_ON(!ce->active_count);
>  	if (!--ce->active_count)
>  		ce->ops->exit(ce);
> diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
> index d8ce266c049f..bf9cedfccbf0 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
> @@ -59,7 +59,7 @@ struct intel_context {
>  	u32 *lrc_reg_state;
>  	u64 lrc_desc;
>  
> -	unsigned int active_count; /* notionally protected by timeline->mutex */
> +	unsigned int active_count; /* protected by timeline->mutex */
>  
>  	atomic_t pin_count;
>  	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> index f3f0109f9e22..5f03f7dcad72 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> @@ -37,6 +37,16 @@ static int __engine_unpark(struct intel_wakeref *wf)
>  	return 0;
>  }
>  
> +static inline void __timeline_mark_lock(struct intel_context *ce)
> +{
> +	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
> +}
> +
> +static inline void __timeline_mark_unlock(struct intel_context *ce)
> +{
> +	mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
> +}
> +
>  static bool switch_to_kernel_context(struct intel_engine_cs *engine)
>  {
>  	struct i915_request *rq;
> @@ -61,6 +71,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
>  	 * retiring the last request, thus all rings should be empty and
>  	 * all timelines idle.
>  	 */
> +	__timeline_mark_lock(engine->kernel_context);
> +
>  	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
>  	if (IS_ERR(rq))
>  		/* Context switch failed, hope for the best! Maybe reset? */
> @@ -80,6 +92,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
>  	__intel_wakeref_defer_park(&engine->wakeref);
>  	__i915_request_queue(rq, NULL);
>  
> +	__timeline_mark_unlock(engine->kernel_context);
> +
>  	return false;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
> index 7b476cd55dac..eafd94d5e211 100644
> --- a/drivers/gpu/drm/i915/gt/intel_timeline.c
> +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
> @@ -338,6 +338,8 @@ void intel_timeline_enter(struct intel_timeline *tl)
>  {
>  	struct intel_gt_timelines *timelines = &tl->gt->timelines;
>  
> +	lockdep_assert_held(&tl->mutex);
> +
>  	GEM_BUG_ON(!atomic_read(&tl->pin_count));
>  	if (tl->active_count++)
>  		return;
> @@ -352,6 +354,8 @@ void intel_timeline_exit(struct intel_timeline *tl)
>  {
>  	struct intel_gt_timelines *timelines = &tl->gt->timelines;
>  
> +	lockdep_assert_held(&tl->mutex);
> +
>  	GEM_BUG_ON(!tl->active_count);
>  	if (--tl->active_count)
>  		return;
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 174c7fdd02ff..7edcd0fef5c6 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1087,7 +1087,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
>  	 * precludes optimising to use semaphores serialisation of a single
>  	 * timeline across engines.
>  	 */
> -	prev = rcu_dereference_protected(timeline->last_request.request, 1);
> +	prev = rcu_dereference_protected(timeline->last_request.request,
> +					 lockdep_is_held(&timeline->mutex));
>  	if (prev && !i915_request_completed(prev)) {
>  		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
>  			i915_sw_fence_await_sw_fence(&rq->submit,
> -- 
> 2.23.0.rc1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  reply	other threads:[~2019-08-16 11:49 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-08-16  9:24 [PATCH 1/3] drm/i915/execlists: Lift process_csb() out of the irq-off spinlock Chris Wilson
2019-08-16  9:24 ` [PATCH 2/3] drm/i915/gt: Mark context->active_count as protected by timeline->mutex Chris Wilson
2019-08-16 11:48   ` Mika Kuoppala [this message]
2019-08-16  9:24 ` [PATCH 3/3] drm/i915: Markup expected timeline locks for i915_active Chris Wilson
2019-08-16 12:02   ` Mika Kuoppala
2019-08-16 12:06     ` Chris Wilson
2019-08-16 11:05 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/3] drm/i915/execlists: Lift process_csb() out of the irq-off spinlock Patchwork
2019-08-16 11:26 ` ✗ Fi.CI.BAT: failure " Patchwork
2019-08-16 11:28   ` Chris Wilson
2019-08-16 11:38 ` [PATCH 1/3] " Mika Kuoppala
2019-08-16 11:49 ` [PATCH] " Chris Wilson
2019-08-16 16:16 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with drm/i915/execlists: Lift process_csb() out of the irq-off spinlock (rev2) Patchwork
2019-08-16 16:37 ` ✗ Fi.CI.BAT: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=87sgq15o1c.fsf@gaia.fi.intel.com \
    --to=mika.kuoppala@linux.intel.com \
    --cc=chris@chris-wilson.co.uk \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox