public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 03/26] drm/i915: Stop passing I915_WAIT_LOCKED to i915_request_wait()
Date: Wed, 19 Jun 2019 14:44:10 +0300	[thread overview]
Message-ID: <87k1dh7qb9.fsf@gaia.fi.intel.com> (raw)
In-Reply-To: <20190618074153.16055-3-chris@chris-wilson.co.uk>

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Since commit eb8d0f5af4ec ("drm/i915: Remove GPU reset dependence on
> struct_mutex"), the I915_WAIT_LOCKED flag passed to i915_request_wait()
> has been defunct. Now go ahead and remove it from all callers.
>
> References: eb8d0f5af4ec ("drm/i915: Remove GPU reset dependence on struct_mutex")

This reminds me of the warm and soothing feeling I got
seeing RESET_HANDOFF vanish. I like simple things.

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

And the comment about locking was amended as well, so

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  .../drm/i915/gem/selftests/i915_gem_context.c | 14 +++------
>  drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |  2 +-
>  drivers/gpu/drm/i915/gt/intel_workarounds.c   |  2 +-
>  drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  9 ++----
>  drivers/gpu/drm/i915/gt/selftest_lrc.c        | 24 ++++++---------
>  .../gpu/drm/i915/gt/selftest_workarounds.c    |  6 ++--
>  drivers/gpu/drm/i915/i915_active.h            |  2 +-
>  drivers/gpu/drm/i915/i915_request.c           |  4 ---
>  drivers/gpu/drm/i915/i915_trace.h             |  3 +-
>  drivers/gpu/drm/i915/selftests/i915_request.c | 30 +++++++------------
>  .../gpu/drm/i915/selftests/i915_timeline.c    |  6 ++--
>  11 files changed, 36 insertions(+), 66 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index 03ac5003abf1..eaa2b16574c7 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -83,9 +83,7 @@ static int live_nop_switch(void *arg)
>  			}
>  			i915_request_add(rq);
>  		}
> -		if (i915_request_wait(rq,
> -				      I915_WAIT_LOCKED,
> -				      HZ / 5) < 0) {
> +		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  			pr_err("Failed to populated %d contexts\n", nctx);
>  			i915_gem_set_wedged(i915);
>  			err = -EIO;
> @@ -128,9 +126,7 @@ static int live_nop_switch(void *arg)
>  
>  				i915_request_add(rq);
>  			}
> -			if (i915_request_wait(rq,
> -					      I915_WAIT_LOCKED,
> -					      HZ / 5) < 0) {
> +			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  				pr_err("Switching between %ld contexts timed out\n",
>  				       prime);
>  				i915_gem_set_wedged(i915);
> @@ -893,7 +889,7 @@ __read_slice_count(struct drm_i915_private *i915,
>  	if (spin)
>  		igt_spinner_end(spin);
>  
> -	ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
> +	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
>  	i915_request_put(rq);
>  	if (ret < 0)
>  		return ret;
> @@ -980,9 +976,7 @@ __sseu_finish(struct drm_i915_private *i915,
>  		igt_spinner_end(spin);
>  
>  	if ((flags & TEST_IDLE) && ret == 0) {
> -		ret = i915_gem_wait_for_idle(i915,
> -					     I915_WAIT_LOCKED,
> -					     MAX_SCHEDULE_TIMEOUT);
> +		ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
>  		if (ret)
>  			return ret;
>  
> diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
> index bd35ad202459..c6023bc9452d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
> @@ -1815,7 +1815,7 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
>  		return -ENOSPC;
>  
>  	timeout = i915_request_wait(target,
> -				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
> +				    I915_WAIT_INTERRUPTIBLE,
>  				    MAX_SCHEDULE_TIMEOUT);
>  	if (timeout < 0)
>  		return timeout;
> diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
> index 165b0a45e009..9624d9e776e3 100644
> --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
> +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
> @@ -1384,7 +1384,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
>  		goto err_vma;
>  
>  	i915_request_add(rq);
> -	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
> +	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  		err = -ETIME;
>  		goto err_vma;
>  	}
> diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> index b0b2998e56b8..1ee4c923044f 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> @@ -339,8 +339,7 @@ static int igt_hang_sanitycheck(void *arg)
>  
>  		timeout = 0;
>  		igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
> -			timeout = i915_request_wait(rq,
> -						    I915_WAIT_LOCKED,
> +			timeout = i915_request_wait(rq, 0,
>  						    MAX_SCHEDULE_TIMEOUT);
>  		if (i915_reset_failed(i915))
>  			timeout = -EIO;
> @@ -1098,7 +1097,7 @@ static int igt_reset_wait(void *arg)
>  
>  	reset_count = fake_hangcheck(i915, ALL_ENGINES);
>  
> -	timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
> +	timeout = i915_request_wait(rq, 0, 10);
>  	if (timeout < 0) {
>  		pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
>  		       timeout);
> @@ -1666,9 +1665,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
>  		struct igt_wedge_me w;
>  
>  		igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/)
> -			i915_request_wait(rq,
> -					  I915_WAIT_LOCKED,
> -					  MAX_SCHEDULE_TIMEOUT);
> +			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
>  		if (i915_reset_failed(i915))
>  			err = -EIO;
>  	}
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index d84d31e3da19..401e8b539297 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -192,7 +192,7 @@ static int live_busywait_preempt(void *arg)
>  		}
>  
>  		/* Low priority request should be busywaiting now */
> -		if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
> +		if (i915_request_wait(lo, 0, 1) != -ETIME) {
>  			pr_err("%s: Busywaiting request did not!\n",
>  			       engine->name);
>  			err = -EIO;
> @@ -220,7 +220,7 @@ static int live_busywait_preempt(void *arg)
>  		intel_ring_advance(hi, cs);
>  		i915_request_add(hi);
>  
> -		if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
> +		if (i915_request_wait(lo, 0, HZ / 5) < 0) {
>  			struct drm_printer p = drm_info_printer(i915->drm.dev);
>  
>  			pr_err("%s: Failed to preempt semaphore busywait!\n",
> @@ -739,7 +739,6 @@ static int live_suppress_wait_preempt(void *arg)
>  			GEM_BUG_ON(!i915_request_started(rq[0]));
>  
>  			if (i915_request_wait(rq[depth],
> -					      I915_WAIT_LOCKED |
>  					      I915_WAIT_PRIORITY,
>  					      1) != -ETIME) {
>  				pr_err("%s: Waiter depth:%d completed!\n",
> @@ -841,7 +840,7 @@ static int live_chain_preempt(void *arg)
>  			 __func__, engine->name, ring_size);
>  
>  		igt_spinner_end(&lo.spin);
> -		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
> +		if (i915_request_wait(rq, 0, HZ / 2) < 0) {
>  			pr_err("Timed out waiting to flush %s\n", engine->name);
>  			goto err_wedged;
>  		}
> @@ -882,7 +881,7 @@ static int live_chain_preempt(void *arg)
>  			engine->schedule(rq, &attr);
>  
>  			igt_spinner_end(&hi.spin);
> -			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
> +			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  				struct drm_printer p =
>  					drm_info_printer(i915->drm.dev);
>  
> @@ -898,7 +897,7 @@ static int live_chain_preempt(void *arg)
>  			if (IS_ERR(rq))
>  				goto err_wedged;
>  			i915_request_add(rq);
> -			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
> +			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  				struct drm_printer p =
>  					drm_info_printer(i915->drm.dev);
>  
> @@ -1396,9 +1395,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
>  		}
>  
>  		for (nc = 0; nc < nctx; nc++) {
> -			if (i915_request_wait(request[nc],
> -					      I915_WAIT_LOCKED,
> -					      HZ / 10) < 0) {
> +			if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
>  				pr_err("%s(%s): wait for %llx:%lld timed out\n",
>  				       __func__, ve[0]->engine->name,
>  				       request[nc]->fence.context,
> @@ -1545,7 +1542,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
>  	}
>  
>  	for (n = 0; n < nsibling; n++) {
> -		if (i915_request_wait(request[n], I915_WAIT_LOCKED, HZ / 10) < 0) {
> +		if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
>  			pr_err("%s(%s): wait for %llx:%lld timed out\n",
>  			       __func__, ve->engine->name,
>  			       request[n]->fence.context,
> @@ -1720,9 +1717,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
>  		}
>  		onstack_fence_fini(&fence);
>  
> -		if (i915_request_wait(rq[0],
> -				      I915_WAIT_LOCKED,
> -				      HZ / 10) < 0) {
> +		if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
>  			pr_err("Master request did not execute (on %s)!\n",
>  			       rq[0]->engine->name);
>  			err = -EIO;
> @@ -1730,8 +1725,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
>  		}
>  
>  		for (n = 0; n < nsibling; n++) {
> -			if (i915_request_wait(rq[n + 1],
> -					      I915_WAIT_LOCKED,
> +			if (i915_request_wait(rq[n + 1], 0,
>  					      MAX_SCHEDULE_TIMEOUT) < 0) {
>  				err = -EIO;
>  				goto out;
> diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> index 91449d5157f6..87426f9183cd 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> @@ -541,7 +541,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
>  		if (err)
>  			goto out_batch;
>  
> -		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
> +		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  			pr_err("%s: Futzing %x timedout; cancelling test\n",
>  			       engine->name, reg);
>  			i915_gem_set_wedged(ctx->i915);
> @@ -739,7 +739,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
>  err_req:
>  	i915_request_add(rq);
>  
> -	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
> +	if (i915_request_wait(rq, 0, HZ / 5) < 0)
>  		err = -EIO;
>  
>  	return err;
> @@ -790,7 +790,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
>  
>  err_request:
>  	i915_request_add(rq);
> -	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
> +	if (i915_request_wait(rq, 0, HZ / 5) < 0)
>  		err = -EIO;
>  
>  err_unpin:
> diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
> index d55d37673944..c14eebf6d074 100644
> --- a/drivers/gpu/drm/i915/i915_active.h
> +++ b/drivers/gpu/drm/i915/i915_active.h
> @@ -330,7 +330,7 @@ i915_active_request_retire(struct i915_active_request *active,
>  		return 0;
>  
>  	ret = i915_request_wait(request,
> -				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
> +				I915_WAIT_INTERRUPTIBLE,
>  				MAX_SCHEDULE_TIMEOUT);
>  	if (ret < 0)
>  		return ret;
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 9819483d1b5d..d7fd77e8a789 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1360,10 +1360,6 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
>   * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
>   * unbounded wait).
>   *
> - * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
> - * in via the flags, and vice versa if the struct_mutex is not held, the caller
> - * must not specify that the wait is locked.
> - *
>   * Returns the remaining time (in jiffies) if the request completed, which may
>   * be zero or -ETIME if the request is unfinished after the timeout expires.
>   * May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index 5c8cfaa70d72..f4ce643b3bc3 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -863,10 +863,9 @@ TRACE_EVENT(i915_request_wait_begin,
>  			   __entry->flags = flags;
>  			   ),
>  
> -	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
> +	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
>  		      __entry->dev, __entry->class, __entry->instance,
>  		      __entry->hw_id, __entry->ctx, __entry->seqno,
> -		      !!(__entry->flags & I915_WAIT_LOCKED),
>  		      __entry->flags)
>  );
>  
> diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
> index 3de24f3d4ed5..298bb7116c51 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_request.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_request.c
> @@ -74,12 +74,12 @@ static int igt_wait_request(void *arg)
>  		goto out_unlock;
>  	}
>  
> -	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
> +	if (i915_request_wait(request, 0, 0) != -ETIME) {
>  		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
>  		goto out_unlock;
>  	}
>  
> -	if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
> +	if (i915_request_wait(request, 0, T) != -ETIME) {
>  		pr_err("request wait succeeded (expected timeout before submit!)\n");
>  		goto out_unlock;
>  	}
> @@ -91,7 +91,7 @@ static int igt_wait_request(void *arg)
>  
>  	i915_request_add(request);
>  
> -	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
> +	if (i915_request_wait(request, 0, 0) != -ETIME) {
>  		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
>  		goto out_unlock;
>  	}
> @@ -101,12 +101,12 @@ static int igt_wait_request(void *arg)
>  		goto out_unlock;
>  	}
>  
> -	if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
> +	if (i915_request_wait(request, 0, T / 2) != -ETIME) {
>  		pr_err("request wait succeeded (expected timeout!)\n");
>  		goto out_unlock;
>  	}
>  
> -	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
> +	if (i915_request_wait(request, 0, T) == -ETIME) {
>  		pr_err("request wait timed out!\n");
>  		goto out_unlock;
>  	}
> @@ -116,7 +116,7 @@ static int igt_wait_request(void *arg)
>  		goto out_unlock;
>  	}
>  
> -	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
> +	if (i915_request_wait(request, 0, T) == -ETIME) {
>  		pr_err("request wait timed out when already complete!\n");
>  		goto out_unlock;
>  	}
> @@ -574,9 +574,7 @@ static int live_nop_request(void *arg)
>  
>  				i915_request_add(request);
>  			}
> -			i915_request_wait(request,
> -					  I915_WAIT_LOCKED,
> -					  MAX_SCHEDULE_TIMEOUT);
> +			i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
>  
>  			times[1] = ktime_sub(ktime_get_raw(), times[1]);
>  			if (prime == 1)
> @@ -706,9 +704,7 @@ static int live_empty_request(void *arg)
>  			err = PTR_ERR(request);
>  			goto out_batch;
>  		}
> -		i915_request_wait(request,
> -				  I915_WAIT_LOCKED,
> -				  MAX_SCHEDULE_TIMEOUT);
> +		i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
>  
>  		for_each_prime_number_from(prime, 1, 8192) {
>  			times[1] = ktime_get_raw();
> @@ -720,9 +716,7 @@ static int live_empty_request(void *arg)
>  					goto out_batch;
>  				}
>  			}
> -			i915_request_wait(request,
> -					  I915_WAIT_LOCKED,
> -					  MAX_SCHEDULE_TIMEOUT);
> +			i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
>  
>  			times[1] = ktime_sub(ktime_get_raw(), times[1]);
>  			if (prime == 1)
> @@ -895,8 +889,7 @@ static int live_all_engines(void *arg)
>  	for_each_engine(engine, i915, id) {
>  		long timeout;
>  
> -		timeout = i915_request_wait(request[id],
> -					    I915_WAIT_LOCKED,
> +		timeout = i915_request_wait(request[id], 0,
>  					    MAX_SCHEDULE_TIMEOUT);
>  		if (timeout < 0) {
>  			err = timeout;
> @@ -1013,8 +1006,7 @@ static int live_sequential_engines(void *arg)
>  			goto out_request;
>  		}
>  
> -		timeout = i915_request_wait(request[id],
> -					    I915_WAIT_LOCKED,
> +		timeout = i915_request_wait(request[id], 0,
>  					    MAX_SCHEDULE_TIMEOUT);
>  		if (timeout < 0) {
>  			err = timeout;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
> index 724bf3650b3e..76d3977f1d4b 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
> @@ -724,7 +724,7 @@ static int live_hwsp_wrap(void *arg)
>  
>  		i915_request_add(rq);
>  
> -		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
> +		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  			pr_err("Wait for timeline writes timed out!\n");
>  			err = -EIO;
>  			goto out;
> @@ -797,9 +797,7 @@ static int live_hwsp_recycle(void *arg)
>  				goto out;
>  			}
>  
> -			if (i915_request_wait(rq,
> -					      I915_WAIT_LOCKED,
> -					      HZ / 5) < 0) {
> +			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  				pr_err("Wait for timeline writes timed out!\n");
>  				i915_timeline_put(tl);
>  				err = -EIO;
> -- 
> 2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  reply	other threads:[~2019-06-19 11:44 UTC|newest]

Thread overview: 42+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-06-18  7:41 [PATCH 01/26] drm/i915: Keep engine alive as we retire the context Chris Wilson
2019-06-18  7:41 ` [PATCH 02/26] drm/i915: Skip shrinking already freed pages Chris Wilson
2019-06-18 11:59   ` Chris Wilson
2019-06-18 16:06   ` Mika Kuoppala
2019-06-18 16:22     ` Chris Wilson
2019-06-18  7:41 ` [PATCH 03/26] drm/i915: Stop passing I915_WAIT_LOCKED to i915_request_wait() Chris Wilson
2019-06-19 11:44   ` Mika Kuoppala [this message]
2019-06-18  7:41 ` [PATCH 04/26] drm/i915: Flush the execution-callbacks on retiring Chris Wilson
2019-06-19 13:12   ` Mika Kuoppala
2019-06-19 13:18     ` Chris Wilson
2019-06-18  7:41 ` [PATCH 05/26] drm/i915/execlists: Preempt-to-busy Chris Wilson
2019-06-18  7:41 ` [PATCH 06/26] drm/i915/execlists: Minimalistic timeslicing Chris Wilson
2019-06-18  7:41 ` [PATCH 07/26] drm/i915/execlists: Force preemption Chris Wilson
2019-06-18  7:41 ` [PATCH 08/26] drm/i915: Make the semaphore saturation mask global Chris Wilson
2019-06-19 10:45   ` Tvrtko Ursulin
2019-06-18  7:41 ` [PATCH 09/26] dma-fence: Propagate errors to dma-fence-array container Chris Wilson
2019-06-18  7:41 ` [PATCH 10/26] dma-fence: Report the composite sync_file status Chris Wilson
2019-06-18  7:41 ` [PATCH 11/26] dma-fence: Refactor signaling for manual invocation Chris Wilson
2019-06-18  7:41 ` [PATCH 12/26] dma-fence: Always execute signal callbacks Chris Wilson
2019-06-18  7:41 ` [PATCH 13/26] drm/i915: Track i915_active using debugobjects Chris Wilson
2019-06-18  7:41 ` [PATCH 14/26] drm/i915: Signal fence completion from i915_request_wait Chris Wilson
2019-06-18  7:41 ` [PATCH 15/26] drm/i915: Remove waiting & retiring from shrinker paths Chris Wilson
2019-06-18  7:41 ` [PATCH 16/26] drm/i915: Throw away the active object retirement complexity Chris Wilson
2019-06-18  7:41 ` [PATCH 17/26] drm/i915: Provide an i915_active.acquire callback Chris Wilson
2019-06-18  7:41 ` [PATCH 18/26] drm/i915: Push the i915_active.retire into a worker Chris Wilson
2019-06-18  7:41 ` [PATCH 19/26] drm/i915/overlay: Switch to using i915_active tracking Chris Wilson
2019-06-18  7:41 ` [PATCH 20/26] drm/i915: Forgo last_fence active request tracking Chris Wilson
2019-06-18  7:41 ` [PATCH 21/26] drm/i915: Extract intel_frontbuffer active tracking Chris Wilson
2019-06-18  7:41 ` [PATCH 22/26] drm/i915: Coordinate i915_active with its own mutex Chris Wilson
2019-06-18  7:41 ` [PATCH 23/26] drm/i915: Rename intel_wakeref_[is]_active Chris Wilson
2019-06-18  8:14   ` Chris Wilson
2019-06-18  7:41 ` [PATCH 24/26] drm/i915: Teach execbuffer to take the engine wakeref not GT Chris Wilson
2019-06-18  7:41 ` [PATCH 25/26] drm/i915: Replace struct_mutex for batch pool serialisation Chris Wilson
2019-06-18  7:41 ` [PATCH 26/26] drm/i915: Move idle barrier cleanup into engine-pm Chris Wilson
2019-06-18  8:57 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/26] drm/i915: Keep engine alive as we retire the context Patchwork
2019-06-18  9:09 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-06-18  9:18 ` ✓ Fi.CI.BAT: success " Patchwork
2019-06-18 13:45 ` [PATCH 01/26] " Mika Kuoppala
2019-06-18 13:59   ` Chris Wilson
2019-06-18 14:03     ` Chris Wilson
2019-06-18 14:08     ` Mika Kuoppala
2019-06-18 19:15 ` ✗ Fi.CI.IGT: failure for series starting with [01/26] " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=87k1dh7qb9.fsf@gaia.fi.intel.com \
    --to=mika.kuoppala@linux.intel.com \
    --cc=chris@chris-wilson.co.uk \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox