public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 20/46] drm/i915: Remove access to global seqno in the HWSP
Date: Mon, 11 Feb 2019 18:22:34 +0000	[thread overview]
Message-ID: <8553d4a5-df80-5c91-0aa6-d4c817af9a5a@linux.intel.com> (raw)
In-Reply-To: <20190206130356.18771-21-chris@chris-wilson.co.uk>


On 06/02/2019 13:03, Chris Wilson wrote:
> Stop accessing the HWSP to read the global seqno, and stop tracking the
> mirror in the engine's execution timeline -- it is unused.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_gpu_error.c         |  4 --
>   drivers/gpu/drm/i915/i915_gpu_error.h         |  3 --
>   drivers/gpu/drm/i915/i915_request.c           | 27 +++++--------
>   drivers/gpu/drm/i915/i915_reset.c             |  1 -
>   drivers/gpu/drm/i915/intel_engine_cs.c        | 14 +------
>   drivers/gpu/drm/i915/intel_lrc.c              | 21 +++-------
>   drivers/gpu/drm/i915/intel_ringbuffer.c       |  7 +---
>   drivers/gpu/drm/i915/intel_ringbuffer.h       | 40 -------------------
>   drivers/gpu/drm/i915/selftests/i915_request.c |  3 +-
>   drivers/gpu/drm/i915/selftests/mock_engine.c  |  2 -
>   10 files changed, 19 insertions(+), 103 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 9a65341fec09..a674c78ca1f8 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -533,8 +533,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
>   				   ee->vm_info.pp_dir_base);
>   		}
>   	}
> -	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
> -	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
>   	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
>   	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
>   	err_printf(m, "  hangcheck timestamp: %dms (%lu%s)\n",
> @@ -1227,8 +1225,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
>   
>   	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
>   	ee->acthd = intel_engine_get_active_head(engine);
> -	ee->seqno = intel_engine_get_seqno(engine);
> -	ee->last_seqno = intel_engine_last_submit(engine);
>   	ee->start = I915_READ_START(engine);
>   	ee->head = I915_READ_HEAD(engine);
>   	ee->tail = I915_READ_TAIL(engine);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
> index d5c58e82508b..4dbbd0f02edb 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.h
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.h
> @@ -94,8 +94,6 @@ struct i915_gpu_state {
>   		u32 cpu_ring_head;
>   		u32 cpu_ring_tail;
>   
> -		u32 last_seqno;
> -
>   		/* Register state */
>   		u32 start;
>   		u32 tail;
> @@ -108,7 +106,6 @@ struct i915_gpu_state {
>   		u32 bbstate;
>   		u32 instpm;
>   		u32 instps;
> -		u32 seqno;
>   		u64 bbaddr;
>   		u64 acthd;
>   		u32 fault_reg;
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index eed66d3606d9..85cf5cfbc7ed 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -192,12 +192,11 @@ static void free_capture_list(struct i915_request *request)
>   static void __retire_engine_request(struct intel_engine_cs *engine,
>   				    struct i915_request *rq)
>   {
> -	GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
> +	GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d\n",
>   		  __func__, engine->name,
>   		  rq->fence.context, rq->fence.seqno,
>   		  rq->global_seqno,
> -		  hwsp_seqno(rq),
> -		  intel_engine_get_seqno(engine));
> +		  hwsp_seqno(rq));
>   
>   	GEM_BUG_ON(!i915_request_completed(rq));
>   
> @@ -256,12 +255,11 @@ static void i915_request_retire(struct i915_request *request)
>   {
>   	struct i915_active_request *active, *next;
>   
> -	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
> +	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
>   		  request->engine->name,
>   		  request->fence.context, request->fence.seqno,
>   		  request->global_seqno,
> -		  hwsp_seqno(request),
> -		  intel_engine_get_seqno(request->engine));
> +		  hwsp_seqno(request));
>   
>   	lockdep_assert_held(&request->i915->drm.struct_mutex);
>   	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
> @@ -320,12 +318,11 @@ void i915_request_retire_upto(struct i915_request *rq)
>   	struct intel_ring *ring = rq->ring;
>   	struct i915_request *tmp;
>   
> -	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
> +	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
>   		  rq->engine->name,
>   		  rq->fence.context, rq->fence.seqno,
>   		  rq->global_seqno,
> -		  hwsp_seqno(rq),
> -		  intel_engine_get_seqno(rq->engine));
> +		  hwsp_seqno(rq));
>   
>   	lockdep_assert_held(&rq->i915->drm.struct_mutex);
>   	GEM_BUG_ON(!i915_request_completed(rq));
> @@ -427,12 +424,11 @@ void __i915_request_submit(struct i915_request *request)
>   	struct intel_engine_cs *engine = request->engine;
>   	u32 seqno;
>   
> -	GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
> +	GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d\n",
>   		  engine->name,
>   		  request->fence.context, request->fence.seqno,
>   		  engine->timeline.seqno + 1,
> -		  hwsp_seqno(request),
> -		  intel_engine_get_seqno(engine));
> +		  hwsp_seqno(request));
>   
>   	GEM_BUG_ON(!irqs_disabled());
>   	lockdep_assert_held(&engine->timeline.lock);
> @@ -441,7 +437,6 @@ void __i915_request_submit(struct i915_request *request)
>   
>   	seqno = next_global_seqno(&engine->timeline);
>   	GEM_BUG_ON(!seqno);
> -	GEM_BUG_ON(intel_engine_signaled(engine, seqno));
>   
>   	/* We may be recursing from the signal callback of another i915 fence */
>   	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
> @@ -492,12 +487,11 @@ void __i915_request_unsubmit(struct i915_request *request)
>   {
>   	struct intel_engine_cs *engine = request->engine;
>   
> -	GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
> +	GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d\n",
>   		  engine->name,
>   		  request->fence.context, request->fence.seqno,
>   		  request->global_seqno,
> -		  hwsp_seqno(request),
> -		  intel_engine_get_seqno(engine));
> +		  hwsp_seqno(request));
>   
>   	GEM_BUG_ON(!irqs_disabled());
>   	lockdep_assert_held(&engine->timeline.lock);
> @@ -508,7 +502,6 @@ void __i915_request_unsubmit(struct i915_request *request)
>   	 */
>   	GEM_BUG_ON(!request->global_seqno);
>   	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
> -	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
>   	engine->timeline.seqno--;
>   
>   	/* We may be recursing from the signal callback of another i915 fence */
> diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
> index b629f25a81f0..7051c0a43941 100644
> --- a/drivers/gpu/drm/i915/i915_reset.c
> +++ b/drivers/gpu/drm/i915/i915_reset.c
> @@ -787,7 +787,6 @@ static void nop_submit_request(struct i915_request *request)
>   	spin_lock_irqsave(&engine->timeline.lock, flags);
>   	__i915_request_submit(request);
>   	i915_request_mark_complete(request);
> -	intel_engine_write_global_seqno(engine, request->global_seqno);
>   	spin_unlock_irqrestore(&engine->timeline.lock, flags);
>   
>   	intel_engine_queue_breadcrumbs(engine);
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index e1e54b7448b4..ea370ed094a5 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -455,12 +455,6 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
>   	return err;
>   }
>   
> -void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
> -{
> -	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
> -	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
> -}
> -
>   static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
>   {
>   	i915_gem_batch_pool_init(&engine->batch_pool, engine);
> @@ -1053,10 +1047,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
>   	if (i915_terminally_wedged(&dev_priv->gpu_error))
>   		return true;
>   
> -	/* Any inflight/incomplete requests? */
> -	if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
> -		return false;
> -
>   	/* Waiting to drain ELSP? */
>   	if (READ_ONCE(engine->execlists.active)) {
>   		struct tasklet_struct *t = &engine->execlists.tasklet;
> @@ -1538,9 +1528,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   	if (i915_terminally_wedged(&engine->i915->gpu_error))
>   		drm_printf(m, "*** WEDGED ***\n");
>   
> -	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x/%x [%d ms]\n",
> -		   intel_engine_get_seqno(engine),
> -		   intel_engine_last_submit(engine),
> +	drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
>   		   engine->hangcheck.last_seqno,
>   		   engine->hangcheck.next_seqno,
>   		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 342d3a91be03..2f2c27e6ae6d 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -565,13 +565,12 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
>   			desc = execlists_update_context(rq);
>   			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
>   
> -			GEM_TRACE("%s in[%d]:  ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
> +			GEM_TRACE("%s in[%d]:  ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
>   				  engine->name, n,
>   				  port[n].context_id, count,
>   				  rq->global_seqno,
>   				  rq->fence.context, rq->fence.seqno,
>   				  hwsp_seqno(rq),
> -				  intel_engine_get_seqno(engine),
>   				  rq_prio(rq));
>   		} else {
>   			GEM_BUG_ON(!n);
> @@ -876,13 +875,12 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
>   	while (num_ports-- && port_isset(port)) {
>   		struct i915_request *rq = port_request(port);
>   
> -		GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
> +		GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d)\n",
>   			  rq->engine->name,
>   			  (unsigned int)(port - execlists->port),
>   			  rq->global_seqno,
>   			  rq->fence.context, rq->fence.seqno,
> -			  hwsp_seqno(rq),
> -			  intel_engine_get_seqno(rq->engine));
> +			  hwsp_seqno(rq));
>   
>   		GEM_BUG_ON(!execlists->active);
>   		execlists_context_schedule_out(rq,
> @@ -938,8 +936,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
>   	struct rb_node *rb;
>   	unsigned long flags;
>   
> -	GEM_TRACE("%s current %d\n",
> -		  engine->name, intel_engine_get_seqno(engine));
> +	GEM_TRACE("%s\n", engine->name);
>   
>   	/*
>   	 * Before we call engine->cancel_requests(), we should have exclusive
> @@ -987,10 +984,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
>   		i915_priolist_free(p);
>   	}
>   
> -	intel_write_status_page(engine,
> -				I915_GEM_HWS_INDEX,
> -				intel_engine_last_submit(engine));
> -
>   	/* Remaining _unready_ requests will be nop'ed when submitted */
>   
>   	execlists->queue_priority_hint = INT_MIN;
> @@ -1106,14 +1099,13 @@ static void process_csb(struct intel_engine_cs *engine)
>   						EXECLISTS_ACTIVE_USER));
>   
>   		rq = port_unpack(port, &count);
> -		GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
> +		GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
>   			  engine->name,
>   			  port->context_id, count,
>   			  rq ? rq->global_seqno : 0,
>   			  rq ? rq->fence.context : 0,
>   			  rq ? rq->fence.seqno : 0,
>   			  rq ? hwsp_seqno(rq) : 0,
> -			  intel_engine_get_seqno(engine),
>   			  rq ? rq_prio(rq) : 0);
>   
>   		/* Check the context/desc id for this event matches */
> @@ -1975,10 +1967,9 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
>   	/* Following the reset, we need to reload the CSB read/write pointers */
>   	reset_csb_pointers(&engine->execlists);
>   
> -	GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
> +	GEM_TRACE("%s seqno=%d, stalled? %s\n",
>   		  engine->name,
>   		  rq ? rq->global_seqno : 0,
> -		  intel_engine_get_seqno(engine),
>   		  yesno(stalled));
>   	if (!rq)
>   		goto out_unlock;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 870184bbd169..2d59e2990448 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -782,10 +782,9 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
>   		}
>   	}
>   
> -	GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
> +	GEM_TRACE("%s seqno=%d, stalled? %s\n",
>   		  engine->name,
>   		  rq ? rq->global_seqno : 0,
> -		  intel_engine_get_seqno(engine),
>   		  yesno(stalled));
>   	/*
>   	 * The guilty request will get skipped on a hung engine.
> @@ -924,10 +923,6 @@ static void cancel_requests(struct intel_engine_cs *engine)
>   		i915_request_mark_complete(request);
>   	}
>   
> -	intel_write_status_page(engine,
> -				I915_GEM_HWS_INDEX,
> -				intel_engine_last_submit(engine));
> -
>   	/* Remaining _unready_ requests will be nop'ed when submitted */
>   
>   	spin_unlock_irqrestore(&engine->timeline.lock, flags);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index b30c37ac55a3..26bae7772208 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -840,8 +840,6 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
>   	return (head - tail - CACHELINE_BYTES) & (size - 1);
>   }
>   
> -void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
> -
>   int intel_engine_setup_common(struct intel_engine_cs *engine);
>   int intel_engine_init_common(struct intel_engine_cs *engine);
>   void intel_engine_cleanup_common(struct intel_engine_cs *engine);
> @@ -859,44 +857,6 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
>   u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
>   u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
>   
> -static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
> -{
> -	/*
> -	 * We are only peeking at the tail of the submit queue (and not the
> -	 * queue itself) in order to gain a hint as to the current active
> -	 * state of the engine. Callers are not expected to be taking
> -	 * engine->timeline->lock, nor are they expected to be concerned
>   -	 * with serialising this hint with anything, so document it as
> -	 * a hint and nothing more.
> -	 */
> -	return READ_ONCE(engine->timeline.seqno);
> -}
> -
> -static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
> -{
> -	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
> -}
> -
> -static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
> -					 u32 seqno)
> -{
> -	return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
> -}
> -
> -static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
> -					      u32 seqno)
> -{
> -	GEM_BUG_ON(!seqno);
> -	return intel_engine_signaled(engine, seqno);
> -}
> -
> -static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
> -					    u32 seqno)
> -{
> -	GEM_BUG_ON(!seqno);
> -	return intel_engine_signaled(engine, seqno - 1);
> -}
> -
>   void intel_engine_get_instdone(struct intel_engine_cs *engine,
>   			       struct intel_instdone *instdone);
>   
> diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
> index 6733dc5b6b4c..074d393f4a02 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_request.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_request.c
> @@ -226,8 +226,7 @@ static int igt_request_rewind(void *arg)
>   	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
> -		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
> -		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
> +		pr_err("timed out waiting for high priority request\n");
>   		goto err;
>   	}
>   
> diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
> index 0d35af07867b..f055da01ced9 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_engine.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
> @@ -86,7 +86,6 @@ static struct i915_request *first_request(struct mock_engine *engine)
>   static void advance(struct i915_request *request)
>   {
>   	list_del_init(&request->mock.link);
> -	intel_engine_write_global_seqno(request->engine, request->global_seqno);
>   	i915_request_mark_complete(request);
>   	GEM_BUG_ON(!i915_request_completed(request));
>   
> @@ -276,7 +275,6 @@ void mock_engine_flush(struct intel_engine_cs *engine)
>   
>   void mock_engine_reset(struct intel_engine_cs *engine)
>   {
> -	intel_engine_write_global_seqno(engine, 0);
>   }
>   
>   void mock_engine_free(struct intel_engine_cs *engine)
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  reply	other threads:[~2019-02-11 18:22 UTC|newest]

Thread overview: 97+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-02-06 13:03 The road to load balancing Chris Wilson
2019-02-06 13:03 ` [PATCH 01/46] drm/i915: Hack and slash, throttle execbuffer hogs Chris Wilson
2019-02-06 13:03 ` [PATCH 02/46] drm/i915: Revoke mmaps and prevent access to fence registers across reset Chris Wilson
2019-02-06 15:56   ` Mika Kuoppala
2019-02-06 16:08     ` Chris Wilson
2019-02-06 16:18       ` Chris Wilson
2019-02-26 19:53   ` Rodrigo Vivi
2019-02-26 20:27     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 03/46] drm/i915: Force the GPU reset upon wedging Chris Wilson
2019-02-06 13:03 ` [PATCH 04/46] drm/i915: Uninterruptibly drain the timelines on unwedging Chris Wilson
2019-02-06 13:03 ` [PATCH 05/46] drm/i915: Wait for old resets before applying debugfs/i915_wedged Chris Wilson
2019-02-06 13:03 ` [PATCH 06/46] drm/i915: Serialise resets with wedging Chris Wilson
2019-02-06 13:03 ` [PATCH 07/46] drm/i915: Don't claim an unstarted request was guilty Chris Wilson
2019-02-06 13:03 ` [PATCH 08/46] drm/i915/execlists: Suppress mere WAIT preemption Chris Wilson
2019-02-11 11:19   ` Tvrtko Ursulin
2019-02-19 10:22   ` Matthew Auld
2019-02-19 10:34     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 09/46] drm/i915/execlists: Suppress redundant preemption Chris Wilson
2019-02-06 13:03 ` [PATCH 10/46] drm/i915: Make request allocation caches global Chris Wilson
2019-02-11 11:43   ` Tvrtko Ursulin
2019-02-11 12:40     ` Chris Wilson
2019-02-11 17:02       ` Tvrtko Ursulin
2019-02-12 11:51         ` Chris Wilson
2019-02-06 13:03 ` [PATCH 11/46] drm/i915: Keep timeline HWSP allocated until idle across the system Chris Wilson
2019-02-06 13:03 ` [PATCH 12/46] drm/i915/execlists: Refactor out can_merge_rq() Chris Wilson
2019-02-06 13:03 ` [PATCH 13/46] drm/i915: Compute the global scheduler caps Chris Wilson
2019-02-11 12:24   ` Tvrtko Ursulin
2019-02-11 12:33     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 14/46] drm/i915: Use HW semaphores for inter-engine synchronisation on gen8+ Chris Wilson
2019-02-06 13:03 ` [PATCH 15/46] drm/i915: Prioritise non-busywait semaphore workloads Chris Wilson
2019-02-06 13:03 ` [PATCH 16/46] drm/i915: Show support for accurate sw PMU busyness tracking Chris Wilson
2019-02-06 13:03 ` [PATCH 17/46] drm/i915: Apply rps waitboosting for dma_fence_wait_timeout() Chris Wilson
2019-02-11 18:06   ` Tvrtko Ursulin
2019-02-06 13:03 ` [PATCH 18/46] drm/i915: Replace global_seqno with a hangcheck heartbeat seqno Chris Wilson
2019-02-11 12:40   ` Tvrtko Ursulin
2019-02-11 12:44     ` Chris Wilson
2019-02-11 16:56       ` Tvrtko Ursulin
2019-02-12 13:36         ` Chris Wilson
2019-02-06 13:03 ` [PATCH 19/46] drm/i915/pmu: Always sample an active ringbuffer Chris Wilson
2019-02-11 18:18   ` Tvrtko Ursulin
2019-02-12 13:40     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 20/46] drm/i915: Remove access to global seqno in the HWSP Chris Wilson
2019-02-11 18:22   ` Tvrtko Ursulin [this message]
2019-02-06 13:03 ` [PATCH 21/46] drm/i915: Remove i915_request.global_seqno Chris Wilson
2019-02-11 18:44   ` Tvrtko Ursulin
2019-02-12 13:45     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 22/46] drm/i915: Force GPU idle on suspend Chris Wilson
2019-02-06 13:03 ` [PATCH 23/46] drm/i915/selftests: Improve switch-to-kernel-context checking Chris Wilson
2019-02-06 13:03 ` [PATCH 24/46] drm/i915: Do a synchronous switch-to-kernel-context on idling Chris Wilson
2019-02-21 19:48   ` Daniele Ceraolo Spurio
2019-02-21 21:17     ` Chris Wilson
2019-02-21 21:31       ` Daniele Ceraolo Spurio
2019-02-21 21:42         ` Chris Wilson
2019-02-21 22:53           ` Daniele Ceraolo Spurio
2019-02-21 23:25             ` Chris Wilson
2019-02-22  0:29               ` Daniele Ceraolo Spurio
2019-02-06 13:03 ` [PATCH 25/46] drm/i915: Store the BIT(engine->id) as the engine's mask Chris Wilson
2019-02-11 18:51   ` Tvrtko Ursulin
2019-02-12 13:51     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 26/46] drm/i915: Refactor common code to load initial power context Chris Wilson
2019-02-06 13:03 ` [PATCH 27/46] drm/i915: Reduce presumption of request ordering for barriers Chris Wilson
2019-02-06 13:03 ` [PATCH 28/46] drm/i915: Remove has-kernel-context Chris Wilson
2019-02-06 13:03 ` [PATCH 29/46] drm/i915: Introduce the i915_user_extension_method Chris Wilson
2019-02-11 19:00   ` Tvrtko Ursulin
2019-02-12 13:56     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 30/46] drm/i915: Track active engines within a context Chris Wilson
2019-02-11 19:11   ` Tvrtko Ursulin
2019-02-12 13:59     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 31/46] drm/i915: Introduce a context barrier callback Chris Wilson
2019-02-06 13:03 ` [PATCH 32/46] drm/i915: Create/destroy VM (ppGTT) for use with contexts Chris Wilson
2019-02-12 11:18   ` Tvrtko Ursulin
2019-02-12 14:11     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 33/46] drm/i915: Extend CONTEXT_CREATE to set parameters upon construction Chris Wilson
2019-02-12 13:43   ` Tvrtko Ursulin
2019-02-06 13:03 ` [PATCH 34/46] drm/i915: Allow contexts to share a single timeline across all engines Chris Wilson
2019-02-06 13:03 ` [PATCH 35/46] drm/i915: Fix I915_EXEC_RING_MASK Chris Wilson
2019-02-06 13:03 ` [PATCH 36/46] drm/i915: Remove last traces of exec-id (GEM_BUSY) Chris Wilson
2019-02-06 13:03 ` [PATCH 37/46] drm/i915: Re-arrange execbuf so context is known before engine Chris Wilson
2019-02-06 13:03 ` [PATCH 38/46] drm/i915: Allow a context to define its set of engines Chris Wilson
2019-02-25 10:41   ` Tvrtko Ursulin
2019-02-25 10:47     ` Chris Wilson
2019-02-06 13:03 ` [PATCH 39/46] drm/i915: Extend I915_CONTEXT_PARAM_SSEU to support local ctx->engine[] Chris Wilson
2019-02-06 13:03 ` [PATCH 40/46] drm/i915: Pass around the intel_context Chris Wilson
2019-02-06 13:03 ` [PATCH 41/46] drm/i915: Split struct intel_context definition to its own header Chris Wilson
2019-02-06 13:03 ` [PATCH 42/46] drm/i915: Move over to intel_context_lookup() Chris Wilson
2019-02-06 14:27   ` [PATCH] " Chris Wilson
2019-02-06 13:03 ` [PATCH 43/46] drm/i915: Load balancing across a virtual engine Chris Wilson
2019-02-06 13:03 ` [PATCH 44/46] drm/i915: Extend execution fence to support a callback Chris Wilson
2019-02-06 13:03 ` [PATCH 45/46] drm/i915/execlists: Virtual engine bonding Chris Wilson
2019-02-06 13:03 ` [PATCH 46/46] drm/i915: Allow specification of parallel execbuf Chris Wilson
2019-02-06 13:52 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/46] drm/i915: Hack and slash, throttle execbuffer hogs Patchwork
2019-02-06 14:09 ` ✗ Fi.CI.BAT: failure " Patchwork
2019-02-06 14:11 ` ✗ Fi.CI.SPARSE: warning " Patchwork
2019-02-06 14:37 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/46] drm/i915: Hack and slash, throttle execbuffer hogs (rev2) Patchwork
2019-02-06 14:55 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-02-06 14:56 ` ✓ Fi.CI.BAT: success " Patchwork
2019-02-06 16:18 ` ✗ Fi.CI.IGT: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=8553d4a5-df80-5c91-0aa6-d4c817af9a5a@linux.intel.com \
    --to=tvrtko.ursulin@linux.intel.com \
    --cc=chris@chris-wilson.co.uk \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox