intel-gfx.lists.freedesktop.org archive mirror
 help / color / mirror / Atom feed
From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH v2] drm/i915: Track vma activity per fence.context, not per engine
Date: Fri, 29 Jun 2018 23:03:36 +0100	[thread overview]
Message-ID: <20180629220336.14040-1-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <20180629075348.27358-17-chris@chris-wilson.co.uk>

In the next patch, we will want to be able to use more flexible request
timelines that can hop between engines. From the vma's point of view,
we can then no longer rely on the binding of this request to an engine,
and so cannot ensure that different requests are ordered through a
per-engine timeline; we must therefore track activity on all timelines.
(We track activity on the vma itself to prevent unbinding from HW
before the HW has finished accessing it.)

v2: Switch to a rbtree for 32b safety (since using u64 as a radixtree
index is fraught with aliasing of unsigned longs).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c   |   3 -
 drivers/gpu/drm/i915/i915_gpu_error.c |  14 +---
 drivers/gpu/drm/i915/i915_gpu_error.h |   2 +-
 drivers/gpu/drm/i915/i915_request.h   |   1 +
 drivers/gpu/drm/i915/i915_vma.c       | 112 +++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_vma.h       |  46 +++--------
 6 files changed, 98 insertions(+), 80 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f46d873a7530..52e7bed477c2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2006,7 +2006,6 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct i915_vma *vma;
-	int i;
 
 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(size > ggtt->vm.total);
@@ -2015,8 +2014,6 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
-		init_request_active(&vma->last_read[i], NULL);
 	init_request_active(&vma->last_fence, NULL);
 
 	vma->vm = &ggtt->vm;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index df524c9cad40..8c81cf3aa182 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -335,21 +335,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 				struct drm_i915_error_buffer *err,
 				int count)
 {
-	int i;
-
 	err_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
+		err_printf(m, "    %08x_%08x %8u %02x %02x %02x",
 			   upper_32_bits(err->gtt_offset),
 			   lower_32_bits(err->gtt_offset),
 			   err->size,
 			   err->read_domains,
-			   err->write_domain);
-		for (i = 0; i < I915_NUM_ENGINES; i++)
-			err_printf(m, "%02x ", err->rseqno[i]);
-
-		err_printf(m, "] %02x", err->wseqno);
+			   err->write_domain,
+			   err->wseqno);
 		err_puts(m, tiling_flag(err->tiling));
 		err_puts(m, dirty_flag(err->dirty));
 		err_puts(m, purgeable_flag(err->purgeable));
@@ -1021,13 +1016,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 		       struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	int i;
 
 	err->size = obj->base.size;
 	err->name = obj->base.name;
 
-	for (i = 0; i < I915_NUM_ENGINES; i++)
-		err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
 	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
 	err->engine = __active_get_engine_id(&obj->frontbuffer_write);
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 58910f1dc67c..f893a4e8b783 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -177,7 +177,7 @@ struct i915_gpu_state {
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 rseqno[I915_NUM_ENGINES], wseqno;
+		u32 wseqno;
 		u64 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index a355a081485f..e1c9365dfefb 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -380,6 +380,7 @@ static inline void
 init_request_active(struct i915_gem_active *active,
 		    i915_gem_retire_fn retire)
 {
+	RCU_INIT_POINTER(active->request, NULL);
 	INIT_LIST_HEAD(&active->link);
 	active->retire = retire ?: i915_gem_retire_noop;
 }
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 7635c27e7e8b..2faad2a1d00e 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -63,18 +63,20 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
 
 #endif
 
+struct i915_vma_active {
+	struct i915_gem_active base;
+	struct i915_vma *vma;
+	struct rb_node node;
+	u64 timeline;
+};
+
 static void
-i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
+__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
 {
-	const unsigned int idx = rq->engine->id;
-	struct i915_vma *vma =
-		container_of(active, struct i915_vma, last_read[idx]);
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
-	i915_vma_clear_active(vma, idx);
-	if (i915_vma_is_active(vma))
+	GEM_BUG_ON(!i915_vma_is_active(vma));
+	if (--vma->active_count)
 		return;
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -108,6 +110,15 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
 	}
 }
 
+static void
+i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+	struct i915_vma_active *active =
+		container_of(base, typeof(*active), base);
+
+	__i915_vma_retire(active->vma, rq);
+}
+
 static struct i915_vma *
 vma_create(struct drm_i915_gem_object *obj,
 	   struct i915_address_space *vm,
@@ -115,7 +126,6 @@ vma_create(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma;
 	struct rb_node *rb, **p;
-	int i;
 
 	/* The aliasing_ppgtt should never be used directly! */
 	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
@@ -124,8 +134,8 @@ vma_create(struct drm_i915_gem_object *obj,
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
-		init_request_active(&vma->last_read[i], i915_vma_retire);
+	vma->active = RB_ROOT;
+
 	init_request_active(&vma->last_fence, NULL);
 	vma->vm = vm;
 	vma->ops = &vm->vma_ops;
@@ -778,13 +788,11 @@ void i915_vma_reopen(struct i915_vma *vma)
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
-	int i;
+	struct i915_vma_active *iter, *n;
 
 	GEM_BUG_ON(vma->node.allocated);
 	GEM_BUG_ON(vma->fence);
 
-	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
-		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
 	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
 
 	list_del(&vma->obj_link);
@@ -795,6 +803,11 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
+	rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
+		GEM_BUG_ON(i915_gem_active_isset(&iter->base));
+		kfree(iter);
+	}
+
 	kmem_cache_free(i915->vmas, vma);
 }
 
@@ -878,16 +891,54 @@ static void export_fence(struct i915_vma *vma,
 	reservation_object_unlock(resv);
 }
 
+static struct i915_gem_active *lookup_active(struct i915_vma *vma, u64 idx)
+{
+	struct i915_vma_active *active;
+	struct rb_node **p, *parent;
+
+	parent = NULL;
+	p = &vma->active.rb_node;
+	while (*p) {
+		parent = *p;
+
+		active = rb_entry(parent, struct i915_vma_active, node);
+		if (active->timeline == idx)
+			return &active->base;
+
+		if (active->timeline < idx)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+
+	active = kmalloc(sizeof(*active), GFP_KERNEL);
+	if (unlikely(!active))
+		return ERR_PTR(-ENOMEM);
+
+	init_request_active(&active->base, i915_vma_retire);
+	active->vma = vma;
+	active->timeline = idx;
+
+	rb_link_node(&active->node, parent, p);
+	rb_insert_color(&active->node, &vma->active);
+
+	return &active->base;
+}
+
 int i915_vma_move_to_active(struct i915_vma *vma,
 			    struct i915_request *rq,
 			    unsigned int flags)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	const unsigned int idx = rq->engine->id;
+	struct i915_gem_active *active;
 
 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
+	active = lookup_active(vma, rq->fence.context);
+	if (IS_ERR(active))
+		return PTR_ERR(active);
+
 	/*
 	 * Add a reference if we're newly entering the active list.
 	 * The order in which we add operations to the retirement queue is
@@ -896,11 +947,13 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (!i915_vma_is_active(vma))
+	if (!i915_gem_active_isset(active) && !vma->active_count++) {
+		list_move_tail(&vma->vm_link, &vma->vm->active_list);
 		obj->active_count++;
-	i915_vma_set_active(vma, idx);
-	i915_gem_active_set(&vma->last_read[idx], rq);
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
+	}
+	i915_gem_active_set(active, rq);
+	GEM_BUG_ON(!i915_vma_is_active(vma));
+	GEM_BUG_ON(!obj->active_count);
 
 	obj->write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
@@ -922,7 +975,6 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 
 int i915_vma_unbind(struct i915_vma *vma)
 {
-	unsigned long active;
 	int ret;
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -932,9 +984,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	 * have side-effects such as unpinning or even unbinding this vma.
 	 */
 	might_sleep();
-	active = i915_vma_get_active(vma);
-	if (active) {
-		int idx;
+	if (i915_vma_is_active(vma)) {
+		struct i915_vma_active *active, *n;
 
 		/*
 		 * When a closed VMA is retired, it is unbound - eek.
@@ -951,18 +1002,17 @@ int i915_vma_unbind(struct i915_vma *vma)
 		 */
 		__i915_vma_pin(vma);
 
-		for_each_active(active, idx) {
-			ret = i915_gem_active_retire(&vma->last_read[idx],
+		rbtree_postorder_for_each_entry_safe(active, n,
+						     &vma->active, node) {
+			ret = i915_gem_active_retire(&active->base,
 						     &vma->vm->i915->drm.struct_mutex);
 			if (ret)
-				break;
-		}
-
-		if (!ret) {
-			ret = i915_gem_active_retire(&vma->last_fence,
-						     &vma->vm->i915->drm.struct_mutex);
+				goto unpin;
 		}
 
+		ret = i915_gem_active_retire(&vma->last_fence,
+					     &vma->vm->i915->drm.struct_mutex);
+unpin:
 		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index a218b689e418..c297b0a0dc47 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -26,6 +26,7 @@
 #define __I915_VMA_H__
 
 #include <linux/io-mapping.h>
+#include <linux/rbtree.h>
 
 #include <drm/drm_mm.h>
 
@@ -94,8 +95,8 @@ struct i915_vma {
 #define I915_VMA_USERFAULT	BIT(I915_VMA_USERFAULT_BIT)
 #define I915_VMA_GGTT_WRITE	BIT(12)
 
-	unsigned int active;
-	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	unsigned int active_count;
+	struct rb_root active;
 	struct i915_gem_active last_fence;
 
 	/**
@@ -138,6 +139,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 
 void i915_vma_unpin_and_release(struct i915_vma **p_vma);
 
+static inline bool i915_vma_is_active(struct i915_vma *vma)
+{
+	return vma->active_count;
+}
+
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+					 struct i915_request *rq,
+					 unsigned int flags);
+
 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
 {
 	return vma->flags & I915_VMA_GGTT;
@@ -187,38 +197,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
 	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
 }
 
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
-	return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
-	return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
-				       unsigned int engine)
-{
-	vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
-					 unsigned int engine)
-{
-	vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
-					      unsigned int engine)
-{
-	return vma->active & BIT(engine);
-}
-
-int __must_check i915_vma_move_to_active(struct i915_vma *vma,
-					 struct i915_request *rq,
-					 unsigned int flags);
-
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-- 
2.18.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  parent reply	other threads:[~2018-06-29 22:03 UTC|newest]

Thread overview: 69+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-06-29  7:53 [PATCH 01/37] drm/i915/gtt: Add read only pages to gen8_pte_encode Chris Wilson
2018-06-29  7:53 ` [PATCH 02/37] drm/i915/gtt: Read-only pages for insert_entries on bdw+ Chris Wilson
2018-06-29  7:53 ` [PATCH 03/37] drm/i915: Prevent writing into a read-only object via a GGTT mmap Chris Wilson
2018-06-29  7:53 ` [PATCH 04/37] drm/i915: Reject attempted pwrites into a read-only object Chris Wilson
2018-06-29  7:53 ` [PATCH 05/37] drm/i915/userptr: Enable read-only support on gen8+ Chris Wilson
2018-06-29  7:53 ` [PATCH 06/37] drm/i915: Move rate-limiting request retire to after submission Chris Wilson
2018-06-29 10:00   ` Tvrtko Ursulin
2018-06-29 10:10     ` Chris Wilson
2018-06-29  7:53 ` [PATCH 07/37] drm/i915: Move engine request retirement to intel_engine_cs Chris Wilson
2018-06-29  7:53 ` [PATCH 08/37] drm/i915: Hold request reference for submission until retirement Chris Wilson
2018-06-29  7:53 ` [PATCH 09/37] drm/i915/execlists: Switch to rb_root_cached Chris Wilson
2018-07-11 13:20   ` Tvrtko Ursulin
2018-06-29  7:53 ` [PATCH 10/37] drm/i915: Reserve some priority bits for internal use Chris Wilson
2018-06-29  7:53 ` [PATCH 11/37] drm/i915: Combine multiple internal plists into the same i915_priolist bucket Chris Wilson
2018-06-29  7:53 ` [PATCH 12/37] drm/i915: Priority boost for new clients Chris Wilson
2018-06-29 10:04   ` Tvrtko Ursulin
2018-06-29 10:09     ` Chris Wilson
2018-06-29 10:36       ` Tvrtko Ursulin
2018-06-29 10:41         ` Chris Wilson
2018-06-29 10:51         ` Chris Wilson
2018-06-29 11:10           ` Tvrtko Ursulin
2018-07-02 10:19             ` Tvrtko Ursulin
2018-06-29  7:53 ` [PATCH 13/37] drm/i915: Priority boost switching to an idle ring Chris Wilson
2018-06-29 10:08   ` Tvrtko Ursulin
2018-06-29 10:15     ` Chris Wilson
2018-06-29 10:41       ` Tvrtko Ursulin
2018-06-29  7:53 ` [PATCH 14/37] drm/i915: Refactor export_fence() after i915_vma_move_to_active() Chris Wilson
2018-06-29 12:00   ` Tvrtko Ursulin
2018-06-29  7:53 ` [PATCH 15/37] drm/i915: Export i915_request_skip() Chris Wilson
2018-06-29 12:10   ` Tvrtko Ursulin
2018-06-29 12:15     ` Chris Wilson
2018-06-29  7:53 ` [PATCH 16/37] drm/i915: Start returning an error from i915_vma_move_to_active() Chris Wilson
2018-06-29 12:17   ` Tvrtko Ursulin
2018-06-29  7:53 ` [PATCH 17/37] drm/i915: Track vma activity per fence.context, not per engine Chris Wilson
2018-06-29 14:54   ` Tvrtko Ursulin
2018-06-29 15:03     ` Chris Wilson
2018-06-29 15:34       ` Chris Wilson
2018-06-29 15:08     ` Tvrtko Ursulin
2018-06-29 15:36       ` Chris Wilson
2018-06-29 15:39         ` Chris Wilson
2018-07-02  9:38           ` Tvrtko Ursulin
2018-06-29 22:03   ` Chris Wilson [this message]
2018-06-29  7:53 ` [PATCH 18/37] drm/i915: Track the last-active inside the i915_vma Chris Wilson
2018-06-29 22:01   ` [PATCH v2] " Chris Wilson
2018-06-29  7:53 ` [PATCH 19/37] drm/i915: Stop tracking MRU activity on VMA Chris Wilson
2018-06-29  7:53 ` [PATCH 20/37] drm/i915: Introduce the i915_user_extension_method Chris Wilson
2018-06-29  7:53 ` [PATCH 21/37] drm/i915: Extend CREATE_CONTEXT to allow inheritance ala clone() Chris Wilson
2018-06-29  7:53 ` [PATCH 22/37] drm/i915: Allow contexts to share a single timeline across all engines Chris Wilson
2018-06-29  7:53 ` [PATCH 23/37] drm/i915: Fix I915_EXEC_RING_MASK Chris Wilson
2018-06-29  7:53 ` [PATCH 24/37] drm/i915: Re-arrange execbuf so context is known before engine Chris Wilson
2018-06-29  7:53 ` [PATCH 25/37] drm/i915: Allow a context to define its set of engines Chris Wilson
2018-06-29  7:53 ` [PATCH 26/37] drm/i915/execlists: Flush the tasklet before unpinning Chris Wilson
2018-06-29  7:53 ` [PATCH 27/37] drm/i915/execlists: Refactor out can_merge_rq() Chris Wilson
2018-06-29  7:53 ` [PATCH 28/37] drm/i915: Replace nested subclassing with explicit subclasses Chris Wilson
2018-06-29  7:53 ` [PATCH 29/37] RFC drm/i915: Load balancing across a virtual engine Chris Wilson
2018-06-29  7:53 ` [PATCH 30/37] drm/i915: Introduce i915_address_space.mutex Chris Wilson
2018-06-29  7:53 ` [PATCH 31/37] drm/i915: Move fence register tracking to GGTT Chris Wilson
2018-06-29  7:53 ` [PATCH 32/37] drm/i915: Convert fences to use a GGTT lock rather than struct_mutex Chris Wilson
2018-06-29  7:53 ` [PATCH 33/37] drm/i915: Tidy i915_gem_suspend() Chris Wilson
2018-06-29  7:53 ` [PATCH 34/37] drm/i915: Move fence-reg interface to i915_gem_fence_reg.h Chris Wilson
2018-06-29  7:53 ` [PATCH 35/37] drm/i915: Dynamically allocate the array of drm_i915_gem_fence_reg Chris Wilson
2018-06-29  7:53 ` [PATCH 36/37] drm/i915: Pull all the reset functionality together into i915_reset.c Chris Wilson
2018-06-29  7:53 ` [PATCH 37/37] drm/i915: Remove GPU reset dependence on struct_mutex Chris Wilson
2018-06-29  8:58 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/37] drm/i915/gtt: Add read only pages to gen8_pte_encode Patchwork
2018-06-29  9:14 ` ✗ Fi.CI.SPARSE: " Patchwork
2018-06-29  9:16 ` ✓ Fi.CI.BAT: success " Patchwork
2018-06-29 11:53 ` ✗ Fi.CI.IGT: failure " Patchwork
2018-06-29 22:38 ` ✗ Fi.CI.BAT: failure for series starting with [01/37] drm/i915/gtt: Add read only pages to gen8_pte_encode (rev3) Patchwork
     [not found] <24ec4018c46bc7ee0d0b2afa1e855b1b9995e3f2>
2018-06-29 22:01 ` [PATCH v2] drm/i915: Track vma activity per fence.context, not per engine Chris Wilson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180629220336.14040-1-chris@chris-wilson.co.uk \
    --to=chris@chris-wilson.co.uk \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).