public inbox for intel-gfx@lists.freedesktop.org
* [PATCH 1/3] drm: Track drm_mm nodes with an interval tree
@ 2015-10-06 10:53 Chris Wilson
  2015-10-06 10:53 ` [PATCH 2/3] drm/i915: Allow the user to pass a context to any ring Chris Wilson
                   ` (3 more replies)
  0 siblings, 4 replies; 27+ messages in thread
From: Chris Wilson @ 2015-10-06 10:53 UTC (permalink / raw)
  To: intel-gfx; +Cc: dri-devel

In addition to the last-in/first-out stack for accessing drm_mm nodes,
we occasionally, and in future patches frequently, want to find a
drm_mm_node by address. To do so efficiently we need to track the nodes
in an interval tree: a lookup for a particular address then costs
O(lg(N)), where N is the number of nodes in the range manager, as
opposed to the O(N) list walk required today. Insertion, however, gains
an extra O(lg(N)) step for every node, irrespective of whether the
interval tree is ever queried. For future i915 patches, eliminating the
linear walk is a significant improvement.
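
Purely for illustration (not part of this patch; the helper name is
made up here), a driver could then find the node covering a given
address with a sketch like the following, assuming the declarations
from <drm/drm_mm.h> and <linux/interval_tree.h>:

	static struct drm_mm_node *
	lookup_node_by_address(struct drm_mm *mm, u64 addr)
	{
		struct interval_tree_node *it;

		/* First node whose [start, last] interval contains addr. */
		it = interval_tree_iter_first(&mm->interval_tree, addr, addr);
		if (it == NULL)
			return NULL;

		return container_of(it, struct drm_mm_node, it);
	}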

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: dri-devel@lists.freedesktop.org
---
 drivers/gpu/drm/Kconfig  |  1 +
 drivers/gpu/drm/drm_mm.c | 71 ++++++++++++++++++++++++++++++++----------------
 include/drm/drm_mm.h     |  5 ++++
 3 files changed, 54 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 06ae5008c5ed..e25050a5a843 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -12,6 +12,7 @@ menuconfig DRM
 	select I2C
 	select I2C_ALGOBIT
 	select DMA_SHARED_BUFFER
+	select INTERVAL_TREE
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04de6fd88f8c..e3acd860f738 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -153,6 +153,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	node->it.start = node->start;
+	node->it.last = node->start + size - 1;
+	interval_tree_insert(&node->it, &mm->interval_tree);
+
 	BUG_ON(node->start + node->size > adj_end);
 
 	node->hole_follows = 0;
@@ -178,39 +182,53 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-	struct drm_mm_node *hole;
 	u64 end = node->start + node->size;
-	u64 hole_start;
-	u64 hole_end;
-
-	BUG_ON(node == NULL);
+	struct interval_tree_node *it;
+	struct drm_mm_node *hole;
+	u64 hole_start, hole_end;
 
 	/* Find the relevant hole to add our node to */
-	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		if (hole_start > node->start || hole_end < end)
-			continue;
+	it = interval_tree_iter_first(&mm->interval_tree,
+				      node->start, (u64)-1);
+	if (it == NULL) {
+		hole = list_last_entry(&mm->head_node.node_list,
+				       struct drm_mm_node, node_list);
+	} else {
+		hole = container_of(it, typeof(*hole), it);
+		if (hole->start <= node->start)
+			return -ENOSPC;
+
+		hole = list_last_entry(&hole->node_list,
+				       struct drm_mm_node, node_list);
+	}
 
-		node->mm = mm;
-		node->allocated = 1;
+	hole_start = drm_mm_hole_node_start(hole);
+	hole_end = drm_mm_hole_node_end(hole);
+	if (hole_start > node->start || hole_end < end)
+		return -ENOSPC;
 
-		INIT_LIST_HEAD(&node->hole_stack);
-		list_add(&node->node_list, &hole->node_list);
+	node->mm = mm;
+	node->allocated = 1;
 
-		if (node->start == hole_start) {
-			hole->hole_follows = 0;
-			list_del_init(&hole->hole_stack);
-		}
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole->node_list);
 
-		node->hole_follows = 0;
-		if (end != hole_end) {
-			list_add(&node->hole_stack, &mm->hole_stack);
-			node->hole_follows = 1;
-		}
+	node->it.start = node->start;
+	node->it.last = node->start + node->size - 1;
+	interval_tree_insert(&node->it, &mm->interval_tree);
 
-		return 0;
+	if (node->start == hole_start) {
+		hole->hole_follows = 0;
+		list_del_init(&hole->hole_stack);
 	}
 
-	return -ENOSPC;
+	node->hole_follows = 0;
+	if (end != hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
@@ -300,6 +318,10 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	node->it.start = node->start;
+	node->it.last = node->start + node->size - 1;
+	interval_tree_insert(&node->it, &mm->interval_tree);
+
 	BUG_ON(node->start < start);
 	BUG_ON(node->start < adj_start);
 	BUG_ON(node->start + node->size > adj_end);
@@ -388,6 +410,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	} else
 		list_move(&prev_node->hole_stack, &mm->hole_stack);
 
+	interval_tree_remove(&node->it, &mm->interval_tree);
 	list_del(&node->node_list);
 	node->allocated = 0;
 }
@@ -756,6 +779,8 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
 	mm->head_node.size = start - mm->head_node.start;
 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
+	mm->interval_tree = RB_ROOT;
+
 	mm->color_adjust = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 0de6290df4da..6d531fe529bf 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -37,6 +37,7 @@
  * Generic range manager structs
  */
 #include <linux/bug.h>
+#include <linux/interval_tree.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
@@ -61,6 +62,7 @@ enum drm_mm_allocator_flags {
 struct drm_mm_node {
 	struct list_head node_list;
 	struct list_head hole_stack;
+	struct interval_tree_node it;
 	unsigned hole_follows : 1;
 	unsigned scanned_block : 1;
 	unsigned scanned_prev_free : 1;
@@ -79,6 +81,9 @@ struct drm_mm {
 	/* head_node.node_list is the list of all memory nodes, ordered
 	 * according to the (increasing) start address of the memory node. */
 	struct drm_mm_node head_node;
+	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
+	struct rb_root interval_tree;
+
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_color;
-- 
2.6.0

* [PATCH 1/3] drm/i915: Fix obj->map_and_fenceable for ppgtt
@ 2014-05-15 15:55 Chris Wilson
  2014-05-15 15:55 ` [PATCH 3/3] drm/i915: Add soft-pinning API for execbuffer Chris Wilson
  0 siblings, 1 reply; 27+ messages in thread
From: Chris Wilson @ 2014-05-15 15:55 UTC (permalink / raw)
  To: intel-gfx; +Cc: Daniel Vetter, Ben Widawsky

With ppgtt, it is no longer correct to mark an object as
map_and_fenceable simply because we unbind it from the global gtt. This
has consequences during execbuffer, where we use obj->map_and_fenceable
in use_cpu_reloc() to decide which access method to use for writing
into the object. For a ppgtt object, map_and_fenceable will currently
be true even though it is bound only into a ppgtt and not into the
ggtt, leading to an invalid access on a non-LLC architecture.
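
For context, use_cpu_reloc() at this point decides roughly as follows
(reconstructed here for illustration; the exact conditions may differ
in the tree this patch is based on):

	static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
	{
		/* A falsely-true map_and_fenceable makes this return 0 on a
		 * non-LLC platform, steering the relocation write through a
		 * GGTT mapping that a ppgtt-only object does not have.
		 */
		return (HAS_LLC(obj->base.dev) ||
			obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
			!obj->map_and_fenceable);
	}

The changes below stop pretending that unbound objects are
map_and_fenceable, so this check can no longer be fooled.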

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem.c            |   8 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 146 +++++++++++++++--------------
 2 files changed, 79 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f8b2c16745f8..844ea6048321 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2807,9 +2807,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	i915_gem_gtt_finish_object(obj);
 
 	list_del_init(&vma->mm_list);
-	/* Avoid an unnecessary call to unbind on rebind. */
 	if (i915_is_ggtt(vma->vm))
-		obj->map_and_fenceable = true;
+		obj->map_and_fenceable = false;
 
 	drm_mm_remove_node(&vma->node);
 	i915_gem_vma_destroy(vma);
@@ -3159,6 +3158,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 			return 0;
 		}
 	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
 		reg = i915_find_fence_reg(dev);
 		if (IS_ERR(reg))
 			return PTR_ERR(reg);
@@ -4170,8 +4172,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;
 
 	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c7ee1e3013ac..b4d8c31c0919 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,7 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 
 struct eb_vmas {
 	struct list_head vmas;
@@ -532,14 +533,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 }
 
 static int
-need_reloc_mappable(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
-		i915_is_ggtt(vma->vm);
-}
-
-static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 				struct intel_ring_buffer *ring,
 				bool *need_reloc)
@@ -547,19 +540,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-	bool need_fence;
 	unsigned flags;
 	int ret;
 
 	flags = 0;
-
-	need_fence =
-		has_fenced_gpu_access &&
-		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-		obj->tiling_mode != I915_TILING_NONE;
-	if (need_fence || need_reloc_mappable(vma))
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
 		flags |= PIN_MAPPABLE;
-
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
 
@@ -595,6 +581,27 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
+static bool
+need_reloc_mappable(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+
+	if (entry->relocation_count == 0)
+		return false;
+
+	if (!i915_is_ggtt(vma->vm))
+		return false;
+
+	/* See also use_cpu_reloc() */
+	if (HAS_LLC(vma->obj->base.dev))
+		return false;
+
+	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return false;
+
+	return true;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -627,9 +634,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			obj->tiling_mode != I915_TILING_NONE;
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
-		if (need_mappable)
+		if (need_mappable) {
+			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
 			list_move(&vma->exec_list, &ordered_vmas);
-		else
+		} else
 			list_move_tail(&vma->exec_list, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
@@ -657,25 +665,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(vma, vmas, exec_list) {
 			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-			bool need_fence, need_mappable;
 
 			obj = vma->obj;
 
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			need_fence =
-				has_fenced_gpu_access &&
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(vma);
-
-			WARN_ON((need_mappable || need_fence) &&
-			       !i915_is_ggtt(vma->vm));
-
-			if ((entry->alignment &&
-			     vma->node.start & (entry->alignment - 1)) ||
-			    (need_mappable && !obj->map_and_fenceable))
+			if ((entry->alignment && vma->node.start & (entry->alignment - 1)) ||
+			    (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable))
 				ret = i915_vma_unbind(vma);
 			else
 				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -776,9 +773,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		 * relocations were valid.
 		 */
 		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
 				ret = -EFAULT;
 				mutex_lock(&dev->struct_mutex);
 				goto err;
@@ -1268,33 +1265,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
-	if (flags & I915_DISPATCH_SECURE &&
-	    !batch_obj->has_global_gtt_mapping) {
-		/* When we have multiple VMs, we'll need to make sure that we
-		 * allocate space first */
-		struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
-		BUG_ON(!vma);
-		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
-	}
+	if (flags & I915_DISPATCH_SECURE) {
+		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
+		if (ret)
+			goto err;
 
-	if (flags & I915_DISPATCH_SECURE)
 		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
-	else
+	} else
 		exec_start += i915_gem_obj_offset(batch_obj, vm);
 
 	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
 	if (ret)
-		goto err;
+		goto err_batch;
 
 	ret = i915_switch_context(ring, ctx);
 	if (ret)
-		goto err;
+		goto err_batch;
 
 	if (ring == &dev_priv->ring[RCS] &&
 	    mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(ring, 4);
 		if (ret)
-				goto err;
+			goto err_batch;
 
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
@@ -1308,30 +1300,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
 		ret = i915_reset_gen7_sol_offsets(dev, ring);
 		if (ret)
-			goto err;
+			goto err_batch;
 	}
 
-
 	exec_len = args->batch_len;
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
 			ret = i915_emit_box(dev, &cliprects[i],
 					    args->DR1, args->DR4);
 			if (ret)
-				goto err;
+				goto err_batch;
 
 			ret = ring->dispatch_execbuffer(ring,
 							exec_start, exec_len,
 							flags);
 			if (ret)
-				goto err;
+				goto err_batch;
 		}
 	} else {
 		ret = ring->dispatch_execbuffer(ring,
 						exec_start, exec_len,
 						flags);
 		if (ret)
-			goto err;
+			goto err_batch;
 	}
 
 	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
@@ -1339,6 +1330,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
+err_batch:
+	if (flags & I915_DISPATCH_SECURE)
+		i915_gem_object_ggtt_unpin(batch_obj);
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
@@ -1420,18 +1414,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
 	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
 		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
 		}
 	}
 
@@ -1482,14 +1479,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+				   to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
 		}
 	}
 
-- 
2.0.0.rc2


end of thread, other threads:[~2015-12-02 13:42 UTC | newest]

Thread overview: 27+ messages
2015-10-06 10:53 [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Chris Wilson
2015-10-06 10:53 ` [PATCH 2/3] drm/i915: Allow the user to pass a context to any ring Chris Wilson
2015-10-06 13:57   ` Daniel, Thomas
2015-12-02 13:42     ` Tvrtko Ursulin
2015-10-06 10:53 ` [PATCH 3/3] drm/i915: Add soft-pinning API for execbuffer Chris Wilson
2015-10-06 13:59   ` Daniel, Thomas
2015-10-21 15:07     ` Daniel Vetter
2015-10-21 15:11       ` Daniel, Thomas
2015-10-23  2:31         ` Yang, Rong R
2015-10-27 11:51   ` akash goel
2015-11-05 10:57     ` Daniel, Thomas
2015-12-02 13:28     ` Chris Wilson
2015-11-05 17:51   ` Kristian Høgsberg
2015-11-05 18:17     ` Jesse Barnes
2015-11-06 13:38       ` Chris Wilson
2015-11-06 17:01         ` Jesse Barnes
2015-11-06 23:58         ` Kristian Høgsberg
2015-10-06 11:11 ` [Intel-gfx] [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Daniel Vetter
2015-10-06 11:19   ` Chris Wilson
2015-10-06 12:01     ` Daniel Vetter
2015-10-07 10:22     ` David Herrmann
2015-10-16  8:54       ` Daniel, Thomas
2015-10-16 14:26         ` [Intel-gfx] " Daniel Vetter
2015-10-21 15:11 ` Daniel Vetter
2015-10-21 15:14   ` David Herrmann
2015-10-22  8:07     ` [Intel-gfx] " Daniel Vetter
  -- strict thread matches above, loose matches on Subject: below --
2014-05-15 15:55 [PATCH 1/3] drm/i915: Fix obj->map_and_fenceable for ppgtt Chris Wilson
2014-05-15 15:55 ` [PATCH 3/3] drm/i915: Add soft-pinning API for execbuffer Chris Wilson
