* [PATCH 1/3] drm: Track drm_mm nodes with an interval tree
@ 2016-08-03 15:04 Chris Wilson
2016-08-03 15:04 ` [PATCH 2/3] drm: Convert drm_vma_manager to embedded interval-tree in drm_mm Chris Wilson
` (5 more replies)
0 siblings, 6 replies; 12+ messages in thread
From: Chris Wilson @ 2016-08-03 15:04 UTC (permalink / raw)
To: dri-devel; +Cc: daniel.vetter, intel-gfx, David Herrmann
In addition to the last-in/first-out stack for accessing drm_mm nodes,
we occasionally and in the future often want to find a drm_mm_node by an
address. To do so efficiently we need to track the nodes in an interval
tree - lookups for a particular address will then be O(lg(N)), where N
is the number of nodes in the range manager as opposed to O(N).
Insertion however gains an extra O(lg(N)) step for all nodes
irrespective of whether the interval tree is in use. For future i915
patches, eliminating the linear walk is a significant improvement.
v2: Use generic interval-tree template for u64 and faster insertion.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: dri-devel@lists.freedesktop.org
---
drivers/gpu/drm/drm_mm.c | 133 +++++++++++++++++++++++++++++++++++++++--------
include/drm/drm_mm.h | 12 +++++
2 files changed, 122 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cb39f45d6a16..5c188c56894b 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -46,6 +46,7 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
+#include <linux/interval_tree_generic.h>
/**
* DOC: Overview
@@ -103,6 +104,72 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
u64 end,
enum drm_mm_search_flags flags);
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->start + (node)->size - 1)
+
+INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
+ u64, __subtree_last,
+ START, LAST, static inline, drm_mm_interval_tree)
+
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+{
+ return drm_mm_interval_tree_iter_first(&mm->interval_tree,
+ start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_first);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
+{
+ return drm_mm_interval_tree_iter_next(node, start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_next);
+
+static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node)
+{
+ struct drm_mm *mm = hole_node->mm;
+ struct rb_node **link, *rb;
+ struct drm_mm_node *parent;
+
+ node->__subtree_last = LAST(node);
+
+ if (hole_node->allocated) {
+ rb = &hole_node->rb;
+ while (rb) {
+ parent = rb_entry(rb, struct drm_mm_node, rb);
+ if (parent->__subtree_last >= node->__subtree_last)
+ break;
+
+ parent->__subtree_last = node->__subtree_last;
+ rb = rb_parent(rb);
+ }
+
+ rb = &hole_node->rb;
+ link = &hole_node->rb.rb_right;
+ } else {
+ rb = NULL;
+ link = &mm->interval_tree.rb_node;
+ }
+
+ while (*link) {
+ rb = *link;
+ parent = rb_entry(rb, struct drm_mm_node, rb);
+ if (parent->__subtree_last < node->__subtree_last)
+ parent->__subtree_last = node->__subtree_last;
+ if (node->start < parent->start)
+ link = &parent->rb.rb_left;
+ else
+ link = &parent->rb.rb_right;
+ }
+
+ rb_link_node(&node->rb, rb, link);
+ rb_insert_augmented(&node->rb,
+ &mm->interval_tree,
+ &drm_mm_interval_tree_augment);
+}
+
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
u64 size, unsigned alignment,
@@ -153,6 +220,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
+ drm_mm_interval_tree_add_node(hole_node, node);
+
BUG_ON(node->start + node->size > adj_end);
node->hole_follows = 0;
@@ -178,41 +247,52 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
+ u64 end = node->start + node->size;
struct drm_mm_node *hole;
- u64 end;
- u64 hole_start;
- u64 hole_end;
-
- BUG_ON(node == NULL);
+ u64 hole_start, hole_end;
end = node->start + node->size;
/* Find the relevant hole to add our node to */
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- if (hole_start > node->start || hole_end < end)
- continue;
+ hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
+ node->start, ~(u64)0);
+ if (hole) {
+ if (hole->start < end)
+ return -ENOSPC;
+ } else {
+ hole = list_entry(&mm->head_node.node_list,
+ typeof(*hole), node_list);
+ }
- node->mm = mm;
- node->allocated = 1;
+ hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
+ if (!hole->hole_follows)
+ return -ENOSPC;
- INIT_LIST_HEAD(&node->hole_stack);
- list_add(&node->node_list, &hole->node_list);
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = __drm_mm_hole_node_end(hole);
+ if (hole_start > node->start || hole_end < end)
+ return -ENOSPC;
- if (node->start == hole_start) {
- hole->hole_follows = 0;
- list_del_init(&hole->hole_stack);
- }
+ node->mm = mm;
+ node->allocated = 1;
- node->hole_follows = 0;
- if (end != hole_end) {
- list_add(&node->hole_stack, &mm->hole_stack);
- node->hole_follows = 1;
- }
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole->node_list);
- return 0;
+ drm_mm_interval_tree_add_node(hole, node);
+
+ if (node->start == hole_start) {
+ hole->hole_follows = 0;
+ list_del_init(&hole->hole_stack);
+ }
+
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
}
- return -ENOSPC;
+ return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
@@ -302,6 +382,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
+ drm_mm_interval_tree_add_node(hole_node, node);
+
BUG_ON(node->start < start);
BUG_ON(node->start < adj_start);
BUG_ON(node->start + node->size > adj_end);
@@ -390,6 +472,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
} else
list_move(&prev_node->hole_stack, &mm->hole_stack);
+ drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
node->allocated = 0;
}
@@ -516,11 +599,13 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
list_replace(&old->node_list, &new->node_list);
list_replace(&old->hole_stack, &new->hole_stack);
+ rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
new->hole_follows = old->hole_follows;
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
new->color = old->color;
+ new->__subtree_last = old->__subtree_last;
old->allocated = 0;
new->allocated = 1;
@@ -758,6 +843,8 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+ mm->interval_tree = RB_ROOT;
+
mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index fc65118e5077..205ddcf6d55d 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -37,6 +37,7 @@
* Generic range manager structs
*/
#include <linux/bug.h>
+#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -61,6 +62,7 @@ enum drm_mm_allocator_flags {
struct drm_mm_node {
struct list_head node_list;
struct list_head hole_stack;
+ struct rb_node rb;
unsigned hole_follows : 1;
unsigned scanned_block : 1;
unsigned scanned_prev_free : 1;
@@ -70,6 +72,7 @@ struct drm_mm_node {
unsigned long color;
u64 start;
u64 size;
+ u64 __subtree_last;
struct drm_mm *mm;
};
@@ -79,6 +82,9 @@ struct drm_mm {
/* head_node.node_list is the list of all memory nodes, ordered
* according to the (increasing) start address of the memory node. */
struct drm_mm_node head_node;
+ /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
+ struct rb_root interval_tree;
+
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
@@ -295,6 +301,12 @@ void drm_mm_init(struct drm_mm *mm,
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
+
void drm_mm_init_scan(struct drm_mm *mm,
u64 size,
unsigned alignment,
--
2.8.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 2/3] drm: Convert drm_vma_manager to embedded interval-tree in drm_mm
2016-08-03 15:04 [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Chris Wilson
@ 2016-08-03 15:04 ` Chris Wilson
2016-08-03 17:45 ` David Herrmann
2016-08-03 15:04 ` [PATCH 3/3] drm: Skip initialising the drm_mm_node->hole_stack Chris Wilson
` (4 subsequent siblings)
5 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2016-08-03 15:04 UTC (permalink / raw)
To: dri-devel; +Cc: daniel.vetter, intel-gfx, David Herrmann
Having added an interval-tree to struct drm_mm, we can replace the
auxiliary rb-tree inside the drm_vma_manager with it.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: dri-devel@lists.freedesktop.org
---
drivers/gpu/drm/drm_vma_manager.c | 43 ++++++++-------------------------------
include/drm/drm_vma_manager.h | 2 --
2 files changed, 9 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index f306c8855978..0aef432679f9 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -86,7 +86,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
unsigned long page_offset, unsigned long size)
{
rwlock_init(&mgr->vm_lock);
- mgr->vm_addr_space_rb = RB_ROOT;
drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
@@ -145,16 +144,16 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
unsigned long start,
unsigned long pages)
{
- struct drm_vma_offset_node *node, *best;
+ struct drm_mm_node *node, *best;
struct rb_node *iter;
unsigned long offset;
- iter = mgr->vm_addr_space_rb.rb_node;
+ iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
best = NULL;
while (likely(iter)) {
- node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
- offset = node->vm_node.start;
+ node = rb_entry(iter, struct drm_mm_node, rb);
+ offset = node->start;
if (start >= offset) {
iter = iter->rb_right;
best = node;
@@ -167,38 +166,17 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
/* verify that the node spans the requested area */
if (best) {
- offset = best->vm_node.start + best->vm_node.size;
+ offset = best->start + best->size;
if (offset < start + pages)
best = NULL;
}
- return best;
-}
-EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
-
-/* internal helper to link @node into the rb-tree */
-static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
- struct drm_vma_offset_node *node)
-{
- struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
- struct rb_node *parent = NULL;
- struct drm_vma_offset_node *iter_node;
-
- while (likely(*iter)) {
- parent = *iter;
- iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+ if (!best)
+ return NULL;
- if (node->vm_node.start < iter_node->vm_node.start)
- iter = &(*iter)->rb_left;
- else if (node->vm_node.start > iter_node->vm_node.start)
- iter = &(*iter)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&node->vm_rb, parent, iter);
- rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+ return container_of(best, struct drm_vma_offset_node, vm_node);
}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
/**
* drm_vma_offset_add() - Add offset node to manager
@@ -240,8 +218,6 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
if (ret)
goto out_unlock;
- _drm_vma_offset_add_rb(mgr, node);
-
out_unlock:
write_unlock(&mgr->vm_lock);
return ret;
@@ -265,7 +241,6 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
write_lock(&mgr->vm_lock);
if (drm_mm_node_allocated(&node->vm_node)) {
- rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
drm_mm_remove_node(&node->vm_node);
memset(&node->vm_node, 0, sizeof(node->vm_node));
}
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 06ea8e077ec2..afba6fcac853 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -40,13 +40,11 @@ struct drm_vma_offset_file {
struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
- struct rb_node vm_rb;
struct rb_root vm_files;
};
struct drm_vma_offset_manager {
rwlock_t vm_lock;
- struct rb_root vm_addr_space_rb;
struct drm_mm vm_addr_space_mm;
};
--
2.8.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 3/3] drm: Skip initialising the drm_mm_node->hole_stack
2016-08-03 15:04 [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Chris Wilson
2016-08-03 15:04 ` [PATCH 2/3] drm: Convert drm_vma_manager to embedded interval-tree in drm_mm Chris Wilson
@ 2016-08-03 15:04 ` Chris Wilson
2016-08-03 17:49 ` David Herrmann
2016-08-03 15:29 ` ✗ Ro.CI.BAT: failure for series starting with [1/3] drm: Track drm_mm nodes with an interval tree Patchwork
` (3 subsequent siblings)
5 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2016-08-03 15:04 UTC (permalink / raw)
To: dri-devel; +Cc: daniel.vetter, intel-gfx
As we always add this to the drm_mm->hole_stack as our first operation,
we do not need to initialise the list node.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: dri-devel@lists.freedesktop.org
---
drivers/gpu/drm/drm_mm.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 5c188c56894b..3f56d4b0cdae 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -217,7 +217,6 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
node->color = color;
node->allocated = 1;
- INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
drm_mm_interval_tree_add_node(hole_node, node);
@@ -276,14 +275,13 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->mm = mm;
node->allocated = 1;
- INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
if (node->start == hole_start) {
hole->hole_follows = 0;
- list_del_init(&hole->hole_stack);
+ list_del(&hole->hole_stack);
}
node->hole_follows = 0;
@@ -379,7 +377,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
node->color = color;
node->allocated = 1;
- INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
drm_mm_interval_tree_add_node(hole_node, node);
@@ -833,7 +830,6 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- INIT_LIST_HEAD(&mm->head_node.hole_stack);
mm->head_node.hole_follows = 1;
mm->head_node.scanned_block = 0;
mm->head_node.scanned_prev_free = 0;
--
2.8.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 12+ messages in thread
* ✗ Ro.CI.BAT: failure for series starting with [1/3] drm: Track drm_mm nodes with an interval tree
2016-08-03 15:04 [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Chris Wilson
2016-08-03 15:04 ` [PATCH 2/3] drm: Convert drm_vma_manager to embedded interval-tree in drm_mm Chris Wilson
2016-08-03 15:04 ` [PATCH 3/3] drm: Skip initialising the drm_mm_node->hole_stack Chris Wilson
@ 2016-08-03 15:29 ` Patchwork
2016-08-03 17:55 ` [PATCH 1/3] " David Herrmann
` (2 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2016-08-03 15:29 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/3] drm: Track drm_mm nodes with an interval tree
URL : https://patchwork.freedesktop.org/series/10600/
State : failure
== Summary ==
Series 10600v1 Series without cover letter
http://patchwork.freedesktop.org/api/1.0/series/10600/revisions/1/mbox
Test kms_cursor_legacy:
Subgroup basic-cursor-vs-flip-varying-size:
pass -> FAIL (ro-ilk1-i5-650)
Subgroup basic-flip-vs-cursor-legacy:
fail -> PASS (ro-skl3-i5-6260u)
fail -> PASS (ro-byt-n2820)
pass -> FAIL (ro-bdw-i5-5250u)
fail -> PASS (ro-bdw-i7-5600u)
Subgroup basic-flip-vs-cursor-varying-size:
fail -> PASS (ro-snb-i7-2620M)
Test kms_flip:
Subgroup basic-flip-vs-wf_vblank:
pass -> FAIL (ro-byt-n2820)
Test kms_pipe_crc_basic:
Subgroup suspend-read-crc-pipe-a:
dmesg-warn -> PASS (ro-bdw-i7-5557U)
fi-hsw-i7-4770k total:240 pass:218 dwarn:0 dfail:0 fail:0 skip:22
fi-kbl-qkkr total:240 pass:181 dwarn:29 dfail:0 fail:3 skip:27
fi-skl-i5-6260u total:240 pass:224 dwarn:0 dfail:0 fail:2 skip:14
fi-skl-i7-6700k total:240 pass:208 dwarn:0 dfail:0 fail:4 skip:28
fi-snb-i7-2600 total:240 pass:198 dwarn:0 dfail:0 fail:0 skip:42
ro-bdw-i5-5250u total:240 pass:218 dwarn:4 dfail:0 fail:2 skip:16
ro-bdw-i7-5557U total:240 pass:224 dwarn:0 dfail:0 fail:0 skip:16
ro-bdw-i7-5600u total:240 pass:207 dwarn:0 dfail:0 fail:1 skip:32
ro-bsw-n3050 total:240 pass:194 dwarn:0 dfail:0 fail:4 skip:42
ro-byt-n2820 total:240 pass:197 dwarn:0 dfail:0 fail:3 skip:40
ro-hsw-i3-4010u total:240 pass:214 dwarn:0 dfail:0 fail:0 skip:26
ro-hsw-i7-4770r total:240 pass:214 dwarn:0 dfail:0 fail:0 skip:26
ro-ilk-i7-620lm total:240 pass:173 dwarn:1 dfail:0 fail:1 skip:65
ro-ilk1-i5-650 total:235 pass:173 dwarn:0 dfail:0 fail:2 skip:60
ro-ivb2-i7-3770 total:240 pass:209 dwarn:0 dfail:0 fail:0 skip:31
ro-skl3-i5-6260u total:240 pass:223 dwarn:0 dfail:0 fail:3 skip:14
ro-snb-i7-2620M total:240 pass:198 dwarn:0 dfail:0 fail:1 skip:41
Results at /archive/results/CI_IGT_test/RO_Patchwork_1681/
0bee311 drm-intel-nightly: 2016y-08m-03d-13h-02m-57s UTC integration manifest
33bafff drm: Skip initialising the drm_mm_node->hole_stack
af9f939c drm: Convert drm_vma_manager to embedded interval-tree in drm_mm
a2e42a8 drm: Track drm_mm nodes with an interval tree
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/3] drm: Convert drm_vma_manager to embedded interval-tree in drm_mm
2016-08-03 15:04 ` [PATCH 2/3] drm: Convert drm_vma_manager to embedded interval-tree in drm_mm Chris Wilson
@ 2016-08-03 17:45 ` David Herrmann
0 siblings, 0 replies; 12+ messages in thread
From: David Herrmann @ 2016-08-03 17:45 UTC (permalink / raw)
To: Chris Wilson
Cc: Daniel Vetter, Intel Graphics Development,
dri-devel@lists.freedesktop.org
Hi Chris
On Wed, Aug 3, 2016 at 5:04 PM, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> Having added an interval-tree to struct drm_mm, we can replace the
> auxiliary rb-tree inside the drm_vma_manager with it.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: David Herrmann <dh.herrmann@gmail.com>
> Cc: dri-devel@lists.freedesktop.org
> ---
Should have done that right from start when writing drm_vma_manager..
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Thanks
David
> drivers/gpu/drm/drm_vma_manager.c | 43 ++++++++-------------------------------
> include/drm/drm_vma_manager.h | 2 --
> 2 files changed, 9 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
> index f306c8855978..0aef432679f9 100644
> --- a/drivers/gpu/drm/drm_vma_manager.c
> +++ b/drivers/gpu/drm/drm_vma_manager.c
> @@ -86,7 +86,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
> unsigned long page_offset, unsigned long size)
> {
> rwlock_init(&mgr->vm_lock);
> - mgr->vm_addr_space_rb = RB_ROOT;
> drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
> }
> EXPORT_SYMBOL(drm_vma_offset_manager_init);
> @@ -145,16 +144,16 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
> unsigned long start,
> unsigned long pages)
> {
> - struct drm_vma_offset_node *node, *best;
> + struct drm_mm_node *node, *best;
> struct rb_node *iter;
> unsigned long offset;
>
> - iter = mgr->vm_addr_space_rb.rb_node;
> + iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
> best = NULL;
>
> while (likely(iter)) {
> - node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
> - offset = node->vm_node.start;
> + node = rb_entry(iter, struct drm_mm_node, rb);
> + offset = node->start;
> if (start >= offset) {
> iter = iter->rb_right;
> best = node;
> @@ -167,38 +166,17 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
>
> /* verify that the node spans the requested area */
> if (best) {
> - offset = best->vm_node.start + best->vm_node.size;
> + offset = best->start + best->size;
> if (offset < start + pages)
> best = NULL;
> }
>
> - return best;
> -}
> -EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
> -
> -/* internal helper to link @node into the rb-tree */
> -static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
> - struct drm_vma_offset_node *node)
> -{
> - struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
> - struct rb_node *parent = NULL;
> - struct drm_vma_offset_node *iter_node;
> -
> - while (likely(*iter)) {
> - parent = *iter;
> - iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
> + if (!best)
> + return NULL;
>
> - if (node->vm_node.start < iter_node->vm_node.start)
> - iter = &(*iter)->rb_left;
> - else if (node->vm_node.start > iter_node->vm_node.start)
> - iter = &(*iter)->rb_right;
> - else
> - BUG();
> - }
> -
> - rb_link_node(&node->vm_rb, parent, iter);
> - rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
> + return container_of(best, struct drm_vma_offset_node, vm_node);
> }
> +EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
>
> /**
> * drm_vma_offset_add() - Add offset node to manager
> @@ -240,8 +218,6 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
> if (ret)
> goto out_unlock;
>
> - _drm_vma_offset_add_rb(mgr, node);
> -
> out_unlock:
> write_unlock(&mgr->vm_lock);
> return ret;
> @@ -265,7 +241,6 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
> write_lock(&mgr->vm_lock);
>
> if (drm_mm_node_allocated(&node->vm_node)) {
> - rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
> drm_mm_remove_node(&node->vm_node);
> memset(&node->vm_node, 0, sizeof(node->vm_node));
> }
> diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
> index 06ea8e077ec2..afba6fcac853 100644
> --- a/include/drm/drm_vma_manager.h
> +++ b/include/drm/drm_vma_manager.h
> @@ -40,13 +40,11 @@ struct drm_vma_offset_file {
> struct drm_vma_offset_node {
> rwlock_t vm_lock;
> struct drm_mm_node vm_node;
> - struct rb_node vm_rb;
> struct rb_root vm_files;
> };
>
> struct drm_vma_offset_manager {
> rwlock_t vm_lock;
> - struct rb_root vm_addr_space_rb;
> struct drm_mm vm_addr_space_mm;
> };
>
> --
> 2.8.1
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 3/3] drm: Skip initialising the drm_mm_node->hole_stack
2016-08-03 15:04 ` [PATCH 3/3] drm: Skip initialising the drm_mm_node->hole_stack Chris Wilson
@ 2016-08-03 17:49 ` David Herrmann
0 siblings, 0 replies; 12+ messages in thread
From: David Herrmann @ 2016-08-03 17:49 UTC (permalink / raw)
To: Chris Wilson
Cc: Daniel Vetter, Intel Graphics Development,
dri-devel@lists.freedesktop.org
Hi Chris
On Wed, Aug 3, 2016 at 5:04 PM, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> As we always add this to the drm_mm->hole_stack as our first operation,
> we do not need to initialise the list node.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: David Herrmann <dh.herrmann@gmail.com>
> Cc: dri-devel@lists.freedesktop.org
> ---
> drivers/gpu/drm/drm_mm.c | 6 +-----
> 1 file changed, 1 insertion(+), 5 deletions(-)
"hole_follows" tells whether it is linked or not. So yeah, indeed, redundant.
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Thanks
David
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index 5c188c56894b..3f56d4b0cdae 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -217,7 +217,6 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> node->color = color;
> node->allocated = 1;
>
> - INIT_LIST_HEAD(&node->hole_stack);
> list_add(&node->node_list, &hole_node->node_list);
>
> drm_mm_interval_tree_add_node(hole_node, node);
> @@ -276,14 +275,13 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
> node->mm = mm;
> node->allocated = 1;
>
> - INIT_LIST_HEAD(&node->hole_stack);
> list_add(&node->node_list, &hole->node_list);
>
> drm_mm_interval_tree_add_node(hole, node);
>
> if (node->start == hole_start) {
> hole->hole_follows = 0;
> - list_del_init(&hole->hole_stack);
> + list_del(&hole->hole_stack);
> }
>
> node->hole_follows = 0;
> @@ -379,7 +377,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
> node->color = color;
> node->allocated = 1;
>
> - INIT_LIST_HEAD(&node->hole_stack);
> list_add(&node->node_list, &hole_node->node_list);
>
> drm_mm_interval_tree_add_node(hole_node, node);
> @@ -833,7 +830,6 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
>
> /* Clever trick to avoid a special case in the free hole tracking. */
> INIT_LIST_HEAD(&mm->head_node.node_list);
> - INIT_LIST_HEAD(&mm->head_node.hole_stack);
> mm->head_node.hole_follows = 1;
> mm->head_node.scanned_block = 0;
> mm->head_node.scanned_prev_free = 0;
> --
> 2.8.1
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] drm: Track drm_mm nodes with an interval tree
2016-08-03 15:04 [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Chris Wilson
` (2 preceding siblings ...)
2016-08-03 15:29 ` ✗ Ro.CI.BAT: failure for series starting with [1/3] drm: Track drm_mm nodes with an interval tree Patchwork
@ 2016-08-03 17:55 ` David Herrmann
2016-08-03 18:26 ` [PATCH] drm: Declare that create drm_mm nodes with size 0 is illegal Chris Wilson
2016-08-03 20:33 ` ✗ Fi.CI.BAT: failure for series starting with drm: Declare that create drm_mm nodes with size 0 is illegal (rev2) Patchwork
2016-08-04 5:20 ` ✗ Ro.CI.BAT: " Patchwork
5 siblings, 1 reply; 12+ messages in thread
From: David Herrmann @ 2016-08-03 17:55 UTC (permalink / raw)
To: Chris Wilson
Cc: Daniel Vetter, Intel Graphics Development,
dri-devel@lists.freedesktop.org
Hey
On Wed, Aug 3, 2016 at 5:04 PM, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> In addition to the last-in/first-out stack for accessing drm_mm nodes,
> we occasionally and in the future often want to find a drm_mm_node by an
> address. To do so efficiently we need to track the nodes in an interval
> tree - lookups for a particular address will then be O(lg(N)), where N
> is the number of nodes in the range manager as opposed to O(N).
> Insertion however gains an extra O(lg(N)) step for all nodes
> irrespective of whether the interval tree is in use. For future i915
> patches, eliminating the linear walk is a significant improvement.
>
> v2: Use generic interval-tree template for u64 and faster insertion.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: David Herrmann <dh.herrmann@gmail.com>
> Cc: dri-devel@lists.freedesktop.org
> ---
> drivers/gpu/drm/drm_mm.c | 133 +++++++++++++++++++++++++++++++++++++++--------
> include/drm/drm_mm.h | 12 +++++
> 2 files changed, 122 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index cb39f45d6a16..5c188c56894b 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -46,6 +46,7 @@
> #include <linux/slab.h>
> #include <linux/seq_file.h>
> #include <linux/export.h>
> +#include <linux/interval_tree_generic.h>
>
> /**
> * DOC: Overview
> @@ -103,6 +104,72 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
> u64 end,
> enum drm_mm_search_flags flags);
>
> +#define START(node) ((node)->start)
> +#define LAST(node) ((node)->start + (node)->size - 1)
So this goes nuts with "size == 0". We do not explicitly prevent that
from happening, I think, though it might be prevented in the upper layers.
Might wanna add WARN_ONs?
Otherwise, looks good to me:
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Thanks
David
> +
> +INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
> + u64, __subtree_last,
> + START, LAST, static inline, drm_mm_interval_tree)
> +
> +struct drm_mm_node *
> +drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
> +{
> + return drm_mm_interval_tree_iter_first(&mm->interval_tree,
> + start, last);
> +}
> +EXPORT_SYMBOL(drm_mm_interval_first);
> +
> +struct drm_mm_node *
> +drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
> +{
> + return drm_mm_interval_tree_iter_next(node, start, last);
> +}
> +EXPORT_SYMBOL(drm_mm_interval_next);
> +
> +static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
> + struct drm_mm_node *node)
> +{
> + struct drm_mm *mm = hole_node->mm;
> + struct rb_node **link, *rb;
> + struct drm_mm_node *parent;
> +
> + node->__subtree_last = LAST(node);
> +
> + if (hole_node->allocated) {
> + rb = &hole_node->rb;
> + while (rb) {
> + parent = rb_entry(rb, struct drm_mm_node, rb);
> + if (parent->__subtree_last >= node->__subtree_last)
> + break;
> +
> + parent->__subtree_last = node->__subtree_last;
> + rb = rb_parent(rb);
> + }
> +
> + rb = &hole_node->rb;
> + link = &hole_node->rb.rb_right;
> + } else {
> + rb = NULL;
> + link = &mm->interval_tree.rb_node;
> + }
> +
> + while (*link) {
> + rb = *link;
> + parent = rb_entry(rb, struct drm_mm_node, rb);
> + if (parent->__subtree_last < node->__subtree_last)
> + parent->__subtree_last = node->__subtree_last;
> + if (node->start < parent->start)
> + link = &parent->rb.rb_left;
> + else
> + link = &parent->rb.rb_right;
> + }
> +
> + rb_link_node(&node->rb, rb, link);
> + rb_insert_augmented(&node->rb,
> + &mm->interval_tree,
> + &drm_mm_interval_tree_augment);
> +}
> +
> static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> struct drm_mm_node *node,
> u64 size, unsigned alignment,
> @@ -153,6 +220,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> INIT_LIST_HEAD(&node->hole_stack);
> list_add(&node->node_list, &hole_node->node_list);
>
> + drm_mm_interval_tree_add_node(hole_node, node);
> +
> BUG_ON(node->start + node->size > adj_end);
>
> node->hole_follows = 0;
> @@ -178,41 +247,52 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> */
> int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
> {
> + u64 end = node->start + node->size;
> struct drm_mm_node *hole;
> - u64 end;
> - u64 hole_start;
> - u64 hole_end;
> -
> - BUG_ON(node == NULL);
> + u64 hole_start, hole_end;
>
> end = node->start + node->size;
>
> /* Find the relevant hole to add our node to */
> - drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
> - if (hole_start > node->start || hole_end < end)
> - continue;
> + hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
> + node->start, ~(u64)0);
> + if (hole) {
> + if (hole->start < end)
> + return -ENOSPC;
> + } else {
> + hole = list_entry(&mm->head_node.node_list,
> + typeof(*hole), node_list);
> + }
>
> - node->mm = mm;
> - node->allocated = 1;
> + hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
> + if (!hole->hole_follows)
> + return -ENOSPC;
>
> - INIT_LIST_HEAD(&node->hole_stack);
> - list_add(&node->node_list, &hole->node_list);
> + hole_start = __drm_mm_hole_node_start(hole);
> + hole_end = __drm_mm_hole_node_end(hole);
> + if (hole_start > node->start || hole_end < end)
> + return -ENOSPC;
>
> - if (node->start == hole_start) {
> - hole->hole_follows = 0;
> - list_del_init(&hole->hole_stack);
> - }
> + node->mm = mm;
> + node->allocated = 1;
>
> - node->hole_follows = 0;
> - if (end != hole_end) {
> - list_add(&node->hole_stack, &mm->hole_stack);
> - node->hole_follows = 1;
> - }
> + INIT_LIST_HEAD(&node->hole_stack);
> + list_add(&node->node_list, &hole->node_list);
>
> - return 0;
> + drm_mm_interval_tree_add_node(hole, node);
> +
> + if (node->start == hole_start) {
> + hole->hole_follows = 0;
> + list_del_init(&hole->hole_stack);
> + }
> +
> + node->hole_follows = 0;
> + if (end != hole_end) {
> + list_add(&node->hole_stack, &mm->hole_stack);
> + node->hole_follows = 1;
> }
>
> - return -ENOSPC;
> + return 0;
> }
> EXPORT_SYMBOL(drm_mm_reserve_node);
>
> @@ -302,6 +382,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
> INIT_LIST_HEAD(&node->hole_stack);
> list_add(&node->node_list, &hole_node->node_list);
>
> + drm_mm_interval_tree_add_node(hole_node, node);
> +
> BUG_ON(node->start < start);
> BUG_ON(node->start < adj_start);
> BUG_ON(node->start + node->size > adj_end);
> @@ -390,6 +472,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
> } else
> list_move(&prev_node->hole_stack, &mm->hole_stack);
>
> + drm_mm_interval_tree_remove(node, &mm->interval_tree);
> list_del(&node->node_list);
> node->allocated = 0;
> }
> @@ -516,11 +599,13 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
> {
> list_replace(&old->node_list, &new->node_list);
> list_replace(&old->hole_stack, &new->hole_stack);
> + rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
> new->hole_follows = old->hole_follows;
> new->mm = old->mm;
> new->start = old->start;
> new->size = old->size;
> new->color = old->color;
> + new->__subtree_last = old->__subtree_last;
>
> old->allocated = 0;
> new->allocated = 1;
> @@ -758,6 +843,8 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
> mm->head_node.size = start - mm->head_node.start;
> list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
>
> + mm->interval_tree = RB_ROOT;
> +
> mm->color_adjust = NULL;
> }
> EXPORT_SYMBOL(drm_mm_init);
> diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> index fc65118e5077..205ddcf6d55d 100644
> --- a/include/drm/drm_mm.h
> +++ b/include/drm/drm_mm.h
> @@ -37,6 +37,7 @@
> * Generic range manager structs
> */
> #include <linux/bug.h>
> +#include <linux/rbtree.h>
> #include <linux/kernel.h>
> #include <linux/list.h>
> #include <linux/spinlock.h>
> @@ -61,6 +62,7 @@ enum drm_mm_allocator_flags {
> struct drm_mm_node {
> struct list_head node_list;
> struct list_head hole_stack;
> + struct rb_node rb;
> unsigned hole_follows : 1;
> unsigned scanned_block : 1;
> unsigned scanned_prev_free : 1;
> @@ -70,6 +72,7 @@ struct drm_mm_node {
> unsigned long color;
> u64 start;
> u64 size;
> + u64 __subtree_last;
> struct drm_mm *mm;
> };
>
> @@ -79,6 +82,9 @@ struct drm_mm {
> /* head_node.node_list is the list of all memory nodes, ordered
> * according to the (increasing) start address of the memory node. */
> struct drm_mm_node head_node;
> + /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
> + struct rb_root interval_tree;
> +
> unsigned int scan_check_range : 1;
> unsigned scan_alignment;
> unsigned long scan_color;
> @@ -295,6 +301,12 @@ void drm_mm_init(struct drm_mm *mm,
> void drm_mm_takedown(struct drm_mm *mm);
> bool drm_mm_clean(struct drm_mm *mm);
>
> +struct drm_mm_node *
> +drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
> +
> +struct drm_mm_node *
> +drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
> +
> void drm_mm_init_scan(struct drm_mm *mm,
> u64 size,
> unsigned alignment,
> --
> 2.8.1
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH] drm: Declare that create drm_mm nodes with size 0 is illegal
2016-08-03 17:55 ` [PATCH 1/3] " David Herrmann
@ 2016-08-03 18:26 ` Chris Wilson
2016-08-04 8:13 ` David Herrmann
2016-08-04 8:14 ` Daniel Vetter
0 siblings, 2 replies; 12+ messages in thread
From: Chris Wilson @ 2016-08-03 18:26 UTC (permalink / raw)
To: dri-devel; +Cc: intel-gfx
At a higher level, all objects are created with a definite size, i.e. 0 is
illegal. In forthcoming patches, this assumption is depended upon in
the drm_mm range manager, i.e. trying to create a drm_mm node with size
0 will have undefined behaviour. Add a couple of WARNs upon creating the
drm_mm node to prevent later bugs.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/drm_mm.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cb39f45d6a16..e8c15795386d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -185,6 +185,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
BUG_ON(node == NULL);
+ if (WARN_ON(node->size == 0))
+ return -EINVAL;
+
end = node->start + node->size;
/* Find the relevant hole to add our node to */
@@ -239,6 +242,9 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
{
struct drm_mm_node *hole_node;
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
hole_node = drm_mm_search_free_generic(mm, size, alignment,
color, sflags);
if (!hole_node)
@@ -340,6 +346,9 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
{
struct drm_mm_node *hole_node;
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
start, end, sflags);
--
2.8.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* ✗ Fi.CI.BAT: failure for series starting with drm: Declare that create drm_mm nodes with size 0 is illegal (rev2)
2016-08-03 15:04 [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Chris Wilson
` (3 preceding siblings ...)
2016-08-03 17:55 ` [PATCH 1/3] " David Herrmann
@ 2016-08-03 20:33 ` Patchwork
2016-08-04 5:20 ` ✗ Ro.CI.BAT: " Patchwork
5 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2016-08-03 20:33 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with drm: Declare that create drm_mm nodes with size 0 is illegal (rev2)
URL : https://patchwork.freedesktop.org/series/10600/
State : failure
== Summary ==
CC net/ipv6/tcpv6_offload.o
LD drivers/net/ethernet/intel/igbvf/built-in.o
CC [M] drivers/net/ethernet/intel/igbvf/vf.o
CC [M] drivers/net/ethernet/intel/igbvf/mbx.o
CC net/ipv6/exthdrs_offload.o
CC [M] drivers/net/ethernet/intel/igbvf/ethtool.o
CC net/ipv6/inet6_hashtables.o
CC [M] drivers/net/ethernet/intel/igbvf/netdev.o
CC net/ipv6/mcast_snoop.o
LD net/ipv6/ipv6.o
CC [M] drivers/net/ethernet/intel/igb/igb_ethtool.o
CC [M] drivers/net/ethernet/intel/e1000/e1000_param.o
CC [M] drivers/net/ethernet/intel/igb/e1000_82575.o
CC [M] drivers/net/ethernet/intel/igb/e1000_mac.o
CC [M] drivers/net/ethernet/intel/igb/e1000_nvm.o
CC [M] drivers/net/ethernet/intel/igb/e1000_phy.o
CC [M] drivers/net/ethernet/intel/igb/e1000_mbx.o
CC [M] drivers/net/ethernet/intel/igb/igb_ptp.o
CC [M] drivers/net/ethernet/intel/igb/e1000_i210.o
CC [M] drivers/net/ethernet/intel/igb/igb_hwmon.o
LD net/ipv6/built-in.o
LD net/built-in.o
LD [M] drivers/net/ethernet/intel/e1000/e1000.o
LD [M] drivers/net/ethernet/intel/igbvf/igbvf.o
LD [M] drivers/net/ethernet/intel/e1000e/e1000e.o
LD [M] drivers/net/ethernet/intel/igb/igb.o
LD drivers/net/ethernet/built-in.o
LD drivers/net/built-in.o
Makefile:976: recipe for target 'drivers' failed
make: *** [drivers] Error 2
Full logs at /archive/deploy/logs/CI_Patchwork_build_2194
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* ✗ Ro.CI.BAT: failure for series starting with drm: Declare that create drm_mm nodes with size 0 is illegal (rev2)
2016-08-03 15:04 [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Chris Wilson
` (4 preceding siblings ...)
2016-08-03 20:33 ` ✗ Fi.CI.BAT: failure for series starting with drm: Declare that create drm_mm nodes with size 0 is illegal (rev2) Patchwork
@ 2016-08-04 5:20 ` Patchwork
5 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2016-08-04 5:20 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with drm: Declare that create drm_mm nodes with size 0 is illegal (rev2)
URL : https://patchwork.freedesktop.org/series/10600/
State : failure
== Summary ==
Applying: drm: Declare that create drm_mm nodes with size 0 is illegal
Applying: drm: Convert drm_vma_manager to embedded interval-tree in drm_mm
Applying: drm: Skip initialising the drm_mm_node->hole_stack
Using index info to reconstruct a base tree...
M drivers/gpu/drm/drm_mm.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/drm_mm.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/drm_mm.c
error: Failed to merge in the changes.
Patch failed at 0003 drm: Skip initialising the drm_mm_node->hole_stack
The copy of the patch that failed is found in: .git/rebase-apply/patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH] drm: Declare that create drm_mm nodes with size 0 is illegal
2016-08-03 18:26 ` [PATCH] drm: Declare that create drm_mm nodes with size 0 is illegal Chris Wilson
@ 2016-08-04 8:13 ` David Herrmann
2016-08-04 8:14 ` Daniel Vetter
1 sibling, 0 replies; 12+ messages in thread
From: David Herrmann @ 2016-08-04 8:13 UTC (permalink / raw)
To: Chris Wilson; +Cc: Intel Graphics Development, dri-devel@lists.freedesktop.org
On Wed, Aug 3, 2016 at 8:26 PM, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> At a higher level, all objects are created with definite size i.e. 0 is
> illegal. In forthcoming patches, this assumption is dependent upon in
> the drm_mm range manager, i.e. trying to create a drm_mm node with size
> 0 will have undefined behaviour. Add a couple of WARNs upon creating the
> drm_mm node to prevent later bugs.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> drivers/gpu/drm/drm_mm.c | 9 +++++++++
> 1 file changed, 9 insertions(+)
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Thanks
David
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index cb39f45d6a16..e8c15795386d 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -185,6 +185,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
>
> BUG_ON(node == NULL);
>
> + if (WARN_ON(node->size == 0))
> + return -EINVAL;
> +
> end = node->start + node->size;
>
> /* Find the relevant hole to add our node to */
> @@ -239,6 +242,9 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
> {
> struct drm_mm_node *hole_node;
>
> + if (WARN_ON(size == 0))
> + return -EINVAL;
> +
> hole_node = drm_mm_search_free_generic(mm, size, alignment,
> color, sflags);
> if (!hole_node)
> @@ -340,6 +346,9 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
> {
> struct drm_mm_node *hole_node;
>
> + if (WARN_ON(size == 0))
> + return -EINVAL;
> +
> hole_node = drm_mm_search_free_in_range_generic(mm,
> size, alignment, color,
> start, end, sflags);
> --
> 2.8.1
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH] drm: Declare that create drm_mm nodes with size 0 is illegal
2016-08-03 18:26 ` [PATCH] drm: Declare that create drm_mm nodes with size 0 is illegal Chris Wilson
2016-08-04 8:13 ` David Herrmann
@ 2016-08-04 8:14 ` Daniel Vetter
1 sibling, 0 replies; 12+ messages in thread
From: Daniel Vetter @ 2016-08-04 8:14 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx, dri-devel
On Wed, Aug 03, 2016 at 07:26:28PM +0100, Chris Wilson wrote:
> At a higher level, all objects are created with definite size i.e. 0 is
> illegal. In forthcoming patches, this assumption is dependent upon in
> the drm_mm range manager, i.e. trying to create a drm_mm node with size
> 0 will have undefined behaviour. Add a couple of WARNs upon creating the
> drm_mm node to prevent later bugs.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Queued all up for -misc, will push out once -rc1 hits.
-Daniel
> ---
> drivers/gpu/drm/drm_mm.c | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index cb39f45d6a16..e8c15795386d 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -185,6 +185,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
>
> BUG_ON(node == NULL);
>
> + if (WARN_ON(node->size == 0))
> + return -EINVAL;
> +
> end = node->start + node->size;
>
> /* Find the relevant hole to add our node to */
> @@ -239,6 +242,9 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
> {
> struct drm_mm_node *hole_node;
>
> + if (WARN_ON(size == 0))
> + return -EINVAL;
> +
> hole_node = drm_mm_search_free_generic(mm, size, alignment,
> color, sflags);
> if (!hole_node)
> @@ -340,6 +346,9 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
> {
> struct drm_mm_node *hole_node;
>
> + if (WARN_ON(size == 0))
> + return -EINVAL;
> +
> hole_node = drm_mm_search_free_in_range_generic(mm,
> size, alignment, color,
> start, end, sflags);
> --
> 2.8.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
end of thread, other threads:[~2016-08-04 8:14 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-08-03 15:04 [PATCH 1/3] drm: Track drm_mm nodes with an interval tree Chris Wilson
2016-08-03 15:04 ` [PATCH 2/3] drm: Convert drm_vma_manager to embedded interval-tree in drm_mm Chris Wilson
2016-08-03 17:45 ` David Herrmann
2016-08-03 15:04 ` [PATCH 3/3] drm: Skip initialising the drm_mm_node->hole_stack Chris Wilson
2016-08-03 17:49 ` David Herrmann
2016-08-03 15:29 ` ✗ Ro.CI.BAT: failure for series starting with [1/3] drm: Track drm_mm nodes with an interval tree Patchwork
2016-08-03 17:55 ` [PATCH 1/3] " David Herrmann
2016-08-03 18:26 ` [PATCH] drm: Declare that create drm_mm nodes with size 0 is illegal Chris Wilson
2016-08-04 8:13 ` David Herrmann
2016-08-04 8:14 ` Daniel Vetter
2016-08-03 20:33 ` ✗ Fi.CI.BAT: failure for series starting with drm: Declare that create drm_mm nodes with size 0 is illegal (rev2) Patchwork
2016-08-04 5:20 ` ✗ Ro.CI.BAT: " Patchwork
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox