From: Yu Zhang <yu.c.zhang@linux.intel.com>
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, keir@xen.org, ian.campbell@citrix.com,
stefano.stabellini@eu.citrix.com, andrew.cooper3@citrix.com,
ian.jackson@eu.citrix.com, Paul.Durrant@citrix.com,
zhiyuan.lv@intel.com, jbeulich@suse.com, wei.liu2@citrix.com
Subject: [PATCH v12 1/3] Refactor rangeset structure for better performance.
Date: Fri, 29 Jan 2016 18:45:12 +0800 [thread overview]
Message-ID: <1454064314-7799-2-git-send-email-yu.c.zhang@linux.intel.com> (raw)
In-Reply-To: <1454064314-7799-1-git-send-email-yu.c.zhang@linux.intel.com>
This patch refactors struct rangeset to base it on the red-black
tree structure, instead of on the current doubly linked list.
Currently, ioreq leverages rangeset to keep track of the I/O and
memory resources to be emulated. Yet when the number of ranges
inside one ioreq server is very high, traversing a doubly linked
list could be time consuming. With this patch, the time complexity
for searching a rangeset can be improved from O(n) to O(log(n)).
The interfaces of rangeset remain the same, and no new APIs are
introduced.
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Shuai Ruan <shuai.ruan@linux.intel.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
---
xen/common/rangeset.c | 82 +++++++++++++++++++++++++++++++++++++--------------
1 file changed, 60 insertions(+), 22 deletions(-)
diff --git a/xen/common/rangeset.c b/xen/common/rangeset.c
index 6c6293c..d15d8d5 100644
--- a/xen/common/rangeset.c
+++ b/xen/common/rangeset.c
@@ -10,11 +10,12 @@
#include <xen/sched.h>
#include <xen/errno.h>
#include <xen/rangeset.h>
+#include <xen/rbtree.h>
#include <xsm/xsm.h>
/* An inclusive range [s,e] and pointer to next range in ascending order. */
struct range {
- struct list_head list;
+ struct rb_node node;
unsigned long s, e;
};
@@ -24,7 +25,7 @@ struct rangeset {
struct domain *domain;
/* Ordered list of ranges contained in this set, and protecting lock. */
- struct list_head range_list;
+ struct rb_root range_tree;
/* Number of ranges that can be allocated */
long nr_ranges;
@@ -45,41 +46,78 @@ struct rangeset {
static struct range *find_range(
struct rangeset *r, unsigned long s)
{
- struct range *x = NULL, *y;
+ struct rb_node *node;
+ struct range *x;
+ struct range *prev = NULL;
- list_for_each_entry ( y, &r->range_list, list )
+ node = r->range_tree.rb_node;
+ while ( node != NULL )
{
- if ( y->s > s )
- break;
- x = y;
+ x = container_of(node, struct range, node);
+ if ( (s >= x->s) && (s <= x->e) )
+ return x;
+ if ( s < x->s )
+ node = node->rb_left;
+ else
+ {
+ prev = x;
+ node = node->rb_right;
+ }
}
- return x;
+ return prev;
}
/* Return the lowest range in the set r, or NULL if r is empty. */
static struct range *first_range(
struct rangeset *r)
{
- if ( list_empty(&r->range_list) )
- return NULL;
- return list_entry(r->range_list.next, struct range, list);
+ struct rb_node *node;
+
+ node = rb_first(&r->range_tree);
+ if ( node != NULL )
+ return container_of(node, struct range, node);
+
+ return NULL;
}
/* Return range following x in ascending order, or NULL if x is the highest. */
static struct range *next_range(
struct rangeset *r, struct range *x)
{
- if ( x->list.next == &r->range_list )
- return NULL;
- return list_entry(x->list.next, struct range, list);
+ struct rb_node *node;
+
+ node = rb_next(&x->node);
+ if ( node != NULL )
+ return container_of(node, struct range, node);
+
+ return NULL;
}
/* Insert range y after range x in r. Insert as first range if x is NULL. */
static void insert_range(
struct rangeset *r, struct range *x, struct range *y)
{
- list_add(&y->list, (x != NULL) ? &x->list : &r->range_list);
+ struct rb_node **node;
+ struct rb_node *parent = NULL;
+
+ if ( x == NULL )
+ node = &r->range_tree.rb_node;
+ else
+ {
+ node = &x->node.rb_right;
+ parent = &x->node;
+ }
+
+ while ( *node != NULL )
+ {
+ parent = *node;
+ node = &parent->rb_left;
+ }
+
+ /* Add new node and rebalance the red-black tree. */
+ rb_link_node(&y->node, parent, node);
+ rb_insert_color(&y->node, &r->range_tree);
}
/* Remove a range from its list and free it. */
@@ -88,7 +126,7 @@ static void destroy_range(
{
r->nr_ranges++;
- list_del(&x->list);
+ rb_erase(&x->node, &r->range_tree);
xfree(x);
}
@@ -319,7 +357,7 @@ bool_t rangeset_contains_singleton(
bool_t rangeset_is_empty(
const struct rangeset *r)
{
- return ((r == NULL) || list_empty(&r->range_list));
+ return ((r == NULL) || RB_EMPTY_ROOT(&r->range_tree));
}
struct rangeset *rangeset_new(
@@ -332,7 +370,7 @@ struct rangeset *rangeset_new(
return NULL;
rwlock_init(&r->lock);
- INIT_LIST_HEAD(&r->range_list);
+ r->range_tree = RB_ROOT;
r->nr_ranges = -1;
BUG_ON(flags & ~RANGESETF_prettyprint_hex);
@@ -410,7 +448,7 @@ void rangeset_domain_destroy(
void rangeset_swap(struct rangeset *a, struct rangeset *b)
{
- LIST_HEAD(tmp);
+ struct rb_node *tmp;
if ( a < b )
{
@@ -423,9 +461,9 @@ void rangeset_swap(struct rangeset *a, struct rangeset *b)
write_lock(&a->lock);
}
- list_splice_init(&a->range_list, &tmp);
- list_splice_init(&b->range_list, &a->range_list);
- list_splice(&tmp, &b->range_list);
+ tmp = a->range_tree.rb_node;
+ a->range_tree.rb_node = b->range_tree.rb_node;
+ b->range_tree.rb_node = tmp;
write_unlock(&a->lock);
write_unlock(&b->lock);
--
1.9.1
next prev parent reply other threads:[~2016-01-29 10:45 UTC|newest]
Thread overview: 109+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-01-29 10:45 [PATCH v12 0/3] Refactor ioreq server for better performance Yu Zhang
2016-01-29 10:45 ` Yu Zhang [this message]
2016-01-29 10:45 ` [PATCH v12 2/3] Differentiate IO/mem resources tracked by ioreq server Yu Zhang
2016-01-29 10:45 ` [PATCH v3 3/3] tools: introduce parameter max_wp_ram_ranges Yu Zhang
2016-01-29 16:33 ` Jan Beulich
2016-01-30 14:38 ` Yu, Zhang
2016-02-01 7:52 ` Jan Beulich
2016-02-01 12:02 ` Wei Liu
2016-02-01 12:15 ` Jan Beulich
2016-02-01 12:49 ` Wei Liu
2016-02-01 13:07 ` Jan Beulich
2016-02-01 15:14 ` Yu, Zhang
2016-02-01 16:16 ` Jan Beulich
2016-02-01 16:33 ` Yu, Zhang
2016-02-01 16:19 ` Yu, Zhang
2016-02-01 16:35 ` Jan Beulich
2016-02-01 16:37 ` Yu, Zhang
2016-02-01 17:05 ` Ian Jackson
2016-02-02 8:04 ` Yu, Zhang
2016-02-02 11:51 ` Wei Liu
2016-02-02 13:56 ` Yu, Zhang
2016-02-02 10:32 ` Jan Beulich
2016-02-02 10:56 ` Yu, Zhang
2016-02-02 11:12 ` Jan Beulich
2016-02-02 14:01 ` Yu, Zhang
2016-02-02 14:42 ` Jan Beulich
2016-02-02 15:00 ` Yu, Zhang
2016-02-02 15:21 ` Jan Beulich
2016-02-02 15:19 ` Yu, Zhang
2016-02-03 7:10 ` Yu, Zhang
2016-02-03 8:32 ` Jan Beulich
2016-02-03 12:20 ` Paul Durrant
2016-02-03 12:35 ` Jan Beulich
2016-02-03 12:50 ` Paul Durrant
2016-02-03 13:00 ` Jan Beulich
2016-02-03 13:07 ` Paul Durrant
2016-02-03 13:17 ` Jan Beulich
2016-02-03 13:18 ` Paul Durrant
2016-02-03 14:43 ` Ian Jackson
2016-02-03 15:10 ` Paul Durrant
2016-02-03 17:50 ` George Dunlap
2016-02-04 8:50 ` Yu, Zhang
2016-02-03 17:41 ` George Dunlap
2016-02-03 18:21 ` George Dunlap
2016-02-03 18:26 ` George Dunlap
2016-02-03 18:39 ` Andrew Cooper
2016-02-03 19:12 ` George Dunlap
2016-02-04 8:51 ` Yu, Zhang
2016-02-04 10:49 ` George Dunlap
2016-02-04 11:08 ` Ian Campbell
2016-02-04 11:19 ` Ian Campbell
2016-02-04 8:50 ` Yu, Zhang
2016-02-04 9:28 ` Paul Durrant
2016-02-04 9:38 ` Yu, Zhang
2016-02-04 9:49 ` Paul Durrant
2016-02-04 10:34 ` Jan Beulich
2016-02-04 13:33 ` Ian Jackson
2016-02-04 13:47 ` Paul Durrant
2016-02-04 14:12 ` Jan Beulich
2016-02-04 14:25 ` Paul Durrant
2016-02-04 15:06 ` Ian Jackson
2016-02-04 15:51 ` Paul Durrant
2016-02-05 3:47 ` Tian, Kevin
2016-02-05 3:35 ` Tian, Kevin
2016-02-04 14:08 ` Jan Beulich
2016-02-04 17:12 ` George Dunlap
2016-02-05 4:18 ` Tian, Kevin
2016-02-05 8:41 ` Yu, Zhang
2016-02-05 8:32 ` Jan Beulich
2016-02-05 9:24 ` Paul Durrant
2016-02-05 10:41 ` Jan Beulich
2016-02-05 11:14 ` George Dunlap
2016-02-05 11:24 ` Paul Durrant
2016-02-16 7:22 ` Tian, Kevin
2016-02-16 8:50 ` Paul Durrant
2016-02-16 10:33 ` Jan Beulich
2016-02-16 11:11 ` Paul Durrant
2016-02-17 3:18 ` Tian, Kevin
2016-02-17 8:58 ` Paul Durrant
2016-02-17 9:32 ` Jan Beulich
2016-02-17 9:58 ` Tian, Kevin
2016-02-17 10:03 ` Paul Durrant
2016-02-17 10:22 ` Jan Beulich
2016-02-17 10:24 ` Paul Durrant
2016-02-17 10:25 ` Tian, Kevin
2016-02-17 11:01 ` George Dunlap
2016-02-17 11:12 ` Paul Durrant
2016-02-22 15:56 ` George Dunlap
2016-02-22 16:02 ` Paul Durrant
2016-02-22 16:45 ` George Dunlap
2016-02-22 17:01 ` Paul Durrant
2016-02-22 17:23 ` George Dunlap
2016-02-22 17:34 ` Paul Durrant
2016-02-05 8:41 ` Yu, Zhang
2016-02-04 11:06 ` George Dunlap
2016-02-05 2:01 ` Zhiyuan Lv
2016-02-05 3:44 ` Tian, Kevin
2016-02-05 8:38 ` Jan Beulich
2016-02-05 11:05 ` George Dunlap
2016-02-05 15:13 ` Zhiyuan Lv
2016-02-05 20:14 ` George Dunlap
2016-02-05 8:40 ` Yu, Zhang
2016-02-04 10:06 ` Ian Campbell
2016-02-05 3:31 ` Tian, Kevin
2016-02-02 11:31 ` Andrew Cooper
2016-02-02 11:43 ` Jan Beulich
2016-02-02 14:20 ` Andrew Cooper
2016-02-01 11:57 ` Wei Liu
2016-02-01 15:15 ` Yu, Zhang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1454064314-7799-2-git-send-email-yu.c.zhang@linux.intel.com \
--to=yu.c.zhang@linux.intel.com \
--cc=Paul.Durrant@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=ian.campbell@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=keir@xen.org \
--cc=kevin.tian@intel.com \
--cc=stefano.stabellini@eu.citrix.com \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
--cc=zhiyuan.lv@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).