* [PATCH 01/14] tmem: cleanup: drop pageshift from struct tmem_pool
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 02/14] tmem: refactor function do_tmem_op() Bob Liu
` (12 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
pool->pageshift is used to calculate the pagesize; it can be dropped because
the pagesize is always the same as PAGE_SIZE.
This patch removes pageshift from struct tmem_pool and uses POOL_PAGESHIFT and
PAGE_SIZE instead.
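For reference, the identity the cleanup relies on can be checked in isolation
with a standalone sketch (not part of the patch; PAGE_SHIFT is assumed to be 12
here, as on x86):

    #include <assert.h>

    #define PAGE_SHIFT     12                    /* assumed value, as on x86 */
    #define PAGE_SIZE      (1UL << PAGE_SHIFT)
    #define POOL_PAGESHIFT (PAGE_SHIFT - 12)

    int main(void)
    {
        /* The old expression 1 << (pool->pageshift + 12) is always PAGE_SIZE. */
        assert((1UL << (POOL_PAGESHIFT + 12)) == PAGE_SIZE);
        return 0;
    }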
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index d9e912b..aa8e6f5 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -88,6 +88,7 @@ struct share_list {
struct client *client;
};
+#define POOL_PAGESHIFT (PAGE_SHIFT - 12)
#define OBJ_HASH_BUCKETS 256 /* must be power of two */
#define OBJ_HASH_BUCKETS_MASK (OBJ_HASH_BUCKETS-1)
@@ -95,7 +96,6 @@ struct tmem_pool {
bool_t shared;
bool_t persistent;
bool_t is_dying;
- int pageshift; /* 0 == 2**12 */
struct list_head pool_list;
struct client *client;
uint64_t uuid[2]; /* 0 for private, non-zero for shared */
@@ -1042,7 +1042,6 @@ static struct tmem_pool * pool_alloc(void)
pool->objnode_count = pool->objnode_count_max = 0;
atomic_set(&pool->pgp_count,0);
pool->obj_count = 0; pool->shared_count = 0;
- pool->pageshift = PAGE_SHIFT - 12;
pool->good_puts = pool->puts = pool->dup_puts_flushed = 0;
pool->dup_puts_replaced = pool->no_mem_puts = 0;
pool->found_gets = pool->gets = 0;
@@ -2355,7 +2354,7 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id,
break;
rc = (pool->persistent ? TMEM_POOL_PERSIST : 0) |
(pool->shared ? TMEM_POOL_SHARED : 0) |
- (pool->pageshift << TMEM_POOL_PAGESIZE_SHIFT) |
+ (POOL_PAGESHIFT << TMEM_POOL_PAGESIZE_SHIFT) |
(TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT);
break;
case TMEMC_SAVE_GET_POOL_NPAGES:
@@ -2395,13 +2394,11 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
struct oid oid;
int ret = 0;
struct tmem_handle h;
- unsigned int pagesize;
if ( pool == NULL || !is_persistent(pool) )
return -1;
- pagesize = 1 << (pool->pageshift + 12);
- if ( bufsize < pagesize + sizeof(struct tmem_handle) )
+ if ( bufsize < PAGE_SIZE + sizeof(struct tmem_handle) )
return -ENOMEM;
spin_lock(&pers_lists_spinlock);
--
1.7.10.4
* [PATCH 02/14] tmem: refactor function do_tmem_op()
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
2013-12-18 6:52 ` [PATCH 01/14] tmem: cleanup: drop pageshift from struct tmem_pool Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 03/14] tmem: cleanup: drop unneeded client/pool initialization Bob Liu
` (11 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
Refactor function do_tmem_op() to make it more readable.
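The resulting lock flow can be summarized with a small userspace sketch (a
pthread rwlock stands in for tmem_rwlock; is_admin_cmd() and the handle_*()
stubs are made-up stand-ins for the real dispatch, not Xen code):

    #include <pthread.h>

    static pthread_rwlock_t tmem_rwlock = PTHREAD_RWLOCK_INITIALIZER;

    static int  is_admin_cmd(int cmd)     { return cmd < 3; } /* stand-in */
    static long handle_admin_cmd(int cmd) { return 0; }       /* stand-in */
    static long handle_data_cmd(int cmd)  { return 0; }       /* stand-in */

    static long do_op(int cmd)
    {
        long rc;

        /* Every command starts out holding the write lock. */
        pthread_rwlock_wrlock(&tmem_rwlock);
        if (is_admin_cmd(cmd)) {
            rc = handle_admin_cmd(cmd);   /* control/auth/pool create */
            pthread_rwlock_unlock(&tmem_rwlock);
            return rc;
        }

        /* Data-path commands drop to the read lock before dispatching. */
        pthread_rwlock_unlock(&tmem_rwlock);
        pthread_rwlock_rdlock(&tmem_rwlock);
        rc = handle_data_cmd(cmd);        /* put/get/flush/... */
        pthread_rwlock_unlock(&tmem_rwlock);
        return rc;
    }

    int main(void) { return (int)do_op(5); }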
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 172 +++++++++++++++++++++++++----------------------------
1 file changed, 81 insertions(+), 91 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index aa8e6f5..362e774 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -2599,7 +2599,6 @@ long do_tmem_op(tmem_cli_op_t uops)
bool_t succ_get = 0, succ_put = 0;
bool_t non_succ_get = 0, non_succ_put = 0;
bool_t flush = 0, flush_obj = 0;
- bool_t write_lock_set = 0, read_lock_set = 0;
if ( !tmem_initialized )
return -ENODEV;
@@ -2624,114 +2623,105 @@ long do_tmem_op(tmem_cli_op_t uops)
goto simple_error;
}
+ /* Acquire the write lock for all commands first. */
+ write_lock(&tmem_rwlock);
+
if ( op.cmd == TMEM_CONTROL )
{
- write_lock(&tmem_rwlock);
- write_lock_set = 1;
rc = do_tmem_control(&op);
- goto out;
- } else if ( op.cmd == TMEM_AUTH ) {
- write_lock(&tmem_rwlock);
- write_lock_set = 1;
+ }
+ else if ( op.cmd == TMEM_AUTH )
+ {
rc = tmemc_shared_pool_auth(op.u.creat.arg1,op.u.creat.uuid[0],
op.u.creat.uuid[1],op.u.creat.flags);
- goto out;
- } else if ( op.cmd == TMEM_RESTORE_NEW ) {
- write_lock(&tmem_rwlock);
- write_lock_set = 1;
+ }
+ else if ( op.cmd == TMEM_RESTORE_NEW )
+ {
rc = do_tmem_new_pool(op.u.creat.arg1, op.pool_id, op.u.creat.flags,
op.u.creat.uuid[0], op.u.creat.uuid[1]);
- goto out;
}
-
- /* create per-client tmem structure dynamically on first use by client */
- if ( client == NULL )
- {
- write_lock(&tmem_rwlock);
- write_lock_set = 1;
- if ( (client = client_create(current->domain->domain_id)) == NULL )
+ else {
+ /*
+ * For other commands, create per-client tmem structure dynamically on
+ * first use by client.
+ */
+ if ( client == NULL )
{
- tmem_client_err("tmem: can't create tmem structure for %s\n",
- tmem_client_str);
- rc = -ENOMEM;
- goto out;
+ if ( (client = client_create(current->domain->domain_id)) == NULL )
+ {
+ tmem_client_err("tmem: can't create tmem structure for %s\n",
+ tmem_client_str);
+ rc = -ENOMEM;
+ goto out;
+ }
}
- }
- if ( op.cmd == TMEM_NEW_POOL || op.cmd == TMEM_DESTROY_POOL )
- {
- if ( !write_lock_set )
+ if ( op.cmd == TMEM_NEW_POOL || op.cmd == TMEM_DESTROY_POOL )
{
- write_lock(&tmem_rwlock);
- write_lock_set = 1;
+ if ( op.cmd == TMEM_NEW_POOL )
+ rc = do_tmem_new_pool(TMEM_CLI_ID_NULL, 0, op.u.creat.flags,
+ op.u.creat.uuid[0], op.u.creat.uuid[1]);
+ else
+ rc = do_tmem_destroy_pool(op.pool_id);
}
- }
- else
- {
- if ( !write_lock_set )
+ else
{
+ if ( ((uint32_t)op.pool_id >= MAX_POOLS_PER_DOMAIN) ||
+ ((pool = client->pools[op.pool_id]) == NULL) )
+ {
+ tmem_client_err("tmem: operation requested on uncreated pool\n");
+ rc = -ENODEV;
+ goto out;
+ }
+ /* Commands only need read lock */
+ write_unlock(&tmem_rwlock);
read_lock(&tmem_rwlock);
- read_lock_set = 1;
- }
- if ( ((uint32_t)op.pool_id >= MAX_POOLS_PER_DOMAIN) ||
- ((pool = client->pools[op.pool_id]) == NULL) )
- {
- tmem_client_err("tmem: operation requested on uncreated pool\n");
- rc = -ENODEV;
- goto out;
- }
- }
- oidp = (struct oid *)&op.u.gen.oid[0];
- switch ( op.cmd )
- {
- case TMEM_NEW_POOL:
- rc = do_tmem_new_pool(TMEM_CLI_ID_NULL, 0, op.u.creat.flags,
- op.u.creat.uuid[0], op.u.creat.uuid[1]);
- break;
- case TMEM_PUT_PAGE:
- if (tmem_ensure_avail_pages())
- rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
- tmem_cli_buf_null);
- else
- rc = -ENOMEM;
- if (rc == 1) succ_put = 1;
- else non_succ_put = 1;
- break;
- case TMEM_GET_PAGE:
- rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
- tmem_cli_buf_null);
- if (rc == 1) succ_get = 1;
- else non_succ_get = 1;
- break;
- case TMEM_FLUSH_PAGE:
- flush = 1;
- rc = do_tmem_flush_page(pool, oidp, op.u.gen.index);
- break;
- case TMEM_FLUSH_OBJECT:
- rc = do_tmem_flush_object(pool, oidp);
- flush_obj = 1;
- break;
- case TMEM_DESTROY_POOL:
- flush = 1;
- rc = do_tmem_destroy_pool(op.pool_id);
- break;
- default:
- tmem_client_warn("tmem: op %d not implemented\n", op.cmd);
- rc = -ENOSYS;
- break;
+ oidp = (struct oid *)&op.u.gen.oid[0];
+ switch ( op.cmd )
+ {
+ case TMEM_NEW_POOL:
+ rc = do_tmem_new_pool(TMEM_CLI_ID_NULL, 0, op.u.creat.flags,
+ op.u.creat.uuid[0], op.u.creat.uuid[1]);
+ break;
+ case TMEM_PUT_PAGE:
+ if (tmem_ensure_avail_pages())
+ rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
+ tmem_cli_buf_null);
+ else
+ rc = -ENOMEM;
+ if (rc == 1) succ_put = 1;
+ else non_succ_put = 1;
+ break;
+ case TMEM_GET_PAGE:
+ rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
+ tmem_cli_buf_null);
+ if (rc == 1) succ_get = 1;
+ else non_succ_get = 1;
+ break;
+ case TMEM_FLUSH_PAGE:
+ flush = 1;
+ rc = do_tmem_flush_page(pool, oidp, op.u.gen.index);
+ break;
+ case TMEM_FLUSH_OBJECT:
+ rc = do_tmem_flush_object(pool, oidp);
+ flush_obj = 1;
+ break;
+ case TMEM_DESTROY_POOL:
+ flush = 1;
+ rc = do_tmem_destroy_pool(op.pool_id);
+ break;
+ default:
+ tmem_client_warn("tmem: op %d not implemented\n", op.cmd);
+ rc = -ENOSYS;
+ break;
+ }
+ read_unlock(&tmem_rwlock);
+ return rc;
+ }
}
-
out:
- if ( rc < 0 )
- errored_tmem_ops++;
- if ( write_lock_set )
- write_unlock(&tmem_rwlock);
- else if ( read_lock_set )
- read_unlock(&tmem_rwlock);
- else
- ASSERT(0);
-
+ write_unlock(&tmem_rwlock);
return rc;
}
--
1.7.10.4
* [PATCH 03/14] tmem: cleanup: drop unneeded client/pool initialization
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
2013-12-18 6:52 ` [PATCH 01/14] tmem: cleanup: drop pageshift from struct tmem_pool Bob Liu
2013-12-18 6:52 ` [PATCH 02/14] tmem: refactor function do_tmem_op() Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 04/14] tmem: bugfix in obj allocate path Bob Liu
` (10 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
Use xzalloc() to allocate the client and pool structures, so the explicit
zero-initialization becomes unneeded.
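The reasoning is the usual zeroing-allocator argument; a userspace analogue
(calloc() playing the role of xzalloc(), with an illustrative struct rather
than the real tmem_pool) looks like this:

    #include <assert.h>
    #include <stdlib.h>

    struct pool { int obj_count; long good_puts; void *cur_pgp; };

    int main(void)
    {
        /* Zeroed allocation: every counter and pointer already starts out as
         * 0/NULL, so explicit assignments like "pool->obj_count = 0;" or
         * "pool->cur_pgp = NULL;" are redundant and can be dropped. */
        struct pool *p = calloc(1, sizeof(*p));

        assert(p && p->obj_count == 0 && p->good_puts == 0 && !p->cur_pgp);
        free(p);
        return 0;
    }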
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 19 +------------------
1 file changed, 1 insertion(+), 18 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 362e774..6ed91b4 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1030,24 +1030,13 @@ static struct tmem_pool * pool_alloc(void)
struct tmem_pool *pool;
int i;
- if ( (pool = xmalloc(struct tmem_pool)) == NULL )
+ if ( (pool = xzalloc(struct tmem_pool)) == NULL )
return NULL;
for (i = 0; i < OBJ_HASH_BUCKETS; i++)
pool->obj_rb_root[i] = RB_ROOT;
INIT_LIST_HEAD(&pool->pool_list);
INIT_LIST_HEAD(&pool->persistent_page_list);
- pool->cur_pgp = NULL;
rwlock_init(&pool->pool_rwlock);
- pool->pgp_count_max = pool->obj_count_max = 0;
- pool->objnode_count = pool->objnode_count_max = 0;
- atomic_set(&pool->pgp_count,0);
- pool->obj_count = 0; pool->shared_count = 0;
- pool->good_puts = pool->puts = pool->dup_puts_flushed = 0;
- pool->dup_puts_replaced = pool->no_mem_puts = 0;
- pool->found_gets = pool->gets = 0;
- pool->flushs_found = pool->flushs = 0;
- pool->flush_objs_found = pool->flush_objs = 0;
- pool->is_dying = 0;
return pool;
}
@@ -1216,15 +1205,9 @@ static struct client *client_create(domid_t cli_id)
for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
client->shared_auth_uuid[i][0] =
client->shared_auth_uuid[i][1] = -1L;
- client->frozen = 0; client->live_migrating = 0;
- client->weight = 0; client->cap = 0;
list_add_tail(&client->client_list, &global_client_list);
INIT_LIST_HEAD(&client->ephemeral_page_list);
INIT_LIST_HEAD(&client->persistent_invalidated_list);
- client->cur_pgp = NULL;
- client->eph_count = client->eph_count_max = 0;
- client->total_cycles = 0; client->succ_pers_puts = 0;
- client->succ_eph_gets = 0; client->succ_pers_gets = 0;
tmem_client_info("ok\n");
return client;
--
1.7.10.4
* [PATCH 04/14] tmem: bugfix in obj allocate path
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (2 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 03/14] tmem: cleanup: drop unneeded client/pool initialization Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 05/14] tmem: cleanup: remove unneed parameter for pgp_delist() Bob Liu
` (9 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
There is a bug in the obj allocate path. If parallel callers allocate an obj
and insert it into pool->obj_rb_root, an unexpected obj may be returned by
obj_new().
This patch fixes it by checking the return value of obj_rb_insert().
It also renames obj_new() to obj_alloc() and moves the insert/spinlock code out
of the allocation path.
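The pattern adopted by the fix (allocate outside the lock, let the insert under
the lock decide the race, and have losers free their copy and retry the lookup)
is shown below as a small userspace analogue; a single slot stands in for the
rb-tree, and all names are illustrative, none come from the Xen sources:

    #include <pthread.h>
    #include <stdlib.h>

    struct obj { int refs; };

    static struct obj *slot;                  /* stand-in for the rb-tree */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Return the shared object, creating it if it does not exist yet. */
    static struct obj *get_or_create(void)
    {
        struct obj *o;

    refind:
        pthread_mutex_lock(&lock);
        if (slot) {                           /* "obj_find()" succeeded */
            o = slot;
            pthread_mutex_unlock(&lock);
            return o;
        }
        pthread_mutex_unlock(&lock);

        o = calloc(1, sizeof(*o));            /* "obj_alloc()" outside the lock */
        if (!o)
            return NULL;

        pthread_mutex_lock(&lock);
        if (slot) {                           /* "obj_rb_insert()" failed: */
            free(o);                          /* another caller won the race, */
            pthread_mutex_unlock(&lock);
            goto refind;                      /* so free our copy and refind */
        }
        slot = o;                             /* our insert won the race */
        pthread_mutex_unlock(&lock);
        return o;
    }

    int main(void) { return get_or_create() ? 0 : 1; }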
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 6ed91b4..61dfd62 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -959,12 +959,11 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
* allocate, initialize, and insert an tmem_object_root
* (should be called only if find failed)
*/
-static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, struct oid *oidp)
{
struct tmem_object_root *obj;
ASSERT(pool != NULL);
- ASSERT_WRITELOCK(&pool->pool_rwlock);
if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL )
return NULL;
pool->obj_count++;
@@ -979,9 +978,6 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid
obj->objnode_count = 0;
obj->pgp_count = 0;
obj->last_client = TMEM_CLI_ID_NULL;
- spin_lock(&obj->obj_spinlock);
- obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj);
- ASSERT_SPINLOCK(&obj->obj_spinlock);
return obj;
}
@@ -1552,10 +1548,13 @@ static int do_tmem_put(struct tmem_pool *pool,
ASSERT(pool != NULL);
client = pool->client;
+ ASSERT(client != NULL);
ret = client->frozen ? -EFROZEN : -ENOMEM;
pool->puts++;
+
+refind:
/* does page already exist (dup)? if so, handle specially */
- if ( (obj = obj_find(pool,oidp)) != NULL )
+ if ( (obj = obj_find(pool, oidp)) != NULL )
{
if ((pgp = pgp_lookup_in_obj(obj, index)) != NULL)
{
@@ -1573,12 +1572,22 @@ static int do_tmem_put(struct tmem_pool *pool,
/* no puts allowed into a frozen pool (except dup puts) */
if ( client->frozen )
return ret;
+ if ( (obj = obj_alloc(pool, oidp)) == NULL )
+ return -ENOMEM;
+
write_lock(&pool->pool_rwlock);
- if ( (obj = obj_new(pool,oidp)) == NULL )
+ /*
+ * Parallel callers may have already allocated an obj and inserted it into
+ * obj_rb_root before us.
+ */
+ if (!obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj))
{
+ tmem_free(obj, pool);
write_unlock(&pool->pool_rwlock);
- return -ENOMEM;
+ goto refind;
}
+
+ spin_lock(&obj->obj_spinlock);
newobj = 1;
write_unlock(&pool->pool_rwlock);
}
--
1.7.10.4
* [PATCH 05/14] tmem: cleanup: remove unneed parameter for pgp_delist()
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (3 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 04/14] tmem: bugfix in obj allocate path Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 06/14] tmem: cleanup: remove unneed parameter for pgp_free() Bob Liu
` (8 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
tmem_evict() is the only caller of pgp_delist() with eph_lock held.
This patch embeds the delist code into tmem_evict() directly, so that the
no_eph_lock parameter can be dropped.
As a side effect, the eph list lock can also be released a bit earlier.
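The shape of the change is the common "unlink under the list lock, then tear
down without it" pattern; a minimal userspace sketch (names illustrative, not
the tmem code):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static struct node *eph_list;
    static pthread_mutex_t eph_lock = PTHREAD_MUTEX_INITIALIZER;

    static void evict_one(void)
    {
        struct node *victim;

        pthread_mutex_lock(&eph_lock);
        victim = eph_list;
        if (victim)
            eph_list = victim->next;      /* delist while holding the lock */
        pthread_mutex_unlock(&eph_lock);  /* ... and release it early */

        /* The expensive part (freeing, accounting) now runs unlocked, which is
         * what calling pgp_free() directly instead of pgp_delete() buys
         * tmem_evict(). */
        free(victim);
    }

    int main(void)
    {
        eph_list = calloc(1, sizeof(*eph_list));
        evict_one();
        return 0;
    }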
---
xen/common/tmem.c | 55 +++++++++++++++++++++++++++++------------------------
1 file changed, 30 insertions(+), 25 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 61dfd62..8a6ee84 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -694,7 +694,7 @@ static void pgp_free_from_inv_list(struct client *client, struct tmem_page_descr
}
/* remove the page from appropriate lists but not from parent object */
-static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
+static void pgp_delist(struct tmem_page_descriptor *pgp)
{
struct client *client;
@@ -705,8 +705,7 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
ASSERT(client != NULL);
if ( !is_persistent(pgp->us.obj->pool) )
{
- if ( !no_eph_lock )
- spin_lock(&eph_lists_spinlock);
+ spin_lock(&eph_lists_spinlock);
if ( !list_empty(&pgp->us.client_eph_pages) )
client->eph_count--;
ASSERT(client->eph_count >= 0);
@@ -715,8 +714,7 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
global_eph_count--;
ASSERT(global_eph_count >= 0);
list_del_init(&pgp->global_eph_pages);
- if ( !no_eph_lock )
- spin_unlock(&eph_lists_spinlock);
+ spin_unlock(&eph_lists_spinlock);
} else {
if ( client->live_migrating )
{
@@ -735,7 +733,7 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
}
/* remove page from lists (but not from parent object) and free it */
-static void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
+static void pgp_delete(struct tmem_page_descriptor *pgp)
{
uint64_t life;
@@ -744,7 +742,7 @@ static void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
ASSERT(pgp->us.obj->pool != NULL);
life = get_cycles() - pgp->timestamp;
pgp->us.obj->pool->sum_life_cycles += life;
- pgp_delist(pgp, no_eph_lock);
+ pgp_delist(pgp);
pgp_free(pgp,1);
}
@@ -754,7 +752,7 @@ static void pgp_destroy(void *v)
struct tmem_page_descriptor *pgp = (struct tmem_page_descriptor *)v;
ASSERT_SPINLOCK(&pgp->us.obj->obj_spinlock);
- pgp_delist(pgp,0);
+ pgp_delist(pgp);
ASSERT(pgp->us.obj != NULL);
pgp->us.obj->pgp_count--;
ASSERT(pgp->us.obj->pgp_count >= 0);
@@ -1303,7 +1301,7 @@ obj_unlock:
static int tmem_evict(void)
{
struct client *client = current->domain->tmem_client;
- struct tmem_page_descriptor *pgp = NULL, *pgp2, *pgp_del;
+ struct tmem_page_descriptor *pgp = NULL, *pgp_del;
struct tmem_object_root *obj;
struct tmem_pool *pool;
int ret = 0;
@@ -1314,21 +1312,28 @@ static int tmem_evict(void)
if ( (client != NULL) && client_over_quota(client) &&
!list_empty(&client->ephemeral_page_list) )
{
- list_for_each_entry_safe(pgp,pgp2,&client->ephemeral_page_list,us.client_eph_pages)
- if ( tmem_try_to_evict_pgp(pgp,&hold_pool_rwlock) )
+ list_for_each_entry(pgp, &client->ephemeral_page_list, us.client_eph_pages)
+ if ( tmem_try_to_evict_pgp(pgp, &hold_pool_rwlock) )
goto found;
- } else if ( list_empty(&global_ephemeral_page_list) ) {
- goto out;
- } else {
- list_for_each_entry_safe(pgp,pgp2,&global_ephemeral_page_list,global_eph_pages)
- if ( tmem_try_to_evict_pgp(pgp,&hold_pool_rwlock) )
+ }
+ else if ( !list_empty(&global_ephemeral_page_list) )
+ {
+ list_for_each_entry(pgp, &global_ephemeral_page_list, global_eph_pages)
+ if ( tmem_try_to_evict_pgp(pgp, &hold_pool_rwlock) )
goto found;
}
-
- ret = 0;
+ spin_unlock(&eph_lists_spinlock);
goto out;
found:
+ list_del_init(&pgp->us.client_eph_pages);
+ client->eph_count--;
+ list_del_init(&pgp->global_eph_pages);
+ global_eph_count--;
+ ASSERT(global_eph_count >= 0);
+ ASSERT(client->eph_count >= 0);
+ spin_unlock(&eph_lists_spinlock);
+
ASSERT(pgp != NULL);
obj = pgp->us.obj;
ASSERT(obj != NULL);
@@ -1343,7 +1348,9 @@ found:
ASSERT(pgp->pcd->pgp_ref_count == 1 || pgp->eviction_attempted);
pcd_disassociate(pgp,pool,1);
}
- pgp_delete(pgp,1);
+
+ /* pgp is already delisted, so call pgp_free() directly. */
+ pgp_free(pgp, 1);
if ( obj->pgp_count == 0 )
{
ASSERT_WRITELOCK(&pool->pool_rwlock);
@@ -1355,9 +1362,7 @@ found:
write_unlock(&pool->pool_rwlock);
evicted_pgs++;
ret = 1;
-
out:
- spin_unlock(&eph_lists_spinlock);
return ret;
}
@@ -1524,7 +1529,7 @@ failed_dup:
cleanup:
pgpfound = pgp_delete_from_obj(obj, pgp->index);
ASSERT(pgpfound == pgp);
- pgp_delete(pgpfound,0);
+ pgp_delete(pgpfound);
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
@@ -1684,7 +1689,7 @@ del_pgp_from_obj:
pgp_delete_from_obj(obj, pgp->index);
free_pgp:
- pgp_delete(pgp, 0);
+ pgp_delete(pgp);
unlock_obj:
if ( newobj )
{
@@ -1743,7 +1748,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
{
if ( !is_shared(pool) )
{
- pgp_delete(pgp,0);
+ pgp_delete(pgp);
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
@@ -1793,7 +1798,7 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
spin_unlock(&obj->obj_spinlock);
goto out;
}
- pgp_delete(pgp,0);
+ pgp_delete(pgp);
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
--
1.7.10.4
* [PATCH 06/14] tmem: cleanup: remove unneed parameter for pgp_free()
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (4 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 05/14] tmem: cleanup: remove unneed parameter for pgp_delist() Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 07/14] tmem: cleanup the pgp free path Bob Liu
` (7 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
The only difference the from_delete parameter makes in pgp_free() is a single
ASSERT() line; this patch removes the parameter to make the code cleaner.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 8a6ee84..9cfbca3 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -654,15 +654,14 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po
pgp->size = -1;
}
-static void pgp_free(struct tmem_page_descriptor *pgp, int from_delete)
+static void pgp_free(struct tmem_page_descriptor *pgp)
{
struct tmem_pool *pool = NULL;
ASSERT(pgp->us.obj != NULL);
- ASSERT(pgp->us.obj->pool->client != NULL);
- if ( from_delete )
- ASSERT(pgp_lookup_in_obj(pgp->us.obj,pgp->index) == NULL);
ASSERT(pgp->us.obj->pool != NULL);
+ ASSERT(pgp->us.obj->pool->client != NULL);
+
pool = pgp->us.obj->pool;
if ( !is_persistent(pool) )
{
@@ -743,7 +742,8 @@ static void pgp_delete(struct tmem_page_descriptor *pgp)
life = get_cycles() - pgp->timestamp;
pgp->us.obj->pool->sum_life_cycles += life;
pgp_delist(pgp);
- pgp_free(pgp,1);
+ ASSERT(pgp_lookup_in_obj(pgp->us.obj,pgp->index) == NULL);
+ pgp_free(pgp);
}
/* called only indirectly by radix_tree_destroy */
@@ -756,7 +756,7 @@ static void pgp_destroy(void *v)
ASSERT(pgp->us.obj != NULL);
pgp->us.obj->pgp_count--;
ASSERT(pgp->us.obj->pgp_count >= 0);
- pgp_free(pgp,0);
+ pgp_free(pgp);
}
static int pgp_add_to_obj(struct tmem_object_root *obj, uint32_t index, struct tmem_page_descriptor *pgp)
@@ -1350,7 +1350,7 @@ found:
}
/* pgp is already delisted, so call pgp_free() directly. */
- pgp_free(pgp, 1);
+ pgp_free(pgp);
if ( obj->pgp_count == 0 )
{
ASSERT_WRITELOCK(&pool->pool_rwlock);
--
1.7.10.4
* [PATCH 07/14] tmem: cleanup the pgp free path
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (5 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 06/14] tmem: cleanup: remove unneed parameter for pgp_free() Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 08/14] tmem: drop oneline function client_freeze() Bob Liu
` (6 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
There are several functions involved in freeing a pgp, but their relationship
is not easy to follow. This patch cleans them up.
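After the patch the free path layers roughly as follows (a reading aid for the
diff below, not text from it):

    __pgp_free()      - detach the pgp from its object and hand it back to the
                        pool allocator
    pgp_free()        - free the page data; in the persistent live-migration
                        case keep the pgp for the invalidated list, otherwise
                        call __pgp_free()
    pgp_delist_free() - take the pgp off the ephemeral/persistent lists, account
                        its lifetime, then call pgp_free()
    pgp_destroy()     - radix-tree destructor: drop the object's pgp_count and
                        call pgp_delist_free()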
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 66 +++++++++++++++++++++++------------------------------
1 file changed, 28 insertions(+), 38 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 9cfbca3..91096eb 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -654,6 +654,13 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po
pgp->size = -1;
}
+static void __pgp_free(struct tmem_page_descriptor *pgp, struct tmem_pool *pool)
+{
+ pgp->us.obj = NULL;
+ pgp->index = -1;
+ tmem_free(pgp, pool);
+}
+
static void pgp_free(struct tmem_page_descriptor *pgp)
{
struct tmem_pool *pool = NULL;
@@ -678,30 +685,22 @@ static void pgp_free(struct tmem_page_descriptor *pgp)
pgp->pool_id = pool->pool_id;
return;
}
- pgp->us.obj = NULL;
- pgp->index = -1;
- tmem_free(pgp, pool);
-}
-
-static void pgp_free_from_inv_list(struct client *client, struct tmem_page_descriptor *pgp)
-{
- struct tmem_pool *pool = client->pools[pgp->pool_id];
-
- pgp->us.obj = NULL;
- pgp->index = -1;
- tmem_free(pgp, pool);
+ __pgp_free(pgp, pool);
}
-/* remove the page from appropriate lists but not from parent object */
-static void pgp_delist(struct tmem_page_descriptor *pgp)
+/* remove pgp from global/pool/client lists and free it */
+static void pgp_delist_free(struct tmem_page_descriptor *pgp)
{
struct client *client;
+ uint64_t life;
ASSERT(pgp != NULL);
ASSERT(pgp->us.obj != NULL);
ASSERT(pgp->us.obj->pool != NULL);
client = pgp->us.obj->pool->client;
ASSERT(client != NULL);
+
+ /* Delist pgp */
if ( !is_persistent(pgp->us.obj->pool) )
{
spin_lock(&eph_lists_spinlock);
@@ -714,7 +713,9 @@ static void pgp_delist(struct tmem_page_descriptor *pgp)
ASSERT(global_eph_count >= 0);
list_del_init(&pgp->global_eph_pages);
spin_unlock(&eph_lists_spinlock);
- } else {
+ }
+ else
+ {
if ( client->live_migrating )
{
spin_lock(&pers_lists_spinlock);
@@ -723,26 +724,18 @@ static void pgp_delist(struct tmem_page_descriptor *pgp)
if ( pgp != pgp->us.obj->pool->cur_pgp )
list_del_init(&pgp->us.pool_pers_pages);
spin_unlock(&pers_lists_spinlock);
- } else {
+ }
+ else
+ {
spin_lock(&pers_lists_spinlock);
list_del_init(&pgp->us.pool_pers_pages);
spin_unlock(&pers_lists_spinlock);
}
}
-}
-
-/* remove page from lists (but not from parent object) and free it */
-static void pgp_delete(struct tmem_page_descriptor *pgp)
-{
- uint64_t life;
-
- ASSERT(pgp != NULL);
- ASSERT(pgp->us.obj != NULL);
- ASSERT(pgp->us.obj->pool != NULL);
life = get_cycles() - pgp->timestamp;
pgp->us.obj->pool->sum_life_cycles += life;
- pgp_delist(pgp);
- ASSERT(pgp_lookup_in_obj(pgp->us.obj,pgp->index) == NULL);
+
+ /* free pgp */
pgp_free(pgp);
}
@@ -751,12 +744,8 @@ static void pgp_destroy(void *v)
{
struct tmem_page_descriptor *pgp = (struct tmem_page_descriptor *)v;
- ASSERT_SPINLOCK(&pgp->us.obj->obj_spinlock);
- pgp_delist(pgp);
- ASSERT(pgp->us.obj != NULL);
pgp->us.obj->pgp_count--;
- ASSERT(pgp->us.obj->pgp_count >= 0);
- pgp_free(pgp);
+ pgp_delist_free(pgp);
}
static int pgp_add_to_obj(struct tmem_object_root *obj, uint32_t index, struct tmem_page_descriptor *pgp)
@@ -1326,6 +1315,7 @@ static int tmem_evict(void)
goto out;
found:
+ /* Delist */
list_del_init(&pgp->us.client_eph_pages);
client->eph_count--;
list_del_init(&pgp->global_eph_pages);
@@ -1529,7 +1519,7 @@ failed_dup:
cleanup:
pgpfound = pgp_delete_from_obj(obj, pgp->index);
ASSERT(pgpfound == pgp);
- pgp_delete(pgpfound);
+ pgp_delist_free(pgpfound);
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
@@ -1689,7 +1679,7 @@ del_pgp_from_obj:
pgp_delete_from_obj(obj, pgp->index);
free_pgp:
- pgp_delete(pgp);
+ pgp_free(pgp);
unlock_obj:
if ( newobj )
{
@@ -1748,7 +1738,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
{
if ( !is_shared(pool) )
{
- pgp_delete(pgp);
+ pgp_delist_free(pgp);
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
@@ -1798,7 +1788,7 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
spin_unlock(&obj->obj_spinlock);
goto out;
}
- pgp_delete(pgp);
+ pgp_delist_free(pgp);
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
@@ -2373,7 +2363,7 @@ static int tmemc_save_subop(int cli_id, uint32_t pool_id,
if ( !list_empty(&client->persistent_invalidated_list) )
list_for_each_entry_safe(pgp,pgp2,
&client->persistent_invalidated_list, client_inv_pages)
- pgp_free_from_inv_list(client,pgp);
+ __pgp_free(pgp, client->pools[pgp->pool_id]);
client->frozen = client->was_frozen;
rc = 0;
break;
--
1.7.10.4
* [PATCH 08/14] tmem: drop oneline function client_freeze()
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (6 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 07/14] tmem: cleanup the pgp free path Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 09/14] tmem: remove unneeded parameters for obj destroy Bob Liu
` (5 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
Function client_freeze() only sets client->frozen = freeze; the caller can do
this directly.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 91096eb..70c80ab 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1236,11 +1236,6 @@ static bool_t client_over_quota(struct client *client)
((total*100L) / client->weight) );
}
-static void client_freeze(struct client *client, int freeze)
-{
- client->frozen = freeze;
-}
-
/************ MEMORY REVOCATION ROUTINES *******************************/
static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *hold_pool_rwlock)
@@ -1988,14 +1983,14 @@ static int tmemc_freeze_pools(domid_t cli_id, int arg)
if ( cli_id == TMEM_CLI_ID_NULL )
{
list_for_each_entry(client,&global_client_list,client_list)
- client_freeze(client,freeze);
+ client->frozen = freeze;
tmem_client_info("tmem: all pools %s for all %ss\n", s, tmem_client_str);
}
else
{
if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
return -1;
- client_freeze(client,freeze);
+ client->frozen = freeze;
tmem_client_info("tmem: all pools %s for %s=%d\n",
s, tmem_cli_id_str, cli_id);
}
--
1.7.10.4
* [PATCH 09/14] tmem: remove unneeded parameters for obj destroy
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (7 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 08/14] tmem: drop oneline function client_freeze() Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 10/14] tmem: cleanup: drop global_pool_list Bob Liu
` (4 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
The selective and no_rebalance parameters are unneeded during obj destroy;
this patch removes them.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 35 +++++++++++++++--------------------
1 file changed, 15 insertions(+), 20 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 70c80ab..0febae1 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -885,7 +885,7 @@ restart_find:
}
/* free an object that has no more pgps in it */
-static void obj_free(struct tmem_object_root *obj, int no_rebalance)
+static void obj_free(struct tmem_object_root *obj)
{
struct tmem_pool *pool;
struct oid old_oid;
@@ -908,9 +908,7 @@ static void obj_free(struct tmem_object_root *obj, int no_rebalance)
oid_set_invalid(&obj->oid);
obj->last_client = TMEM_CLI_ID_NULL;
atomic_dec_and_assert(global_obj_count);
- /* use no_rebalance only if all objects are being destroyed anyway */
- if ( !no_rebalance )
- rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]);
+ rb_erase(&obj->rb_tree_node, &pool->obj_rb_root[oid_hash(&old_oid)]);
spin_unlock(&obj->obj_spinlock);
tmem_free(obj, pool);
}
@@ -969,15 +967,15 @@ static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, struct oid *o
}
/* free an object after destroying any pgps in it */
-static void obj_destroy(struct tmem_object_root *obj, int no_rebalance)
+static void obj_destroy(struct tmem_object_root *obj)
{
ASSERT_WRITELOCK(&obj->pool->pool_rwlock);
radix_tree_destroy(&obj->tree_root, pgp_destroy);
- obj_free(obj,no_rebalance);
+ obj_free(obj);
}
/* destroys all objs in a pool, or only if obj->last_client matches cli_id */
-static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t cli_id)
+static void pool_destroy_objs(struct tmem_pool *pool, domid_t cli_id)
{
struct rb_node *node;
struct tmem_object_root *obj;
@@ -993,11 +991,8 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t
obj = container_of(node, struct tmem_object_root, rb_tree_node);
spin_lock(&obj->obj_spinlock);
node = rb_next(node);
- if ( !selective )
- /* FIXME: should be obj,1 but walking/erasing rbtree is racy */
- obj_destroy(obj,0);
- else if ( obj->last_client == cli_id )
- obj_destroy(obj,0);
+ if ( obj->last_client == cli_id )
+ obj_destroy(obj);
else
spin_unlock(&obj->obj_spinlock);
}
@@ -1088,7 +1083,7 @@ static int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id)
ASSERT(pool->client != NULL);
ASSERT_WRITELOCK(&tmem_rwlock);
- pool_destroy_objs(pool,1,cli_id);
+ pool_destroy_objs(pool, cli_id);
list_for_each_entry(sl,&pool->share_list, share_list)
{
if (sl->client->cli_id != cli_id)
@@ -1134,7 +1129,7 @@ static void pool_flush(struct tmem_pool *pool, domid_t cli_id, bool_t destroy)
destroy?"destroy":"flush", tmem_client_str);
return;
}
- pool_destroy_objs(pool,0,TMEM_CLI_ID_NULL);
+ pool_destroy_objs(pool, TMEM_CLI_ID_NULL);
if ( destroy )
{
pool->client->pools[pool->pool_id] = NULL;
@@ -1339,7 +1334,7 @@ found:
if ( obj->pgp_count == 0 )
{
ASSERT_WRITELOCK(&pool->pool_rwlock);
- obj_free(obj,0);
+ obj_free(obj);
}
else
spin_unlock(&obj->obj_spinlock);
@@ -1518,7 +1513,7 @@ cleanup:
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
- obj_free(obj,0);
+ obj_free(obj);
write_unlock(&pool->pool_rwlock);
} else {
spin_unlock(&obj->obj_spinlock);
@@ -1679,7 +1674,7 @@ unlock_obj:
if ( newobj )
{
write_lock(&pool->pool_rwlock);
- obj_free(obj, 0);
+ obj_free(obj);
write_unlock(&pool->pool_rwlock);
}
else
@@ -1737,7 +1732,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
- obj_free(obj,0);
+ obj_free(obj);
obj = NULL;
write_unlock(&pool->pool_rwlock);
}
@@ -1787,7 +1782,7 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
if ( obj->pgp_count == 0 )
{
write_lock(&pool->pool_rwlock);
- obj_free(obj,0);
+ obj_free(obj);
write_unlock(&pool->pool_rwlock);
} else {
spin_unlock(&obj->obj_spinlock);
@@ -1810,7 +1805,7 @@ static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
if ( obj == NULL )
goto out;
write_lock(&pool->pool_rwlock);
- obj_destroy(obj,0);
+ obj_destroy(obj);
pool->flush_objs_found++;
write_unlock(&pool->pool_rwlock);
--
1.7.10.4
* [PATCH 10/14] tmem: cleanup: drop global_pool_list
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (8 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 09/14] tmem: remove unneeded parameters for obj destroy Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 11/14] tmem: fix the return value of tmemc_set_var() Bob Liu
` (3 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
No need to maintain a global pool list; nobody uses it.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 5 -----
1 file changed, 5 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 0febae1..72c3838 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -96,7 +96,6 @@ struct tmem_pool {
bool_t shared;
bool_t persistent;
bool_t is_dying;
- struct list_head pool_list;
struct client *client;
uint64_t uuid[2]; /* 0 for private, non-zero for shared */
uint32_t pool_id;
@@ -199,7 +198,6 @@ rwlock_t pcd_tree_rwlocks[256]; /* poor man's concurrency for now */
static LIST_HEAD(global_ephemeral_page_list); /* all pages in ephemeral pools */
static LIST_HEAD(global_client_list);
-static LIST_HEAD(global_pool_list);
static struct tmem_pool *global_shared_pools[MAX_GLOBAL_SHARED_POOLS] = { 0 };
static bool_t global_shared_auth = 0;
@@ -1012,7 +1010,6 @@ static struct tmem_pool * pool_alloc(void)
return NULL;
for (i = 0; i < OBJ_HASH_BUCKETS; i++)
pool->obj_rb_root[i] = RB_ROOT;
- INIT_LIST_HEAD(&pool->pool_list);
INIT_LIST_HEAD(&pool->persistent_page_list);
rwlock_init(&pool->pool_rwlock);
return pool;
@@ -1021,7 +1018,6 @@ static struct tmem_pool * pool_alloc(void)
static void pool_free(struct tmem_pool *pool)
{
pool->client = NULL;
- list_del(&pool->pool_list);
xfree(pool);
}
@@ -1952,7 +1948,6 @@ static int do_tmem_new_pool(domid_t this_cli_id,
}
}
client->pools[d_poolid] = pool;
- list_add_tail(&pool->pool_list, &global_pool_list);
pool->pool_id = d_poolid;
pool->persistent = persistent;
pool->uuid[0] = uuid_lo; pool->uuid[1] = uuid_hi;
--
1.7.10.4
* [PATCH 11/14] tmem: fix the return value of tmemc_set_var()
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (9 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 10/14] tmem: cleanup: drop global_pool_list Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 12/14] tmem: cleanup: refactor function tmemc_shared_pool_auth() Bob Liu
` (2 subsequent siblings)
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
tmemc_set_var() calls tmemc_set_var_one() but ignores its return value; this
patch fixes that.
It also renames tmemc_set_var_one() to __tmemc_set_var().
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 72c3838..eea3cbb 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -2176,7 +2176,7 @@ static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
return 0;
}
-static int tmemc_set_var_one(struct client *client, uint32_t subop, uint32_t arg1)
+static int __tmemc_set_var(struct client *client, uint32_t subop, uint32_t arg1)
{
domid_t cli_id = client->cli_id;
uint32_t old_weight;
@@ -2218,15 +2218,24 @@ static int tmemc_set_var_one(struct client *client, uint32_t subop, uint32_t arg
static int tmemc_set_var(domid_t cli_id, uint32_t subop, uint32_t arg1)
{
struct client *client;
+ int ret = -1;
if ( cli_id == TMEM_CLI_ID_NULL )
+ {
list_for_each_entry(client,&global_client_list,client_list)
- tmemc_set_var_one(client, subop, arg1);
- else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL)
- return -1;
+ {
+ ret = __tmemc_set_var(client, subop, arg1);
+ if (ret)
+ break;
+ }
+ }
else
- tmemc_set_var_one(client, subop, arg1);
- return 0;
+ {
+ client = tmem_client_from_cli_id(cli_id);
+ if ( client )
+ ret = __tmemc_set_var(client, subop, arg1);
+ }
+ return ret;
}
static int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
--
1.7.10.4
* [PATCH 12/14] tmem: cleanup: refactor function tmemc_shared_pool_auth()
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (10 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 11/14] tmem: fix the return value of tmemc_set_var() Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 13/14] tmem: reorg the shared pool allocate path Bob Liu
2013-12-18 6:52 ` [PATCH 14/14] tmem: remove useless parameter from client and pool flush Bob Liu
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
Make function tmemc_shared_pool_auth() more readable.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 38 +++++++++++++++++++++++++-------------
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index eea3cbb..37f4e8f 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -2252,27 +2252,39 @@ static int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
client = tmem_client_from_cli_id(cli_id);
if ( client == NULL )
return -EINVAL;
+
for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
{
- if ( (client->shared_auth_uuid[i][0] == uuid_lo) &&
- (client->shared_auth_uuid[i][1] == uuid_hi) )
+ if ( auth == 0 )
{
- if ( auth == 0 )
- client->shared_auth_uuid[i][0] =
- client->shared_auth_uuid[i][1] = -1L;
- return 1;
+ if ( (client->shared_auth_uuid[i][0] == uuid_lo) &&
+ (client->shared_auth_uuid[i][1] == uuid_hi) )
+ {
+ client->shared_auth_uuid[i][0] = -1L;
+ client->shared_auth_uuid[i][1] = -1L;
+ return 1;
+ }
}
- if ( (auth == 1) && (client->shared_auth_uuid[i][0] == -1L) &&
- (client->shared_auth_uuid[i][1] == -1L) && (free == -1) )
- free = i;
+ else
+ {
+ if ( (client->shared_auth_uuid[i][0] == -1L) &&
+ (client->shared_auth_uuid[i][1] == -1L) )
+ {
+ free = i;
+ break;
+ }
+ }
}
if ( auth == 0 )
return 0;
- if ( auth == 1 && free == -1 )
+ else if ( free == -1)
return -ENOMEM;
- client->shared_auth_uuid[free][0] = uuid_lo;
- client->shared_auth_uuid[free][1] = uuid_hi;
- return 1;
+ else
+ {
+ client->shared_auth_uuid[free][0] = uuid_lo;
+ client->shared_auth_uuid[free][1] = uuid_hi;
+ return 1;
+ }
}
static int tmemc_save_subop(int cli_id, uint32_t pool_id,
--
1.7.10.4
* [PATCH 13/14] tmem: reorg the shared pool allocate path
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (11 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 12/14] tmem: cleanup: refactor function tmemc_shared_pool_auth() Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
2013-12-18 6:52 ` [PATCH 14/14] tmem: remove useless parameter from client and pool flush Bob Liu
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
Reorganize the code to make it more readable.
Add a check of the return value of shared_pool_join() and drop an unneeded call
to it.
Also disallow creating a shared & persistent pool, which would make things too
complex.
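For readers of the diff, the resulting control flow for a pool create with the
shared flag set is roughly:

    1. Validate the flags: spec version, page size, reserved bits, and reject a
       pool that is both shared and persistent.
    2. Allocate the pool, wire it into client->pools[d_poolid] and fill in
       pool_id, uuid, persistent and shared.
    3. If the uuid is invalid or shared-pool authorization fails, fall back to a
       private pool.
    4. Otherwise scan global_shared_pools[]: on a uuid match, join the existing
       pool via shared_pool_join() (now checking its return value) and free the
       pool allocated in step 2; with no match, claim the first unused global
       slot for the new pool, or fail if none is left.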
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 104 +++++++++++++++++++++++++++++++++++------------------
1 file changed, 70 insertions(+), 34 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 37f4e8f..ab5bba7 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1021,21 +1021,24 @@ static void pool_free(struct tmem_pool *pool)
xfree(pool);
}
-/* register new_client as a user of this shared pool and return new
- total number of registered users */
+/*
+ * Register new_client as a user of this shared pool and return 0 on success.
+ */
static int shared_pool_join(struct tmem_pool *pool, struct client *new_client)
{
struct share_list *sl;
-
ASSERT(is_shared(pool));
+
if ( (sl = tmem_malloc(sizeof(struct share_list), NULL)) == NULL )
return -1;
sl->client = new_client;
list_add_tail(&sl->share_list, &pool->share_list);
if ( new_client->cli_id != pool->client->cli_id )
tmem_client_info("adding new %s %d to shared pool owned by %s %d\n",
- tmem_client_str, new_client->cli_id, tmem_client_str, pool->client->cli_id);
- return ++pool->shared_count;
+ tmem_client_str, new_client->cli_id, tmem_client_str,
+ pool->client->cli_id);
+ ++pool->shared_count;
+ return 0;
}
/* reassign "ownership" of the pool to another client that shares this pool */
@@ -1841,8 +1844,7 @@ static int do_tmem_new_pool(domid_t this_cli_id,
int specversion = (flags >> TMEM_POOL_VERSION_SHIFT)
& TMEM_POOL_VERSION_MASK;
struct tmem_pool *pool, *shpool;
- int s_poolid, first_unused_s_poolid;
- int i;
+ int i, first_unused_s_poolid;
if ( this_cli_id == TMEM_CLI_ID_NULL )
cli_id = current->domain->domain_id;
@@ -1856,6 +1858,11 @@ static int do_tmem_new_pool(domid_t this_cli_id,
tmem_client_err("failed... unsupported spec version\n");
return -EPERM;
}
+ if ( shared && persistent )
+ {
+ tmem_client_err("failed... unable to create a shared-persistant pool\n");
+ return -EPERM;
+ }
if ( pagebits != (PAGE_SHIFT - 12) )
{
tmem_client_err("failed... unsupported pagesize %d\n",
@@ -1872,17 +1879,12 @@ static int do_tmem_new_pool(domid_t this_cli_id,
tmem_client_err("failed... reserved bits must be zero\n");
return -EPERM;
}
- if ( (pool = pool_alloc()) == NULL )
- {
- tmem_client_err("failed... out of memory\n");
- return -ENOMEM;
- }
if ( this_cli_id != TMEM_CLI_ID_NULL )
{
if ( (client = tmem_client_from_cli_id(this_cli_id)) == NULL
|| d_poolid >= MAX_POOLS_PER_DOMAIN
|| client->pools[d_poolid] != NULL )
- goto fail;
+ return -EPERM;
}
else
{
@@ -1895,13 +1897,35 @@ static int do_tmem_new_pool(domid_t this_cli_id,
{
tmem_client_err("failed... no more pool slots available for this %s\n",
tmem_client_str);
- goto fail;
+ return -EPERM;
}
}
+
+ if ( (pool = pool_alloc()) == NULL )
+ {
+ tmem_client_err("failed... out of memory\n");
+ return -ENOMEM;
+ }
+ client->pools[d_poolid] = pool;
+ pool->client = client;
+ pool->pool_id = d_poolid;
+ pool->shared = shared;
+ pool->persistent = persistent;
+ pool->uuid[0] = uuid_lo;
+ pool->uuid[1] = uuid_hi;
+
+ /*
+ * A pool has already been created when we arrive here, but a shared
+ * pool needs some extra handling.
+ */
if ( shared )
{
if ( uuid_lo == -1L && uuid_hi == -1L )
- shared = 0;
+ {
+ tmem_client_info("Invalid uuid, create non shared pool instead!\n");
+ pool->shared = 0;
+ goto out;
+ }
if ( client->shared_auth_required && !global_shared_auth )
{
for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
@@ -1909,48 +1933,60 @@ static int do_tmem_new_pool(domid_t this_cli_id,
(client->shared_auth_uuid[i][1] == uuid_hi) )
break;
if ( i == MAX_GLOBAL_SHARED_POOLS )
- shared = 0;
+ {
+ tmem_client_info("Shared auth failed, create non shared pool instead!\n");
+ pool->shared = 0;
+ goto out;
+ }
}
- }
- pool->shared = shared;
- pool->client = client;
- if ( shared )
- {
+
+ /*
+ * Authorization succeeded: match an existing global shared pool or use
+ * the newly allocated one.
+ */
first_unused_s_poolid = MAX_GLOBAL_SHARED_POOLS;
- for ( s_poolid = 0; s_poolid < MAX_GLOBAL_SHARED_POOLS; s_poolid++ )
+ for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ )
{
- if ( (shpool = global_shared_pools[s_poolid]) != NULL )
+ if ( (shpool = global_shared_pools[i]) != NULL )
{
if ( shpool->uuid[0] == uuid_lo && shpool->uuid[1] == uuid_hi )
{
+ /* Matched an existing global shared pool. */
tmem_client_info("(matches shared pool uuid=%"PRIx64".%"PRIx64") pool_id=%d\n",
uuid_hi, uuid_lo, d_poolid);
- client->pools[d_poolid] = global_shared_pools[s_poolid];
- shared_pool_join(global_shared_pools[s_poolid], client);
- pool_free(pool);
- return d_poolid;
+ client->pools[d_poolid] = shpool;
+ if ( !shared_pool_join(shpool, client) )
+ {
+ pool_free(pool);
+ goto out;
+ }
+ else
+ goto fail;
}
}
- else if ( first_unused_s_poolid == MAX_GLOBAL_SHARED_POOLS )
- first_unused_s_poolid = s_poolid;
+ else
+ {
+ if ( first_unused_s_poolid == MAX_GLOBAL_SHARED_POOLS )
+ first_unused_s_poolid = i;
+ }
}
+
+ /* Failed to find a free global shared pool slot. */
if ( first_unused_s_poolid == MAX_GLOBAL_SHARED_POOLS )
{
tmem_client_warn("tmem: failed... no global shared pool slots available\n");
goto fail;
}
+ /* Add the new pool to the global shared pools. */
else
{
INIT_LIST_HEAD(&pool->share_list);
pool->shared_count = 0;
global_shared_pools[first_unused_s_poolid] = pool;
- (void)shared_pool_join(pool,client);
}
}
- client->pools[d_poolid] = pool;
- pool->pool_id = d_poolid;
- pool->persistent = persistent;
- pool->uuid[0] = uuid_lo; pool->uuid[1] = uuid_hi;
+
+out:
tmem_client_info("pool_id=%d\n", d_poolid);
return d_poolid;
--
1.7.10.4
* [PATCH 14/14] tmem: remove useless parameter from client and pool flush
2013-12-18 6:52 [PATCH 00/14] xen: new patches for tmem cleanup Bob Liu
` (12 preceding siblings ...)
2013-12-18 6:52 ` [PATCH 13/14] tmem: reorg the shared pool allocate path Bob Liu
@ 2013-12-18 6:52 ` Bob Liu
13 siblings, 0 replies; 16+ messages in thread
From: Bob Liu @ 2013-12-18 6:52 UTC (permalink / raw)
To: xen-devel; +Cc: james.harper, keir, ian.campbell, andrew.cooper3, JBeulich
The 'destroy' parameter of client_flush() and pool_flush() is unneeded; this
patch removes it.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 30 ++++++++++++------------------
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index ab5bba7..366dab2 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1108,7 +1108,7 @@ static int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id)
}
/* flush all data (owned by cli_id) from a pool and, optionally, free it */
-static void pool_flush(struct tmem_pool *pool, domid_t cli_id, bool_t destroy)
+static void pool_flush(struct tmem_pool *pool, domid_t cli_id)
{
ASSERT(pool != NULL);
if ( (is_shared(pool)) && (shared_pool_quit(pool,cli_id) > 0) )
@@ -1117,23 +1117,19 @@ static void pool_flush(struct tmem_pool *pool, domid_t cli_id, bool_t destroy)
tmem_cli_id_str, cli_id, pool->pool_id, tmem_cli_id_str,pool->client->cli_id);
return;
}
- tmem_client_info("%s %s-%s tmem pool %s=%d pool_id=%d\n",
- destroy ? "destroying" : "flushing",
+ tmem_client_info("Destroying %s-%s tmem pool %s=%d pool_id=%d\n",
is_persistent(pool) ? "persistent" : "ephemeral" ,
is_shared(pool) ? "shared" : "private",
tmem_cli_id_str, pool->client->cli_id, pool->pool_id);
if ( pool->client->live_migrating )
{
- tmem_client_warn("can't %s pool while %s is live-migrating\n",
- destroy?"destroy":"flush", tmem_client_str);
+ tmem_client_warn("can't destroy pool while %s is live-migrating\n",
+ tmem_client_str);
return;
}
pool_destroy_objs(pool, TMEM_CLI_ID_NULL);
- if ( destroy )
- {
- pool->client->pools[pool->pool_id] = NULL;
- pool_free(pool);
- }
+ pool->client->pools[pool->pool_id] = NULL;
+ pool_free(pool);
}
/************ CLIENT MANIPULATION OPERATIONS **************************/
@@ -1201,7 +1197,7 @@ static void client_free(struct client *client)
}
/* flush all data from a client and, optionally, free it */
-static void client_flush(struct client *client, bool_t destroy)
+static void client_flush(struct client *client)
{
int i;
struct tmem_pool *pool;
@@ -1210,12 +1206,10 @@ static void client_flush(struct client *client, bool_t destroy)
{
if ( (pool = client->pools[i]) == NULL )
continue;
- pool_flush(pool,client->cli_id,destroy);
- if ( destroy )
- client->pools[i] = NULL;
+ pool_flush(pool, client->cli_id);
+ client->pools[i] = NULL;
}
- if ( destroy )
- client_free(client);
+ client_free(client);
}
static bool_t client_over_quota(struct client *client)
@@ -1827,7 +1821,7 @@ static int do_tmem_destroy_pool(uint32_t pool_id)
if ( (pool = client->pools[pool_id]) == NULL )
return 0;
client->pools[pool_id] = NULL;
- pool_flush(pool,client->cli_id,1);
+ pool_flush(pool, client->cli_id);
return 1;
}
@@ -2772,7 +2766,7 @@ void tmem_destroy(void *v)
printk("tmem: flushing tmem pools for %s=%d\n",
tmem_cli_id_str, client->cli_id);
- client_flush(client, 1);
+ client_flush(client);
write_unlock(&tmem_rwlock);
}
--
1.7.10.4