From: Bob Liu <lliubbo@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: keir@xen.org, ian.campbell@citrix.com, JBeulich@suse.com
Subject: [PATCH 11/16] tmem: cleanup: drop tmem_lock_all
Date: Wed, 20 Nov 2013 16:46:20 +0800 [thread overview]
Message-ID: <1384937185-24749-11-git-send-email-bob.liu@oracle.com> (raw)
In-Reply-To: <1384937185-24749-1-git-send-email-bob.liu@oracle.com>
tmem_lock_all is used for debugging only; remove it from upstream to make
the tmem source code more readable and easier to maintain.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 267 +++++++++++++++++---------------------------
xen/common/tmem_xen.c | 3 -
xen/include/xen/tmem_xen.h | 8 --
3 files changed, 101 insertions(+), 177 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index c1b3e21..67fa1ee 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -167,22 +167,12 @@ static atomic_t client_weight_total = ATOMIC_INIT(0);
static int tmem_initialized = 0;
/************ CONCURRENCY ***********************************************/
-DEFINE_SPINLOCK(tmem_spinlock); /* used iff tmem_lock_all */
-DEFINE_RWLOCK(tmem_rwlock); /* used iff !tmem_lock_all */
+DEFINE_RWLOCK(tmem_rwlock);
static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
static DEFINE_SPINLOCK(pers_lists_spinlock);
-#define tmem_spin_lock(_l) do {if (!tmem_lock_all) spin_lock(_l);}while(0)
-#define tmem_spin_unlock(_l) do {if (!tmem_lock_all) spin_unlock(_l);}while(0)
-#define tmem_read_lock(_l) do {if (!tmem_lock_all) read_lock(_l);}while(0)
-#define tmem_read_unlock(_l) do {if (!tmem_lock_all) read_unlock(_l);}while(0)
-#define tmem_write_lock(_l) do {if (!tmem_lock_all) write_lock(_l);}while(0)
-#define tmem_write_unlock(_l) do {if (!tmem_lock_all) write_unlock(_l);}while(0)
-#define tmem_write_trylock(_l) ((tmem_lock_all)?1:write_trylock(_l))
-#define tmem_spin_trylock(_l) (tmem_lock_all?1:spin_trylock(_l))
-
-#define ASSERT_SPINLOCK(_l) ASSERT(tmem_lock_all || spin_is_locked(_l))
-#define ASSERT_WRITELOCK(_l) ASSERT(tmem_lock_all || rw_is_write_locked(_l))
+#define ASSERT_SPINLOCK(_l) ASSERT(spin_is_locked(_l))
+#define ASSERT_WRITELOCK(_l) ASSERT(rw_is_write_locked(_l))
/* global counters (should use long_atomic_t access) */
static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
@@ -250,7 +240,7 @@ static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp)
int ret;
ASSERT(tmem_dedup_enabled());
- tmem_read_lock(&pcd_tree_rwlocks[firstbyte]);
+ read_lock(&pcd_tree_rwlocks[firstbyte]);
pcd = pgp->pcd;
if ( pgp->size < PAGE_SIZE && pgp->size != 0 &&
pcd->size < PAGE_SIZE && pcd->size != 0 )
@@ -260,7 +250,7 @@ static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp)
ret = tmem_copy_tze_to_client(cmfn, pcd->tze, pcd->size);
else
ret = tmem_copy_to_client(cmfn, pcd->pfp, tmem_cli_buf_null);
- tmem_read_unlock(&pcd_tree_rwlocks[firstbyte]);
+ read_unlock(&pcd_tree_rwlocks[firstbyte]);
return ret;
}
@@ -283,14 +273,14 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
if ( have_pcd_rwlock )
ASSERT_WRITELOCK(&pcd_tree_rwlocks[firstbyte]);
else
- tmem_write_lock(&pcd_tree_rwlocks[firstbyte]);
+ write_lock(&pcd_tree_rwlocks[firstbyte]);
list_del_init(&pgp->pcd_siblings);
pgp->pcd = NULL;
pgp->firstbyte = NOT_SHAREABLE;
pgp->size = -1;
if ( --pcd->pgp_ref_count )
{
- tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
+ write_unlock(&pcd_tree_rwlocks[firstbyte]);
return;
}
@@ -317,7 +307,7 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
/* real physical page */
tmem_page_free(pool,pfp);
}
- tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
+ write_unlock(&pcd_tree_rwlocks[firstbyte]);
}
@@ -349,7 +339,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
ASSERT(pfp_size <= PAGE_SIZE);
ASSERT(!(pfp_size & (sizeof(uint64_t)-1)));
}
- tmem_write_lock(&pcd_tree_rwlocks[firstbyte]);
+ write_lock(&pcd_tree_rwlocks[firstbyte]);
/* look for page match */
root = &pcd_tree_roots[firstbyte];
@@ -443,7 +433,7 @@ match:
pgp->pcd = pcd;
unlock:
- tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
+ write_unlock(&pcd_tree_rwlocks[firstbyte]);
return ret;
}
@@ -552,7 +542,7 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
if ( !is_persistent(pgp->us.obj->pool) )
{
if ( !no_eph_lock )
- tmem_spin_lock(&eph_lists_spinlock);
+ spin_lock(&eph_lists_spinlock);
if ( !list_empty(&pgp->us.client_eph_pages) )
client->eph_count--;
ASSERT(client->eph_count >= 0);
@@ -562,20 +552,20 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
ASSERT(global_eph_count >= 0);
list_del_init(&pgp->global_eph_pages);
if ( !no_eph_lock )
- tmem_spin_unlock(&eph_lists_spinlock);
+ spin_unlock(&eph_lists_spinlock);
} else {
if ( client->live_migrating )
{
- tmem_spin_lock(&pers_lists_spinlock);
+ spin_lock(&pers_lists_spinlock);
list_add_tail(&pgp->client_inv_pages,
&client->persistent_invalidated_list);
if ( pgp != pgp->us.obj->pool->cur_pgp )
list_del_init(&pgp->us.pool_pers_pages);
- tmem_spin_unlock(&pers_lists_spinlock);
+ spin_unlock(&pers_lists_spinlock);
} else {
- tmem_spin_lock(&pers_lists_spinlock);
+ spin_lock(&pers_lists_spinlock);
list_del_init(&pgp->us.pool_pers_pages);
- tmem_spin_unlock(&pers_lists_spinlock);
+ spin_unlock(&pers_lists_spinlock);
}
}
}
@@ -709,7 +699,7 @@ static struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oi
struct tmem_object_root *obj;
restart_find:
- tmem_read_lock(&pool->pool_rwlock);
+ read_lock(&pool->pool_rwlock);
node = pool->obj_rb_root[oid_hash(oidp)].rb_node;
while ( node )
{
@@ -717,17 +707,12 @@ restart_find:
switch ( oid_compare(&obj->oid, oidp) )
{
case 0: /* equal */
- if ( tmem_lock_all )
- obj->no_evict = 1;
- else
+ if ( !spin_trylock(&obj->obj_spinlock) )
{
- if ( !tmem_spin_trylock(&obj->obj_spinlock) )
- {
- tmem_read_unlock(&pool->pool_rwlock);
- goto restart_find;
- }
- tmem_read_unlock(&pool->pool_rwlock);
+ read_unlock(&pool->pool_rwlock);
+ goto restart_find;
}
+ read_unlock(&pool->pool_rwlock);
return obj;
case -1:
node = node->rb_left;
@@ -736,7 +721,7 @@ restart_find:
node = node->rb_right;
}
}
- tmem_read_unlock(&pool->pool_rwlock);
+ read_unlock(&pool->pool_rwlock);
return NULL;
}
@@ -763,7 +748,7 @@ static void obj_free(struct tmem_object_root *obj, int no_rebalance)
/* use no_rebalance only if all objects are being destroyed anyway */
if ( !no_rebalance )
rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]);
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
tmem_free(obj, pool);
}
@@ -813,7 +798,7 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid
obj->oid = *oidp;
obj->pgp_count = 0;
obj->last_client = TMEM_CLI_ID_NULL;
- tmem_spin_lock(&obj->obj_spinlock);
+ spin_lock(&obj->obj_spinlock);
obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj);
obj->no_evict = 1;
ASSERT_SPINLOCK(&obj->obj_spinlock);
@@ -835,7 +820,7 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t
struct tmem_object_root *obj;
int i;
- tmem_write_lock(&pool->pool_rwlock);
+ write_lock(&pool->pool_rwlock);
pool->is_dying = 1;
for (i = 0; i < OBJ_HASH_BUCKETS; i++)
{
@@ -843,7 +828,7 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t
while ( node != NULL )
{
obj = container_of(node, struct tmem_object_root, rb_tree_node);
- tmem_spin_lock(&obj->obj_spinlock);
+ spin_lock(&obj->obj_spinlock);
node = rb_next(node);
ASSERT(obj->no_evict == 0);
if ( !selective )
@@ -852,10 +837,10 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t
else if ( obj->last_client == cli_id )
obj_destroy(obj,0);
else
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
}
}
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
}
@@ -1114,9 +1099,7 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho
if ( pool->is_dying )
return 0;
- if ( tmem_lock_all && !obj->no_evict )
- return 1;
- if ( tmem_spin_trylock(&obj->obj_spinlock) )
+ if ( spin_trylock(&obj->obj_spinlock) )
{
if ( tmem_dedup_enabled() )
{
@@ -1124,7 +1107,7 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho
if ( firstbyte == NOT_SHAREABLE )
goto obj_unlock;
ASSERT(firstbyte < 256);
- if ( !tmem_write_trylock(&pcd_tree_rwlocks[firstbyte]) )
+ if ( !write_trylock(&pcd_tree_rwlocks[firstbyte]) )
goto obj_unlock;
if ( pgp->pcd->pgp_ref_count > 1 && !pgp->eviction_attempted )
{
@@ -1138,15 +1121,15 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho
}
if ( obj->pgp_count > 1 )
return 1;
- if ( tmem_write_trylock(&pool->pool_rwlock) )
+ if ( write_trylock(&pool->pool_rwlock) )
{
*hold_pool_rwlock = 1;
return 1;
}
pcd_unlock:
- tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
+ write_unlock(&pcd_tree_rwlocks[firstbyte]);
obj_unlock:
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
}
return 0;
}
@@ -1160,7 +1143,7 @@ static int tmem_evict(void)
int ret = 0;
bool_t hold_pool_rwlock = 0;
- tmem_spin_lock(&eph_lists_spinlock);
+ spin_lock(&eph_lists_spinlock);
if ( (client != NULL) && client_over_quota(client) &&
!list_empty(&client->ephemeral_page_list) )
{
@@ -1201,13 +1184,13 @@ found:
obj_free(obj,0);
}
else
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
if ( hold_pool_rwlock )
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
ret = 1;
out:
- tmem_spin_unlock(&eph_lists_spinlock);
+ spin_unlock(&eph_lists_spinlock);
return ret;
}
@@ -1349,7 +1332,7 @@ done:
if ( is_shared(pool) )
obj->last_client = client->cli_id;
obj->no_evict = 0;
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
return 1;
failed_dup:
@@ -1362,12 +1345,12 @@ cleanup:
pgp_delete(pgpfound,0);
if ( obj->pgp_count == 0 )
{
- tmem_write_lock(&pool->pool_rwlock);
+ write_lock(&pool->pool_rwlock);
obj_free(obj,0);
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
} else {
obj->no_evict = 0;
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
}
return ret;
}
@@ -1403,14 +1386,14 @@ static int do_tmem_put(struct tmem_pool *pool,
/* no puts allowed into a frozen pool (except dup puts) */
if ( client->frozen )
return ret;
- tmem_write_lock(&pool->pool_rwlock);
+ write_lock(&pool->pool_rwlock);
if ( (obj = obj_new(pool,oidp)) == NULL )
{
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
return -ENOMEM;
}
newobj= 1;
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
}
/* When arrive here, we have a spinlocked obj for use */
@@ -1462,21 +1445,21 @@ copy_uncompressed:
insert_page:
if ( !is_persistent(pool) )
{
- tmem_spin_lock(&eph_lists_spinlock);
+ spin_lock(&eph_lists_spinlock);
list_add_tail(&pgp->global_eph_pages,
&global_ephemeral_page_list);
++global_eph_count;
list_add_tail(&pgp->us.client_eph_pages,
&client->ephemeral_page_list);
++client->eph_count;
- tmem_spin_unlock(&eph_lists_spinlock);
+ spin_unlock(&eph_lists_spinlock);
}
else
{ /* is_persistent */
- tmem_spin_lock(&pers_lists_spinlock);
+ spin_lock(&pers_lists_spinlock);
list_add_tail(&pgp->us.pool_pers_pages,
&pool->persistent_page_list);
- tmem_spin_unlock(&pers_lists_spinlock);
+ spin_unlock(&pers_lists_spinlock);
}
if ( is_shared(pool) )
@@ -1484,7 +1467,7 @@ insert_page:
obj->no_evict = 0;
/* free the obj spinlock */
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
return 1;
del_pgp_from_obj:
@@ -1496,14 +1479,14 @@ free_pgp:
unlock_obj:
if ( newobj )
{
- tmem_write_lock(&pool->pool_rwlock);
+ write_lock(&pool->pool_rwlock);
obj_free(obj,0);
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
}
else
{
obj->no_evict = 0;
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
}
return ret;
}
@@ -1531,7 +1514,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
if ( pgp == NULL )
{
obj->no_evict = 0;
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
return 0;
}
ASSERT(pgp->size != -1);
@@ -1554,31 +1537,31 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
pgp_delete(pgp,0);
if ( obj->pgp_count == 0 )
{
- tmem_write_lock(&pool->pool_rwlock);
+ write_lock(&pool->pool_rwlock);
obj_free(obj,0);
obj = NULL;
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
}
} else {
- tmem_spin_lock(&eph_lists_spinlock);
+ spin_lock(&eph_lists_spinlock);
list_del(&pgp->global_eph_pages);
list_add_tail(&pgp->global_eph_pages,&global_ephemeral_page_list);
list_del(&pgp->us.client_eph_pages);
list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list);
- tmem_spin_unlock(&eph_lists_spinlock);
+ spin_unlock(&eph_lists_spinlock);
obj->last_client = tmem_get_cli_id_from_current();
}
}
if ( obj != NULL )
{
obj->no_evict = 0;
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
}
return 1;
out:
obj->no_evict = 0;
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
return rc;
}
@@ -1594,18 +1577,18 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
if ( pgp == NULL )
{
obj->no_evict = 0;
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
goto out;
}
pgp_delete(pgp,0);
if ( obj->pgp_count == 0 )
{
- tmem_write_lock(&pool->pool_rwlock);
+ write_lock(&pool->pool_rwlock);
obj_free(obj,0);
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
} else {
obj->no_evict = 0;
- tmem_spin_unlock(&obj->obj_spinlock);
+ spin_unlock(&obj->obj_spinlock);
}
out:
@@ -1622,9 +1605,9 @@ static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
obj = obj_find(pool,oidp);
if ( obj == NULL )
goto out;
- tmem_write_lock(&pool->pool_rwlock);
+ write_lock(&pool->pool_rwlock);
obj_destroy(obj,0);
- tmem_write_unlock(&pool->pool_rwlock);
+ write_unlock(&pool->pool_rwlock);
out:
if ( pool->client->frozen )
@@ -2031,7 +2014,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
if ( bufsize < pagesize + sizeof(struct tmem_handle) )
return -ENOMEM;
- tmem_spin_lock(&pers_lists_spinlock);
+ spin_lock(&pers_lists_spinlock);
if ( list_empty(&pool->persistent_page_list) )
{
ret = -1;
@@ -2063,7 +2046,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
ret = do_tmem_get(pool, &oid, pgp->index, 0, buf);
out:
- tmem_spin_unlock(&pers_lists_spinlock);
+ spin_unlock(&pers_lists_spinlock);
return ret;
}
@@ -2079,7 +2062,7 @@ static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
return 0;
if ( bufsize < sizeof(struct tmem_handle) )
return 0;
- tmem_spin_lock(&pers_lists_spinlock);
+ spin_lock(&pers_lists_spinlock);
if ( list_empty(&client->persistent_invalidated_list) )
goto out;
if ( client->cur_pgp == NULL )
@@ -2105,7 +2088,7 @@ static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
tmem_copy_to_client_buf(buf, &h, 1);
ret = 1;
out:
- tmem_spin_unlock(&pers_lists_spinlock);
+ spin_unlock(&pers_lists_spinlock);
return ret;
}
@@ -2226,7 +2209,7 @@ long do_tmem_op(tmem_cli_op_t uops)
bool_t succ_get = 0, succ_put = 0;
bool_t non_succ_get = 0, non_succ_put = 0;
bool_t flush = 0, flush_obj = 0;
- bool_t tmem_write_lock_set = 0, tmem_read_lock_set = 0;
+ bool_t write_lock_set = 0, read_lock_set = 0;
if ( !tmem_initialized )
return -ENODEV;
@@ -2234,47 +2217,31 @@ long do_tmem_op(tmem_cli_op_t uops)
if ( !tmem_current_permitted() )
return -EPERM;
- if ( tmem_lock_all )
- {
- if ( tmem_lock_all > 1 )
- spin_lock_irq(&tmem_spinlock);
- else
- spin_lock(&tmem_spinlock);
- }
-
if ( client != NULL && tmem_client_is_dying(client) )
- {
- rc = -ENODEV;
- if ( tmem_lock_all )
- goto out;
- simple_error:
- return rc;
- }
+ return -ENODEV;
if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) )
{
tmem_client_err("tmem: can't get tmem struct from %s\n", tmem_client_str);
rc = -EFAULT;
- if ( !tmem_lock_all )
- goto simple_error;
- goto out;
+ return rc;
}
if ( op.cmd == TMEM_CONTROL )
{
- tmem_write_lock(&tmem_rwlock);
- tmem_write_lock_set = 1;
+ write_lock(&tmem_rwlock);
+ write_lock_set = 1;
rc = do_tmem_control(&op);
goto out;
} else if ( op.cmd == TMEM_AUTH ) {
- tmem_write_lock(&tmem_rwlock);
- tmem_write_lock_set = 1;
+ write_lock(&tmem_rwlock);
+ write_lock_set = 1;
rc = tmemc_shared_pool_auth(op.u.creat.arg1,op.u.creat.uuid[0],
op.u.creat.uuid[1],op.u.creat.flags);
goto out;
} else if ( op.cmd == TMEM_RESTORE_NEW ) {
- tmem_write_lock(&tmem_rwlock);
- tmem_write_lock_set = 1;
+ write_lock(&tmem_rwlock);
+ write_lock_set = 1;
rc = do_tmem_new_pool(op.u.creat.arg1, op.pool_id, op.u.creat.flags,
op.u.creat.uuid[0], op.u.creat.uuid[1]);
goto out;
@@ -2283,8 +2250,8 @@ long do_tmem_op(tmem_cli_op_t uops)
/* create per-client tmem structure dynamically on first use by client */
if ( client == NULL )
{
- tmem_write_lock(&tmem_rwlock);
- tmem_write_lock_set = 1;
+ write_lock(&tmem_rwlock);
+ write_lock_set = 1;
if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL )
{
tmem_client_err("tmem: can't create tmem structure for %s\n",
@@ -2296,18 +2263,18 @@ long do_tmem_op(tmem_cli_op_t uops)
if ( op.cmd == TMEM_NEW_POOL || op.cmd == TMEM_DESTROY_POOL )
{
- if ( !tmem_write_lock_set )
+ if ( !write_lock_set )
{
- tmem_write_lock(&tmem_rwlock);
- tmem_write_lock_set = 1;
+ write_lock(&tmem_rwlock);
+ write_lock_set = 1;
}
}
else
{
- if ( !tmem_write_lock_set )
+ if ( !write_lock_set )
{
- tmem_read_lock(&tmem_rwlock);
- tmem_read_lock_set = 1;
+ read_lock(&tmem_rwlock);
+ read_lock_set = 1;
}
if ( ((uint32_t)op.pool_id >= MAX_POOLS_PER_DOMAIN) ||
((pool = client->pools[op.pool_id]) == NULL) )
@@ -2357,21 +2324,12 @@ long do_tmem_op(tmem_cli_op_t uops)
}
out:
- if ( tmem_lock_all )
- {
- if ( tmem_lock_all > 1 )
- spin_unlock_irq(&tmem_spinlock);
- else
- spin_unlock(&tmem_spinlock);
- } else {
- if ( tmem_write_lock_set )
- write_unlock(&tmem_rwlock);
- else if ( tmem_read_lock_set )
- read_unlock(&tmem_rwlock);
- else
- ASSERT(0);
- }
-
+ if ( write_lock_set )
+ write_unlock(&tmem_rwlock);
+ else if ( read_lock_set )
+ read_unlock(&tmem_rwlock);
+ else
+ ASSERT(0);
return rc;
}
@@ -2389,38 +2347,26 @@ void tmem_destroy(void *v)
return;
}
- if ( tmem_lock_all )
- spin_lock(&tmem_spinlock);
- else
- write_lock(&tmem_rwlock);
+ write_lock(&tmem_rwlock);
printk("tmem: flushing tmem pools for %s=%d\n",
tmem_cli_id_str, client->cli_id);
client_flush(client, 1);
- if ( tmem_lock_all )
- spin_unlock(&tmem_spinlock);
- else
- write_unlock(&tmem_rwlock);
+ write_unlock(&tmem_rwlock);
}
/* freezing all pools guarantees that no additional memory will be consumed */
void tmem_freeze_all(unsigned char key)
{
static int freeze = 0;
-
- if ( tmem_lock_all )
- spin_lock(&tmem_spinlock);
- else
- write_lock(&tmem_rwlock);
+
+ write_lock(&tmem_rwlock);
freeze = !freeze;
tmemc_freeze_pools(TMEM_CLI_ID_NULL,freeze);
- if ( tmem_lock_all )
- spin_unlock(&tmem_spinlock);
- else
- write_unlock(&tmem_rwlock);
+ write_unlock(&tmem_rwlock);
}
#define MAX_EVICTS 10 /* should be variable or set via TMEMC_ ?? */
@@ -2442,12 +2388,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
}
if ( tmem_called_from_tmem(memflags) )
- {
- if ( tmem_lock_all )
- spin_lock(&tmem_spinlock);
- else
- read_lock(&tmem_rwlock);
- }
+ read_lock(&tmem_rwlock);
while ( (pfp = tmem_alloc_page(NULL,1)) == NULL )
{
@@ -2462,12 +2403,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
}
if ( tmem_called_from_tmem(memflags) )
- {
- if ( tmem_lock_all )
- spin_unlock(&tmem_spinlock);
- else
- read_unlock(&tmem_rwlock);
- }
+ read_unlock(&tmem_rwlock);
return pfp;
}
@@ -2493,9 +2429,8 @@ static int __init init_tmem(void)
if ( tmem_init() )
{
- printk("tmem: initialized comp=%d dedup=%d tze=%d global-lock=%d\n",
- tmem_compression_enabled(), tmem_dedup_enabled(), tmem_tze_enabled(),
- tmem_lock_all);
+ printk("tmem: initialized comp=%d dedup=%d tze=%d\n",
+ tmem_compression_enabled(), tmem_dedup_enabled(), tmem_tze_enabled());
if ( tmem_dedup_enabled()&&tmem_compression_enabled()&&tmem_tze_enabled() )
{
tmem_tze_disable();
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index fbd1acc..bc8e249 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -29,9 +29,6 @@ boolean_param("tmem_tze", opt_tmem_tze);
bool_t __read_mostly opt_tmem_shared_auth = 0;
boolean_param("tmem_shared_auth", opt_tmem_shared_auth);
-int __read_mostly opt_tmem_lock = 0;
-integer_param("tmem_lock", opt_tmem_lock);
-
atomic_t freeable_page_count = ATOMIC_INIT(0);
/* these are a concurrency bottleneck, could be percpu and dynamically
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index ae6acf3..9907575 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -34,11 +34,6 @@ extern spinlock_t tmem_page_list_lock;
extern unsigned long tmem_page_list_pages;
extern atomic_t freeable_page_count;
-extern spinlock_t tmem_lock;
-extern spinlock_t tmem_spinlock;
-extern rwlock_t tmem_rwlock;
-
-extern void tmem_copy_page(char *to, char*from);
extern int tmem_init(void);
#define tmem_hash hash_long
@@ -77,8 +72,6 @@ static inline bool_t tmem_enabled(void)
return opt_tmem;
}
-extern int opt_tmem_lock;
-
/*
* Memory free page list management
*/
@@ -182,7 +175,6 @@ static inline unsigned long tmem_free_mb(void)
return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
}
-#define tmem_lock_all opt_tmem_lock
#define tmem_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
/* "Client" (==domain) abstraction */
--
1.7.10.4
next prev parent reply other threads:[~2013-11-20 8:47 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-11-20 8:46 [PATCH 01/16] tmem: cleanup: drop some debug code Bob Liu
2013-11-20 8:46 ` [PATCH 02/16] tmem: cleanup: drop useless function 'tmem_copy_page' Bob Liu
2013-11-20 8:46 ` [PATCH 03/16] tmem: cleanup: rm unused tmem_op Bob Liu
2013-11-22 17:38 ` Konrad Rzeszutek Wilk
2013-11-25 9:43 ` Jan Beulich
2013-11-25 9:52 ` Ian Campbell
2013-11-25 9:58 ` Jan Beulich
2013-11-25 16:37 ` Konrad Rzeszutek Wilk
2013-11-25 16:40 ` Ian Campbell
2013-11-25 17:09 ` Konrad Rzeszutek Wilk
2013-11-25 17:12 ` Ian Campbell
2013-11-25 19:56 ` Konrad Rzeszutek Wilk
2013-11-26 8:56 ` Bob Liu
2013-11-20 8:46 ` [PATCH 04/16] tmem: cleanup: rm unneeded parameters from put path Bob Liu
2013-11-22 17:54 ` Konrad Rzeszutek Wilk
2013-11-26 8:22 ` Bob Liu
2013-11-20 8:46 ` [PATCH 05/16] tmem: cleanup: rm unneeded parameters from get path Bob Liu
2013-11-22 17:55 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 06/16] tmem: cleanup: reorg do_tmem_put() Bob Liu
2013-11-22 18:04 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 07/16] tmem: drop unneeded is_ephemeral() and is_private() Bob Liu
2013-11-20 8:46 ` [PATCH 08/16] tmem: cleanup: rm useless EXPORT/FORWARD define Bob Liu
2013-11-22 18:05 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 09/16] tmem: cleanup: drop tmemc_list() temporary Bob Liu
2013-11-22 18:07 ` Konrad Rzeszutek Wilk
2013-11-26 8:28 ` Bob Liu
2013-11-22 21:00 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 10/16] tmem: cleanup: drop runtime statistics Bob Liu
2013-11-22 18:08 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` Bob Liu [this message]
2013-11-20 8:46 ` [PATCH 12/16] tmem: cleanup: refactor the alloc/free path Bob Liu
2013-11-20 8:46 ` [PATCH 13/16] tmem: cleanup: __tmem_alloc_page: drop unneeded parameters Bob Liu
2013-11-22 18:17 ` Konrad Rzeszutek Wilk
2013-11-26 8:41 ` Bob Liu
2013-11-26 17:38 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 14/16] tmem: cleanup: drop useless functions from head file Bob Liu
2013-11-27 14:38 ` Andrew Cooper
2013-11-27 14:52 ` Konrad Rzeszutek Wilk
2013-11-27 14:59 ` Andrew Cooper
2013-11-27 15:55 ` Jan Beulich
2013-11-20 8:46 ` [PATCH 15/16] tmem: refactor function tmem_ensure_avail_pages() Bob Liu
2013-11-22 18:22 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 16/16] tmem: cleanup: rename tmem_relinquish_npages() Bob Liu
2013-11-20 9:08 ` [PATCH 01/16] tmem: cleanup: drop some debug code Jan Beulich
2013-11-20 9:19 ` Bob Liu
2013-11-20 9:25 ` Jan Beulich
2013-11-20 13:51 ` Konrad Rzeszutek Wilk
2013-11-20 14:21 ` Jan Beulich
2013-11-20 18:46 ` Konrad Rzeszutek Wilk
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1384937185-24749-11-git-send-email-bob.liu@oracle.com \
--to=lliubbo@gmail.com \
--cc=JBeulich@suse.com \
--cc=ian.campbell@citrix.com \
--cc=keir@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).