From: Bob Liu <lliubbo@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: keir@xen.org, ian.campbell@citrix.com, JBeulich@suse.com
Subject: [PATCH 12/16] tmem: cleanup: refactor the alloc/free path
Date: Wed, 20 Nov 2013 16:46:21 +0800 [thread overview]
Message-ID: <1384937185-24749-12-git-send-email-bob.liu@oracle.com> (raw)
In-Reply-To: <1384937185-24749-1-git-send-email-bob.liu@oracle.com>
There are two allocation paths, one each for the persistent and ephemeral pools.
This patch tries to refactor those allocation/free functions with better names and
a more readable call layer. Also added more comments.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/common/tmem.c | 112 +++++++++++++++++++++++++++++++++++++-------
xen/common/tmem_xen.c | 63 -------------------------
xen/include/xen/tmem_xen.h | 20 ++------
3 files changed, 100 insertions(+), 95 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 67fa1ee..ee61899 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -166,6 +166,13 @@ static bool_t global_shared_auth = 0;
static atomic_t client_weight_total = ATOMIC_INIT(0);
static int tmem_initialized = 0;
+struct xmem_pool *tmem_mempool = 0;
+unsigned int tmem_mempool_maxalloc = 0;
+
+DEFINE_SPINLOCK(tmem_page_list_lock);
+PAGE_LIST_HEAD(tmem_page_list);
+unsigned long tmem_page_list_pages = 0;
+
/************ CONCURRENCY ***********************************************/
DEFINE_RWLOCK(tmem_rwlock);
static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
@@ -177,7 +184,29 @@ static DEFINE_SPINLOCK(pers_lists_spinlock);
/* global counters (should use long_atomic_t access) */
static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
-/************ MEMORY ALLOCATION INTERFACE *****************************/
+/*
+ * There are two types of memory allocation interfaces in tmem.
+ * One is based on xmem_pool and the other is used to allocate a whole page.
+ * Both of them are based on the low-level functions __tmem_alloc_page/_thispool().
+ * The call trace of the alloc path is as below.
+ * Persistent pool:
+ * 1.tmem_malloc()
+ * > xmem_pool_alloc()
+ * > tmem_persistent_pool_page_get()
+ * > __tmem_alloc_page_thispool()
+ * 2.tmem_alloc_page()
+ * > __tmem_alloc_page_thispool()
+ *
+ * Ephemeral pool:
+ * 1.tmem_malloc()
+ * > xmem_pool_alloc()
+ * > tmem_mempool_page_get()
+ * > __tmem_alloc_page()
+ * 2.tmem_alloc_page()
+ * > __tmem_alloc_page()
+ *
+ * The free path is done in the same manner.
+ */
static void *tmem_malloc(size_t size, struct tmem_pool *pool)
{
void *v = NULL;
@@ -209,28 +238,76 @@ static void tmem_free(void *p, struct tmem_pool *pool)
}
}
-static struct page_info *tmem_page_alloc(struct tmem_pool *pool)
+static struct page_info *tmem_alloc_page(struct tmem_pool *pool)
{
struct page_info *pfp = NULL;
if ( pool != NULL && is_persistent(pool) )
- pfp = tmem_alloc_page_thispool(pool->client->domain);
+ pfp = __tmem_alloc_page_thispool(pool->client->domain);
else
- pfp = tmem_alloc_page(pool,0);
+ pfp = __tmem_alloc_page(pool,0);
return pfp;
}
-static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp)
+static void tmem_free_page(struct tmem_pool *pool, struct page_info *pfp)
{
ASSERT(pfp);
if ( pool == NULL || !is_persistent(pool) )
- tmem_free_page(pfp);
+ __tmem_free_page(pfp);
else
- tmem_free_page_thispool(pfp);
+ __tmem_free_page_thispool(pfp);
}
-/************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/
+static noinline void *tmem_mempool_page_get(unsigned long size)
+{
+ struct page_info *pi;
+ ASSERT(size == PAGE_SIZE);
+ if ( (pi = __tmem_alloc_page(NULL,0)) == NULL )
+ return NULL;
+ ASSERT(IS_VALID_PAGE(pi));
+ return page_to_virt(pi);
+}
+
+static void tmem_mempool_page_put(void *page_va)
+{
+ ASSERT(IS_PAGE_ALIGNED(page_va));
+ __tmem_free_page(virt_to_page(page_va));
+}
+
+static int __init tmem_mempool_init(void)
+{
+ tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get,
+ tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
+ if ( tmem_mempool )
+ tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool);
+ return tmem_mempool != NULL;
+}
+
+/* persistent pools are per-domain */
+static void *tmem_persistent_pool_page_get(unsigned long size)
+{
+ struct page_info *pi;
+ struct domain *d = current->domain;
+
+ ASSERT(size == PAGE_SIZE);
+ if ( (pi = __tmem_alloc_page_thispool(d)) == NULL )
+ return NULL;
+ ASSERT(IS_VALID_PAGE(pi));
+ return page_to_virt(pi);
+}
+
+static void tmem_persistent_pool_page_put(void *page_va)
+{
+ struct page_info *pi;
+
+ ASSERT(IS_PAGE_ALIGNED(page_va));
+ pi = mfn_to_page(virt_to_mfn(page_va));
+ ASSERT(IS_VALID_PAGE(pi));
+ __tmem_free_page_thispool(pi);
+}
+
+/************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/
#define NOT_SHAREABLE ((uint16_t)-1UL)
static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp)
@@ -305,7 +382,7 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
tmem_free(pcd_tze, pool);
} else {
/* real physical page */
- tmem_page_free(pool,pfp);
+ tmem_free_page(pool,pfp);
}
write_unlock(&pcd_tree_rwlocks[firstbyte]);
}
@@ -384,7 +461,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
/* match! if not compressed, free the no-longer-needed page */
/* but if compressed, data is assumed static so don't free! */
if ( cdata == NULL )
- tmem_page_free(pgp->us.obj->pool,pgp->pfp);
+ tmem_free_page(pgp->us.obj->pool,pgp->pfp);
goto match;
}
}
@@ -417,7 +494,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) {
tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
pcd->size = pfp_size;
- tmem_page_free(pgp->us.obj->pool,pgp->pfp);
+ tmem_free_page(pgp->us.obj->pool,pgp->pfp);
} else {
pcd->pfp = pgp->pfp;
pcd->size = PAGE_SIZE;
@@ -486,7 +563,7 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po
else if ( pgp_size )
tmem_free(pgp->cdata, pool);
else
- tmem_page_free(pgp->us.obj->pool,pgp->pfp);
+ tmem_free_page(pgp->us.obj->pool,pgp->pfp);
pgp->pfp = NULL;
pgp->size = -1;
}
@@ -1258,7 +1335,7 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn
ret = tmem_compress_from_client(cmfn, &dst, &size, clibuf);
if ( ret <= 0 )
goto out;
- else if ( (size == 0) || (size >= tmem_subpage_maxsize()) ) {
+ else if ( (size == 0) || (size >= tmem_mempool_maxalloc) ) {
ret = 0;
goto out;
} else if ( tmem_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) {
@@ -1315,7 +1392,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn,
copy_uncompressed:
if ( pgp->pfp )
pgp_free_data(pgp, pool);
- if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
+ if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL )
goto failed_dup;
pgp->size = 0;
ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null);
@@ -1427,7 +1504,7 @@ static int do_tmem_put(struct tmem_pool *pool,
}
copy_uncompressed:
- if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
+ if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL )
{
ret = -ENOMEM;
goto del_pgp_from_obj;
@@ -2390,7 +2467,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
if ( tmem_called_from_tmem(memflags) )
read_lock(&tmem_rwlock);
- while ( (pfp = tmem_alloc_page(NULL,1)) == NULL )
+ while ( (pfp = __tmem_alloc_page(NULL,1)) == NULL )
{
if ( (max_evictions-- <= 0) || !tmem_evict())
break;
@@ -2427,6 +2504,9 @@ static int __init init_tmem(void)
rwlock_init(&pcd_tree_rwlocks[i]);
}
+ if ( !tmem_mempool_init() )
+ return 0;
+
if ( tmem_init() )
{
printk("tmem: initialized comp=%d dedup=%d tze=%d\n",
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index bc8e249..5ef131b 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -238,67 +238,7 @@ int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
return 1;
}
-/****************** XEN-SPECIFIC MEMORY ALLOCATION ********************/
-
-struct xmem_pool *tmem_mempool = 0;
-unsigned int tmem_mempool_maxalloc = 0;
-
-DEFINE_SPINLOCK(tmem_page_list_lock);
-PAGE_LIST_HEAD(tmem_page_list);
-unsigned long tmem_page_list_pages = 0;
-
-static noinline void *tmem_mempool_page_get(unsigned long size)
-{
- struct page_info *pi;
-
- ASSERT(size == PAGE_SIZE);
- if ( (pi = tmem_alloc_page(NULL,0)) == NULL )
- return NULL;
- ASSERT(IS_VALID_PAGE(pi));
- return page_to_virt(pi);
-}
-
-static void tmem_mempool_page_put(void *page_va)
-{
- ASSERT(IS_PAGE_ALIGNED(page_va));
- tmem_free_page(virt_to_page(page_va));
-}
-
-static int __init tmem_mempool_init(void)
-{
- tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get,
- tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
- if ( tmem_mempool )
- tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool);
- return tmem_mempool != NULL;
-}
-
-/* persistent pools are per-domain */
-
-void *tmem_persistent_pool_page_get(unsigned long size)
-{
- struct page_info *pi;
- struct domain *d = current->domain;
-
- ASSERT(size == PAGE_SIZE);
- if ( (pi = tmem_alloc_page_thispool(d)) == NULL )
- return NULL;
- ASSERT(IS_VALID_PAGE(pi));
- return page_to_virt(pi);
-}
-
-void tmem_persistent_pool_page_put(void *page_va)
-{
- struct page_info *pi;
-
- ASSERT(IS_PAGE_ALIGNED(page_va));
- pi = mfn_to_page(virt_to_mfn(page_va));
- ASSERT(IS_VALID_PAGE(pi));
- tmem_free_page_thispool(pi);
-}
-
/****************** XEN-SPECIFIC HOST INITIALIZATION ********************/
-
static int dstmem_order, workmem_order;
static int cpu_callback(
@@ -351,9 +291,6 @@ int __init tmem_init(void)
{
unsigned int cpu;
- if ( !tmem_mempool_init() )
- return 0;
-
dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 9907575..7468c28 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -27,8 +27,6 @@ typedef uint32_t pagesize_t; /* like size_t, must handle largest PAGE_SIZE */
((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr)
#define IS_VALID_PAGE(_pi) ( mfn_valid(page_to_mfn(_pi)) )
-extern struct xmem_pool *tmem_mempool;
-extern unsigned int tmem_mempool_maxalloc;
extern struct page_list_head tmem_page_list;
extern spinlock_t tmem_page_list_lock;
extern unsigned long tmem_page_list_pages;
@@ -100,7 +98,7 @@ static inline void tmem_page_list_put(struct page_info *pi)
/*
* Memory allocation for persistent data
*/
-static inline struct page_info *tmem_alloc_page_thispool(struct domain *d)
+static inline struct page_info *__tmem_alloc_page_thispool(struct domain *d)
{
struct page_info *pi;
@@ -128,7 +126,7 @@ out:
return pi;
}
-static inline void tmem_free_page_thispool(struct page_info *pi)
+static inline void __tmem_free_page_thispool(struct page_info *pi)
{
struct domain *d = page_get_owner(pi);
@@ -146,7 +144,7 @@ static inline void tmem_free_page_thispool(struct page_info *pi)
/*
* Memory allocation for ephemeral (non-persistent) data
*/
-static inline struct page_info *tmem_alloc_page(void *pool, int no_heap)
+static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap)
{
struct page_info *pi = tmem_page_list_get();
@@ -158,18 +156,13 @@ static inline struct page_info *tmem_alloc_page(void *pool, int no_heap)
return pi;
}
-static inline void tmem_free_page(struct page_info *pi)
+static inline void __tmem_free_page(struct page_info *pi)
{
ASSERT(IS_VALID_PAGE(pi));
tmem_page_list_put(pi);
atomic_dec(&freeable_page_count);
}
-static inline unsigned int tmem_subpage_maxsize(void)
-{
- return tmem_mempool_maxalloc;
-}
-
static inline unsigned long tmem_free_mb(void)
{
return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
@@ -373,17 +366,12 @@ static inline void tmem_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf,
int tmem_decompress_to_client(xen_pfn_t, void *, size_t,
tmem_cli_va_param_t);
-
int tmem_compress_from_client(xen_pfn_t, void **, size_t *,
tmem_cli_va_param_t);
int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t);
-
int tmem_copy_to_client(xen_pfn_t, struct page_info *, tmem_cli_va_param_t);
-
extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
-extern void *tmem_persistent_pool_page_get(unsigned long size);
-extern void tmem_persistent_pool_page_put(void *page_va);
#define tmem_client_err(fmt, args...) printk(XENLOG_G_ERR fmt, ##args)
#define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
--
1.7.10.4
next prev parent reply other threads:[~2013-11-20 8:47 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-11-20 8:46 [PATCH 01/16] tmem: cleanup: drop some debug code Bob Liu
2013-11-20 8:46 ` [PATCH 02/16] tmem: cleanup: drop useless function 'tmem_copy_page' Bob Liu
2013-11-20 8:46 ` [PATCH 03/16] tmem: cleanup: rm unused tmem_op Bob Liu
2013-11-22 17:38 ` Konrad Rzeszutek Wilk
2013-11-25 9:43 ` Jan Beulich
2013-11-25 9:52 ` Ian Campbell
2013-11-25 9:58 ` Jan Beulich
2013-11-25 16:37 ` Konrad Rzeszutek Wilk
2013-11-25 16:40 ` Ian Campbell
2013-11-25 17:09 ` Konrad Rzeszutek Wilk
2013-11-25 17:12 ` Ian Campbell
2013-11-25 19:56 ` Konrad Rzeszutek Wilk
2013-11-26 8:56 ` Bob Liu
2013-11-20 8:46 ` [PATCH 04/16] tmem: cleanup: rm unneeded parameters from put path Bob Liu
2013-11-22 17:54 ` Konrad Rzeszutek Wilk
2013-11-26 8:22 ` Bob Liu
2013-11-20 8:46 ` [PATCH 05/16] tmem: cleanup: rm unneeded parameters from get path Bob Liu
2013-11-22 17:55 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 06/16] tmem: cleanup: reorg do_tmem_put() Bob Liu
2013-11-22 18:04 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 07/16] tmem: drop unneeded is_ephemeral() and is_private() Bob Liu
2013-11-20 8:46 ` [PATCH 08/16] tmem: cleanup: rm useless EXPORT/FORWARD define Bob Liu
2013-11-22 18:05 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 09/16] tmem: cleanup: drop tmemc_list() temporary Bob Liu
2013-11-22 18:07 ` Konrad Rzeszutek Wilk
2013-11-26 8:28 ` Bob Liu
2013-11-22 21:00 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 10/16] tmem: cleanup: drop runtime statistics Bob Liu
2013-11-22 18:08 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 11/16] tmem: cleanup: drop tmem_lock_all Bob Liu
2013-11-20 8:46 ` Bob Liu [this message]
2013-11-20 8:46 ` [PATCH 13/16] tmem: cleanup: __tmem_alloc_page: drop unneed parameters Bob Liu
2013-11-22 18:17 ` Konrad Rzeszutek Wilk
2013-11-26 8:41 ` Bob Liu
2013-11-26 17:38 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 14/16] tmem: cleanup: drop useless functions from head file Bob Liu
2013-11-27 14:38 ` Andrew Cooper
2013-11-27 14:52 ` Konrad Rzeszutek Wilk
2013-11-27 14:59 ` Andrew Cooper
2013-11-27 15:55 ` Jan Beulich
2013-11-20 8:46 ` [PATCH 15/16] tmem: refator function tmem_ensure_avail_pages() Bob Liu
2013-11-22 18:22 ` Konrad Rzeszutek Wilk
2013-11-20 8:46 ` [PATCH 16/16] tmem: cleanup: rename tmem_relinquish_npages() Bob Liu
2013-11-20 9:08 ` [PATCH 01/16] tmem: cleanup: drop some debug code Jan Beulich
2013-11-20 9:19 ` Bob Liu
2013-11-20 9:25 ` Jan Beulich
2013-11-20 13:51 ` Konrad Rzeszutek Wilk
2013-11-20 14:21 ` Jan Beulich
2013-11-20 18:46 ` Konrad Rzeszutek Wilk
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1384937185-24749-12-git-send-email-bob.liu@oracle.com \
--to=lliubbo@gmail.com \
--cc=JBeulich@suse.com \
--cc=ian.campbell@citrix.com \
--cc=keir@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).