From: Bob Liu <lliubbo@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: james.harper@bendigoit.com.au, ian.campbell@citrix.com,
andrew.cooper3@citrix.com, JBeulich@suse.com
Subject: [PATCH v3 10/15] tmem: cleanup: refactor the alloc/free path
Date: Wed, 11 Dec 2013 16:50:39 +0800
Message-ID: <1386751844-32387-11-git-send-email-bob.liu@oracle.com>
In-Reply-To: <1386751844-32387-1-git-send-email-bob.liu@oracle.com>
There are two allocation paths, one for the persistent pools and one for
the ephemeral pools. This patch refactors the allocation/free functions,
giving them better names and a more readable call hierarchy, and adds
more comments.
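For reference, here is a condensed sketch of the new dispatch (taken from
the tmem.c hunks below, with the local variable dropped): the persistent
case allocates from the owning client's domain, while the ephemeral case
draws from the shared tmem page list.

    static struct page_info *tmem_alloc_page(struct tmem_pool *pool)
    {
        /* Persistent pools allocate from the owning client's domain. */
        if ( pool != NULL && is_persistent(pool) )
            return __tmem_alloc_page_thispool(pool->client->domain);

        /* Ephemeral (or pool-less) allocations use the shared page list. */
        return __tmem_alloc_page(pool, 0);
    }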
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/tmem.c          | 112 +++++++++++++++++++++++++++++++++++++-------
 xen/common/tmem_xen.c      |  63 -------------------------
 xen/include/xen/tmem_xen.h |  20 ++------
 3 files changed, 100 insertions(+), 95 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index a09a506..b904285 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -165,6 +165,13 @@ static bool_t global_shared_auth = 0;
static atomic_t client_weight_total = ATOMIC_INIT(0);
static int tmem_initialized = 0;
+struct xmem_pool *tmem_mempool = 0;
+unsigned int tmem_mempool_maxalloc = 0;
+
+DEFINE_SPINLOCK(tmem_page_list_lock);
+PAGE_LIST_HEAD(tmem_page_list);
+unsigned long tmem_page_list_pages = 0;
+
/************ CONCURRENCY ***********************************************/
DEFINE_RWLOCK(tmem_rwlock);
static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
@@ -176,7 +183,29 @@ static DEFINE_SPINLOCK(pers_lists_spinlock);
/* global counters (should use long_atomic_t access) */
static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
-/************ MEMORY ALLOCATION INTERFACE *****************************/
+/*
+ * There are two types of memory allocation interfaces in tmem.
+ * One is based on xmem_pool and the other allocates whole pages.
+ * Both are built on the low-level functions __tmem_alloc_page() and
+ * __tmem_alloc_page_thispool().
+ * The allocation call paths are as below.
+ * Persistent pool:
+ *     1. tmem_malloc()
+ *            > xmem_pool_alloc()
+ *                > tmem_persistent_pool_page_get()
+ *                    > __tmem_alloc_page_thispool()
+ *     2. tmem_alloc_page()
+ *            > __tmem_alloc_page_thispool()
+ *
+ * Ephemeral pool:
+ *     1. tmem_malloc()
+ *            > xmem_pool_alloc()
+ *                > tmem_mempool_page_get()
+ *                    > __tmem_alloc_page()
+ *     2. tmem_alloc_page()
+ *            > __tmem_alloc_page()
+ *
+ * The free paths work in the same manner.
+ */
static void *tmem_malloc(size_t size, struct tmem_pool *pool)
{
void *v = NULL;
@@ -208,28 +237,76 @@ static void tmem_free(void *p, struct tmem_pool *pool)
}
}
-static struct page_info *tmem_page_alloc(struct tmem_pool *pool)
+static struct page_info *tmem_alloc_page(struct tmem_pool *pool)
{
struct page_info *pfp = NULL;
if ( pool != NULL && is_persistent(pool) )
- pfp = tmem_alloc_page_thispool(pool->client->domain);
+ pfp = __tmem_alloc_page_thispool(pool->client->domain);
else
- pfp = tmem_alloc_page(pool,0);
+ pfp = __tmem_alloc_page(pool,0);
return pfp;
}
-static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp)
+static void tmem_free_page(struct tmem_pool *pool, struct page_info *pfp)
{
ASSERT(pfp);
if ( pool == NULL || !is_persistent(pool) )
- tmem_free_page(pfp);
+ __tmem_free_page(pfp);
else
- tmem_free_page_thispool(pfp);
+ __tmem_free_page_thispool(pfp);
}
-/************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/
+static noinline void *tmem_mempool_page_get(unsigned long size)
+{
+ struct page_info *pi;
+ ASSERT(size == PAGE_SIZE);
+ if ( (pi = __tmem_alloc_page(NULL,0)) == NULL )
+ return NULL;
+ ASSERT(IS_VALID_PAGE(pi));
+ return page_to_virt(pi);
+}
+
+static void tmem_mempool_page_put(void *page_va)
+{
+ ASSERT(IS_PAGE_ALIGNED(page_va));
+ __tmem_free_page(virt_to_page(page_va));
+}
+
+static int __init tmem_mempool_init(void)
+{
+ tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get,
+ tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
+ if ( tmem_mempool )
+ tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool);
+ return tmem_mempool != NULL;
+}
+
+/* persistent pools are per-domain */
+static void *tmem_persistent_pool_page_get(unsigned long size)
+{
+ struct page_info *pi;
+ struct domain *d = current->domain;
+
+ ASSERT(size == PAGE_SIZE);
+ if ( (pi = __tmem_alloc_page_thispool(d)) == NULL )
+ return NULL;
+ ASSERT(IS_VALID_PAGE(pi));
+ return page_to_virt(pi);
+}
+
+static void tmem_persistent_pool_page_put(void *page_va)
+{
+ struct page_info *pi;
+
+ ASSERT(IS_PAGE_ALIGNED(page_va));
+ pi = mfn_to_page(virt_to_mfn(page_va));
+ ASSERT(IS_VALID_PAGE(pi));
+ __tmem_free_page_thispool(pi);
+}
+
+/************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/
#define NOT_SHAREABLE ((uint16_t)-1UL)
static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp)
@@ -304,7 +381,7 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
tmem_free(pcd_tze, pool);
} else {
/* real physical page */
- tmem_page_free(pool,pfp);
+ tmem_free_page(pool,pfp);
}
write_unlock(&pcd_tree_rwlocks[firstbyte]);
}
@@ -383,7 +460,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
/* match! if not compressed, free the no-longer-needed page */
/* but if compressed, data is assumed static so don't free! */
if ( cdata == NULL )
- tmem_page_free(pgp->us.obj->pool,pgp->pfp);
+ tmem_free_page(pgp->us.obj->pool,pgp->pfp);
goto match;
}
}
@@ -416,7 +493,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) {
tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
pcd->size = pfp_size;
- tmem_page_free(pgp->us.obj->pool,pgp->pfp);
+ tmem_free_page(pgp->us.obj->pool,pgp->pfp);
} else {
pcd->pfp = pgp->pfp;
pcd->size = PAGE_SIZE;
@@ -485,7 +562,7 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po
else if ( pgp_size )
tmem_free(pgp->cdata, pool);
else
- tmem_page_free(pgp->us.obj->pool,pgp->pfp);
+ tmem_free_page(pgp->us.obj->pool,pgp->pfp);
pgp->pfp = NULL;
pgp->size = -1;
}
@@ -1254,7 +1331,7 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn
ret = tmem_compress_from_client(cmfn, &dst, &size, clibuf);
if ( ret <= 0 )
goto out;
- else if ( (size == 0) || (size >= tmem_subpage_maxsize()) ) {
+ else if ( (size == 0) || (size >= tmem_mempool_maxalloc) ) {
ret = 0;
goto out;
} else if ( tmem_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) {
@@ -1311,7 +1388,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn,
copy_uncompressed:
if ( pgp->pfp )
pgp_free_data(pgp, pool);
- if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
+ if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL )
goto failed_dup;
pgp->size = 0;
ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null);
@@ -1422,7 +1499,7 @@ static int do_tmem_put(struct tmem_pool *pool,
}
copy_uncompressed:
- if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
+ if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL )
{
ret = -ENOMEM;
goto del_pgp_from_obj;
@@ -2365,7 +2442,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
if ( tmem_called_from_tmem(memflags) )
read_lock(&tmem_rwlock);
- while ( (pfp = tmem_alloc_page(NULL,1)) == NULL )
+ while ( (pfp = __tmem_alloc_page(NULL,1)) == NULL )
{
if ( (max_evictions-- <= 0) || !tmem_evict())
break;
@@ -2402,6 +2479,9 @@ static int __init init_tmem(void)
rwlock_init(&pcd_tree_rwlocks[i]);
}
+ if ( !tmem_mempool_init() )
+ return 0;
+
if ( tmem_init() )
{
printk("tmem: initialized comp=%d dedup=%d tze=%d\n",
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index bc8e249..5ef131b 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -238,67 +238,7 @@ int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va,
return 1;
}
-/****************** XEN-SPECIFIC MEMORY ALLOCATION ********************/
-
-struct xmem_pool *tmem_mempool = 0;
-unsigned int tmem_mempool_maxalloc = 0;
-
-DEFINE_SPINLOCK(tmem_page_list_lock);
-PAGE_LIST_HEAD(tmem_page_list);
-unsigned long tmem_page_list_pages = 0;
-
-static noinline void *tmem_mempool_page_get(unsigned long size)
-{
- struct page_info *pi;
-
- ASSERT(size == PAGE_SIZE);
- if ( (pi = tmem_alloc_page(NULL,0)) == NULL )
- return NULL;
- ASSERT(IS_VALID_PAGE(pi));
- return page_to_virt(pi);
-}
-
-static void tmem_mempool_page_put(void *page_va)
-{
- ASSERT(IS_PAGE_ALIGNED(page_va));
- tmem_free_page(virt_to_page(page_va));
-}
-
-static int __init tmem_mempool_init(void)
-{
- tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get,
- tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
- if ( tmem_mempool )
- tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool);
- return tmem_mempool != NULL;
-}
-
-/* persistent pools are per-domain */
-
-void *tmem_persistent_pool_page_get(unsigned long size)
-{
- struct page_info *pi;
- struct domain *d = current->domain;
-
- ASSERT(size == PAGE_SIZE);
- if ( (pi = tmem_alloc_page_thispool(d)) == NULL )
- return NULL;
- ASSERT(IS_VALID_PAGE(pi));
- return page_to_virt(pi);
-}
-
-void tmem_persistent_pool_page_put(void *page_va)
-{
- struct page_info *pi;
-
- ASSERT(IS_PAGE_ALIGNED(page_va));
- pi = mfn_to_page(virt_to_mfn(page_va));
- ASSERT(IS_VALID_PAGE(pi));
- tmem_free_page_thispool(pi);
-}
-
/****************** XEN-SPECIFIC HOST INITIALIZATION ********************/
-
static int dstmem_order, workmem_order;
static int cpu_callback(
@@ -351,9 +291,6 @@ int __init tmem_init(void)
{
unsigned int cpu;
- if ( !tmem_mempool_init() )
- return 0;
-
dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 7612a3f..79b0730 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -27,8 +27,6 @@ typedef uint32_t pagesize_t; /* like size_t, must handle largest PAGE_SIZE */
((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr)
#define IS_VALID_PAGE(_pi) ( mfn_valid(page_to_mfn(_pi)) )
-extern struct xmem_pool *tmem_mempool;
-extern unsigned int tmem_mempool_maxalloc;
extern struct page_list_head tmem_page_list;
extern spinlock_t tmem_page_list_lock;
extern unsigned long tmem_page_list_pages;
@@ -100,7 +98,7 @@ static inline void tmem_page_list_put(struct page_info *pi)
/*
* Memory allocation for persistent data
*/
-static inline struct page_info *tmem_alloc_page_thispool(struct domain *d)
+static inline struct page_info *__tmem_alloc_page_thispool(struct domain *d)
{
struct page_info *pi;
@@ -128,7 +126,7 @@ out:
return pi;
}
-static inline void tmem_free_page_thispool(struct page_info *pi)
+static inline void __tmem_free_page_thispool(struct page_info *pi)
{
struct domain *d = page_get_owner(pi);
@@ -146,7 +144,7 @@ static inline void tmem_free_page_thispool(struct page_info *pi)
/*
* Memory allocation for ephemeral (non-persistent) data
*/
-static inline struct page_info *tmem_alloc_page(void *pool, int no_heap)
+static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap)
{
struct page_info *pi = tmem_page_list_get();
@@ -158,18 +156,13 @@ static inline struct page_info *tmem_alloc_page(void *pool, int no_heap)
return pi;
}
-static inline void tmem_free_page(struct page_info *pi)
+static inline void __tmem_free_page(struct page_info *pi)
{
ASSERT(IS_VALID_PAGE(pi));
tmem_page_list_put(pi);
atomic_dec(&freeable_page_count);
}
-static inline unsigned int tmem_subpage_maxsize(void)
-{
- return tmem_mempool_maxalloc;
-}
-
static inline unsigned long tmem_free_mb(void)
{
return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
@@ -367,17 +360,12 @@ static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
int tmem_decompress_to_client(xen_pfn_t, void *, size_t,
tmem_cli_va_param_t);
-
int tmem_compress_from_client(xen_pfn_t, void **, size_t *,
tmem_cli_va_param_t);
int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t);
-
int tmem_copy_to_client(xen_pfn_t, struct page_info *, tmem_cli_va_param_t);
-
extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len);
-extern void *tmem_persistent_pool_page_get(unsigned long size);
-extern void tmem_persistent_pool_page_put(void *page_va);
#define tmem_client_err(fmt, args...) printk(XENLOG_G_ERR fmt, ##args)
#define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
--
1.7.10.4