* [PATCH 1/4] zcache: introduce zero-filled pages handler
From: Wanpeng Li @ 2013-03-13 7:05 UTC
To: Andrew Morton
Cc: Greg Kroah-Hartman, Dan Magenheimer, Seth Jennings,
Konrad Rzeszutek Wilk, Minchan Kim, linux-mm, linux-kernel,
Wanpeng Li
Introduce a zero-filled pages handler to capture and handle zero-filled pages.
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
---
drivers/staging/zcache/zcache-main.c | 26 ++++++++++++++++++++++++++
1 files changed, 26 insertions(+), 0 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 328898e..b71e033 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -460,6 +460,32 @@ static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
kmem_cache_free(zcache_obj_cache, obj);
}
+static bool page_zero_filled(void *ptr)
+{
+ unsigned int pos;
+ unsigned long *page;
+
+ page = (unsigned long *)ptr;
+
+ for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++) {
+ if (page[pos])
+ return false;
+ }
+
+ return true;
+}
+
+static void handle_zero_page(void *page)
+{
+ void *user_mem;
+
+ user_mem = kmap_atomic(page);
+ memset(user_mem, 0, PAGE_SIZE);
+ kunmap_atomic(user_mem);
+
+ flush_dcache_page(page);
+}
+
static struct tmem_hostops zcache_hostops = {
.obj_alloc = zcache_obj_alloc,
.obj_free = zcache_obj_free,
--
1.7.7.6
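
For reference, a minimal userspace sketch of the same detection loop (a hypothetical standalone test, assuming a 4 KiB page; the in-kernel code above scans PAGE_SIZE bytes of a kmap_atomic()-mapped page instead of a plain buffer):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Scan the page one unsigned long at a time, as page_zero_filled() does. */
static bool buf_zero_filled(const void *ptr)
{
	const unsigned long *page = ptr;
	size_t pos;

	for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++)
		if (page[pos])
			return false;
	return true;
}

int main(void)
{
	unsigned long buf[PAGE_SIZE / sizeof(unsigned long)] = { 0 };

	printf("all zero: %d\n", buf_zero_filled(buf));	/* prints 1 */
	buf[17] = 0xdead;
	printf("dirtied:  %d\n", buf_zero_filled(buf));	/* prints 0 */
	return 0;
}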
* [PATCH 2/4] zcache: zero-filled pages awareness
From: Wanpeng Li @ 2013-03-13 7:05 UTC
To: Andrew Morton
Cc: Greg Kroah-Hartman, Dan Magenheimer, Seth Jennings,
Konrad Rzeszutek Wilk, Minchan Kim, linux-mm, linux-kernel,
Wanpeng Li
Compressing zero-filled pages unnecessarily causes internal fragmentation and thus wastes memory. This special case can be optimized.
This patch captures zero-filled pages and marks their corresponding zcache backing entry as zero-filled. Whenever such a zero-filled page is retrieved, the page frame is simply filled with zeros instead of being decompressed.
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
---
drivers/staging/zcache/tmem.c | 4 +-
drivers/staging/zcache/tmem.h | 5 ++
drivers/staging/zcache/zcache-main.c | 87 ++++++++++++++++++++++++++++++----
3 files changed, 85 insertions(+), 11 deletions(-)
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index a2b7e03..62468ea 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -597,7 +597,9 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
if (unlikely(ret == -ENOMEM))
/* may have partially built objnode tree ("stump") */
goto delete_and_free;
- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
+ if (pampd != (void *)ZERO_FILLED)
+ (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
+
goto out;
delete_and_free:
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
index adbe5a8..6719dbd 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
@@ -204,6 +204,11 @@ struct tmem_handle {
uint16_t client_id;
};
+/*
+ * Mark the pampd with a special value so that a later
+ * retrieval can identify zero-filled pages.
+ */
+#define ZERO_FILLED 0x2
/* pampd abstract datatype methods provided by the PAM implementation */
struct tmem_pamops {
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index b71e033..ed5ef26 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -543,7 +543,23 @@ static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
{
void *pampd = NULL, *cdata = data;
unsigned clen = size;
+ bool zero_filled = false;
struct page *page = (struct page *)(data), *newpage;
+ char *user_mem;
+
+ user_mem = kmap_atomic(page);
+
+ /*
+ * Compressing zero-filled pages would waste memory and introduce
+ * serious fragmentation; skip compression to avoid the overhead.
+ */
+ if (page_zero_filled(user_mem)) {
+ kunmap_atomic(user_mem);
+ clen = 0;
+ zero_filled = true;
+ goto got_pampd;
+ }
+ kunmap_atomic(user_mem);
if (!raw) {
zcache_compress(page, &cdata, &clen);
@@ -592,6 +608,8 @@ got_pampd:
zcache_eph_zpages_max = zcache_eph_zpages;
if (ramster_enabled && raw)
ramster_count_foreign_pages(true, 1);
+ if (zero_filled)
+ pampd = (void *)ZERO_FILLED;
out:
return pampd;
}
@@ -600,15 +618,31 @@ static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
struct tmem_handle *th)
{
void *pampd = NULL, *cdata = data;
- unsigned clen = size;
+ unsigned clen = size, zero_filled = 0;
struct page *page = (struct page *)(data), *newpage;
unsigned long zbud_mean_zsize;
unsigned long curr_pers_zpages, total_zsize;
+ char *user_mem;
if (data == NULL) {
BUG_ON(!ramster_enabled);
goto create_pampd;
}
+
+ user_mem = kmap_atomic(page);
+
+ /*
+ * Compressing zero-filled pages would waste memory and introduce
+ * serious fragmentation; skip compression to avoid the overhead.
+ */
+ if (page_zero_filled(user_mem)) {
+ kunmap_atomic(user_mem);
+ clen = 0;
+ zero_filled = 1;
+ goto got_pampd;
+ }
+ kunmap_atomic(user_mem);
+
curr_pers_zpages = zcache_pers_zpages;
/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
if (!raw)
@@ -674,6 +708,8 @@ got_pampd:
zcache_pers_zbytes_max = zcache_pers_zbytes;
if (ramster_enabled && raw)
ramster_count_foreign_pages(false, 1);
+ if (zero_filled)
+ pampd = (void *)ZERO_FILLED;
out:
return pampd;
}
@@ -780,6 +816,14 @@ static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
BUG_ON(preemptible());
BUG_ON(eph); /* fix later if shared pools get implemented */
BUG_ON(pampd_is_remote(pampd));
+
+ if (pampd == (void *)ZERO_FILLED) {
+ handle_zero_page(data);
+ if (!raw)
+ *sizep = PAGE_SIZE;
+ return 0;
+ }
+
if (raw)
ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
sizep, eph);
@@ -800,13 +844,24 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
void *pampd, struct tmem_pool *pool,
struct tmem_oid *oid, uint32_t index)
{
- int ret;
- bool eph = !is_persistent(pool);
+ int ret = 0;
+ bool eph = !is_persistent(pool), zero_filled = false;
struct page *page = NULL;
unsigned int zsize, zpages;
BUG_ON(preemptible());
BUG_ON(pampd_is_remote(pampd));
+
+ if (pampd == (void *)ZERO_FILLED) {
+ handle_zero_page(data);
+ zero_filled = true;
+ zsize = 0;
+ zpages = 1;
+ if (!raw)
+ *sizep = PAGE_SIZE;
+ goto zero_fill;
+ }
+
if (raw)
ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
sizep, eph);
@@ -818,8 +873,9 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
}
page = zbud_free_and_delist((struct zbudref *)pampd, eph,
&zsize, &zpages);
+zero_fill:
if (eph) {
- if (page)
+ if (page || zero_filled)
zcache_eph_pageframes =
atomic_dec_return(&zcache_eph_pageframes_atomic);
zcache_eph_zpages =
@@ -827,7 +883,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
zcache_eph_zbytes =
atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
} else {
- if (page)
+ if (page || zero_filled)
zcache_pers_pageframes =
atomic_dec_return(&zcache_pers_pageframes_atomic);
zcache_pers_zpages =
@@ -837,7 +893,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
}
if (!is_local_client(pool->client))
ramster_count_foreign_pages(eph, -1);
- if (page)
+ if (page && !zero_filled)
zcache_free_page(page);
return ret;
}
@@ -851,18 +907,29 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
{
struct page *page = NULL;
unsigned int zsize, zpages;
+ bool zero_filled = false;
BUG_ON(preemptible());
- if (pampd_is_remote(pampd)) {
+
+ if (pampd == (void *)ZERO_FILLED) {
+ zero_filled = true;
+ zsize = 0;
+ zpages = 1;
+ }
+
+ if (pampd_is_remote(pampd) && !zero_filled) {
BUG_ON(!ramster_enabled);
pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
if (pampd == NULL)
return;
}
if (is_ephemeral(pool)) {
- page = zbud_free_and_delist((struct zbudref *)pampd,
+ if (!zero_filled)
+ page = zbud_free_and_delist((struct zbudref *)pampd,
true, &zsize, &zpages);
- if (page)
+ if (page || zero_filled)
zcache_eph_pageframes =
atomic_dec_return(&zcache_eph_pageframes_atomic);
zcache_eph_zpages =
@@ -883,7 +950,7 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
}
if (!is_local_client(pool->client))
ramster_count_foreign_pages(is_ephemeral(pool), -1);
- if (page)
+ if (page && !zero_filled)
zcache_free_page(page);
}
--
1.7.7.6
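
Condensed, the store/load flow added above behaves like the following hypothetical model (ZERO_FILLED is the sentinel from tmem.h; the *_model helpers are illustrative only and are not zcache APIs):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE   4096UL
#define ZERO_FILLED ((void *)0x2)	/* sentinel pampd value, as in tmem.h */

/* put path model: detect an all-zero page before compressing it */
static void *pampd_create_model(const unsigned long *page)
{
	size_t pos;

	for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++)
		if (page[pos])
			return NULL;	/* non-zero: would go on to compression */
	return ZERO_FILLED;		/* zero-filled: no zbud space consumed */
}

/* get path model: a ZERO_FILLED pampd is satisfied by memset() alone */
static bool pampd_get_model(void *pampd, unsigned long *page)
{
	if (pampd == ZERO_FILLED) {
		memset(page, 0, PAGE_SIZE);
		return true;
	}
	return false;			/* otherwise decompress from zbud */
}

int main(void)
{
	unsigned long frame[PAGE_SIZE / sizeof(unsigned long)] = { 0 };
	void *pampd = pampd_create_model(frame);

	frame[3] = 0x55;	/* frame gets reused/dirtied in between... */
	printf("stored as zero-filled: %d\n", pampd == ZERO_FILLED);
	printf("restored to zeros:     %d\n",
	       pampd_get_model(pampd, frame) && frame[3] == 0);
	return 0;
}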
* [PATCH 3/4] zcache: introduce zero-filled pages stat count
From: Wanpeng Li @ 2013-03-13 7:05 UTC
To: Andrew Morton
Cc: Greg Kroah-Hartman, Dan Magenheimer, Seth Jennings,
Konrad Rzeszutek Wilk, Minchan Kim, linux-mm, linux-kernel,
Wanpeng Li
Introduce zero-filled page statistics to monitor the number of
zero-filled pages.
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
---
drivers/staging/zcache/zcache-main.c | 7 +++++++
1 files changed, 7 insertions(+), 0 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index ed5ef26..dd52975 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -191,6 +191,7 @@ static ssize_t zcache_eph_nonactive_puts_ignored;
static ssize_t zcache_pers_nonactive_puts_ignored;
static ssize_t zcache_writtenback_pages;
static ssize_t zcache_outstanding_writeback_pages;
+static ssize_t zcache_pages_zero;
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -252,6 +253,7 @@ static int zcache_debugfs_init(void)
zdfs("outstanding_writeback_pages", S_IRUGO, root,
&zcache_outstanding_writeback_pages);
zdfs("writtenback_pages", S_IRUGO, root, &zcache_writtenback_pages);
+ zdfs("pages_zero", S_IRUGO, root, &zcache_pages_zero);
return 0;
}
#undef zdebugfs
@@ -321,6 +323,7 @@ void zcache_dump(void)
pr_info("zcache: outstanding_writeback_pages=%zd\n",
zcache_outstanding_writeback_pages);
pr_info("zcache: writtenback_pages=%zd\n", zcache_writtenback_pages);
+ pr_info("zcache: pages_zero=%zd\n", zcache_pages_zero);
}
#endif
@@ -557,6 +560,7 @@ static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
kunmap_atomic(user_mem);
clen = 0;
zero_filled = true;
+ zcache_pages_zero++;
goto got_pampd;
}
kunmap_atomic(user_mem);
@@ -639,6 +643,7 @@ static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
kunmap_atomic(user_mem);
clen = 0;
zero_filled = 1;
+ zcache_pages_zero++;
goto got_pampd;
}
kunmap_atomic(user_mem);
@@ -859,6 +864,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
zpages = 1;
if (!raw)
*sizep = PAGE_SIZE;
+ zcache_pages_zero--;
goto zero_fill;
}
@@ -915,6 +921,7 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
zero_filled = true;
zsize = 0;
zpages = 1;
+ zcache_pages_zero--;
}
if (pampd_is_remote(pampd) && !zero_filled) {
--
1.7.7.6
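
A hypothetical userspace reader for the new counter; the path assumes debugfs is mounted at /sys/kernel/debug and that the directory name matches the zcache debugfs root created in zcache_debugfs_init():

#include <stdio.h>

int main(void)
{
	/* Assumed path: <debugfs mount>/zcache/pages_zero. */
	const char *path = "/sys/kernel/debug/zcache/pages_zero";
	FILE *f = fopen(path, "r");
	long pages_zero;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%ld", &pages_zero) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("zero-filled pages currently in zcache: %ld\n", pages_zero);
	return 0;
}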
* [PATCH 4/4] zcache: add pageframes count once compress zero-filled pages twice
From: Wanpeng Li @ 2013-03-13 7:05 UTC
To: Andrew Morton
Cc: Greg Kroah-Hartman, Dan Magenheimer, Seth Jennings,
Konrad Rzeszutek Wilk, Minchan Kim, linux-mm, linux-kernel,
Wanpeng Li
Since a zbudpage consists of two zpages, compressing two zero-filled pages accumulates only one [eph|pers] pageframe count.
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
---
drivers/staging/zcache/zcache-main.c | 25 +++++++++++++++++++++++--
1 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index dd52975..7860ff0 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -544,6 +544,8 @@ static struct page *zcache_evict_eph_pageframe(void);
static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
struct tmem_handle *th)
{
+ static ssize_t second_eph_zero_page;
+ static atomic_t second_eph_zero_page_atomic = ATOMIC_INIT(0);
void *pampd = NULL, *cdata = data;
unsigned clen = size;
bool zero_filled = false;
@@ -561,7 +563,14 @@ static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
clen = 0;
zero_filled = true;
zcache_pages_zero++;
- goto got_pampd;
+ second_eph_zero_page = atomic_inc_return(
+ &second_eph_zero_page_atomic);
+ if (second_eph_zero_page % 2 == 1)
+ goto got_pampd;
+ else {
+ atomic_sub(2, &second_eph_zero_page_atomic);
+ goto count_zero_page;
+ }
}
kunmap_atomic(user_mem);
@@ -597,6 +606,7 @@ static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
create_in_new_page:
pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
BUG_ON(pampd == NULL);
+count_zero_page:
zcache_eph_pageframes =
atomic_inc_return(&zcache_eph_pageframes_atomic);
if (zcache_eph_pageframes > zcache_eph_pageframes_max)
@@ -621,6 +631,8 @@ out:
static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
struct tmem_handle *th)
{
+ static ssize_t second_pers_zero_page;
+ static atomic_t second_pers_zero_page_atomic = ATOMIC_INIT(0);
void *pampd = NULL, *cdata = data;
unsigned clen = size, zero_filled = 0;
struct page *page = (struct page *)(data), *newpage;
@@ -644,7 +656,15 @@ static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
clen = 0;
zero_filled = 1;
zcache_pages_zero++;
- goto got_pampd;
+ second_pers_zero_page = atomic_inc_return(
+ &second_pers_zero_page_atomic);
+ if (second_pers_zero_page % 2 == 1)
+ goto got_pampd;
+ else {
+ atomic_sub(2, &second_pers_zero_page_atomic);
+ goto count_zero_page;
+ }
}
kunmap_atomic(user_mem);
@@ -698,6 +718,7 @@ create_pampd:
create_in_new_page:
pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
BUG_ON(pampd == NULL);
+count_zero_page:
zcache_pers_pageframes =
atomic_inc_return(&zcache_pers_pageframes_atomic);
if (zcache_pers_pageframes > zcache_pers_pageframes_max)
--
1.7.7.6
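
The accounting rule this patch encodes can be modeled outside the kernel as below; this is only a sketch of the intended arithmetic (one pageframe charged per two zero-filled zpages), not the actual zcache code path:

#include <stdio.h>

/*
 * Model: a zbud pageframe holds two zpages, so charge one pageframe
 * for every second zero-filled page that is stored.
 */
struct zero_accounting {
	long zero_zpages;	/* mirrors zcache_pages_zero */
	long pageframes;	/* mirrors zcache_[eph|pers]_pageframes */
};

static void store_zero_page(struct zero_accounting *a)
{
	a->zero_zpages++;
	if (a->zero_zpages % 2 == 0)	/* every second zero page fills a frame */
		a->pageframes++;
}

int main(void)
{
	struct zero_accounting a = { 0, 0 };
	int i;

	for (i = 0; i < 5; i++)
		store_zero_page(&a);
	/* 5 zero-filled zpages -> 2 pageframes charged */
	printf("zpages=%ld pageframes=%ld\n", a.zero_zpages, a.pageframes);
	return 0;
}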