From: Nitin Gupta <ngupta@vflare.org>
To: Pekka Enberg <penberg@cs.helsinki.fi>,
Minchan Kim <minchan.kim@gmail.com>,
Andrew Morton <akpm@linux-foundation.org>,
Greg KH <greg@kroah.com>
Cc: Linux Driver Project <devel@linuxdriverproject.org>,
linux-mm <linux-mm@kvack.org>,
linux-kernel <linux-kernel@vger.kernel.org>
Subject: [PATCH 05/10] Reduce per table entry overhead by 4 bytes
Date: Mon, 9 Aug 2010 22:56:51 +0530
Message-ID: <1281374816-904-6-git-send-email-ngupta@vflare.org>
In-Reply-To: <1281374816-904-1-git-send-email-ngupta@vflare.org>
Each zram device maintains an array (table) that maps an index
within the device to the location of the corresponding compressed
chunk. Currently we store the 'struct page' pointer, offset within
the page and various flags separately, which takes 12 bytes per
table entry. Now all of these are encoded in a single 'phys_addr_t'
value, which results in a saving of 4 bytes per entry (except on
PAE systems).

Unfortunately, cleanups related to some variable renames got mixed
into this patch, so please bear with some additional noise.
Signed-off-by: Nitin Gupta <ngupta@vflare.org>
---
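To make the new layout concrete, below is a minimal userspace sketch
of the encoding (illustration only: the DEMO_* constants and demo_*
helpers are made-up stand-ins for PAGE_SHIFT/PAGE_MASK and for
zram_insert_obj()/zram_find_obj(); it is not the kernel code itself):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up stand-ins; the kernel gets these from <asm/page.h>. */
#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

/* Bit 0 of the encoded value marks a zero-filled page. */
#define DEMO_ZERO_PAGE_MARK_BIT	(1UL << 0)

/* Pack pfn and in-page offset into one word (cf. zram_insert_obj). */
static uint64_t demo_encode(uint64_t pfn, uint32_t offset)
{
	return (pfn << DEMO_PAGE_SHIFT) | (offset & ~DEMO_PAGE_MASK);
}

/* Unpack again (cf. zram_find_obj); 0 means nothing is stored. */
static void demo_decode(uint64_t addr, uint64_t *pfn, uint32_t *offset)
{
	*pfn = addr >> DEMO_PAGE_SHIFT;
	*offset = (uint32_t)(addr & ~DEMO_PAGE_MASK);
}

int main(void)
{
	uint64_t pfn;
	uint32_t offset;
	uint64_t addr = demo_encode(0x1234, 104);

	demo_decode(addr, &pfn, &offset);
	assert(pfn == 0x1234 && offset == 104);

	/* Zero-filled pages get no allocation; only the mark bit is set. */
	addr = DEMO_ZERO_PAGE_MARK_BIT;
	printf("zero page: %s\n",
	       (addr & DEMO_ZERO_PAGE_MARK_BIT) ? "yes" : "no");
	return 0;
}

For the arithmetic: on x86_64 the old entry is 12 bytes (8-byte page
pointer, u16 offset, u8 count, u8 flags, aligned(4)) versus 8 bytes
for a phys_addr_t; on 32-bit it is 8 versus 4; with PAE, phys_addr_t
is 8 bytes wide, hence no saving there. Note the zero-page mark can
share bit 0 of addr only while stored offsets keep that bit clear
(presumably guaranteed by xvmalloc's minimum object alignment), and
offset zero itself is reserved to mean the page is stored
uncompressed.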
drivers/staging/zram/zram_drv.c | 256 ++++++++++++++++++++------------------
drivers/staging/zram/zram_drv.h | 24 +---
2 files changed, 140 insertions(+), 140 deletions(-)
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index c16e09a..efe9c93 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -42,6 +42,13 @@ struct zram *devices;
/* Module params (documentation at end) */
unsigned int num_devices;
+/*
+ * We do not allocate any memory for zero-filled pages.
+ * Rather, we simply mark them in the corresponding table
+ * entry by setting this bit.
+ */
+#define ZRAM_ZERO_PAGE_MARK_BIT (1 << 0)
+
static void zram_add_stat(struct zram *zram,
enum zram_stats_index idx, s64 val)
{
@@ -65,37 +72,62 @@ static void zram_dec_stat(struct zram *zram, enum zram_stats_index idx)
zram_add_stat(zram, idx, -1);
}
-static int zram_test_flag(struct zram *zram, u32 index,
- enum zram_pageflags flag)
+static int page_zero_filled(void *ptr)
{
- return zram->table[index].flags & BIT(flag);
+ unsigned int pos;
+ unsigned long *page;
+
+ page = (unsigned long *)ptr;
+
+ for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
+ if (page[pos])
+ return 0;
+ }
+
+ return 1;
}
-static void zram_set_flag(struct zram *zram, u32 index,
- enum zram_pageflags flag)
+static int zram_is_zero_page(struct zram *zram, u32 index)
{
- zram->table[index].flags |= BIT(flag);
+ phys_addr_t addr = zram->table[index].addr;
+
+ return addr & ZRAM_ZERO_PAGE_MARK_BIT;
}
-static void zram_clear_flag(struct zram *zram, u32 index,
- enum zram_pageflags flag)
+static void zram_set_zero_page(struct zram *zram, u32 index)
{
- zram->table[index].flags &= ~BIT(flag);
+ zram->table[index].addr |= ZRAM_ZERO_PAGE_MARK_BIT;
}
-static int page_zero_filled(void *ptr)
+static void zram_clear_zero_page(struct zram *zram, u32 index)
{
- unsigned int pos;
- unsigned long *page;
+ zram->table[index].addr &= ~ZRAM_ZERO_PAGE_MARK_BIT;
+}
- page = (unsigned long *)ptr;
+static void zram_find_obj(struct zram *zram, u32 index, struct page **page,
+ u32 *offset)
+{
+ phys_addr_t addr = zram->table[index].addr;
- for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
- if (page[pos])
- return 0;
+ if (!addr) {
+ *page = NULL;
+ *offset = 0;
+ return;
}
- return 1;
+ *page = pfn_to_page(addr >> PAGE_SHIFT);
+ *offset = addr & ~PAGE_MASK;
+}
+
+static void zram_insert_obj(struct zram *zram, u32 index, struct page *page,
+ u32 offset)
+{
+ phys_addr_t addr;
+
+ addr = page_to_pfn(page) << PAGE_SHIFT;
+ addr |= (offset & ~PAGE_MASK);
+
+ zram->table[index].addr = addr;
}
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
@@ -129,44 +161,44 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
static void zram_free_page(struct zram *zram, size_t index)
{
- int clen;
+ int zlen;
void *obj;
+ u32 offset;
+ struct page *page;
- struct page *page = zram->table[index].page;
- u32 offset = zram->table[index].offset;
-
- if (unlikely(!page)) {
- /*
- * No memory is allocated for zero filled pages.
- * Simply clear zero page flag.
- */
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- zram_clear_flag(zram, index, ZRAM_ZERO);
- zram_dec_stat(zram, ZRAM_STAT_PAGES_ZERO);
- }
+ /*
+ * No memory is allocated for zero filled pages.
+ * Simply clear corresponding table entry.
+ */
+ if (zram_is_zero_page(zram, index)) {
+ zram_clear_zero_page(zram, index);
+ zram_dec_stat(zram, ZRAM_STAT_PAGES_ZERO);
return;
}
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- clen = PAGE_SIZE;
+ zram_find_obj(zram, index, &page, &offset);
+ if (!page)
+ return;
+
+ /* Uncompressed pages consume a whole page, so offset is zero */
+ if (unlikely(!offset)) {
+ zlen = PAGE_SIZE;
__free_page(page);
- zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_dec_stat(zram, ZRAM_STAT_PAGES_EXPAND);
goto out;
}
obj = kmap_atomic(page, KM_USER0) + offset;
- clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
+ zlen = xv_get_object_size(obj);
kunmap_atomic(obj, KM_USER0);
xv_free(zram->mem_pool, page, offset);
out:
- zram_add_stat(zram, ZRAM_STAT_COMPR_SIZE, -clen);
+ zram_add_stat(zram, ZRAM_STAT_COMPR_SIZE, -zlen);
zram_dec_stat(zram, ZRAM_STAT_PAGES_STORED);
- zram->table[index].page = NULL;
- zram->table[index].offset = 0;
+ zram->table[index].addr = 0;
}
static void handle_zero_page(struct page *page)
@@ -181,24 +213,27 @@ static void handle_zero_page(struct page *page)
}
static void handle_uncompressed_page(struct zram *zram,
- struct page *page, u32 index)
+ struct page *bio_page, u32 index)
{
- unsigned char *user_mem, *cmem;
+ u32 zoffset;
+ struct page *zpage;
+ unsigned char *bio_mem, *zmem;
- user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ zram_find_obj(zram, index, &zpage, &zoffset);
+ BUG_ON(zoffset);
- memcpy(user_mem, cmem, PAGE_SIZE);
- kunmap_atomic(user_mem, KM_USER0);
- kunmap_atomic(cmem, KM_USER1);
+ bio_mem = kmap_atomic(bio_page, KM_USER0);
+ zmem = kmap_atomic(zpage, KM_USER1);
- flush_dcache_page(page);
+ memcpy(bio_mem, zmem, PAGE_SIZE);
+ kunmap_atomic(bio_mem, KM_USER0);
+ kunmap_atomic(zmem, KM_USER1);
+
+ flush_dcache_page(bio_page);
}
static int zram_read(struct zram *zram, struct bio *bio)
{
-
int i;
u32 index;
struct bio_vec *bvec;
@@ -214,54 +249,54 @@ static int zram_read(struct zram *zram, struct bio *bio)
bio_for_each_segment(bvec, bio, i) {
int ret;
- size_t clen;
- struct page *page;
- struct zobj_header *zheader;
- unsigned char *user_mem, *cmem;
+ size_t zlen;
+ u32 zoffset;
+ struct page *bio_page, *zpage;
+ unsigned char *bio_mem, *zmem;
- page = bvec->bv_page;
+ bio_page = bvec->bv_page;
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- handle_zero_page(page);
+ if (zram_is_zero_page(zram, index)) {
+ handle_zero_page(bio_page);
continue;
}
+ zram_find_obj(zram, index, &zpage, &zoffset);
+
/* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].page)) {
- pr_debug("Read before write: sector=%lu, size=%u",
+ if (unlikely(!zpage)) {
+ pr_debug("Read before write on swap device: "
+ "sector=%lu, size=%u",
(ulong)(bio->bi_sector), bio->bi_size);
/* Do nothing */
continue;
}
/* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- handle_uncompressed_page(zram, page, index);
+ if (unlikely(!zoffset)) {
+ handle_uncompressed_page(zram, bio_page, index);
continue;
}
- user_mem = kmap_atomic(page, KM_USER0);
- clen = PAGE_SIZE;
+ bio_mem = kmap_atomic(bio_page, KM_USER0);
+ zlen = PAGE_SIZE;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ zmem = kmap_atomic(zpage, KM_USER1) + zoffset;
- ret = lzo1x_decompress_safe(
- cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
- user_mem, &clen);
+ ret = lzo1x_decompress_safe(zmem, xv_get_object_size(zmem),
+ bio_mem, &zlen);
- kunmap_atomic(user_mem, KM_USER0);
- kunmap_atomic(cmem, KM_USER1);
+ kunmap_atomic(bio_mem, KM_USER0);
+ kunmap_atomic(zmem, KM_USER1);
- /* Should NEVER happen. Return bio error if it does. */
+ /* This should NEVER happen - return bio error if it does! */
if (unlikely(ret != LZO_E_OK)) {
pr_err("Decompression failed! err=%d, page=%u\n",
ret, index);
goto out;
}
- flush_dcache_page(page);
+ flush_dcache_page(bio_page);
index++;
}
@@ -290,22 +325,19 @@ static int zram_write(struct zram *zram, struct bio *bio)
index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
bio_for_each_segment(bvec, bio, i) {
- u32 offset;
- size_t clen;
- struct zobj_header *zheader;
- struct page *page, *page_store;
+ size_t zlen;
+ u32 zoffset;
+ struct page *bio_page, *zpage;
unsigned char *zbuffer, *zworkmem;
- unsigned char *user_mem, *cmem, *src;
+ unsigned char *bio_mem, *zmem, *src;
- page = bvec->bv_page;
+ bio_page = bvec->bv_page;
/*
* System overwrites unused sectors. Free memory associated
- * with this sector now.
+ * with this sector now (if used).
*/
- if (zram->table[index].page ||
- zram_test_flag(zram, index, ZRAM_ZERO))
- zram_free_page(zram, index);
+ zram_free_page(zram, index);
preempt_disable();
zbuffer = __get_cpu_var(compress_buffer);
@@ -316,19 +348,19 @@ static int zram_write(struct zram *zram, struct bio *bio)
}
src = zbuffer;
- user_mem = kmap_atomic(page, KM_USER0);
- if (page_zero_filled(user_mem)) {
- kunmap_atomic(user_mem, KM_USER0);
+ bio_mem = kmap_atomic(bio_page, KM_USER0);
+ if (page_zero_filled(bio_mem)) {
+ kunmap_atomic(bio_mem, KM_USER0);
preempt_enable();
zram_inc_stat(zram, ZRAM_STAT_PAGES_ZERO);
- zram_set_flag(zram, index, ZRAM_ZERO);
+ zram_set_zero_page(zram, index);
continue;
}
- ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
+ ret = lzo1x_1_compress(bio_mem, PAGE_SIZE, src, &zlen,
zworkmem);
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(bio_mem, KM_USER0);
if (unlikely(ret != LZO_E_OK)) {
preempt_enable();
@@ -337,50 +369,45 @@ static int zram_write(struct zram *zram, struct bio *bio)
}
/* Page is incompressible. Store it as-is (uncompressed) */
- if (unlikely(clen > max_zpage_size)) {
- clen = PAGE_SIZE;
- page_store = alloc_page(GFP_NOWAIT | __GFP_HIGHMEM);
- if (unlikely(!page_store)) {
+ if (unlikely(zlen > max_zpage_size)) {
+ zlen = PAGE_SIZE;
+ zpage = alloc_page(GFP_NOWAIT | __GFP_HIGHMEM);
+ if (unlikely(!zpage)) {
preempt_enable();
pr_info("Error allocating memory for "
"incompressible page: %u\n", index);
goto out;
}
- offset = 0;
- zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
+ zoffset = 0;
zram_inc_stat(zram, ZRAM_STAT_PAGES_EXPAND);
- zram->table[index].page = page_store;
- src = kmap_atomic(page, KM_USER0);
+ src = kmap_atomic(bio_page, KM_USER0);
goto memstore;
}
- if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
- &zram->table[index].page, &offset,
+ if (xv_malloc(zram->mem_pool, zlen, &zpage, &zoffset,
GFP_NOWAIT | __GFP_HIGHMEM)) {
preempt_enable();
pr_info("Error allocating memory for compressed "
- "page: %u, size=%zu\n", index, clen);
+ "page: %u, size=%zu\n", index, zlen);
goto out;
}
memstore:
- zram->table[index].offset = offset;
+ zmem = kmap_atomic(zpage, KM_USER1) + zoffset;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
-
- memcpy(cmem, src, clen);
- kunmap_atomic(cmem, KM_USER1);
+ memcpy(zmem, src, zlen);
+ kunmap_atomic(zmem, KM_USER1);
preempt_enable();
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+ if (unlikely(!zoffset))
kunmap_atomic(src, KM_USER0);
/* Update stats */
- zram_add_stat(zram, ZRAM_STAT_COMPR_SIZE, clen);
+ zram_add_stat(zram, ZRAM_STAT_COMPR_SIZE, zlen);
zram_inc_stat(zram, ZRAM_STAT_PAGES_STORED);
+ zram_insert_obj(zram, index, zpage, zoffset);
index++;
}
@@ -445,21 +472,8 @@ void zram_reset_device(struct zram *zram)
zram->init_done = 0;
/* Free all pages that are still in this zram device */
- for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- struct page *page;
- u16 offset;
-
- page = zram->table[index].page;
- offset = zram->table[index].offset;
-
- if (!page)
- continue;
-
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- __free_page(page);
- else
- xv_free(zram->mem_pool, page, offset);
- }
+ for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++)
+ zram_free_page(zram, index);
vfree(zram->table);
zram->table = NULL;
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 21c97f6..65e512d 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -62,26 +62,12 @@ static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3;
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
-/* Flags for zram pages (table[page_no].flags) */
-enum zram_pageflags {
- /* Page is stored uncompressed */
- ZRAM_UNCOMPRESSED,
-
- /* Page consists entirely of zeros */
- ZRAM_ZERO,
-
- __NR_ZRAM_PAGEFLAGS,
-};
-
-/*-- Data structures */
-
-/* Allocated for each disk page */
+/*
+ * Maintains swap slot to compressed object mapping.
+ */
struct table {
- struct page *page;
- u16 offset;
- u8 count; /* object ref count (not yet used) */
- u8 flags;
-} __attribute__((aligned(4)));
+ phys_addr_t addr; /* location of [compressed] object */
+};
enum zram_stats_index {
ZRAM_STAT_COMPR_SIZE, /* compressed size of pages stored */
--
1.7.2.1