linux-mm.kvack.org archive mirror
* frontswap/cleancache: allow backends to register after init
@ 2012-03-14 17:13 Andor Daam
  2012-03-14 17:13 ` [PATCH 1/2] frontswap: allow backends to register after frontswap initialization Andor Daam
  2012-03-14 17:13 ` [PATCH 2/2] cleancache: allow backends to register after cleancache initialization Andor Daam
  0 siblings, 2 replies; 5+ messages in thread
From: Andor Daam @ 2012-03-14 17:13 UTC
  To: linux-mm
  Cc: dan.magenheimer, sjenning, ilendir, konrad.wilk, fschmaus,
	i4passt, ngupta

These two patches allow backends to register with frontswap and
cleancache after initialization, i.e. after swapon has been run or
filesystems have been mounted, respectively. This should be a first
step toward allowing backends like zcache to be loaded at runtime
via insmod.
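
For illustration, here is a rough sketch of what a late-loading
frontswap backend module could look like once these patches are in.
The "demo" names and the header path are made up for this example;
only frontswap_register_ops() and the ops callbacks are taken from
the patch below:

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/frontswap.h>	/* assumed location of struct frontswap_ops */

/* Hypothetical no-op callbacks, for illustration only. */
static void demo_init(unsigned type)
{
	/* set up per-swap-device state here */
}

static int demo_put_page(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* decline to store anything */
}

static int demo_get_page(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing was stored, nothing is found */
}

static void demo_invalidate_page(unsigned type, pgoff_t offset)
{
}

static void demo_invalidate_area(unsigned type)
{
}

static struct frontswap_ops demo_ops = {
	.init = demo_init,
	.put_page = demo_put_page,
	.get_page = demo_get_page,
	.invalidate_page = demo_invalidate_page,
	.invalidate_area = demo_invalidate_area,
};

static int __init demo_backend_init(void)
{
	/*
	 * With patch 1/2 applied this can run long after swapon:
	 * frontswap replays the recorded init calls for us.
	 */
	frontswap_register_ops(&demo_ops);
	return 0;
}
module_init(demo_backend_init);
MODULE_LICENSE("GPL");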



* [PATCH 1/2] frontswap: allow backends to register after frontswap initialization
  2012-03-14 17:13 frontswap/cleancache: allow backends to register after init Andor Daam
@ 2012-03-14 17:13 ` Andor Daam
  2012-03-15 18:14   ` Konrad Rzeszutek Wilk
  2012-03-14 17:13 ` [PATCH 2/2] cleancache: allow backends to register after cleancache initialization Andor Daam
  1 sibling, 1 reply; 5+ messages in thread
From: Andor Daam @ 2012-03-14 17:13 UTC
  To: linux-mm
  Cc: dan.magenheimer, sjenning, ilendir, konrad.wilk, fschmaus,
	i4passt, ngupta, Andor Daam

This patch allows backends to register with frontswap even after
swapon has been run. Until a backend registers, all calls to init
are recorded and the creation of tmem_pools is deferred; when a
backend registers, the recorded init calls are replayed.
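
As an illustration, the intended sequence of events (device and
module names are only examples):

  swapon /dev/sdb1     -> __frontswap_init(type):
                            sds[i] = type   (recorded, no tmem_pool yet)
  modprobe zcache      -> frontswap_register_ops():
                            (*frontswap_ops.init)(sds[i]) is replayed
                            for every recorded swap device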

Signed-off-by: Stefan Hengelein <ilendir@googlemail.com>
Signed-off-by: Florian Schmaus <fschmaus@gmail.com>
Signed-off-by: Andor Daam <andor.daam@googlemail.com>
---
 mm/frontswap.c |   72 +++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 63 insertions(+), 9 deletions(-)

diff --git a/mm/frontswap.c b/mm/frontswap.c
index 2b80c5a..209487b 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -49,15 +49,33 @@ static u64 frontswap_failed_puts;
 static u64 frontswap_invalidates;
 
 /*
+ * When no backend is registered, all calls to init are recorded but no
+ * tmem_pools are created. When a backend registers with frontswap, the
+ * recorded init calls are replayed to create tmem_pools and set the
+ * respective poolids.
+ * While no backend is registered, all "puts", "gets" and "flushes" are
+ * ignored or fail.
+ */
+#define MAX_INITIALIZABLE_SD 32
+static int sds[MAX_INITIALIZABLE_SD];
+static int backend_registered;
+
+/*
  * Register operations for frontswap, returning previous thus allowing
  * detection of multiple backends and possible nesting
  */
 struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
 {
 	struct frontswap_ops old = frontswap_ops;
+	int i;
 
 	frontswap_ops = *ops;
-	frontswap_enabled = 1;
+
+	backend_registered = 1;
+	for (i = 0; i < MAX_INITIALIZABLE_SD; i++) {
+		if (sds[i] != -1)
+			(*frontswap_ops.init)(sds[i]);
+	}
 	return old;
 }
 EXPORT_SYMBOL(frontswap_register_ops);
@@ -66,12 +84,21 @@ EXPORT_SYMBOL(frontswap_register_ops);
 void __frontswap_init(unsigned type)
 {
 	struct swap_info_struct *sis = swap_info[type];
+	int i;
 
 	BUG_ON(sis == NULL);
 	if (sis->frontswap_map == NULL)
 		return;
-	if (frontswap_enabled)
-		(*frontswap_ops.init)(type);
+	if (frontswap_enabled) {
+		if (backend_registered)
+			(*frontswap_ops.init)(type);
+		for (i = 0; i < MAX_INITIALIZABLE_SD; i++) {
+			if (sds[i] == -1) {
+				sds[i] = type;
+				break;
+			}
+		}
+	}
 }
 EXPORT_SYMBOL(__frontswap_init);
 
@@ -90,6 +117,11 @@ int __frontswap_put_page(struct page *page)
 	struct swap_info_struct *sis = swap_info[type];
 	pgoff_t offset = swp_offset(entry);
 
+	if (!backend_registered) {
+		frontswap_failed_puts++;
+		return ret;
+	}
+
 	BUG_ON(!PageLocked(page));
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset))
@@ -127,12 +159,16 @@ int __frontswap_get_page(struct page *page)
 	struct swap_info_struct *sis = swap_info[type];
 	pgoff_t offset = swp_offset(entry);
 
+	if (!backend_registered)
+		return ret;
+
 	BUG_ON(!PageLocked(page));
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset))
 		ret = (*frontswap_ops.get_page)(type, offset, page);
 	if (ret == 0)
 		frontswap_gets++;
+
 	return ret;
 }
 EXPORT_SYMBOL(__frontswap_get_page);
@@ -145,6 +181,9 @@ void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
 {
 	struct swap_info_struct *sis = swap_info[type];
 
+	if (!backend_registered)
+		return;
+
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset)) {
 		(*frontswap_ops.invalidate_page)(type, offset);
@@ -162,13 +201,23 @@ EXPORT_SYMBOL(__frontswap_invalidate_page);
 void __frontswap_invalidate_area(unsigned type)
 {
 	struct swap_info_struct *sis = swap_info[type];
+	int i;
 
-	BUG_ON(sis == NULL);
-	if (sis->frontswap_map == NULL)
-		return;
-	(*frontswap_ops.invalidate_area)(type);
-	atomic_set(&sis->frontswap_pages, 0);
-	memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+	if (backend_registered) {
+		BUG_ON(sis == NULL);
+		if (sis->frontswap_map == NULL)
+			return;
+		(*frontswap_ops.invalidate_area)(type);
+		atomic_set(&sis->frontswap_pages, 0);
+		memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+	} else {
+		for (i = 0; i < MAX_INITIALIZABLE_SD; i++) {
+			if (sds[i] == type) {
+				sds[i] = -1;
+				break;
+			}
+		}
+	}
 }
 EXPORT_SYMBOL(__frontswap_invalidate_area);
 
@@ -255,6 +304,7 @@ EXPORT_SYMBOL(frontswap_curr_pages);
 static int __init init_frontswap(void)
 {
 	int err = 0;
+	int i;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *root = debugfs_create_dir("frontswap", NULL);
@@ -267,6 +317,10 @@ static int __init init_frontswap(void)
 	debugfs_create_u64("invalidates", S_IRUGO,
 				root, &frontswap_invalidates);
 #endif
+	for (i = 0; i < MAX_INITIALIZABLE_SD; i++)
+		sds[i] = -1;
+
+	frontswap_enabled = 1;
 	return err;
 }
 
-- 
1.7.5.4



* [PATCH 2/2] cleancache: allow backends to register after cleancache initialization
  2012-03-14 17:13 frontswap/cleancache: allow backends to register after init Andor Daam
  2012-03-14 17:13 ` [PATCH 1/2] frontswap: allow backends to register after frontswap initialization Andor Daam
@ 2012-03-14 17:13 ` Andor Daam
  2012-03-15 18:20   ` Konrad Rzeszutek Wilk
  1 sibling, 1 reply; 5+ messages in thread
From: Andor Daam @ 2012-03-14 17:13 UTC
  To: linux-mm
  Cc: dan.magenheimer, sjenning, ilendir, konrad.wilk, fschmaus,
	i4passt, ngupta, Andor Daam

This patch allows backends to register with cleancache even after
filesystems have been mounted. Calls to init_fs and init_shared_fs
are remembered: fake poolids are handed out, but no real tmem_pools
are created. On backend registration the fake poolids are mapped to
real poolids and their respective tmem_pools.
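
As an illustration, the intended mapping (index and poolid values are
only examples):

  mount /dev/sda1      -> __cleancache_init_fs():
                            sb->cleancache_poolid = 1000 + i  (fake)
                            fs_poolid_map[i] = -1             (pending)
  backend registers    -> cleancache_register_ops():
                            fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE)
  later get/put/flush  -> get_poolid_from_fake(1000 + i) returns
                            fs_poolid_map[i], the real poolid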

Signed-off-by: Stefan Hengelein <ilendir@googlemail.com>
Signed-off-by: Florian Schmaus <fschmaus@gmail.com>
Signed-off-by: Andor Daam <andor.daam@googlemail.com>
---
 mm/cleancache.c |  157 +++++++++++++++++++++++++++++++++++++++++++++++++------
 1 files changed, 140 insertions(+), 17 deletions(-)

diff --git a/mm/cleancache.c b/mm/cleancache.c
index 5646c74..9f504b6 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -45,15 +45,42 @@ static u64 cleancache_puts;
 static u64 cleancache_invalidates;
 
 /*
+ * When no backend is registered, all calls to init_fs and init_shared_fs
+ * are recorded and fake poolids are given to the respective
+ * super block, but no tmem_pools are created. When a backend
+ * registers with cleancache, the recorded calls to init_fs and
+ * init_shared_fs are replayed to create tmem_pools and set the
+ * respective poolids. While no backend is registered, all "puts",
+ * "gets" and "flushes" are ignored or fail.
+ */
+#define MAX_INITIALIZABLE_FS 32
+#define FAKE_FS_POOLID_OFFSET 1000
+#define FAKE_SHARED_FS_POOLID_OFFSET 2000
+static int fs_poolid_map[MAX_INITIALIZABLE_FS];
+static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
+static char *uuids[MAX_INITIALIZABLE_FS];
+static int backend_registered;
+
+/*
  * register operations for cleancache, returning previous thus allowing
  * detection of multiple backends and possible nesting
  */
 struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
 {
 	struct cleancache_ops old = cleancache_ops;
+	int i;
 
 	cleancache_ops = *ops;
-	cleancache_enabled = 1;
+
+	backend_registered = 1;
+	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+		if (fs_poolid_map[i] == -1)
+			fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE);
+		if (shared_fs_poolid_map[i] == -1)
+			shared_fs_poolid_map[i] =
+				(*cleancache_ops.init_shared_fs)
+					(uuids[i], PAGE_SIZE);
+	}
 	return old;
 }
 EXPORT_SYMBOL(cleancache_register_ops);
@@ -61,15 +88,42 @@ EXPORT_SYMBOL(cleancache_register_ops);
 /* Called by a cleancache-enabled filesystem at time of mount */
 void __cleancache_init_fs(struct super_block *sb)
 {
-	sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
+	int i;
+
+	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+		if (fs_poolid_map[i] == -2) {
+			sb->cleancache_poolid =
+				i + FAKE_FS_POOLID_OFFSET;
+			if (backend_registered)
+				fs_poolid_map[i] =
+					(*cleancache_ops.init_fs)(PAGE_SIZE);
+			else
+				fs_poolid_map[i] = -1;
+			break;
+		}
+	}
 }
 EXPORT_SYMBOL(__cleancache_init_fs);
 
 /* Called by a cleancache-enabled clustered filesystem at time of mount */
 void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
 {
-	sb->cleancache_poolid =
-		(*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
+	int i;
+
+	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+		if (shared_fs_poolid_map[i] == -2) {
+			sb->cleancache_poolid =
+				i + FAKE_SHARED_FS_POOLID_OFFSET;
+			uuids[i] = uuid;
+			if (backend_registered)
+				shared_fs_poolid_map[i] =
+					(*cleancache_ops.init_shared_fs)
+						(uuid, PAGE_SIZE);
+			else
+				shared_fs_poolid_map[i] = -1;
+			break;
+		}
+	}
 }
 EXPORT_SYMBOL(__cleancache_init_shared_fs);
 
@@ -101,6 +155,19 @@ static int cleancache_get_key(struct inode *inode,
 }
 
 /*
+ * Returns a pool_id that is associated with a given fake poolid.
+ */
+static int get_poolid_from_fake(int fake_pool_id)
+{
+	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
+		return shared_fs_poolid_map[fake_pool_id -
+			FAKE_SHARED_FS_POOLID_OFFSET];
+	else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
+		return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
+	return -1;
+}
+
+/*
  * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
  * successful, use it to fill the specified page with data and return 0.
@@ -111,17 +178,26 @@ int __cleancache_get_page(struct page *page)
 {
 	int ret = -1;
 	int pool_id;
+	int fake_pool_id;
 	struct cleancache_filekey key = { .u.key = { 0 } };
 
+	if (!backend_registered) {
+		cleancache_failed_gets++;
+		goto out;
+	}
+
 	VM_BUG_ON(!PageLocked(page));
-	pool_id = page->mapping->host->i_sb->cleancache_poolid;
-	if (pool_id < 0)
+	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
+	if (fake_pool_id < 0)
 		goto out;
+	pool_id = get_poolid_from_fake(fake_pool_id);
 
 	if (cleancache_get_key(page->mapping->host, &key) < 0)
 		goto out;
 
-	ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
+	if (pool_id >= 0)
+		ret = (*cleancache_ops.get_page)(pool_id,
+				key, page->index, page);
 	if (ret == 0)
 		cleancache_succ_gets++;
 	else
@@ -140,12 +216,23 @@ EXPORT_SYMBOL(__cleancache_get_page);
 void __cleancache_put_page(struct page *page)
 {
 	int pool_id;
+	int fake_pool_id;
 	struct cleancache_filekey key = { .u.key = { 0 } };
 
+	if (!backend_registered) {
+		cleancache_puts++;
+		return;
+	}
+
 	VM_BUG_ON(!PageLocked(page));
-	pool_id = page->mapping->host->i_sb->cleancache_poolid;
+	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
+	if (fake_pool_id < 0)
+		return;
+
+	pool_id = get_poolid_from_fake(fake_pool_id);
+
 	if (pool_id >= 0 &&
-	      cleancache_get_key(page->mapping->host, &key) >= 0) {
+		cleancache_get_key(page->mapping->host, &key) >= 0) {
 		(*cleancache_ops.put_page)(pool_id, key, page->index, page);
 		cleancache_puts++;
 	}
@@ -160,14 +247,22 @@ void __cleancache_invalidate_page(struct address_space *mapping,
 					struct page *page)
 {
 	/* careful... page->mapping is NULL sometimes when this is called */
-	int pool_id = mapping->host->i_sb->cleancache_poolid;
+	int pool_id;
+	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
 	struct cleancache_filekey key = { .u.key = { 0 } };
 
-	if (pool_id >= 0) {
+	if (!backend_registered)
+		return;
+
+	if (fake_pool_id >= 0) {
+		pool_id = get_poolid_from_fake(fake_pool_id);
+		if (pool_id < 0)
+			return;
+
 		VM_BUG_ON(!PageLocked(page));
 		if (cleancache_get_key(mapping->host, &key) >= 0) {
 			(*cleancache_ops.invalidate_page)(pool_id,
-							  key, page->index);
+					key, page->index);
 			cleancache_invalidates++;
 		}
 	}
@@ -181,9 +276,18 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
  */
 void __cleancache_invalidate_inode(struct address_space *mapping)
 {
-	int pool_id = mapping->host->i_sb->cleancache_poolid;
+	int pool_id;
+	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
 	struct cleancache_filekey key = { .u.key = { 0 } };
 
+	if (!backend_registered)
+		return;
+
+	if (fake_pool_id < 0)
+		return;
+
+	pool_id = get_poolid_from_fake(fake_pool_id);
+
 	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
 		(*cleancache_ops.invalidate_inode)(pool_id, key);
 }
@@ -196,16 +300,30 @@ EXPORT_SYMBOL(__cleancache_invalidate_inode);
  */
 void __cleancache_invalidate_fs(struct super_block *sb)
 {
-	if (sb->cleancache_poolid >= 0) {
-		int old_poolid = sb->cleancache_poolid;
-		sb->cleancache_poolid = -1;
-		(*cleancache_ops.invalidate_fs)(old_poolid);
+	int old_poolid;
+	int index;
+	int fake_pool_id = sb->cleancache_poolid;
+
+	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
+		index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
+		old_poolid = shared_fs_poolid_map[index];
+		shared_fs_poolid_map[index] = -2;
+		uuids[index] = NULL;
+	} else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
+		index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
+		old_poolid = fs_poolid_map[index];
+		fs_poolid_map[index] = -2;
 	}
+	sb->cleancache_poolid = -1;
+	if (backend_registered)
+		(*cleancache_ops.invalidate_fs)(old_poolid);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_fs);
 
 static int __init init_cleancache(void)
 {
+	int i;
+
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *root = debugfs_create_dir("cleancache", NULL);
 	if (root == NULL)
@@ -217,6 +335,11 @@ static int __init init_cleancache(void)
 	debugfs_create_u64("invalidates", S_IRUGO,
 				root, &cleancache_invalidates);
 #endif
+	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+		fs_poolid_map[i] = -2;
+		shared_fs_poolid_map[i] = -2;
+	}
+	cleancache_enabled = 1;
 	return 0;
 }
 module_init(init_cleancache)
-- 
1.7.5.4



* Re: [PATCH 1/2] frontswap: allow backends to register after frontswap initialization
  2012-03-14 17:13 ` [PATCH 1/2] frontswap: allow backends to register after frontswap initialization Andor Daam
@ 2012-03-15 18:14   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 5+ messages in thread
From: Konrad Rzeszutek Wilk @ 2012-03-15 18:14 UTC
  To: Andor Daam
  Cc: linux-mm, dan.magenheimer, sjenning, ilendir, fschmaus, i4passt,
	ngupta

On Wed, Mar 14, 2012 at 06:13:27PM +0100, Andor Daam wrote:
> This patch allows backends to register with frontswap even after
> swapon has been run. Until a backend registers, all calls to init
> are recorded and the creation of tmem_pools is deferred; when a
> backend registers, the recorded init calls are replayed.

Couple of questions..
> 
> Signed-off-by: Stefan Hengelein <ilendir@googlemail.com>
> Signed-off-by: Florian Schmaus <fschmaus@gmail.com>
> Signed-off-by: Andor Daam <andor.daam@googlemail.com>
> ---
>  mm/frontswap.c |   72 +++++++++++++++++++++++++++++++++++++++++++++++++-------
>  1 files changed, 63 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/frontswap.c b/mm/frontswap.c
> index 2b80c5a..209487b 100644
> --- a/mm/frontswap.c
> +++ b/mm/frontswap.c
> @@ -49,15 +49,33 @@ static u64 frontswap_failed_puts;
>  static u64 frontswap_invalidates;
>  
>  /*
> + * When no backend is registered, all calls to init are recorded but no
> + * tmem_pools are created. When a backend registers with frontswap, the
> + * recorded init calls are replayed to create tmem_pools and set the
> + * respective poolids.
> + * While no backend is registered, all "puts", "gets" and "flushes" are
> + * ignored or fail.
> + */
> +#define MAX_INITIALIZABLE_SD 32

Why 32? Would 42 work better :-) What does 'SD' stand for?


> +static int sds[MAX_INITIALIZABLE_SD];
> +static int backend_registered;

bool.
> +
> +/*
>   * Register operations for frontswap, returning previous thus allowing
>   * detection of multiple backends and possible nesting
>   */
>  struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
>  {
>  	struct frontswap_ops old = frontswap_ops;
> +	int i;

unsigned int.

>  
>  	frontswap_ops = *ops;
> -	frontswap_enabled = 1;
> +
> +	backend_registered = 1;
> +	for (i = 0; i < MAX_INITIALIZABLE_SD; i++) {
> +		if (sds[i] != -1)

Hm, why not have sds be an array of bool's instead?
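
Something along these lines, say (just a sketch; MAX_SWAPFILES is the
existing kernel limit, instead of a private MAX_INITIALIZABLE_SD):

	static bool swap_inited[MAX_SWAPFILES];

	/* in __frontswap_init(): */
	swap_inited[type] = true;

	/* in frontswap_register_ops(): */
	for (i = 0; i < MAX_SWAPFILES; i++)
		if (swap_inited[i])
			(*frontswap_ops.init)(i);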

> +			(*frontswap_ops.init)(sds[i]);
> +	}
>  	return old;
>  }
>  EXPORT_SYMBOL(frontswap_register_ops);
> @@ -66,12 +84,21 @@ EXPORT_SYMBOL(frontswap_register_ops);
>  void __frontswap_init(unsigned type)
>  {
>  	struct swap_info_struct *sis = swap_info[type];
> +	int i;
>  
>  	BUG_ON(sis == NULL);
>  	if (sis->frontswap_map == NULL)
>  		return;
> -	if (frontswap_enabled)
> -		(*frontswap_ops.init)(type);
> +	if (frontswap_enabled) {
> +		if (backend_registered)
> +			(*frontswap_ops.init)(type);
> +		for (i = 0; i < MAX_INITIALIZABLE_SD; i++) {
> +			if (sds[i] == -1) {
> +				sds[i] = type;

Ok, so you store an 'unsigned' in an 'int' array. If the unsigned value
ends up being UINT_MAX, that translates to -1 in int, so we would wind up
thinking that slot is unregistered.
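
That is, roughly:

	unsigned type = UINT_MAX;	/* 4294967295 */
	int slot = type;		/* slot == -1: looks like a free slot */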

> +				break;
> +			}
> +		}
> +	}
>  }
>  EXPORT_SYMBOL(__frontswap_init);
>  
> @@ -90,6 +117,11 @@ int __frontswap_put_page(struct page *page)
>  	struct swap_info_struct *sis = swap_info[type];
>  	pgoff_t offset = swp_offset(entry);
>  
> +	if (!backend_registered) {
> +		frontswap_failed_puts++;
> +		return ret;
> +	}
> +
>  	BUG_ON(!PageLocked(page));
>  	BUG_ON(sis == NULL);
>  	if (frontswap_test(sis, offset))
> @@ -127,12 +159,16 @@ int __frontswap_get_page(struct page *page)
>  	struct swap_info_struct *sis = swap_info[type];
>  	pgoff_t offset = swp_offset(entry);
>  
> +	if (!backend_registered)
> +		return ret;
> +
>  	BUG_ON(!PageLocked(page));
>  	BUG_ON(sis == NULL);
>  	if (frontswap_test(sis, offset))
>  		ret = (*frontswap_ops.get_page)(type, offset, page);
>  	if (ret == 0)
>  		frontswap_gets++;
> +
>  	return ret;
>  }
>  EXPORT_SYMBOL(__frontswap_get_page);
> @@ -145,6 +181,9 @@ void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
>  {
>  	struct swap_info_struct *sis = swap_info[type];
>  
> +	if (!backend_registered)
> +		return;
> +
>  	BUG_ON(sis == NULL);
>  	if (frontswap_test(sis, offset)) {
>  		(*frontswap_ops.invalidate_page)(type, offset);
> @@ -162,13 +201,23 @@ EXPORT_SYMBOL(__frontswap_invalidate_page);
>  void __frontswap_invalidate_area(unsigned type)
>  {
>  	struct swap_info_struct *sis = swap_info[type];
> +	int i;

unsigned int

>  
> -	BUG_ON(sis == NULL);
> -	if (sis->frontswap_map == NULL)
> -		return;
> -	(*frontswap_ops.invalidate_area)(type);
> -	atomic_set(&sis->frontswap_pages, 0);
> -	memset(sis->frontswap_map, 0, sis->max / sizeof(long));
> +	if (backend_registered) {
> +		BUG_ON(sis == NULL);
> +		if (sis->frontswap_map == NULL)
> +			return;
> +		(*frontswap_ops.invalidate_area)(type);
> +		atomic_set(&sis->frontswap_pages, 0);
> +		memset(sis->frontswap_map, 0, sis->max / sizeof(long));
> +	} else {
> +		for (i = 0; i < MAX_INITIALIZABLE_SD; i++) {
> +			if (sds[i] == type) {
> +				sds[i] = -1;
> +				break;
> +			}
> +		}
> +	}
>  }
>  EXPORT_SYMBOL(__frontswap_invalidate_area);
>  
> @@ -255,6 +304,7 @@ EXPORT_SYMBOL(frontswap_curr_pages);
>  static int __init init_frontswap(void)
>  {
>  	int err = 0;
> +	int i;
>  
>  #ifdef CONFIG_DEBUG_FS
>  	struct dentry *root = debugfs_create_dir("frontswap", NULL);
> @@ -267,6 +317,10 @@ static int __init init_frontswap(void)
>  	debugfs_create_u64("invalidates", S_IRUGO,
>  				root, &frontswap_invalidates);
>  #endif
> +	for (i = 0; i < MAX_INITIALIZABLE_SD; i++)
> +		sds[i] = -1;
> +
> +	frontswap_enabled = 1;
>  	return err;
>  }
>  
> -- 
> 1.7.5.4



* Re: [PATCH 2/2] cleancache: allow backends to register after cleancache initialization
  2012-03-14 17:13 ` [PATCH 2/2] cleancache: allow backends to register after cleancache initialization Andor Daam
@ 2012-03-15 18:20   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 5+ messages in thread
From: Konrad Rzeszutek Wilk @ 2012-03-15 18:20 UTC
  To: Andor Daam
  Cc: linux-mm, dan.magenheimer, sjenning, ilendir, fschmaus, i4passt,
	ngupta

On Wed, Mar 14, 2012 at 06:13:28PM +0100, Andor Daam wrote:
> This patch allows backends to register with cleancache even after
> filesystems have been mounted. Calls to init_fs and init_shared_fs
> are remembered: fake poolids are handed out, but no real tmem_pools
> are created. On backend registration the fake poolids are mapped to
> real poolids and their respective tmem_pools.
> 
> Signed-off-by: Stefan Hengelein <ilendir@googlemail.com>
> Signed-off-by: Florian Schmaus <fschmaus@gmail.com>
> Signed-off-by: Andor Daam <andor.daam@googlemail.com>
> ---
>  mm/cleancache.c |  157 +++++++++++++++++++++++++++++++++++++++++++++++++------
>  1 files changed, 140 insertions(+), 17 deletions(-)
> 
> diff --git a/mm/cleancache.c b/mm/cleancache.c
> index 5646c74..9f504b6 100644
> --- a/mm/cleancache.c
> +++ b/mm/cleancache.c
> @@ -45,15 +45,42 @@ static u64 cleancache_puts;
>  static u64 cleancache_invalidates;
>  
>  /*
> + * When no backend is registered, all calls to init_fs and init_shared_fs
> + * are recorded and fake poolids are given to the respective
> + * super block, but no tmem_pools are created. When a backend
> + * registers with cleancache, the recorded calls to init_fs and
> + * init_shared_fs are replayed to create tmem_pools and set the
> + * respective poolids. While no backend is registered, all "puts",
> + * "gets" and "flushes" are ignored or fail.
> + */
> +#define MAX_INITIALIZABLE_FS 32
> +#define FAKE_FS_POOLID_OFFSET 1000

Hmm, why 1000? 
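
Just to spell the mapping out for myself (get_poolid_from_fake() is
from further down in this patch):

	get_poolid_from_fake(1003);	/* == fs_poolid_map[3] */
	get_poolid_from_fake(2005);	/* == shared_fs_poolid_map[5] */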
> +#define FAKE_SHARED_FS_POOLID_OFFSET 2000
> +static int fs_poolid_map[MAX_INITIALIZABLE_FS];
> +static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
> +static char *uuids[MAX_INITIALIZABLE_FS];

Could all of this be remade into a struct and a list?
And potentially a 'hot' item which is the one last accessed?

> +static int backend_registered;
> +
> +/*
>   * register operations for cleancache, returning previous thus allowing
>   * detection of multiple backends and possible nesting
>   */
>  struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
>  {
>  	struct cleancache_ops old = cleancache_ops;
> +	int i;
>  
>  	cleancache_ops = *ops;
> -	cleancache_enabled = 1;
> +
> +	backend_registered = 1;
> +	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
> +		if (fs_poolid_map[i] == -1)
> +			fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE);
> +		if (shared_fs_poolid_map[i] == -1)
> +			shared_fs_poolid_map[i] =
> +				(*cleancache_ops.init_shared_fs)
> +					(uuids[i], PAGE_SIZE);
> +	}
>  	return old;
>  }
>  EXPORT_SYMBOL(cleancache_register_ops);
> @@ -61,15 +88,42 @@ EXPORT_SYMBOL(cleancache_register_ops);
>  /* Called by a cleancache-enabled filesystem at time of mount */
>  void __cleancache_init_fs(struct super_block *sb)
>  {
> -	sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
> +	int i;
> +
> +	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
> +		if (fs_poolid_map[i] == -2) {
> +			sb->cleancache_poolid =
> +				i + FAKE_FS_POOLID_OFFSET;
> +			if (backend_registered)
> +				fs_poolid_map[i] =
> +					(*cleancache_ops.init_fs)(PAGE_SIZE);
> +			else
> +				fs_poolid_map[i] = -1;
> +			break;
> +		}
> +	}
>  }
>  EXPORT_SYMBOL(__cleancache_init_fs);
>  
>  /* Called by a cleancache-enabled clustered filesystem at time of mount */
>  void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
>  {
> -	sb->cleancache_poolid =
> -		(*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
> +	int i;
> +
> +	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
> +		if (shared_fs_poolid_map[i] == -2) {
> +			sb->cleancache_poolid =
> +				i + FAKE_SHARED_FS_POOLID_OFFSET;
> +			uuids[i] = uuid;
> +			if (backend_registered)
> +				shared_fs_poolid_map[i] =
> +					(*cleancache_ops.init_shared_fs)
> +						(uuid, PAGE_SIZE);
> +			else
> +				shared_fs_poolid_map[i] = -1;
> +			break;
> +		}
> +	}
>  }
>  EXPORT_SYMBOL(__cleancache_init_shared_fs);
>  
> @@ -101,6 +155,19 @@ static int cleancache_get_key(struct inode *inode,
>  }
>  
>  /*
> + * Returns a pool_id that is associated with a given fake poolid.
> + */
> +static int get_poolid_from_fake(int fake_pool_id)
> +{
> +	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
> +		return shared_fs_poolid_map[fake_pool_id -
> +			FAKE_SHARED_FS_POOLID_OFFSET];
> +	else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
> +		return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
> +	return -1;
> +}
> +
> +/*
>   * "Get" data from cleancache associated with the poolid/inode/index
>  * that were specified when the data was put to cleancache and, if
>   * successful, use it to fill the specified page with data and return 0.
> @@ -111,17 +178,26 @@ int __cleancache_get_page(struct page *page)
>  {
>  	int ret = -1;
>  	int pool_id;
> +	int fake_pool_id;
>  	struct cleancache_filekey key = { .u.key = { 0 } };
>  
> +	if (!backend_registered) {
> +		cleancache_failed_gets++;
> +		goto out;
> +	}
> +
>  	VM_BUG_ON(!PageLocked(page));
> -	pool_id = page->mapping->host->i_sb->cleancache_poolid;
> -	if (pool_id < 0)
> +	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
> +	if (fake_pool_id < 0)
>  		goto out;
> +	pool_id = get_poolid_from_fake(fake_pool_id);
>  
>  	if (cleancache_get_key(page->mapping->host, &key) < 0)
>  		goto out;
>  
> -	ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
> +	if (pool_id >= 0)
> +		ret = (*cleancache_ops.get_page)(pool_id,
> +				key, page->index, page);
>  	if (ret == 0)
>  		cleancache_succ_gets++;
>  	else
> @@ -140,12 +216,23 @@ EXPORT_SYMBOL(__cleancache_get_page);
>  void __cleancache_put_page(struct page *page)
>  {
>  	int pool_id;
> +	int fake_pool_id;
>  	struct cleancache_filekey key = { .u.key = { 0 } };
>  
> +	if (!backend_registered) {
> +		cleancache_puts++;
> +		return;
> +	}
> +
>  	VM_BUG_ON(!PageLocked(page));
> -	pool_id = page->mapping->host->i_sb->cleancache_poolid;
> +	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
> +	if (fake_pool_id < 0)
> +		return;
> +
> +	pool_id = get_poolid_from_fake(fake_pool_id);
> +
>  	if (pool_id >= 0 &&
> -	      cleancache_get_key(page->mapping->host, &key) >= 0) {
> +		cleancache_get_key(page->mapping->host, &key) >= 0) {
>  		(*cleancache_ops.put_page)(pool_id, key, page->index, page);
>  		cleancache_puts++;
>  	}
> @@ -160,14 +247,22 @@ void __cleancache_invalidate_page(struct address_space *mapping,
>  					struct page *page)
>  {
>  	/* careful... page->mapping is NULL sometimes when this is called */
> -	int pool_id = mapping->host->i_sb->cleancache_poolid;
> +	int pool_id;
> +	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
>  	struct cleancache_filekey key = { .u.key = { 0 } };
>  
> -	if (pool_id >= 0) {
> +	if (!backend_registered)
> +		return;
> +
> +	if (fake_pool_id >= 0) {
> +		pool_id = get_poolid_from_fake(fake_pool_id);
> +		if (pool_id < 0)
> +			return;
> +
>  		VM_BUG_ON(!PageLocked(page));
>  		if (cleancache_get_key(mapping->host, &key) >= 0) {
>  			(*cleancache_ops.invalidate_page)(pool_id,
> -							  key, page->index);
> +					key, page->index);
>  			cleancache_invalidates++;
>  		}
>  	}
> @@ -181,9 +276,18 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
>   */
>  void __cleancache_invalidate_inode(struct address_space *mapping)
>  {
> -	int pool_id = mapping->host->i_sb->cleancache_poolid;
> +	int pool_id;
> +	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
>  	struct cleancache_filekey key = { .u.key = { 0 } };
>  
> +	if (!backend_registered)
> +		return;
> +
> +	if (fake_pool_id < 0)
> +		return;
> +
> +	pool_id = get_poolid_from_fake(fake_pool_id);
> +
>  	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
>  		(*cleancache_ops.invalidate_inode)(pool_id, key);
>  }
> @@ -196,16 +300,30 @@ EXPORT_SYMBOL(__cleancache_invalidate_inode);
>   */
>  void __cleancache_invalidate_fs(struct super_block *sb)
>  {
> -	if (sb->cleancache_poolid >= 0) {
> -		int old_poolid = sb->cleancache_poolid;
> -		sb->cleancache_poolid = -1;
> -		(*cleancache_ops.invalidate_fs)(old_poolid);
> +	int old_poolid;
> +	int index;
> +	int fake_pool_id = sb->cleancache_poolid;
> +
> +	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
> +		index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
> +		old_poolid = shared_fs_poolid_map[index];
> +		shared_fs_poolid_map[index] = -2;
> +		uuids[index] = NULL;
> +	} else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
> +		index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
> +		old_poolid = fs_poolid_map[index];
> +		fs_poolid_map[index] = -2;
>  	}
> +	sb->cleancache_poolid = -1;
> +	if (backend_registered)
> +		(*cleancache_ops.invalidate_fs)(old_poolid);
>  }
>  EXPORT_SYMBOL(__cleancache_invalidate_fs);
>  
>  static int __init init_cleancache(void)
>  {
> +	int i;
> +
>  #ifdef CONFIG_DEBUG_FS
>  	struct dentry *root = debugfs_create_dir("cleancache", NULL);
>  	if (root == NULL)
> @@ -217,6 +335,11 @@ static int __init init_cleancache(void)
>  	debugfs_create_u64("invalidates", S_IRUGO,
>  				root, &cleancache_invalidates);
>  #endif
> +	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
> +		fs_poolid_map[i] = -2;
> +		shared_fs_poolid_map[i] = -2;

You should declare a #define for these. But I am wondering
whether this would be easier if you had something like this:

struct cleancache_clients {
	struct list_head	next;
	struct cleancache_clients	*hot;
#define UNREGISTERED	(1<<0)
#define REGISTERED	(1<<1)
#define SOMETHING	(1<<2)

	unsigned int	flags;
	int		poolid;
	...
}

> +	}
> +	cleancache_enabled = 1;
>  	return 0;
>  }
>  module_init(init_cleancache)
> -- 
> 1.7.5.4



end of thread

Thread overview: 5+ messages
2012-03-14 17:13 frontswap/cleancache: allow backends to register after init Andor Daam
2012-03-14 17:13 ` [PATCH 1/2] frontswap: allow backends to register after frontswap initialization Andor Daam
2012-03-15 18:14   ` Konrad Rzeszutek Wilk
2012-03-14 17:13 ` [PATCH 2/2] cleancache: allow backends to register after cleancache initialization Andor Daam
2012-03-15 18:20   ` Konrad Rzeszutek Wilk
