diff for duplicates of <508676FA.4000107@parallels.com>

diff --git a/a/2.txt b/N1/2.txt
index 8b13789..b11e7eb 100644
--- a/a/2.txt
+++ b/N1/2.txt
@@ -1 +1,146 @@
+From 825f5179174be825f1219e23bdde769ef9febd45 Mon Sep 17 00:00:00 2001
+From: Glauber Costa <glommer@parallels.com>
+Date: Mon, 22 Oct 2012 15:20:26 +0400
+Subject: [PATCH] slab: move kmem_cache_free to common code
 
+As part of the effort to commonize the slab allocators, it would be
+better if we had a single entry point for kmem_cache_free. At the same
+time, the different layouts of the allocators lead to routines that are
+necessarily different.
+
+Because we also want the allocators to be able to free objects in
+their fast paths without issuing a call instruction, we rely on the
+preprocessor to aid us.
+
+Signed-off-by: Glauber Costa <glommer@parallels.com>
+CC: Christoph Lameter <cl@linux.com>
+CC: Pekka Enberg <penberg@kernel.org>
+CC: David Rientjes <rientjes@google.com>
+---
+ mm/slab.c | 15 +++------------
+ mm/slab.h | 26 ++++++++++++++++++++++++++
+ mm/slob.c |  6 ++----
+ mm/slub.c |  6 ++----
+ 4 files changed, 33 insertions(+), 20 deletions(-)
+
+diff --git a/mm/slab.c b/mm/slab.c
+index 98b3460..bceffcc 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3903,15 +3903,8 @@ void *__kmalloc(size_t size, gfp_t flags)
+ EXPORT_SYMBOL(__kmalloc);
+ #endif
+ 
+-/**
+- * kmem_cache_free - Deallocate an object
+- * @cachep: The cache the allocation was from.
+- * @objp: The previously allocated object.
+- *
+- * Free an object which was previously allocated from this
+- * cache.
+- */
+-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
++static __always_inline void
++__kmem_cache_free(struct kmem_cache *cachep, void *objp)
+ {
+ 	unsigned long flags;
+ 
+@@ -3921,10 +3914,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+ 		debug_check_no_obj_freed(objp, cachep->object_size);
+ 	__cache_free(cachep, objp, __builtin_return_address(0));
+ 	local_irq_restore(flags);
+-
+-	trace_kmem_cache_free(_RET_IP_, objp);
+ }
+-EXPORT_SYMBOL(kmem_cache_free);
++KMEM_CACHE_FREE(__kmem_cache_free);
+ 
+ /**
+  * kfree - free previously allocated memory
+diff --git a/mm/slab.h b/mm/slab.h
+index 66a62d3..32063f8 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -92,4 +92,30 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
+ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
+ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
+ 		       size_t count, loff_t *ppos);
++
++/*
++ * What goes below for kmem_cache_free is not pretty. But because this
++ * is an extremely hot path, we would like to avoid function calls as
++ * much as we can.
++ *
++ * As ugly as it is, this is a way to guarantee that different allocators,
++ * with different layouts, and therefore different free functions, can
++ * still live in different files and inline the whole of kmem_cache_free.
++ */
++/**
++ * kmem_cache_free - Deallocate an object
++ * @cachep: The cache the allocation was from.
++ * @objp: The previously allocated object.
++ *
++ * Free an object which was previously allocated from this
++ * cache.
++ *
++ */
++#define KMEM_CACHE_FREE(allocator_fn)			\
++void kmem_cache_free(struct kmem_cache *s, void *x)	\
++{							\
++	allocator_fn(s, x);				\
++	trace_kmem_cache_free(_RET_IP_, x);		\
++}							\
++EXPORT_SYMBOL(kmem_cache_free)
+ #endif
+diff --git a/mm/slob.c b/mm/slob.c
+index 3edfeaa..1580371 100644
+--- a/mm/slob.c
++++ b/mm/slob.c
+@@ -571,7 +571,7 @@ static void kmem_rcu_free(struct rcu_head *head)
+ 	__kmem_cache_free(b, slob_rcu->size);
+ }
+ 
+-void kmem_cache_free(struct kmem_cache *c, void *b)
++static __always_inline void do_kmem_cache_free(struct kmem_cache *c, void *b)
+ {
+ 	kmemleak_free_recursive(b, c->flags);
+ 	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+@@ -582,10 +582,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
+ 	} else {
+ 		__kmem_cache_free(b, c->size);
+ 	}
+-
+-	trace_kmem_cache_free(_RET_IP_, b);
+ }
+-EXPORT_SYMBOL(kmem_cache_free);
++KMEM_CACHE_FREE(do_kmem_cache_free);
+ 
+ unsigned int kmem_cache_size(struct kmem_cache *c)
+ {
+diff --git a/mm/slub.c b/mm/slub.c
+index 259bc2c..7dd41d7 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2606,7 +2606,7 @@ redo:
+ 
+ }
+ 
+-void kmem_cache_free(struct kmem_cache *s, void *x)
++static __always_inline void __kmem_cache_free(struct kmem_cache *s, void *x)
+ {
+ 	struct page *page;
+ 
+@@ -2620,10 +2620,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
+ 	}
+ 
+ 	slab_free(s, page, x, _RET_IP_);
+-
+-	trace_kmem_cache_free(_RET_IP_, x);
+ }
+-EXPORT_SYMBOL(kmem_cache_free);
++KMEM_CACHE_FREE(__kmem_cache_free);
+ 
+ /*
+  * Object placement in a slab is made very easy because we always start at
+-- 
+1.7.11.7
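
The pattern in the patch can be seen in isolation with a minimal
userspace sketch. Only the KMEM_CACHE_FREE macro shape mirrors the
patch; the struct kmem_cache below, trace_free(), and the malloc/free
backend are illustrative stand-ins, not kernel API. Each allocator
supplies an always-inlined free routine, and the macro stamps out the
single public kmem_cache_free around it, so the tracepoint is emitted
without an extra call instruction.

/*
 * Minimal userspace sketch of the KMEM_CACHE_FREE pattern above.
 * Everything except the macro shape is an illustrative stand-in.
 */
#include <stdio.h>
#include <stdlib.h>

struct kmem_cache { const char *name; };

/* Stand-in for the trace_kmem_cache_free() tracepoint. */
static void trace_free(void *x)
{
	printf("freed %p\n", x);
}

/* Each allocator supplies its own always-inlined fast path... */
static inline __attribute__((always_inline))
void __kmem_cache_free(struct kmem_cache *s, void *x)
{
	(void)s;	/* a real allocator would consult the cache */
	free(x);
}

/*
 * ...and the macro stamps out the one public entry point around it,
 * so the tracepoint lives in a single place and the allocator body
 * is inlined rather than called.
 */
#define KMEM_CACHE_FREE(allocator_fn)			\
void kmem_cache_free(struct kmem_cache *s, void *x)	\
{							\
	allocator_fn(s, x);				\
	trace_free(x);					\
}

KMEM_CACHE_FREE(__kmem_cache_free)

int main(void)
{
	struct kmem_cache demo = { "demo" };
	kmem_cache_free(&demo, malloc(32));
	return 0;
}

Compiling this with gcc -O2 and inspecting the assembly should show a
single kmem_cache_free symbol with the free logic folded into it, which
is exactly the property the slab.h comment in the patch is after.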