public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [RFC/PATCH 2/2] slab: consolidate allocation paths
@ 2006-06-15  7:12 Pekka Enberg
  2006-06-16 20:05 ` Christoph Lameter
  2006-06-17 16:38 ` Christoph Lameter
  0 siblings, 2 replies; 5+ messages in thread
From: Pekka Enberg @ 2006-06-15  7:12 UTC (permalink / raw)
  To: christoph, manfred; +Cc: linux-kernel

From: Pekka Enberg <penberg@cs.helsinki.fi>

This patch consolidates the UMA and NUMA memory allocation paths in the
slab allocator. This is accomplished by making the UMA-path look like
we are on NUMA but always allocating from the current node.

There is a slight increase in NUMA kernel text size with this patch:

   text    data     bss     dec     hex filename
  17019    2520      20   19559    4c67 mm/slab.o (before)
  17034    2520      20   19574    4c76 mm/slab.o (after)

However, bloat-o-meter says the net growth is even smaller:

  add/remove: 0/0 grow/shrink: 1/1 up/down: 4/-1 (3)
  function                                     old     new   delta
  kmem_cache_alloc_node                        161     165      +4
  kmem_cache_create                           1512    1511      -1

UMA text size is unchanged.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>

---

 mm/slab.c |   52 +++++++++++++++++++++-------------------------------
 1 files changed, 21 insertions(+), 31 deletions(-)

3b92d48f346b46b3a050f4195497c96f5eb6bb59
diff --git a/mm/slab.c b/mm/slab.c
index 579cff3..83a3394 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2855,8 +2855,8 @@ static void *cache_alloc_debugcheck_afte
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static __always_inline void *__cache_alloc_cpucache(struct kmem_cache *cachep,
-						    gfp_t flags)
+static __always_inline void *cache_alloc_cpucache(struct kmem_cache *cachep,
+						  gfp_t flags)
 {
 	void *objp;
 	struct array_cache *ac;
@@ -2959,14 +2959,19 @@ done:
 	return obj;
 }
 
-static inline void *cache_alloc_cpucache(struct kmem_cache *cache, gfp_t flags)
+static inline void *__cache_alloc(struct kmem_cache *cache, gfp_t flags,
+				  int nodeid)
 {
+	if (nodeid != -1 && nodeid != numa_node_id() &&
+	    cache->nodelists[nodeid])
+		return __cache_alloc_node(cache, flags, nodeid);
+
 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
 		void *objp = alternate_node_alloc(cache, flags);
 		if (objp != NULL)
 			return objp;
 	}
-	return __cache_alloc_cpucache(cache, flags);
+	return cache_alloc_cpucache(cache, flags);
 }
 
 #else
@@ -2975,15 +2980,17 @@ static inline void *cache_alloc_cpucache
  * On UMA, we always allocate directly drom the per-CPU cache.
  */
 
-static inline void *cache_alloc_cpucache(struct kmem_cache *cache, gfp_t flags)
+static __always_inline void *__cache_alloc(struct kmem_cache *cache,
+					   gfp_t flags, int nodeid)
 {
-	return __cache_alloc_cpucache(cache, flags);
+	return cache_alloc_cpucache(cache, flags);
 }
 
 #endif
 
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
-						gfp_t flags, void *caller)
+static __always_inline void *cache_alloc(struct kmem_cache *cachep,
+					 gfp_t flags, int nodeid,
+					 void *caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -2991,10 +2998,9 @@ static __always_inline void *__cache_all
 	cache_alloc_debugcheck_before(cachep, flags);
 
 	local_irq_save(save_flags);
-	objp = cache_alloc_cpucache(cachep, flags);
+	objp = __cache_alloc(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
-	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
-					    caller);
+	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
 	return objp;
 }
@@ -3158,7 +3164,7 @@ static inline void __cache_free(struct k
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	return cache_alloc(cachep, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
@@ -3172,7 +3178,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
  */
 void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
 {
-	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
+	void *ret = cache_alloc(cache, flags, -1, __builtin_return_address(0));
 	if (ret)
 		memset(ret, 0, obj_size(cache));
 	return ret;
@@ -3236,23 +3242,7 @@ out:
  */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	unsigned long save_flags;
-	void *ptr;
-
-	cache_alloc_debugcheck_before(cachep, flags);
-	local_irq_save(save_flags);
-
-	if (nodeid == -1 || nodeid == numa_node_id() ||
-			!cachep->nodelists[nodeid])
-		ptr = cache_alloc_cpucache(cachep, flags);
-	else
-		ptr = __cache_alloc_node(cachep, flags, nodeid);
-	local_irq_restore(save_flags);
-
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
-					   __builtin_return_address(0));
-
-	return ptr;
+	return cache_alloc(cachep, flags, nodeid, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
@@ -3303,7 +3293,7 @@ static __always_inline void *__do_kmallo
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
-	return __cache_alloc(cachep, flags, caller);
+	return cache_alloc(cachep, flags, -1, caller);
 }
 
 
-- 
1.1.3



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [RFC/PATCH 2/2] slab: consolidate allocation paths
  2006-06-15  7:12 [RFC/PATCH 2/2] slab: consolidate allocation paths Pekka Enberg
@ 2006-06-16 20:05 ` Christoph Lameter
  2006-06-17  4:44   ` Pekka Enberg
  2006-06-17 16:38 ` Christoph Lameter
  1 sibling, 1 reply; 5+ messages in thread
From: Christoph Lameter @ 2006-06-16 20:05 UTC (permalink / raw)
  To: Pekka Enberg; +Cc: christoph, manfred, linux-kernel

On Thu, 15 Jun 2006, Pekka Enberg wrote:

> This patch consolidates the UMA and NUMA memory allocation paths in the
> slab allocator. This is accomplished by making the UMA-path look like
> we are on NUMA but always allocating from the current node.

Which kernel does this apply to? I cannot find this upstream or in 
Andrew's tree.


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [RFC/PATCH 2/2] slab: consolidate allocation paths
  2006-06-16 20:05 ` Christoph Lameter
@ 2006-06-17  4:44   ` Pekka Enberg
  2006-06-17 16:27     ` Christoph Lameter
  0 siblings, 1 reply; 5+ messages in thread
From: Pekka Enberg @ 2006-06-17  4:44 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: christoph, manfred, linux-kernel

Hi,

On Thu, 15 Jun 2006, Pekka Enberg wrote:
> > This patch consolidates the UMA and NUMA memory allocation paths in the
> > slab allocator. This is accomplished by making the UMA-path look like
> > we are on NUMA but always allocating from the current node.

On 6/16/06, Christoph Lameter <clameter@sgi.com> wrote:
> Which kernel does this apply to? I cannot find this upstream or in
> Andrew's tree.

Applies on top of git head and 2.6.17-rc6 from www.kernel.org here.
Did you apply both patches?

                                                       Pekka

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [RFC/PATCH 2/2] slab: consolidate allocation paths
  2006-06-17  4:44   ` Pekka Enberg
@ 2006-06-17 16:27     ` Christoph Lameter
  0 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2006-06-17 16:27 UTC (permalink / raw)
  To: Pekka Enberg; +Cc: christoph, manfred, linux-kernel

On Sat, 17 Jun 2006, Pekka Enberg wrote:

> On 6/16/06, Christoph Lameter <clameter@sgi.com> wrote:
> > Which kernel does this apply to? I cannot find this upstream or in
> > Andrew's tree.
> 
> Applies on top of git head and 2.6.17-rc6 from www.kernel.org here.
> Did you apply both patches?

Got them in wrong order it seems.


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [RFC/PATCH 2/2] slab: consolidate allocation paths
  2006-06-15  7:12 [RFC/PATCH 2/2] slab: consolidate allocation paths Pekka Enberg
  2006-06-16 20:05 ` Christoph Lameter
@ 2006-06-17 16:38 ` Christoph Lameter
  1 sibling, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2006-06-17 16:38 UTC (permalink / raw)
  To: Pekka Enberg; +Cc: manfred, linux-kernel

On Thu, 15 Jun 2006, Pekka Enberg wrote:

> -static inline void *cache_alloc_cpucache(struct kmem_cache *cache, gfp_t flags)
> +static inline void *__cache_alloc(struct kmem_cache *cache, gfp_t flags,
> +				  int nodeid)
>  {
> +	if (nodeid != -1 && nodeid != numa_node_id() &&
> +	    cache->nodelists[nodeid])
> +		return __cache_alloc_node(cache, flags, nodeid);
> +
>  	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
>  		void *objp = alternate_node_alloc(cache, flags);

So we always run through the additional code that you added for each 
allocation on a numa system? The case of nodeid != -1 is a rare case.


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2006-06-17 16:39 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-06-15  7:12 [RFC/PATCH 2/2] slab: consolidate allocation paths Pekka Enberg
2006-06-16 20:05 ` Christoph Lameter
2006-06-17  4:44   ` Pekka Enberg
2006-06-17 16:27     ` Christoph Lameter
2006-06-17 16:38 ` Christoph Lameter

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox