From: Christoph Lameter <clameter@sgi.com>
To: akpm@linux-foundation.org
Cc: linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>,
	Pekka Enberg <penberg@cs.helsinki.fi>
Subject: [patch 5/7] SLUB: Use allocpercpu to allocate per cpu data instead of running our own per cpu allocator
Date: Wed, 31 Oct 2007 17:02:16 -0700
Message-ID: <20071101000310.674476000@sgi.com>
In-Reply-To: 20071101000211.970501947@sgi.com


Using allocpercpu removes the need for the per cpu arrays in the kmem_cache struct.
These arrays could get quite big if we have to support systems with up to thousands of cpus.
The use of alloc_percpu means that:

1. The size of kmem_cache for SMP configurations shrinks since we only
need one pointer instead of NR_CPUS pointers. The same pointer can be used by
all processors, which reduces the cache footprint of the allocator.

2. We can dynamically size kmem_cache according to the actual number of nodes
in the system, meaning less memory overhead for configurations that may
potentially support up to 1k NUMA nodes.

3. We can remove the fiddling with allocating and releasing kmem_cache_cpu
   structures when bringing up and shutting down cpus. The allocpercpu
   logic does it all for us (see the sketch after this list).

4. Fastpath performance increases by another 20% on top of the earlier improvements.
   Instead of a fastpath taking 40-50 cycles we are now in the 30-40 cycle range.
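
A minimal sketch of the resulting per cpu allocation pattern (hypothetical
example names, not the actual SLUB code; it only relies on alloc_percpu(),
percpu_ptr() and percpu_free() as used in the patch below):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/errno.h>

	/* Stand-in for the fields kept in kmem_cache_cpu */
	struct example_cpu_state {
		void **freelist;
	};

	struct example_cache {
		/* One pointer, usable from any cpu through percpu_ptr() */
		struct example_cpu_state *cpu_state;
	};

	static int example_alloc_cpus(struct example_cache *c)
	{
		int cpu;

		c->cpu_state = alloc_percpu(struct example_cpu_state);
		if (!c->cpu_state)
			return -ENOMEM;

		/* Initialize the per cpu instance of each online cpu */
		for_each_online_cpu(cpu)
			percpu_ptr(c->cpu_state, cpu)->freelist = NULL;
		return 0;
	}

	static void example_free_cpus(struct example_cache *c)
	{
		/* One call releases all per cpu instances */
		percpu_free(c->cpu_state);
	}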

Signed-off-by: Christoph Lameter <clameter@sgi.com>

---
 include/linux/slub_def.h |   11 ++--
 mm/slub.c                |  125 ++++-------------------------------------------
 2 files changed, 18 insertions(+), 118 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2007-10-30 16:34:41.000000000 -0700
+++ linux-2.6/include/linux/slub_def.h	2007-10-31 09:23:26.000000000 -0700
@@ -34,6 +34,12 @@ struct kmem_cache_node {
  * Slab cache management.
  */
 struct kmem_cache {
+#ifdef CONFIG_SMP
+	/* Per cpu pointer usable for any cpu */
+	struct kmem_cache_cpu *cpu_slab;
+#else
+	struct kmem_cache_cpu cpu_slab;
+#endif
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
@@ -63,11 +69,6 @@ struct kmem_cache {
 	int defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-	struct kmem_cache_cpu cpu_slab;
-#endif
 };
 
 /*
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-30 22:52:24.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-31 09:45:59.000000000 -0700
@@ -242,7 +242,7 @@ static inline struct kmem_cache_node *ge
 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 {
 #ifdef CONFIG_SMP
-	return s->cpu_slab[cpu];
+	return percpu_ptr(s->cpu_slab, cpu);
 #else
 	return &s->cpu_slab;
 #endif
@@ -2032,119 +2032,25 @@ static void init_kmem_cache_node(struct 
 }
 
 #ifdef CONFIG_SMP
-/*
- * Per cpu array for per cpu structures.
- *
- * The per cpu array places all kmem_cache_cpu structures from one processor
- * close together meaning that it becomes possible that multiple per cpu
- * structures are contained in one cacheline. This may be particularly
- * beneficial for the kmalloc caches.
- *
- * A desktop system typically has around 60-80 slabs. With 100 here we are
- * likely able to get per cpu structures for all caches from the array defined
- * here. We must be able to cover all kmalloc caches during bootstrap.
- *
- * If the per cpu array is exhausted then fall back to kmalloc
- * of individual cachelines. No sharing is possible then.
- */
-#define NR_KMEM_CACHE_CPU 100
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu,
-				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
-
-static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
-							int cpu, gfp_t flags)
-{
-	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
-
-	if (c)
-		per_cpu(kmem_cache_cpu_free, cpu) =
-				(void *)c->freelist;
-	else {
-		/* Table overflow: So allocate ourselves */
-		c = kmalloc_node(
-			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
-			flags, cpu_to_node(cpu));
-		if (!c)
-			return NULL;
-	}
-
-	init_kmem_cache_cpu(s, c);
-	return c;
-}
-
-static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
-{
-	if (c < per_cpu(kmem_cache_cpu, cpu) ||
-			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
-		kfree(c);
-		return;
-	}
-	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
-	per_cpu(kmem_cache_cpu_free, cpu) = c;
-}
-
 static void free_kmem_cache_cpus(struct kmem_cache *s)
 {
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c) {
-			s->cpu_slab[cpu] = NULL;
-			free_kmem_cache_cpu(c, cpu);
-		}
-	}
+	percpu_free(s->cpu_slab);
 }
 
 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
 	int cpu;
 
-	for_each_online_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
 
-		if (c)
-			continue;
+	if (!s->cpu_slab)
+		return 0;
 
-		c = alloc_kmem_cache_cpu(s, cpu, flags);
-		if (!c) {
-			free_kmem_cache_cpus(s);
-			return 0;
-		}
-		s->cpu_slab[cpu] = c;
-	}
+	for_each_online_cpu(cpu)
+		init_kmem_cache_cpu(s, get_cpu_slab(s, cpu));
 	return 1;
 }
 
-/*
- * Initialize the per cpu array.
- */
-static void init_alloc_cpu_cpu(int cpu)
-{
-	int i;
-
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
-		return;
-
-	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
-		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
-
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
-}
-
-static void __init init_alloc_cpu(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		init_alloc_cpu_cpu(cpu);
-  }
-
 #else
 static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
 static inline void init_alloc_cpu(void) {}
@@ -2974,8 +2880,6 @@ void __init kmem_cache_init(void)
 	int i;
 	int caches = 0;
 
-	init_alloc_cpu();
-
 #ifdef CONFIG_NUMA
 	/*
 	 * Must first have the slab cache available for the allocations of the
@@ -3035,11 +2939,12 @@ void __init kmem_cache_init(void)
 	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
-
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
-	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
+#endif
+#ifdef CONFIG_NUMA
+	kmem_size = offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *);
 #else
 	kmem_size = sizeof(struct kmem_cache);
 #endif
@@ -3181,11 +3086,9 @@ static int __cpuinit slab_cpuup_callback
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		init_alloc_cpu_cpu(cpu);
 		down_read(&slub_lock);
 		list_for_each_entry(s, &slab_caches, list)
-			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
-							GFP_KERNEL);
+			init_kmem_cache_cpu(s, get_cpu_slab(s, cpu));
 		up_read(&slub_lock);
 		break;
 
@@ -3195,13 +3098,9 @@ static int __cpuinit slab_cpuup_callback
 	case CPU_DEAD_FROZEN:
 		down_read(&slub_lock);
 		list_for_each_entry(s, &slab_caches, list) {
-			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
 			local_irq_save(flags);
 			__flush_cpu_slab(s, cpu);
 			local_irq_restore(flags);
-			free_kmem_cache_cpu(c, cpu);
-			s->cpu_slab[cpu] = NULL;
 		}
 		up_read(&slub_lock);
 		break;
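
For illustration, a small userspace toy (hypothetical names, not kernel code)
of the sizing trick behind the kmem_size computation above: a structure whose
last member is a worst-case sized array only needs to be allocated up to the
last element actually in use, which is why the NUMA case takes
offsetof(struct kmem_cache, node) plus nr_node_ids pointers.

	#include <stddef.h>
	#include <stdio.h>

	#define TOY_MAX_NODES 1024		/* compile-time worst case */

	struct toy_cache {
		unsigned long flags;
		int size;
		void *node[TOY_MAX_NODES];	/* must be the last member */
	};

	int main(void)
	{
		int nodes_in_use = 4;		/* stand-in for nr_node_ids */
		size_t trimmed = offsetof(struct toy_cache, node) +
				 nodes_in_use * sizeof(void *);

		printf("full %zu bytes, trimmed %zu bytes\n",
			sizeof(struct toy_cache), trimmed);
		return 0;
	}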

-- 
