public inbox for linux-arch@vger.kernel.org
 help / color / mirror / Atom feed
From: Christoph Lameter <clameter@sgi.com>
To: akpm@linux-foundation.org
Cc: linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	David Miller <davem@davemloft.net>,
	Eric Dumazet <dada1@cosmosbay.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Rusty Russell <rusty@rustcorp.com.au>,
	Mike Travis <travis@sgi.com>
Subject: [patch 32/41] cpu alloc: Use in slub
Date: Thu, 29 May 2008 20:56:52 -0700	[thread overview]
Message-ID: <20080530040021.800522644@sgi.com> (raw)
In-Reply-To: 20080530035620.587204923@sgi.com

[-- Attachment #1: cpu_alloc_slub_conversion --]
[-- Type: text/plain, Size: 11012 bytes --]

Using cpu alloc removes the need for the per cpu arrays in the kmem_cache struct.
These could get quite big if we have to support systems with up to thousands of cpus.
The use of cpu_alloc means that:

1. The size of kmem_cache for SMP configuration shrinks since we will only
   need 1 pointer instead of NR_CPUS. The same pointer can be used by all
   processors. Reduces cache footprint of the allocator.

2. We can dynamically size kmem_cache according to the actual nodes in the
   system meaning less memory overhead for configurations that may potentially
   support up to 1k NUMA nodes.

3. We can remove the diddle widdle with allocating and releasing of
   kmem_cache_cpu structures when bringing up and shutting down cpus. The cpu
   alloc logic will do it all for us. Removes some portions of the cpu hotplug
   functionality.

4. Fastpath performance increases by 20%.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
 arch/x86/Kconfig         |    4 
 include/linux/slub_def.h |    6 -
 mm/slub.c                |  226 ++++++++++-------------------------------------
 3 files changed, 50 insertions(+), 186 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-05-27 23:56:24.000000000 -0700
+++ linux-2.6/include/linux/slub_def.h	2008-05-28 00:00:27.000000000 -0700
@@ -67,6 +67,7 @@
  * Slab cache management.
  */
 struct kmem_cache {
+	struct kmem_cache_cpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
@@ -101,11 +102,6 @@
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-	struct kmem_cache_cpu cpu_slab;
-#endif
 };
 
 /*
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-05-27 23:56:24.000000000 -0700
+++ linux-2.6/mm/slub.c	2008-05-28 00:00:27.000000000 -0700
@@ -258,15 +258,6 @@
 #endif
 }
 
-static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
-{
-#ifdef CONFIG_SMP
-	return s->cpu_slab[cpu];
-#else
-	return &s->cpu_slab;
-#endif
-}
-
 /* Verify that a pointer has an address that is valid within a slab page */
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
@@ -1120,7 +1111,7 @@
 		if (!page)
 			return NULL;
 
-		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+		stat(THIS_CPU(s->cpu_slab), ORDER_FALLBACK);
 	}
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
@@ -1397,7 +1388,7 @@
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
+	struct kmem_cache_cpu *c = THIS_CPU(s->cpu_slab);
 
 	ClearSlabFrozen(page);
 	if (page->inuse) {
@@ -1428,7 +1419,7 @@
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
-			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
+			stat(__THIS_CPU(s->cpu_slab), FREE_SLAB);
 			discard_slab(s, page);
 		}
 	}
@@ -1481,7 +1472,7 @@
  */
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
-	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+	struct kmem_cache_cpu *c = CPU_PTR(s->cpu_slab, cpu);
 
 	if (likely(c && c->page))
 		flush_slab(s, c);
@@ -1496,15 +1487,7 @@
 
 static void flush_all(struct kmem_cache *s)
 {
-#ifdef CONFIG_SMP
 	on_each_cpu(flush_cpu_slab, s, 1, 1);
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	flush_cpu_slab(s);
-	local_irq_restore(flags);
-#endif
 }
 
 /*
@@ -1520,6 +1503,15 @@
 	return 1;
 }
 
+static inline int cpu_node_match(struct kmem_cache_cpu *c, int node)
+{
+#ifdef CONFIG_NUMA
+	if (node != -1 && __CPU_READ(c->node) != node)
+		return 0;
+#endif
+	return 1;
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -1592,7 +1584,7 @@
 		local_irq_disable();
 
 	if (new) {
-		c = get_cpu_slab(s, smp_processor_id());
+		c = __THIS_CPU(s->cpu_slab);
 		stat(c, ALLOC_SLAB);
 		if (c->page)
 			flush_slab(s, c);
@@ -1630,20 +1622,20 @@
 	unsigned long flags;
 
 	local_irq_save(flags);
-	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	c = __THIS_CPU(s->cpu_slab);
+	object = c->freelist;
+	if (unlikely(!object || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-		object = c->freelist;
 		c->freelist = object[c->offset];
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, c->objsize);
+		memset(object, 0, s->objsize);
 
 	return object;
 }
@@ -1677,7 +1669,7 @@
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
 
-	c = get_cpu_slab(s, raw_smp_processor_id());
+	c = __THIS_CPU(s->cpu_slab);
 	stat(c, FREE_SLOWPATH);
 	slab_lock(page);
 
@@ -1748,7 +1740,7 @@
 	unsigned long flags;
 
 	local_irq_save(flags);
-	c = get_cpu_slab(s, smp_processor_id());
+	c = __THIS_CPU(s->cpu_slab);
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, s->objsize);
@@ -1962,130 +1954,19 @@
 #endif
 }
 
-#ifdef CONFIG_SMP
-/*
- * Per cpu array for per cpu structures.
- *
- * The per cpu array places all kmem_cache_cpu structures from one processor
- * close together meaning that it becomes possible that multiple per cpu
- * structures are contained in one cacheline. This may be particularly
- * beneficial for the kmalloc caches.
- *
- * A desktop system typically has around 60-80 slabs. With 100 here we are
- * likely able to get per cpu structures for all caches from the array defined
- * here. We must be able to cover all kmalloc caches during bootstrap.
- *
- * If the per cpu array is exhausted then fall back to kmalloc
- * of individual cachelines. No sharing is possible then.
- */
-#define NR_KMEM_CACHE_CPU 100
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu,
-				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
-
-static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
-							int cpu, gfp_t flags)
-{
-	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
-
-	if (c)
-		per_cpu(kmem_cache_cpu_free, cpu) =
-				(void *)c->freelist;
-	else {
-		/* Table overflow: So allocate ourselves */
-		c = kmalloc_node(
-			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
-			flags, cpu_to_node(cpu));
-		if (!c)
-			return NULL;
-	}
-
-	init_kmem_cache_cpu(s, c);
-	return c;
-}
-
-static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
-{
-	if (c < per_cpu(kmem_cache_cpu, cpu) ||
-			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
-		kfree(c);
-		return;
-	}
-	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
-	per_cpu(kmem_cache_cpu_free, cpu) = c;
-}
-
-static void free_kmem_cache_cpus(struct kmem_cache *s)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c) {
-			s->cpu_slab[cpu] = NULL;
-			free_kmem_cache_cpu(c, cpu);
-		}
-	}
-}
-
 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
 	int cpu;
 
-	for_each_online_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c)
-			continue;
-
-		c = alloc_kmem_cache_cpu(s, cpu, flags);
-		if (!c) {
-			free_kmem_cache_cpus(s);
-			return 0;
-		}
-		s->cpu_slab[cpu] = c;
-	}
-	return 1;
-}
-
-/*
- * Initialize the per cpu array.
- */
-static void init_alloc_cpu_cpu(int cpu)
-{
-	int i;
-
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
-		return;
-
-	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
-		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
+	s->cpu_slab = CPU_ALLOC(struct kmem_cache_cpu, flags);
 
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
-}
-
-static void __init init_alloc_cpu(void)
-{
-	int cpu;
+	if (!s->cpu_slab)
+		return 0;
 
 	for_each_online_cpu(cpu)
-		init_alloc_cpu_cpu(cpu);
-  }
-
-#else
-static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
-static inline void init_alloc_cpu(void) {}
-
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
-	init_kmem_cache_cpu(s, &s->cpu_slab);
+		init_kmem_cache_cpu(s, CPU_PTR(s->cpu_slab, cpu));
 	return 1;
 }
-#endif
 
 #ifdef CONFIG_NUMA
 /*
@@ -2446,9 +2327,8 @@
 	int node;
 
 	flush_all(s);
-
+	CPU_FREE(s->cpu_slab);
 	/* Attempt to free all objects */
-	free_kmem_cache_cpus(s);
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
@@ -2966,8 +2846,6 @@
 	int i;
 	int caches = 0;
 
-	init_alloc_cpu();
-
 #ifdef CONFIG_NUMA
 	/*
 	 * Must first have the slab cache available for the allocations of the
@@ -3027,11 +2905,12 @@
 	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
-
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
-	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
+#endif
+#ifdef CONFIG_NUMA
+	kmem_size = offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *);
 #else
 	kmem_size = sizeof(struct kmem_cache);
 #endif
@@ -3128,7 +3007,7 @@
 		 * per cpu structures
 		 */
 		for_each_online_cpu(cpu)
-			get_cpu_slab(s, cpu)->objsize = s->objsize;
+			CPU_PTR(s->cpu_slab, cpu)->objsize = s->objsize;
 
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
@@ -3176,11 +3055,9 @@
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		init_alloc_cpu_cpu(cpu);
 		down_read(&slub_lock);
 		list_for_each_entry(s, &slab_caches, list)
-			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
-							GFP_KERNEL);
+			init_kmem_cache_cpu(s, CPU_PTR(s->cpu_slab, cpu));
 		up_read(&slub_lock);
 		break;
 
@@ -3190,13 +3067,9 @@
 	case CPU_DEAD_FROZEN:
 		down_read(&slub_lock);
 		list_for_each_entry(s, &slab_caches, list) {
-			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
 			local_irq_save(flags);
 			__flush_cpu_slab(s, cpu);
 			local_irq_restore(flags);
-			free_kmem_cache_cpu(c, cpu);
-			s->cpu_slab[cpu] = NULL;
 		}
 		up_read(&slub_lock);
 		break;
@@ -3687,7 +3560,7 @@
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+			struct kmem_cache_cpu *c = CPU_PTR(s->cpu_slab, cpu);
 
 			if (!c || c->node < 0)
 				continue;
@@ -4092,7 +3965,7 @@
 		return -ENOMEM;
 
 	for_each_online_cpu(cpu) {
-		unsigned x = get_cpu_slab(s, cpu)->stat[si];
+		unsigned x = CPU_PTR(s->cpu_slab, cpu)->stat[si];
 
 		data[cpu] = x;
 		sum += x;

-- 

  parent reply	other threads:[~2008-05-30  4:00 UTC|newest]

Thread overview: 163+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-05-30  3:56 [patch 00/41] cpu alloc / cpu ops v3: Optimize per cpu access Christoph Lameter
2008-05-30  3:56 ` [patch 01/41] cpu_alloc: Increase percpu area size to 128k Christoph Lameter
2008-06-02 17:58   ` Luck, Tony
2008-06-02 23:48     ` Rusty Russell
2008-06-10 17:22     ` Christoph Lameter
2008-06-10 17:22       ` Christoph Lameter
2008-06-10 19:54       ` Luck, Tony
2008-05-30  3:56 ` [patch 02/41] cpu alloc: The allocator Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  5:10     ` Christoph Lameter
2008-05-30  5:31       ` Andrew Morton
2008-06-02  9:29         ` Paul Jackson
2008-05-30  5:56       ` KAMEZAWA Hiroyuki
2008-05-30  6:16         ` Christoph Lameter
2008-06-04 14:48     ` Mike Travis
2008-05-30  5:04   ` Eric Dumazet
2008-05-30  5:20     ` Christoph Lameter
2008-05-30  5:52       ` Rusty Russell
2008-06-04 15:30         ` Mike Travis
2008-06-05 23:48           ` Rusty Russell
2008-05-30  5:54       ` Eric Dumazet
2008-06-04 14:58       ` Mike Travis
2008-06-04 15:11         ` Eric Dumazet
2008-06-06  0:32           ` Rusty Russell
2008-06-06  0:32             ` Rusty Russell
2008-06-10 17:33         ` Christoph Lameter
2008-06-10 18:05           ` Eric Dumazet
2008-06-10 18:28             ` Christoph Lameter
2008-05-30  5:46   ` Rusty Russell
2008-06-04 15:04     ` Mike Travis
2008-06-10 17:34       ` Christoph Lameter
2008-05-31 20:58   ` Pavel Machek
2008-05-30  3:56 ` [patch 03/41] cpu alloc: Use cpu allocator instead of the builtin modules per cpu allocator Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  5:14     ` Christoph Lameter
2008-05-30  5:34       ` Andrew Morton
2008-05-30  6:08   ` Rusty Russell
2008-05-30  6:21     ` Christoph Lameter
2008-05-30  3:56 ` [patch 04/41] cpu ops: Core piece for generic atomic per cpu operations Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  5:17     ` Christoph Lameter
2008-05-30  5:38       ` Andrew Morton
2008-05-30  6:12         ` Christoph Lameter
2008-05-30  7:08           ` Rusty Russell
2008-05-30 18:00             ` Christoph Lameter
2008-06-02  2:00               ` Rusty Russell
2008-06-04 18:18                 ` Mike Travis
2008-06-05 23:59                   ` Rusty Russell
2008-06-09 19:00                     ` Christoph Lameter
2008-06-09 23:27                       ` Rusty Russell
2008-06-09 23:54                         ` Christoph Lameter
2008-06-10  2:56                           ` Rusty Russell
2008-06-10  3:18                             ` Christoph Lameter
2008-06-11  0:03                               ` Rusty Russell
2008-06-11  0:15                                 ` Christoph Lameter
2008-06-09 23:09                   ` Christoph Lameter
2008-06-10 17:42                 ` Christoph Lameter
2008-06-11 11:10                   ` Rusty Russell
2008-06-11 23:39                     ` Christoph Lameter
2008-06-12  0:58                       ` Nick Piggin
2008-06-12  2:44                         ` Rusty Russell
2008-06-12  3:40                           ` Nick Piggin
2008-06-12  9:37                             ` Martin Peschke
2008-06-12 11:21                               ` Nick Piggin
2008-06-12 17:19                                 ` Christoph Lameter
2008-06-13  0:38                                   ` Rusty Russell
2008-06-13  2:27                                     ` Christoph Lameter
2008-06-15 10:33                                       ` Rusty Russell
2008-06-15 10:33                                         ` Rusty Russell
2008-06-16 14:52                                         ` Christoph Lameter
2008-06-17  0:24                                           ` Rusty Russell
2008-06-17  2:29                                             ` Christoph Lameter
2008-06-17 14:21                                             ` Mike Travis
2008-05-30  7:05         ` Rusty Russell
2008-05-30  6:32       ` Rusty Russell
2008-05-30  3:56 ` [patch 05/41] cpu alloc: Percpu_counter conversion Christoph Lameter
2008-05-30  6:47   ` Rusty Russell
2008-05-30 17:54     ` Christoph Lameter
2008-05-30  3:56 ` [patch 06/41] cpu alloc: crash_notes conversion Christoph Lameter
2008-05-30  3:56 ` [patch 07/41] cpu alloc: Workqueue conversion Christoph Lameter
2008-05-30  3:56 ` [patch 08/41] cpu alloc: ACPI cstate handling conversion Christoph Lameter
2008-05-30  3:56 ` [patch 09/41] cpu alloc: Genhd statistics conversion Christoph Lameter
2008-05-30  3:56 ` [patch 10/41] cpu alloc: blktrace conversion Christoph Lameter
2008-05-30  3:56 ` [patch 11/41] cpu alloc: SRCU cpu alloc conversion Christoph Lameter
2008-05-30  3:56 ` [patch 12/41] cpu alloc: XFS counter conversion Christoph Lameter
2008-05-30  3:56 ` [patch 13/41] cpu alloc: NFS statistics Christoph Lameter
2008-05-30  3:56 ` [patch 14/41] cpu alloc: Neigbour statistics Christoph Lameter
2008-05-30  3:56 ` [patch 15/41] cpu_alloc: Convert ip route statistics Christoph Lameter
2008-05-30  3:56 ` [patch 16/41] cpu alloc: Tcp statistics conversion Christoph Lameter
2008-05-30  3:56 ` [patch 17/41] cpu alloc: Convert scratches to cpu alloc Christoph Lameter
2008-05-30  3:56 ` [patch 18/41] cpu alloc: Dmaengine conversion Christoph Lameter
2008-05-30  3:56 ` [patch 19/41] cpu alloc: Convert loopback statistics Christoph Lameter
2008-05-30  3:56 ` [patch 20/41] cpu alloc: Veth conversion Christoph Lameter
2008-05-30  3:56 ` [patch 21/41] cpu alloc: Chelsio statistics conversion Christoph Lameter
2008-05-30  3:56 ` [patch 22/41] cpu alloc: Convert network sockets inuse counter Christoph Lameter
2008-05-30  3:56 ` [patch 23/41] cpu alloc: Use it for infiniband Christoph Lameter
2008-05-30  3:56 ` [patch 24/41] cpu alloc: Use in the crypto subsystem Christoph Lameter
2008-05-30  3:56 ` [patch 25/41] cpu alloc: scheduler: Convert cpuusage to cpu_alloc Christoph Lameter
2008-05-30  3:56 ` [patch 26/41] cpu alloc: Convert mib handling to cpu alloc Christoph Lameter
2008-05-30  6:47   ` Eric Dumazet
2008-05-30 18:01     ` Christoph Lameter
2008-05-30  3:56 ` [patch 27/41] cpu alloc: Remove the allocpercpu functionality Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  3:56 ` [patch 28/41] Module handling: Use CPU_xx ops to dynamically allocate counters Christoph Lameter
2008-05-30  3:56 ` [patch 29/41] x86_64: Use CPU ops for nmi alert counter Christoph Lameter
2008-05-30  3:56 ` [patch 30/41] Remove local_t support Christoph Lameter
2008-05-30  3:56 ` [patch 31/41] VM statistics: Use CPU ops Christoph Lameter
2008-05-30  3:56 ` Christoph Lameter [this message]
2008-05-30  3:56 ` [patch 33/41] cpu alloc: Remove slub fields Christoph Lameter
2008-05-30  3:56 ` [patch 34/41] cpu alloc: Page allocator conversion Christoph Lameter
2008-05-30  3:56 ` [patch 35/41] Support for CPU ops Christoph Lameter
2008-05-30  4:58   ` Andrew Morton
2008-05-30  5:18     ` Christoph Lameter
2008-05-30  3:56 ` [patch 36/41] Zero based percpu: Infrastructure to rebase the per cpu area to zero Christoph Lameter
2008-05-30  3:56 ` [patch 37/41] x86_64: Fold pda into per cpu area Christoph Lameter
2008-05-30  3:56 ` [patch 38/41] x86: Extend percpu ops to 64 bit Christoph Lameter
2008-05-30  3:56 ` [patch 39/41] x86: Replace cpu_pda() using percpu logic and get rid of _cpu_pda() Christoph Lameter
2008-05-30  3:57 ` [patch 40/41] x86: Replace xxx_pda() operations with x86_xx_percpu() Christoph Lameter
2008-05-30  3:57 ` [patch 41/41] x86_64: Support for cpu ops Christoph Lameter
2008-05-30  4:58 ` [patch 00/41] cpu alloc / cpu ops v3: Optimize per cpu access Andrew Morton
2008-05-30  5:03   ` Christoph Lameter
2008-05-30  5:21     ` Andrew Morton
2008-05-30  5:27       ` Christoph Lameter
2008-05-30  5:49         ` Andrew Morton
2008-05-30  6:16           ` Christoph Lameter
2008-05-30  6:51             ` KAMEZAWA Hiroyuki
2008-05-30 14:38         ` Mike Travis
2008-05-30 17:50           ` Christoph Lameter
2008-05-30 18:00             ` Matthew Wilcox
2008-05-30 18:12               ` Christoph Lameter
2008-05-30  6:01       ` Eric Dumazet
2008-05-30  6:16         ` Andrew Morton
2008-05-30  6:22           ` Christoph Lameter
2008-05-30  6:37             ` Andrew Morton
2008-05-30 11:32               ` Matthew Wilcox
2008-06-04 15:07   ` Mike Travis
2008-06-06  5:33     ` Eric Dumazet
2008-06-06 13:08       ` Mike Travis
2008-06-08  6:00       ` Rusty Russell
2008-06-09 18:44       ` Christoph Lameter
2008-06-09 19:11         ` Andi Kleen
2008-06-09 20:15           ` Eric Dumazet
2008-05-30  9:12 ` Peter Zijlstra
2008-05-30  9:18   ` Ingo Molnar
2008-05-30 18:11     ` Christoph Lameter
2008-05-30 18:40       ` Peter Zijlstra
2008-05-30 18:56         ` Christoph Lameter
2008-05-30 19:13           ` Peter Zijlstra
2008-06-01  3:25             ` Christoph Lameter
2008-06-01  8:19               ` Peter Zijlstra
2008-05-30 18:06   ` Christoph Lameter
2008-05-30 18:19     ` Peter Zijlstra
2008-05-30 18:26       ` Christoph Lameter
2008-05-30 18:47         ` Peter Zijlstra
2008-05-30 19:10           ` Christoph Lameter
2008-05-30 19:21             ` Peter Zijlstra
2008-05-30 19:35               ` Peter Zijlstra
2008-06-01  3:27               ` Christoph Lameter
2008-05-30 18:08   ` Christoph Lameter
2008-05-30 18:39     ` Peter Zijlstra
2008-05-30 18:51       ` Christoph Lameter
2008-05-30 19:00         ` Peter Zijlstra
2008-05-30 19:11           ` Christoph Lameter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20080530040021.800522644@sgi.com \
    --to=clameter@sgi.com \
    --cc=akpm@linux-foundation.org \
    --cc=dada1@cosmosbay.com \
    --cc=davem@davemloft.net \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=peterz@infradead.org \
    --cc=rusty@rustcorp.com.au \
    --cc=travis@sgi.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox