* [RFC/PATCH 1/2] slab: cpucache allocation cleanup
@ 2006-06-15 7:12 Pekka Enberg
2006-06-17 16:33 ` Christoph Lameter
0 siblings, 1 reply; 2+ messages in thread
From: Pekka Enberg @ 2006-06-15 7:12 UTC (permalink / raw)
To: christoph, manfred; +Cc: linux-kernel
From: Pekka Enberg <penberg@cs.helsinki.fi>
This patch cleans up allocation from the per-CPU cache by separating NUMA
and UMA paths.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---
mm/slab.c | 69 +++++++++++++++++++++++++++++++++++++------------------------
1 files changed, 42 insertions(+), 27 deletions(-)
8658c94d24e3b97f2ad747182811713c52dffdcf
diff --git a/mm/slab.c b/mm/slab.c
index f1b644e..579cff3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2855,19 +2855,12 @@ static void *cache_alloc_debugcheck_afte
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
-static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *__cache_alloc_cpucache(struct kmem_cache *cachep,
+ gfp_t flags)
{
void *objp;
struct array_cache *ac;
-#ifdef CONFIG_NUMA
- if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
- objp = alternate_node_alloc(cachep, flags);
- if (objp != NULL)
- return objp;
- }
-#endif
-
check_irq_off();
ac = cpu_cache_get(cachep);
if (likely(ac->avail)) {
@@ -2881,23 +2874,6 @@ static inline void *____cache_alloc(stru
return objp;
}
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
- gfp_t flags, void *caller)
-{
- unsigned long save_flags;
- void *objp;
-
- cache_alloc_debugcheck_before(cachep, flags);
-
- local_irq_save(save_flags);
- objp = ____cache_alloc(cachep, flags);
- local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- caller);
- prefetchw(objp);
- return objp;
-}
-
#ifdef CONFIG_NUMA
/*
* Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -2982,8 +2958,47 @@ must_grow:
done:
return obj;
}
+
+static inline void *cache_alloc_cpucache(struct kmem_cache *cache, gfp_t flags)
+{
+ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+ void *objp = alternate_node_alloc(cache, flags);
+ if (objp != NULL)
+ return objp;
+ }
+ return __cache_alloc_cpucache(cache, flags);
+}
+
+#else
+
+/*
+ * On UMA, we always allocate directly from the per-CPU cache.
+ */
+
+static inline void *cache_alloc_cpucache(struct kmem_cache *cache, gfp_t flags)
+{
+ return __cache_alloc_cpucache(cache, flags);
+}
+
#endif
+static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags, void *caller)
+{
+ unsigned long save_flags;
+ void *objp;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+
+ local_irq_save(save_flags);
+ objp = cache_alloc_cpucache(cachep, flags);
+ local_irq_restore(save_flags);
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp,
+ caller);
+ prefetchw(objp);
+ return objp;
+}
+
/*
* Caller needs to acquire correct kmem_list's list_lock
*/
@@ -3229,7 +3244,7 @@ void *kmem_cache_alloc_node(struct kmem_
if (nodeid == -1 || nodeid == numa_node_id() ||
!cachep->nodelists[nodeid])
- ptr = ____cache_alloc(cachep, flags);
+ ptr = cache_alloc_cpucache(cachep, flags);
else
ptr = __cache_alloc_node(cachep, flags, nodeid);
local_irq_restore(save_flags);
--
1.1.3
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [RFC/PATCH 1/2] slab: cpucache allocation cleanup
2006-06-15 7:12 [RFC/PATCH 1/2] slab: cpucache allocation cleanup Pekka Enberg
@ 2006-06-17 16:33 ` Christoph Lameter
0 siblings, 0 replies; 2+ messages in thread
From: Christoph Lameter @ 2006-06-17 16:33 UTC (permalink / raw)
To: Pekka Enberg; +Cc: manfred, linux-kernel
On Thu, 15 Jun 2006, Pekka Enberg wrote:
> -static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> +static __always_inline void *__cache_alloc_cpucache(struct kmem_cache *cachep,
The new name is confusing because __cache_alloc_cpucache suggests that we
are only allocating from the cpucache and that this be something special.
However, we always allocate from the cpucache for local allocations and we
refill the cpucache in the __cpucache function from the shared cache and
the per node lists. So we do much more there.
Maybe call this __cache_alloc_local ?
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2006-06-17 16:34 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-06-15 7:12 [RFC/PATCH 1/2] slab: cpucache allocation cleanup Pekka Enberg
2006-06-17 16:33 ` Christoph Lameter
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox