* [Slub cleanup5 0/3] SLUB: Cleanups V5
@ 2010-09-28 13:10 Christoph Lameter
2010-09-28 13:10 ` [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA Christoph Lameter
` (3 more replies)
0 siblings, 4 replies; 12+ messages in thread
From: Christoph Lameter @ 2010-09-28 13:10 UTC (permalink / raw)
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
A couple more cleanups (patches against Pekka's for-next tree, rebased to today's upstream)
1 Avoid #ifdefs by making data structures similar under SMP and NUMA
2 Avoid ? : by passing the redzone markers directly to the functions checking objects
3 Extract common code for removal of pages from partial list into a single function
* [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA
2010-09-28 13:10 [Slub cleanup5 0/3] SLUB: Cleanups V5 Christoph Lameter
@ 2010-09-28 13:10 ` Christoph Lameter
2010-09-28 14:34 ` Pekka Enberg
2010-09-29 0:33 ` David Rientjes
2010-09-28 13:10 ` [Slub cleanup5 2/3] SLUB: Pass active and inactive redzone flags instead of boolean to debug functions Christoph Lameter
` (2 subsequent siblings)
3 siblings, 2 replies; 12+ messages in thread
From: Christoph Lameter @ 2010-09-28 13:10 UTC (permalink / raw)
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
[-- Attachment #1: drop_smp --]
[-- Type: text/plain, Size: 4191 bytes --]
Reduce the #ifdefs and simplify bootstrap by making SMP and NUMA as much alike
as possible. This means that there will be an additional indirection to get to
the kmem_cache_node field under SMP.
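A minimal sketch of the resulting access path (not part of the diff below): with CONFIG_NUMA disabled, MAX_NUMNODES is 1, so the now-shared node[] array holds a single pointer and the extra cost on UMA is one dereference.

	/*
	 * Sketch only, assuming !CONFIG_NUMA: MAX_NUMNODES is 1, so the
	 * unified node[] array degenerates to a single entry and get_node()
	 * performs the same trivial lookup on UMA and NUMA.
	 */
	static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
	{
		return s->node[node];	/* node is always 0 on UMA */
	}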
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/slub_def.h | 5 +----
mm/slub.c | 39 +--------------------------------------
2 files changed, 2 insertions(+), 42 deletions(-)
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h 2010-09-28 07:54:31.000000000 -0500
+++ linux-2.6/include/linux/slub_def.h 2010-09-28 07:56:37.000000000 -0500
@@ -96,11 +96,8 @@ struct kmem_cache {
* Defragmentation by allocating from a remote node.
*/
int remote_node_defrag_ratio;
- struct kmem_cache_node *node[MAX_NUMNODES];
-#else
- /* Avoid an extra cache line for UP */
- struct kmem_cache_node local_node;
#endif
+ struct kmem_cache_node *node[MAX_NUMNODES];
};
/*
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2010-09-28 07:54:33.000000000 -0500
+++ linux-2.6/mm/slub.c 2010-09-28 07:56:37.000000000 -0500
@@ -233,11 +233,7 @@ int slab_is_available(void)
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
-#ifdef CONFIG_NUMA
return s->node[node];
-#else
- return &s->local_node;
-#endif
}
/* Verify that a pointer has an address that is valid within a slab page */
@@ -871,7 +867,7 @@ static inline void inc_slabs_node(struct
* dilemma by deferring the increment of the count during
* bootstrap (see early_kmem_cache_node_alloc).
*/
- if (!NUMA_BUILD || n) {
+ if (n) {
atomic_long_inc(&n->nr_slabs);
atomic_long_add(objects, &n->total_objects);
}
@@ -2112,7 +2108,6 @@ static inline int alloc_kmem_cache_cpus(
return s->cpu_slab != NULL;
}
-#ifdef CONFIG_NUMA
static struct kmem_cache *kmem_cache_node;
/*
@@ -2202,17 +2197,6 @@ static int init_kmem_cache_nodes(struct
}
return 1;
}
-#else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
-{
-}
-
-static int init_kmem_cache_nodes(struct kmem_cache *s)
-{
- init_kmem_cache_node(&s->local_node, s);
- return 1;
-}
-#endif
static void set_min_partial(struct kmem_cache *s, unsigned long min)
{
@@ -3023,8 +3007,6 @@ void __init kmem_cache_init(void)
int caches = 0;
struct kmem_cache *temp_kmem_cache;
int order;
-
-#ifdef CONFIG_NUMA
struct kmem_cache *temp_kmem_cache_node;
unsigned long kmalloc_size;
@@ -3048,12 +3030,6 @@ void __init kmem_cache_init(void)
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
-#else
- /* Allocate a single kmem_cache from the page allocator */
- kmem_size = sizeof(struct kmem_cache);
- order = get_order(kmem_size);
- kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
-#endif
/* Able to allocate the per node structures */
slab_state = PARTIAL;
@@ -3064,7 +3040,6 @@ void __init kmem_cache_init(void)
kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
memcpy(kmem_cache, temp_kmem_cache, kmem_size);
-#ifdef CONFIG_NUMA
/*
* Allocate kmem_cache_node properly from the kmem_cache slab.
* kmem_cache_node is separately allocated so no need to
@@ -3078,18 +3053,6 @@ void __init kmem_cache_init(void)
kmem_cache_bootstrap_fixup(kmem_cache_node);
caches++;
-#else
- /*
- * kmem_cache has kmem_cache_node embedded and we moved it!
- * Update the list heads
- */
- INIT_LIST_HEAD(&kmem_cache->local_node.partial);
- list_splice(&temp_kmem_cache->local_node.partial, &kmem_cache->local_node.partial);
-#ifdef CONFIG_SLUB_DEBUG
- INIT_LIST_HEAD(&kmem_cache->local_node.full);
- list_splice(&temp_kmem_cache->local_node.full, &kmem_cache->local_node.full);
-#endif
-#endif
kmem_cache_bootstrap_fixup(kmem_cache);
caches++;
/* Free temporary boot structure */
* [Slub cleanup5 2/3] SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
2010-09-28 13:10 [Slub cleanup5 0/3] SLUB: Cleanups V5 Christoph Lameter
2010-09-28 13:10 ` [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA Christoph Lameter
@ 2010-09-28 13:10 ` Christoph Lameter
2010-09-29 0:38 ` David Rientjes
2010-09-28 13:10 ` [Slub cleanup5 3/3] slub: extract common code to remove objects from partial list without locking Christoph Lameter
2010-10-02 8:50 ` [Slub cleanup5 0/3] SLUB: Cleanups V5 Pekka Enberg
3 siblings, 1 reply; 12+ messages in thread
From: Christoph Lameter @ 2010-09-28 13:10 UTC (permalink / raw)
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
[-- Attachment #1: change_debug --]
[-- Type: text/plain, Size: 5034 bytes --]
Pass the actual values used for inactive and active redzoning to the
functions that check the objects. This avoids a lot of the ?: conditionals
used to look up the values in the functions.
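For context (not part of the diff), the two values now passed in are the redzone byte patterns from include/linux/poison.h, reproduced here for reference:

	#define SLUB_RED_INACTIVE	0xbb	/* redzone while the object is free */
	#define SLUB_RED_ACTIVE		0xcc	/* redzone while the object is allocated */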
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 37 +++++++++++++++++--------------------
1 file changed, 17 insertions(+), 20 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2010-09-28 08:02:26.000000000 -0500
+++ linux-2.6/mm/slub.c 2010-09-28 08:04:04.000000000 -0500
@@ -490,7 +490,8 @@ static void slab_err(struct kmem_cache *
dump_stack();
}
-static void init_object(struct kmem_cache *s, void *object, int active)
+
+static void init_object(struct kmem_cache *s, void *object, u8 val)
{
u8 *p = object;
@@ -500,9 +501,7 @@ static void init_object(struct kmem_cach
}
if (s->flags & SLAB_RED_ZONE)
- memset(p + s->objsize,
- active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
- s->inuse - s->objsize);
+ memset(p + s->objsize, val, s->inuse - s->objsize);
}
static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
@@ -637,17 +636,14 @@ static int slab_pad_check(struct kmem_ca
}
static int check_object(struct kmem_cache *s, struct page *page,
- void *object, int active)
+ void *object, u8 val)
{
u8 *p = object;
u8 *endobject = object + s->objsize;
if (s->flags & SLAB_RED_ZONE) {
- unsigned int red =
- active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
-
if (!check_bytes_and_report(s, page, object, "Redzone",
- endobject, red, s->inuse - s->objsize))
+ endobject, val, s->inuse - s->objsize))
return 0;
} else {
if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
@@ -657,7 +653,7 @@ static int check_object(struct kmem_cach
}
if (s->flags & SLAB_POISON) {
- if (!active && (s->flags & __OBJECT_POISON) &&
+ if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p,
POISON_FREE, s->objsize - 1) ||
!check_bytes_and_report(s, page, p, "Poison",
@@ -669,7 +665,7 @@ static int check_object(struct kmem_cach
check_pad_bytes(s, page, p);
}
- if (!s->offset && active)
+ if (!s->offset && val == SLUB_RED_ACTIVE)
/*
* Object and freepointer overlap. Cannot check
* freepointer while object is allocated.
@@ -887,7 +883,7 @@ static void setup_object_debug(struct km
if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
return;
- init_object(s, object, 0);
+ init_object(s, object, SLUB_RED_INACTIVE);
init_tracking(s, object);
}
@@ -907,14 +903,14 @@ static noinline int alloc_debug_processi
goto bad;
}
- if (!check_object(s, page, object, 0))
+ if (!check_object(s, page, object, SLUB_RED_INACTIVE))
goto bad;
/* Success perform special debug activities for allocs */
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_ALLOC, addr);
trace(s, page, object, 1);
- init_object(s, object, 1);
+ init_object(s, object, SLUB_RED_ACTIVE);
return 1;
bad:
@@ -947,7 +943,7 @@ static noinline int free_debug_processin
goto fail;
}
- if (!check_object(s, page, object, 1))
+ if (!check_object(s, page, object, SLUB_RED_ACTIVE))
return 0;
if (unlikely(s != page->slab)) {
@@ -971,7 +967,7 @@ static noinline int free_debug_processin
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0);
- init_object(s, object, 0);
+ init_object(s, object, SLUB_RED_INACTIVE);
return 1;
fail:
@@ -1075,8 +1071,9 @@ static inline int free_debug_processing(
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
- void *object, int active) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+ void *object, u8 val) { return 1; }
+static inline void add_full(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name,
void (*ctor)(void *))
@@ -1235,7 +1232,7 @@ static void __free_slab(struct kmem_cach
slab_pad_check(s, page);
for_each_object(p, s, page_address(page),
page->objects)
- check_object(s, page, p, 0);
+ check_object(s, page, p, SLUB_RED_INACTIVE);
}
kmemcheck_free_shadow(page, compound_order(page));
@@ -2143,7 +2140,7 @@ static void early_kmem_cache_node_alloc(
page->inuse++;
kmem_cache_node->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
- init_object(kmem_cache_node, n, 1);
+ init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
init_kmem_cache_node(n, kmem_cache_node);
* [Slub cleanup5 3/3] slub: extract common code to remove objects from partial list without locking
2010-09-28 13:10 [Slub cleanup5 0/3] SLUB: Cleanups V5 Christoph Lameter
2010-09-28 13:10 ` [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA Christoph Lameter
2010-09-28 13:10 ` [Slub cleanup5 2/3] SLUB: Pass active and inactive redzone flags instead of boolean to debug functions Christoph Lameter
@ 2010-09-28 13:10 ` Christoph Lameter
2010-09-29 0:38 ` David Rientjes
2010-10-02 8:50 ` [Slub cleanup5 0/3] SLUB: Cleanups V5 Pekka Enberg
3 siblings, 1 reply; 12+ messages in thread
From: Christoph Lameter @ 2010-09-28 13:10 UTC (permalink / raw)
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
[-- Attachment #1: cleanup_remove_partial --]
[-- Type: text/plain, Size: 2174 bytes --]
There are a couple of places where we repeat the same statements when removing
a page from the partial list. Consolidate them into __remove_partial().
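A minimal sketch of the resulting helper (matching the hunk below); every caller already holds n->list_lock, which is why the helper itself takes no lock:

	/* Caller must hold n->list_lock. */
	static inline void __remove_partial(struct kmem_cache_node *n,
					struct page *page)
	{
		list_del(&page->lru);
		n->nr_partial--;
	}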
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2010-09-28 08:05:49.000000000 -0500
+++ linux-2.6/mm/slub.c 2010-09-28 08:05:50.000000000 -0500
@@ -1312,13 +1312,19 @@ static void add_partial(struct kmem_cach
spin_unlock(&n->list_lock);
}
+static inline void __remove_partial(struct kmem_cache_node *n,
+ struct page *page)
+{
+ list_del(&page->lru);
+ n->nr_partial--;
+}
+
static void remove_partial(struct kmem_cache *s, struct page *page)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
spin_lock(&n->list_lock);
- list_del(&page->lru);
- n->nr_partial--;
+ __remove_partial(n, page);
spin_unlock(&n->list_lock);
}
@@ -1331,8 +1337,7 @@ static inline int lock_and_freeze_slab(s
struct page *page)
{
if (slab_trylock(page)) {
- list_del(&page->lru);
- n->nr_partial--;
+ __remove_partial(n, page);
__SetPageSlubFrozen(page);
return 1;
}
@@ -2464,9 +2469,8 @@ static void free_partial(struct kmem_cac
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- list_del(&page->lru);
+ __remove_partial(n, page);
discard_slab(s, page);
- n->nr_partial--;
} else {
list_slab_objects(s, page,
"Objects remaining on kmem_cache_close()");
@@ -2824,8 +2828,7 @@ int kmem_cache_shrink(struct kmem_cache
* may have freed the last object and be
* waiting to release the slab.
*/
- list_del(&page->lru);
- n->nr_partial--;
+ __remove_partial(n, page);
slab_unlock(page);
discard_slab(s, page);
} else {
* Re: [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA
2010-09-28 13:10 ` [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA Christoph Lameter
@ 2010-09-28 14:34 ` Pekka Enberg
2010-09-28 14:43 ` Christoph Lameter
2010-09-29 0:33 ` David Rientjes
1 sibling, 1 reply; 12+ messages in thread
From: Pekka Enberg @ 2010-09-28 14:34 UTC (permalink / raw)
To: Christoph Lameter; +Cc: Pekka Enberg, linux-mm, David Rientjes
On Tue, Sep 28, 2010 at 4:10 PM, Christoph Lameter <cl@linux.com> wrote:
> Reduce the #ifdefs and simplify bootstrap by making SMP and NUMA as much alike
> as possible. This means that there will be an additional indirection to get to
> the kmem_cache_node field under SMP.
>
> Signed-off-by: Christoph Lameter <cl@linux.com>
I'm slightly confused. What does SMP have to do with this? Isn't this
simply a NUMA vs UMA thing, regardless of whether it's UP or SMP?
* Re: [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA
2010-09-28 14:34 ` Pekka Enberg
@ 2010-09-28 14:43 ` Christoph Lameter
0 siblings, 0 replies; 12+ messages in thread
From: Christoph Lameter @ 2010-09-28 14:43 UTC (permalink / raw)
To: Pekka Enberg; +Cc: Pekka Enberg, linux-mm, David Rientjes
On Tue, 28 Sep 2010, Pekka Enberg wrote:
> On Tue, Sep 28, 2010 at 4:10 PM, Christoph Lameter <cl@linux.com> wrote:
> > Reduce the #ifdefs and simplify bootstrap by making SMP and NUMA as much alike
> > as possible. This means that there will be an additional indirection to get to
> > the kmem_cache_node field under SMP.
> >
> > Signed-off-by: Christoph Lameter <cl@linux.com>
>
> I'm slightly confused. What does SMP have to do with this? Isn't this
> simply a NUMA vs UMA thing, regardless of whether it's UP or SMP?
Right. But then UMA / UP is a rare config that I have only ever seen
working on IA64.
* Re: [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA
2010-09-28 13:10 ` [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA Christoph Lameter
2010-09-28 14:34 ` Pekka Enberg
@ 2010-09-29 0:33 ` David Rientjes
1 sibling, 0 replies; 12+ messages in thread
From: David Rientjes @ 2010-09-29 0:33 UTC (permalink / raw)
To: Christoph Lameter; +Cc: Pekka Enberg, linux-mm
On Tue, 28 Sep 2010, Christoph Lameter wrote:
> Reduce the #ifdefs and simplify bootstrap by making SMP and NUMA as much alike
> as possible. This means that there will be an additional indirection to get to
> the kmem_cache_node field under SMP.
>
> Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Nice cleanup!
* Re: [Slub cleanup5 2/3] SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
2010-09-28 13:10 ` [Slub cleanup5 2/3] SLUB: Pass active and inactive redzone flags instead of boolean to debug functions Christoph Lameter
@ 2010-09-29 0:38 ` David Rientjes
2010-09-29 12:15 ` Christoph Lameter
0 siblings, 1 reply; 12+ messages in thread
From: David Rientjes @ 2010-09-29 0:38 UTC (permalink / raw)
To: Christoph Lameter; +Cc: Pekka Enberg, linux-mm
On Tue, 28 Sep 2010, Christoph Lameter wrote:
> @@ -1075,8 +1071,9 @@ static inline int free_debug_processing(
> static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
> { return 1; }
> static inline int check_object(struct kmem_cache *s, struct page *page,
> - void *object, int active) { return 1; }
> -static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
> + void *object, u8 val) { return 1; }
> +static inline void add_full(struct kmem_cache *s,
> + struct kmem_cache_node *n, struct page *page) {}
> static inline unsigned long kmem_cache_flags(unsigned long objsize,
> unsigned long flags, const char *name,
> void (*ctor)(void *))
Looks like add_full() got changed there for CONFIG_SLUB_DEBUG=n
unintentionally.
I'm wondering if we should make that option configurable regardless of
CONFIG_EMBEDDED; it's a large savings if you're never going to be doing
any debugging. On Pekka's for-next:
text data bss dec hex filename
25817 1473 288 27578 6bba slub.o.debug
10742 232 256 11230 2bde slub.o.nodebug
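Something along these lines would do it, a sketch only, assuming the entry in init/Kconfig is still prompt-gated on EMBEDDED:

	# Sketch: same entry as today, minus the "if EMBEDDED" guard on the prompt
	config SLUB_DEBUG
		default y
		bool "Enable SLUB debugging support"
		depends on SLUB && SYSFS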
* Re: [Slub cleanup5 3/3] slub: extract common code to remove objects from partial list without locking
2010-09-28 13:10 ` [Slub cleanup5 3/3] slub: extract common code to remove objects from partial list without locking Christoph Lameter
@ 2010-09-29 0:38 ` David Rientjes
0 siblings, 0 replies; 12+ messages in thread
From: David Rientjes @ 2010-09-29 0:38 UTC (permalink / raw)
To: Christoph Lameter; +Cc: Pekka Enberg, linux-mm
On Tue, 28 Sep 2010, Christoph Lameter wrote:
> There are a couple of places where we repeat the same statements when removing
> a page from the partial list. Consolidate them into __remove_partial().
>
> Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
* Re: [Slub cleanup5 2/3] SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
2010-09-29 0:38 ` David Rientjes
@ 2010-09-29 12:15 ` Christoph Lameter
2010-09-29 20:01 ` David Rientjes
0 siblings, 1 reply; 12+ messages in thread
From: Christoph Lameter @ 2010-09-29 12:15 UTC (permalink / raw)
To: David Rientjes; +Cc: Pekka Enberg, linux-mm
On Tue, 28 Sep 2010, David Rientjes wrote:
> I'm wondering if we should make that option configurable regardless of
> CONFIG_EMBEDDED; it's a large savings if you're never going to be doing
> any debugging. On Pekka's for-next:
>
> text data bss dec hex filename
> 25817 1473 288 27578 6bba slub.o.debug
> 10742 232 256 11230 2bde slub.o.nodebug
We know. On the other hand it is essential to have that capability in
enterprise kernels so that you can just reboot with full debugging and
then get a detailed report on what went wrong. Slab diagnostics and
resiliency are critical for other kernel components.
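For example, with CONFIG_SLUB_DEBUG built in you can turn the checks on from the kernel command line without rebuilding (flag letters as documented in Documentation/vm/slub.txt):

	slub_debug		# enable the default debug options for all caches
	slub_debug=FZPU		# sanity checks, red zoning, poisoning, user tracking
	slub_debug=,dentry	# default debug options for the dentry cache only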
Updated patch:
Subject: SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
Pass the actual values used for inactive and active redzoning to the
functions that check the objects. This avoids a lot of the ?: conditionals
used to look up the values in the functions.
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 33 ++++++++++++++-------------------
1 file changed, 14 insertions(+), 19 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2010-09-29 07:11:38.000000000 -0500
+++ linux-2.6/mm/slub.c 2010-09-29 07:12:26.000000000 -0500
@@ -490,7 +490,7 @@ static void slab_err(struct kmem_cache *
dump_stack();
}
-static void init_object(struct kmem_cache *s, void *object, int active)
+static void init_object(struct kmem_cache *s, void *object, u8 val)
{
u8 *p = object;
@@ -500,9 +500,7 @@ static void init_object(struct kmem_cach
}
if (s->flags & SLAB_RED_ZONE)
- memset(p + s->objsize,
- active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
- s->inuse - s->objsize);
+ memset(p + s->objsize, val, s->inuse - s->objsize);
}
static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
@@ -637,17 +635,14 @@ static int slab_pad_check(struct kmem_ca
}
static int check_object(struct kmem_cache *s, struct page *page,
- void *object, int active)
+ void *object, u8 val)
{
u8 *p = object;
u8 *endobject = object + s->objsize;
if (s->flags & SLAB_RED_ZONE) {
- unsigned int red =
- active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
-
if (!check_bytes_and_report(s, page, object, "Redzone",
- endobject, red, s->inuse - s->objsize))
+ endobject, val, s->inuse - s->objsize))
return 0;
} else {
if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
@@ -657,7 +652,7 @@ static int check_object(struct kmem_cach
}
if (s->flags & SLAB_POISON) {
- if (!active && (s->flags & __OBJECT_POISON) &&
+ if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p,
POISON_FREE, s->objsize - 1) ||
!check_bytes_and_report(s, page, p, "Poison",
@@ -669,7 +664,7 @@ static int check_object(struct kmem_cach
check_pad_bytes(s, page, p);
}
- if (!s->offset && active)
+ if (!s->offset && val == SLUB_RED_ACTIVE)
/*
* Object and freepointer overlap. Cannot check
* freepointer while object is allocated.
@@ -887,7 +882,7 @@ static void setup_object_debug(struct km
if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
return;
- init_object(s, object, 0);
+ init_object(s, object, SLUB_RED_INACTIVE);
init_tracking(s, object);
}
@@ -907,14 +902,14 @@ static noinline int alloc_debug_processi
goto bad;
}
- if (!check_object(s, page, object, 0))
+ if (!check_object(s, page, object, SLUB_RED_INACTIVE))
goto bad;
/* Success perform special debug activities for allocs */
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_ALLOC, addr);
trace(s, page, object, 1);
- init_object(s, object, 1);
+ init_object(s, object, SLUB_RED_ACTIVE);
return 1;
bad:
@@ -947,7 +942,7 @@ static noinline int free_debug_processin
goto fail;
}
- if (!check_object(s, page, object, 1))
+ if (!check_object(s, page, object, SLUB_RED_ACTIVE))
return 0;
if (unlikely(s != page->slab)) {
@@ -971,7 +966,7 @@ static noinline int free_debug_processin
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0);
- init_object(s, object, 0);
+ init_object(s, object, SLUB_RED_INACTIVE);
return 1;
fail:
@@ -1075,7 +1070,7 @@ static inline int free_debug_processing(
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
- void *object, int active) { return 1; }
+ void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name,
@@ -1235,7 +1230,7 @@ static void __free_slab(struct kmem_cach
slab_pad_check(s, page);
for_each_object(p, s, page_address(page),
page->objects)
- check_object(s, page, p, 0);
+ check_object(s, page, p, SLUB_RED_INACTIVE);
}
kmemcheck_free_shadow(page, compound_order(page));
@@ -2143,7 +2138,7 @@ static void early_kmem_cache_node_alloc(
page->inuse++;
kmem_cache_node->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
- init_object(kmem_cache_node, n, 1);
+ init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
init_kmem_cache_node(n, kmem_cache_node);
* Re: [Slub cleanup5 2/3] SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
2010-09-29 12:15 ` Christoph Lameter
@ 2010-09-29 20:01 ` David Rientjes
0 siblings, 0 replies; 12+ messages in thread
From: David Rientjes @ 2010-09-29 20:01 UTC (permalink / raw)
To: Christoph Lameter; +Cc: Pekka Enberg, linux-mm
On Wed, 29 Sep 2010, Christoph Lameter wrote:
> Updated patch:
>
> Subject: SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
>
> Pass the actual values used for inactive and active redzoning to the
> functions that check the objects. This avoids a lot of the ?: conditionals
> used to look up the values in the functions.
>
> Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
* Re: [Slub cleanup5 0/3] SLUB: Cleanups V5
2010-09-28 13:10 [Slub cleanup5 0/3] SLUB: Cleanups V5 Christoph Lameter
` (2 preceding siblings ...)
2010-09-28 13:10 ` [Slub cleanup5 3/3] slub: extract common code to remove objects from partial list without locking Christoph Lameter
@ 2010-10-02 8:50 ` Pekka Enberg
3 siblings, 0 replies; 12+ messages in thread
From: Pekka Enberg @ 2010-10-02 8:50 UTC (permalink / raw)
To: Christoph Lameter; +Cc: linux-mm, David Rientjes
On 28.9.2010 16.10, Christoph Lameter wrote:
> A couple more cleanups (patches against Pekka's for-next tree, rebased to today's upstream)
>
> 1 Avoid #ifdefs by making data structures similar under SMP and NUMA
>
> 2 Avoid ? : by passing the redzone markers directly to the functions checking objects
>
> 3 Extract common code for removal of pages from partial list into a single function
The series has been applied. Thanks!
End of thread (newest message: 2010-10-02 8:50 UTC)
Thread overview: 12+ messages
2010-09-28 13:10 [Slub cleanup5 0/3] SLUB: Cleanups V5 Christoph Lameter
2010-09-28 13:10 ` [Slub cleanup5 1/3] slub: reduce differences between SMP and NUMA Christoph Lameter
2010-09-28 14:34 ` Pekka Enberg
2010-09-28 14:43 ` Christoph Lameter
2010-09-29 0:33 ` David Rientjes
2010-09-28 13:10 ` [Slub cleanup5 2/3] SLUB: Pass active and inactive redzone flags instead of boolean to debug functions Christoph Lameter
2010-09-29 0:38 ` David Rientjes
2010-09-29 12:15 ` Christoph Lameter
2010-09-29 20:01 ` David Rientjes
2010-09-28 13:10 ` [Slub cleanup5 3/3] slub: extract common code to remove objects from partial list without locking Christoph Lameter
2010-09-29 0:38 ` David Rientjes
2010-10-02 8:50 ` [Slub cleanup5 0/3] SLUB: Cleanups V5 Pekka Enberg