* Common 01/22] [slob] Define page struct fields used in mm_types.h
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-31 21:14 ` David Rientjes
2012-05-23 20:34 ` Common 03/22] [slob] Remove various small accessors Christoph Lameter
` (18 subsequent siblings)
19 siblings, 1 reply; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: slob_use_page_struct --]
[-- Type: text/plain, Size: 8683 bytes --]
Define the fields used by slob in mm_types.h and use struct page instead
of struct slob_page in slob. This cleans up numerous typecasts in slob.c and
makes readers aware of slob's use of page struct fields.
[Also cleans up some bitrot in slob.c. The page struct field layout
in slob.c is an old layout and does not match the one in mm_types.h]
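As a quick illustration (a simplified sketch, not the kernel's real struct page;
see the mm_types.h hunk below for the actual layout), the metadata slob needs
now lives in named struct page fields, so no private type and no casts are
required:

	struct list_head { struct list_head *next, *prev; };

	struct page {			/* simplified sketch only */
		unsigned long flags;
		void *freelist;		/* first free slob_t in the page (was sp->free) */
		int units;		/* free units left in the page */
		struct list_head list;	/* linkage on the free_slob_* lists */
	};

	static void *first_free_block(struct page *sp)
	{
		return sp->freelist;	/* no (struct slob_page *) cast needed */
	}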
Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/mm_types.h | 7 ++-
mm/slob.c | 95 ++++++++++++++++++-----------------------------
2 files changed, 42 insertions(+), 60 deletions(-)
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-22 06:27:51.343836706 -0500
+++ linux-2.6/mm/slob.c 2012-05-22 06:32:17.727831192 -0500
@@ -92,33 +92,12 @@ struct slob_block {
typedef struct slob_block slob_t;
/*
- * We use struct page fields to manage some slob allocation aspects,
- * however to avoid the horrible mess in include/linux/mm_types.h, we'll
- * just define our own struct page type variant here.
- */
-struct slob_page {
- union {
- struct {
- unsigned long flags; /* mandatory */
- atomic_t _count; /* mandatory */
- slobidx_t units; /* free units left in page */
- unsigned long pad[2];
- slob_t *free; /* first free slob_t in page */
- struct list_head list; /* linked list of free pages */
- };
- struct page page;
- };
-};
-static inline void struct_slob_page_wrong_size(void)
-{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
-
-/*
* free_slob_page: call before a slob_page is returned to the page allocator.
*/
-static inline void free_slob_page(struct slob_page *sp)
+static inline void free_slob_page(struct page *sp)
{
- reset_page_mapcount(&sp->page);
- sp->page.mapping = NULL;
+ reset_page_mapcount(sp);
+ sp->mapping = NULL;
}
/*
@@ -133,44 +112,44 @@ static LIST_HEAD(free_slob_large);
/*
* is_slob_page: True for all slob pages (false for bigblock pages)
*/
-static inline int is_slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct page *sp)
{
- return PageSlab((struct page *)sp);
+ return PageSlab(sp);
}
-static inline void set_slob_page(struct slob_page *sp)
+static inline void set_slob_page(struct page *sp)
{
- __SetPageSlab((struct page *)sp);
+ __SetPageSlab(sp);
}
-static inline void clear_slob_page(struct slob_page *sp)
+static inline void clear_slob_page(struct page *sp)
{
- __ClearPageSlab((struct page *)sp);
+ __ClearPageSlab(sp);
}
-static inline struct slob_page *slob_page(const void *addr)
+static inline struct page *slob_page(const void *addr)
{
- return (struct slob_page *)virt_to_page(addr);
+ return virt_to_page(addr);
}
/*
* slob_page_free: true for pages on free_slob_pages list.
*/
-static inline int slob_page_free(struct slob_page *sp)
+static inline int slob_page_free(struct page *sp)
{
- return PageSlobFree((struct page *)sp);
+ return PageSlobFree(sp);
}
-static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
+static void set_slob_page_free(struct page *sp, struct list_head *list)
{
list_add(&sp->list, list);
- __SetPageSlobFree((struct page *)sp);
+ __SetPageSlobFree(sp);
}
-static inline void clear_slob_page_free(struct slob_page *sp)
+static inline void clear_slob_page_free(struct page *sp)
{
list_del(&sp->list);
- __ClearPageSlobFree((struct page *)sp);
+ __ClearPageSlobFree(sp);
}
#define SLOB_UNIT sizeof(slob_t)
@@ -267,12 +246,12 @@ static void slob_free_pages(void *b, int
/*
* Allocate a slob block within a given slob_page sp.
*/
-static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
- for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
+ for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
slobidx_t avail = slob_units(cur);
if (align) {
@@ -296,12 +275,12 @@ static void *slob_page_alloc(struct slob
if (prev)
set_slob(prev, slob_units(prev), next);
else
- sp->free = next;
+ sp->freelist = next;
} else { /* fragment */
if (prev)
set_slob(prev, slob_units(prev), cur + units);
else
- sp->free = cur + units;
+ sp->freelist = cur + units;
set_slob(cur + units, avail - units, next);
}
@@ -320,7 +299,7 @@ static void *slob_page_alloc(struct slob
*/
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
- struct slob_page *sp;
+ struct page *sp;
struct list_head *prev;
struct list_head *slob_list;
slob_t *b = NULL;
@@ -341,7 +320,7 @@ static void *slob_alloc(size_t size, gfp
* If there's a node specification, search for a partial
* page with a matching node id in the freelist.
*/
- if (node != -1 && page_to_nid(&sp->page) != node)
+ if (node != -1 && page_to_nid(sp) != node)
continue;
#endif
/* Enough room on this page? */
@@ -374,7 +353,7 @@ static void *slob_alloc(size_t size, gfp
spin_lock_irqsave(&slob_lock, flags);
sp->units = SLOB_UNITS(PAGE_SIZE);
- sp->free = b;
+ sp->freelist = b;
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
@@ -392,7 +371,7 @@ static void *slob_alloc(size_t size, gfp
*/
static void slob_free(void *block, int size)
{
- struct slob_page *sp;
+ struct page *sp;
slob_t *prev, *next, *b = (slob_t *)block;
slobidx_t units;
unsigned long flags;
@@ -421,7 +400,7 @@ static void slob_free(void *block, int s
if (!slob_page_free(sp)) {
/* This slob page is about to become partially free. Easy! */
sp->units = units;
- sp->free = b;
+ sp->freelist = b;
set_slob(b, units,
(void *)((unsigned long)(b +
SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
@@ -441,15 +420,15 @@ static void slob_free(void *block, int s
*/
sp->units += units;
- if (b < sp->free) {
- if (b + units == sp->free) {
- units += slob_units(sp->free);
- sp->free = slob_next(sp->free);
+ if (b < (slob_t *)sp->freelist) {
+ if (b + units == sp->freelist) {
+ units += slob_units(sp->freelist);
+ sp->freelist = slob_next(sp->freelist);
}
- set_slob(b, units, sp->free);
- sp->free = b;
+ set_slob(b, units, sp->freelist);
+ sp->freelist = b;
} else {
- prev = sp->free;
+ prev = sp->freelist;
next = slob_next(prev);
while (b > next) {
prev = next;
@@ -522,7 +501,7 @@ EXPORT_SYMBOL(__kmalloc_node);
void kfree(const void *block)
{
- struct slob_page *sp;
+ struct page *sp;
trace_kfree(_RET_IP_, block);
@@ -536,14 +515,14 @@ void kfree(const void *block)
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
} else
- put_page(&sp->page);
+ put_page(sp);
}
EXPORT_SYMBOL(kfree);
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
- struct slob_page *sp;
+ struct page *sp;
BUG_ON(!block);
if (unlikely(block == ZERO_SIZE_PTR))
@@ -555,7 +534,7 @@ size_t ksize(const void *block)
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
} else
- return sp->page.private;
+ return sp->private;
}
EXPORT_SYMBOL(ksize);
Index: linux-2.6/include/linux/mm_types.h
===================================================================
--- linux-2.6.orig/include/linux/mm_types.h 2012-05-22 06:27:51.335836712 -0500
+++ linux-2.6/include/linux/mm_types.h 2012-05-22 06:34:39.255828250 -0500
@@ -52,7 +52,7 @@ struct page {
struct {
union {
pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* slub first free object */
+ void *freelist; /* slub/slob first free object */
};
union {
@@ -80,11 +80,12 @@ struct page {
*/
atomic_t _mapcount;
- struct {
+ struct { /* SLUB */
unsigned inuse:16;
unsigned objects:15;
unsigned frozen:1;
};
+ int units; /* SLOB */
};
atomic_t _count; /* Usage count, see below. */
};
@@ -106,6 +107,8 @@ struct page {
short int pobjects;
#endif
};
+
+ struct list_head list; /* slobs list of pages */
};
/* Remainder is not double word aligned */
* Re: Common 01/22] [slob] Define page struct fields used in mm_types.h
2012-05-23 20:34 ` Common 01/22] [slob] Define page struct fields used in mm_types.h Christoph Lameter
@ 2012-05-31 21:14 ` David Rientjes
2012-06-01 13:30 ` JoonSoo Kim
0 siblings, 1 reply; 32+ messages in thread
From: David Rientjes @ 2012-05-31 21:14 UTC (permalink / raw)
To: Christoph Lameter
Cc: Pekka Enberg, linux-mm, Matt Mackall, Glauber Costa, Joonsoo Kim
On Wed, 23 May 2012, Christoph Lameter wrote:
> Define the fields used by slob in mm_types.h and use struct page instead
> of struct slob_page in slob. This cleans up numerous typecasts in slob.c and
> makes readers aware of slob's use of page struct fields.
>
> [Also cleans up some bitrot in slob.c. The page struct field layout
> in slob.c is an old layout and does not match the one in mm_types.h]
>
> Reviewed-by: Glauber Costa <glommer@parallels.com>
> Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
* Re: Common 01/22] [slob] Define page struct fields used in mm_types.h
2012-05-31 21:14 ` David Rientjes
@ 2012-06-01 13:30 ` JoonSoo Kim
0 siblings, 0 replies; 32+ messages in thread
From: JoonSoo Kim @ 2012-06-01 13:30 UTC (permalink / raw)
To: David Rientjes
Cc: Christoph Lameter, Pekka Enberg, linux-mm, Matt Mackall,
Glauber Costa
2012/6/1 David Rientjes <rientjes@google.com>:
> On Wed, 23 May 2012, Christoph Lameter wrote:
>
>> Define the fields used by slob in mm_types.h and use struct page instead
>> of struct slob_page in slob. This cleans up numerous typecasts in slob.c and
>> makes readers aware of slob's use of page struct fields.
>>
>> [Also cleans up some bitrot in slob.c. The page struct field layout
>> in slob.c is an old layout and does not match the one in mm_types.h]
>>
>> Reviewed-by: Glauber Costa <glommer@parallels.com>
>> Signed-off-by: Christoph Lameter <cl@linux.com>
>
> Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
* Common 03/22] [slob] Remove various small accessors
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
2012-05-23 20:34 ` Common 01/22] [slob] Define page struct fields used in mm_types.h Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-31 21:19 ` David Rientjes
2012-05-23 20:34 ` Common 05/22] [slab] Remove some accessors Christoph Lameter
` (17 subsequent siblings)
19 siblings, 1 reply; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: slob_inline --]
[-- Type: text/plain, Size: 3252 bytes --]
Those have become so simple that they are no longer needed.
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slob.c | 49 +++++++++----------------------------------------
1 file changed, 9 insertions(+), 40 deletions(-)
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-22 09:05:55.024463914 -0500
+++ linux-2.6/mm/slob.c 2012-05-22 09:10:01.944458789 -0500
@@ -92,14 +92,6 @@ struct slob_block {
typedef struct slob_block slob_t;
/*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct page *sp)
-{
- reset_page_mapcount(sp);
-}
-
-/*
* All partially free slob pages go on these lists.
*/
#define SLOB_BREAK1 256
@@ -109,29 +101,6 @@ static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
/*
- * is_slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int is_slob_page(struct page *sp)
-{
- return PageSlab(sp);
-}
-
-static inline void set_slob_page(struct page *sp)
-{
- __SetPageSlab(sp);
-}
-
-static inline void clear_slob_page(struct page *sp)
-{
- __ClearPageSlab(sp);
-}
-
-static inline struct page *slob_page(const void *addr)
-{
- return virt_to_page(addr);
-}
-
-/*
* slob_page_free: true for pages on free_slob_pages list.
*/
static inline int slob_page_free(struct page *sp)
@@ -347,8 +316,8 @@ static void *slob_alloc(size_t size, gfp
b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
return NULL;
- sp = slob_page(b);
- set_slob_page(sp);
+ sp = virt_to_page(b);
+ __SetPageSlab(sp);
spin_lock_irqsave(&slob_lock, flags);
sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -380,7 +349,7 @@ static void slob_free(void *block, int s
return;
BUG_ON(!size);
- sp = slob_page(block);
+ sp = virt_to_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
@@ -390,8 +359,8 @@ static void slob_free(void *block, int s
if (slob_page_free(sp))
clear_slob_page_free(sp);
spin_unlock_irqrestore(&slob_lock, flags);
- clear_slob_page(sp);
- free_slob_page(sp);
+ __ClearPageSlab(sp);
+ reset_page_mapcount(sp);
slob_free_pages(b, 0);
return;
}
@@ -508,8 +477,8 @@ void kfree(const void *block)
return;
kmemleak_free(block);
- sp = slob_page(block);
- if (is_slob_page(sp)) {
+ sp = virt_to_page(block);
+ if (PageSlab(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
@@ -527,8 +496,8 @@ size_t ksize(const void *block)
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = slob_page(block);
- if (is_slob_page(sp)) {
+ sp = virt_to_page(block);
+ if (PageSlab(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
* Re: Common 03/22] [slob] Remove various small accessors
2012-05-23 20:34 ` Common 03/22] [slob] Remove various small accessors Christoph Lameter
@ 2012-05-31 21:19 ` David Rientjes
2012-06-01 13:29 ` JoonSoo Kim
0 siblings, 1 reply; 32+ messages in thread
From: David Rientjes @ 2012-05-31 21:19 UTC (permalink / raw)
To: Christoph Lameter
Cc: Pekka Enberg, linux-mm, Matt Mackall, Glauber Costa, Joonsoo Kim
On Wed, 23 May 2012, Christoph Lameter wrote:
> Those have become so simple that they are no longer needed.
>
> Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
* Re: Common 03/22] [slob] Remove various small accessors
2012-05-31 21:19 ` David Rientjes
@ 2012-06-01 13:29 ` JoonSoo Kim
0 siblings, 0 replies; 32+ messages in thread
From: JoonSoo Kim @ 2012-06-01 13:29 UTC (permalink / raw)
To: David Rientjes
Cc: Christoph Lameter, Pekka Enberg, linux-mm, Matt Mackall,
Glauber Costa
2012/6/1 David Rientjes <rientjes@google.com>:
> On Wed, 23 May 2012, Christoph Lameter wrote:
>
>> Those have become so simple that they are no longer needed.
>>
>> Signed-off-by: Christoph Lameter <cl@linux.com>
>
> Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
* Common 05/22] [slab] Remove some accessors
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
2012-05-23 20:34 ` Common 01/22] [slob] Define page struct fields used in mm_types.h Christoph Lameter
2012-05-23 20:34 ` Common 03/22] [slob] Remove various small accessors Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 06/22] Extract common fields from struct kmem_cache Christoph Lameter
` (16 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: slab_remove_accessors --]
[-- Type: text/plain, Size: 2981 bytes --]
Those are rather trivial now and it's better to see inline what is
really going on.
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 35 ++++++++---------------------------
1 file changed, 8 insertions(+), 27 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-22 09:21:28.528444571 -0500
+++ linux-2.6/mm/slab.c 2012-05-22 09:27:35.664436970 -0500
@@ -489,16 +489,6 @@ EXPORT_SYMBOL(slab_buffer_size);
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;
-/*
- * Functions for storing/retrieving the cachep and or slab from the page
- * allocator. These are used to find the slab an obj belongs to. With kfree(),
- * these are used to find the cache which an obj belongs to.
- */
-static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
-{
- page->slab_cache = cache;
-}
-
static inline struct kmem_cache *page_get_cache(struct page *page)
{
page = compound_head(page);
@@ -506,27 +496,18 @@ static inline struct kmem_cache *page_ge
return page->slab_cache;
}
-static inline void page_set_slab(struct page *page, struct slab *slab)
-{
- page->slab_page = slab;
-}
-
-static inline struct slab *page_get_slab(struct page *page)
-{
- BUG_ON(!PageSlab(page));
- return page->slab_page;
-}
-
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
struct page *page = virt_to_head_page(obj);
- return page_get_cache(page);
+ return page->slab_cache;
}
static inline struct slab *virt_to_slab(const void *obj)
{
struct page *page = virt_to_head_page(obj);
- return page_get_slab(page);
+
+ VM_BUG_ON(!PageSlab(page));
+ return page->slab_page;
}
static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
@@ -2918,8 +2899,8 @@ static void slab_map_pages(struct kmem_c
nr_pages <<= cache->gfporder;
do {
- page_set_cache(page, cache);
- page_set_slab(page, slab);
+ page->slab_cache = cache;
+ page->slab_page = slab;
page++;
} while (--nr_pages);
}
@@ -3057,7 +3038,7 @@ static void *cache_free_debugcheck(struc
kfree_debugcheck(objp);
page = virt_to_head_page(objp);
- slabp = page_get_slab(page);
+ slabp = page->slab_page;
if (cachep->flags & SLAB_RED_ZONE) {
verify_redzone_free(cachep, objp);
@@ -3261,7 +3242,7 @@ static void *cache_alloc_debugcheck_afte
struct slab *slabp;
unsigned objnr;
- slabp = page_get_slab(virt_to_head_page(objp));
+ slabp = virt_to_head_page(objp)->slab_page;
objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
}
* Common 06/22] Extract common fields from struct kmem_cache
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (2 preceding siblings ...)
2012-05-23 20:34 ` Common 05/22] [slab] Remove some accessors Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-30 6:39 ` Pekka Enberg
2012-05-23 20:34 ` Common 07/22] Extract common code for kmem_cache_create() Christoph Lameter
` (15 subsequent siblings)
19 siblings, 1 reply; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: common_fields --]
[-- Type: text/plain, Size: 9844 bytes --]
Define "COMMON" to include definitions for fields used in all
slab allocators. After that it will be possible to share code that
only operates on those fields of kmem_cache.
The patch basically takes the slob definition of kmem_cache and
uses the field names for the other allocators.
The slob definition of kmem_cache is moved from slob.c to slob_def.h
so that the location of the kmem_cache definition is the same for
all allocators.
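For instance, after this change the SLOB kmem_cache (see the slob_def.h hunk
below) consists of nothing but the common block; with the macro expanded, the
definition reads:

	struct kmem_cache {		/* SLOB, with SLAB_COMMON expanded */
		unsigned int size, align;
		unsigned long flags;
		const char *name;
		int refcount;
		void (*ctor)(void *);
		struct list_head list;	/* struct list_head comes from <linux/types.h> */
	};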
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/slab.h | 11 +++++++++++
include/linux/slab_def.h | 8 ++------
include/linux/slob_def.h | 4 ++++
include/linux/slub_def.h | 11 ++++-------
mm/slab.c | 30 +++++++++++++++---------------
mm/slob.c | 7 -------
6 files changed, 36 insertions(+), 35 deletions(-)
Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h 2012-05-22 09:05:49.416464029 -0500
+++ linux-2.6/include/linux/slab.h 2012-05-23 04:23:21.423024939 -0500
@@ -93,6 +93,17 @@
(unsigned long)ZERO_SIZE_PTR)
/*
+ * Common fields provided in kmem_cache by all slab allocators
+ */
+#define SLAB_COMMON \
+ unsigned int size, align; \
+ unsigned long flags; \
+ const char *name; \
+ int refcount; \
+ void (*ctor)(void *); \
+ struct list_head list;
+
+/*
* struct kmem_cache related prototypes
*/
void __init kmem_cache_init(void);
Index: linux-2.6/include/linux/slab_def.h
===================================================================
--- linux-2.6.orig/include/linux/slab_def.h 2012-05-22 09:05:49.360464030 -0500
+++ linux-2.6/include/linux/slab_def.h 2012-05-23 04:23:21.423024939 -0500
@@ -31,7 +31,6 @@ struct kmem_cache {
u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */
- unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */
/* 3) cache_grow/shrink */
@@ -47,12 +46,9 @@ struct kmem_cache {
unsigned int slab_size;
unsigned int dflags; /* dynamic flags */
- /* constructor func */
- void (*ctor)(void *obj);
-
/* 4) cache creation/removal */
- const char *name;
- struct list_head next;
+
+ SLAB_COMMON
/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h 2012-05-22 09:05:49.392464029 -0500
+++ linux-2.6/include/linux/slub_def.h 2012-05-23 04:23:21.423024939 -0500
@@ -80,9 +80,7 @@ struct kmem_cache_order_objects {
struct kmem_cache {
struct kmem_cache_cpu __percpu *cpu_slab;
/* Used for retriving partial slabs etc */
- unsigned long flags;
unsigned long min_partial;
- int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
int cpu_partial; /* Number of per cpu partial objects to keep around */
@@ -92,13 +90,12 @@ struct kmem_cache {
struct kmem_cache_order_objects max;
struct kmem_cache_order_objects min;
gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
+
+ SLAB_COMMON
+
int inuse; /* Offset to metadata */
- int align; /* Alignment */
int reserved; /* Reserved bytes at the end of slabs */
- const char *name; /* Name (only for display!) */
- struct list_head list; /* List of slab caches */
+
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
#endif
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-22 09:21:26.588444610 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 04:23:21.423024939 -0500
@@ -506,13 +506,6 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);
-struct kmem_cache {
- unsigned int size, align;
- unsigned long flags;
- const char *name;
- void (*ctor)(void *);
-};
-
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-22 09:27:35.664436970 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 04:23:21.427024939 -0500
@@ -1134,7 +1134,7 @@ static int init_cache_nodelists_node(int
struct kmem_list3 *l3;
const int memsize = sizeof(struct kmem_list3);
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &cache_chain, list) {
/*
* Set up the size64 kmemlist for cpu before we can
* begin anything. Make sure some other cpu on this
@@ -1172,7 +1172,7 @@ static void __cpuinit cpuup_canceled(lon
int node = cpu_to_mem(cpu);
const struct cpumask *mask = cpumask_of_node(node);
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &cache_chain, list) {
struct array_cache *nc;
struct array_cache *shared;
struct array_cache **alien;
@@ -1222,7 +1222,7 @@ free_array_cache:
* the respective cache's slabs, now we can go ahead and
* shrink each nodelist to its limit.
*/
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &cache_chain, list) {
l3 = cachep->nodelists[node];
if (!l3)
continue;
@@ -1251,7 +1251,7 @@ static int __cpuinit cpuup_prepare(long
* Now we can go ahead with allocating the shared arrays and
* array caches
*/
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &cache_chain, list) {
struct array_cache *nc;
struct array_cache *shared = NULL;
struct array_cache **alien = NULL;
@@ -1383,7 +1383,7 @@ static int __meminit drain_cache_nodelis
struct kmem_cache *cachep;
int ret = 0;
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &cache_chain, list) {
struct kmem_list3 *l3;
l3 = cachep->nodelists[node];
@@ -1526,7 +1526,7 @@ void __init kmem_cache_init(void)
/* 1) create the cache_cache */
INIT_LIST_HEAD(&cache_chain);
- list_add(&cache_cache.next, &cache_chain);
+ list_add(&cache_cache.list, &cache_chain);
cache_cache.colour_off = cache_line_size();
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1671,7 +1671,7 @@ void __init kmem_cache_init_late(void)
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
- list_for_each_entry(cachep, &cache_chain, next)
+ list_for_each_entry(cachep, &cache_chain, list)
if (enable_cpucache(cachep, GFP_NOWAIT))
BUG();
mutex_unlock(&cache_chain_mutex);
@@ -2281,7 +2281,7 @@ kmem_cache_create (const char *name, siz
mutex_lock(&cache_chain_mutex);
}
- list_for_each_entry(pc, &cache_chain, next) {
+ list_for_each_entry(pc, &cache_chain, list) {
char tmp;
int res;
@@ -2526,7 +2526,7 @@ kmem_cache_create (const char *name, siz
}
/* cache setup completed, link it into the list */
- list_add(&cachep->next, &cache_chain);
+ list_add(&cachep->list, &cache_chain);
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
@@ -2721,10 +2721,10 @@ void kmem_cache_destroy(struct kmem_cach
/*
* the chain is never empty, cache_cache is never destroyed
*/
- list_del(&cachep->next);
+ list_del(&cachep->list);
if (__cache_shrink(cachep)) {
slab_error(cachep, "Can't free all objects");
- list_add(&cachep->next, &cache_chain);
+ list_add(&cachep->list, &cache_chain);
mutex_unlock(&cache_chain_mutex);
put_online_cpus();
return;
@@ -4011,7 +4011,7 @@ static int alloc_kmemlist(struct kmem_ca
return 0;
fail:
- if (!cachep->next.next) {
+ if (!cachep->list.next) {
/* Cache is not active yet. Roll back what we did */
node--;
while (node >= 0) {
@@ -4196,7 +4196,7 @@ static void cache_reap(struct work_struc
/* Give up. Setup the next iteration. */
goto out;
- list_for_each_entry(searchp, &cache_chain, next) {
+ list_for_each_entry(searchp, &cache_chain, list) {
check_irq_on();
/*
@@ -4289,7 +4289,7 @@ static void s_stop(struct seq_file *m, v
static int s_show(struct seq_file *m, void *p)
{
- struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
@@ -4437,7 +4437,7 @@ static ssize_t slabinfo_write(struct fil
/* Find the cache in the chain of caches. */
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &cache_chain, list) {
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
Index: linux-2.6/include/linux/slob_def.h
===================================================================
--- linux-2.6.orig/include/linux/slob_def.h 2012-05-22 09:05:49.376464032 -0500
+++ linux-2.6/include/linux/slob_def.h 2012-05-23 04:23:21.427024939 -0500
@@ -1,6 +1,10 @@
#ifndef __LINUX_SLOB_DEF_H
#define __LINUX_SLOB_DEF_H
+struct kmem_cache {
+ SLAB_COMMON
+};
+
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
* Re: Common 06/22] Extract common fields from struct kmem_cache
2012-05-23 20:34 ` Common 06/22] Extract common fields from struct kmem_cache Christoph Lameter
@ 2012-05-30 6:39 ` Pekka Enberg
2012-05-30 15:29 ` Christoph Lameter
0 siblings, 1 reply; 32+ messages in thread
From: Pekka Enberg @ 2012-05-30 6:39 UTC (permalink / raw)
To: Christoph Lameter
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
On Wed, May 23, 2012 at 11:34 PM, Christoph Lameter <cl@linux.com> wrote:
> Define "COMMON" to include definitions for fields used in all
> slab allocators. After that it will be possible to share code that
> only operates on those fields of kmem_cache.
>
> The patch basically takes the slob definition of kmem_cache and
> uses the field names for the other allocators.
>
> The slob definition of kmem_cache is moved from slob.c to slob_def.h
> so that the location of the kmem_cache definition is the same for
> all allocators.
>
> Reviewed-by: Glauber Costa <glommer@parallels.com>
> Reviewed-by: Joonsoo Kim <js1304@gmail.com>
> Signed-off-by: Christoph Lameter <cl@linux.com>
>
> ---
> include/linux/slab.h | 11 +++++++++++
> include/linux/slab_def.h | 8 ++------
> include/linux/slob_def.h | 4 ++++
> include/linux/slub_def.h | 11 ++++-------
> mm/slab.c | 30 +++++++++++++++---------------
> mm/slob.c | 7 -------
> 6 files changed, 36 insertions(+), 35 deletions(-)
>
> Index: linux-2.6/include/linux/slab.h
> ===================================================================
> --- linux-2.6.orig/include/linux/slab.h 2012-05-22 09:05:49.416464029 -0500
> +++ linux-2.6/include/linux/slab.h 2012-05-23 04:23:21.423024939 -0500
> @@ -93,6 +93,17 @@
> (unsigned long)ZERO_SIZE_PTR)
>
> /*
> + * Common fields provided in kmem_cache by all slab allocators
> + */
> +#define SLAB_COMMON \
> + unsigned int size, align; \
> + unsigned long flags; \
> + const char *name; \
> + int refcount; \
> + void (*ctor)(void *); \
> + struct list_head list;
> +
I don't like this at all - it obscures the actual "kmem_cache"
structures. If we can't come up with a reasonable solution that makes
this a proper struct that's embedded in allocator-specific
"kmem_cache" structures, it's best that we rename the fields but keep
them inlined and drop this macro..
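For comparison, a minimal sketch of the embedded-struct alternative being asked
for here (the struct name and the allocator-specific field are made up for
illustration, they are not from the series):

	struct kmem_cache_common {		/* hypothetical name */
		unsigned int size, align;
		unsigned long flags;
		const char *name;
		/* ...and the other SLAB_COMMON fields... */
	};

	struct kmem_cache {			/* per-allocator definition */
		struct kmem_cache_common common;
		int objsize;			/* allocator-specific fields follow */
	};

	/* Every user then changes from s->size to s->common.size, which is
	 * exactly the churn the macro approach avoids. */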
* Re: Common 06/22] Extract common fields from struct kmem_cache
2012-05-30 6:39 ` Pekka Enberg
@ 2012-05-30 15:29 ` Christoph Lameter
2012-05-30 18:10 ` Christoph Lameter
2012-05-31 7:52 ` Pekka Enberg
0 siblings, 2 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-30 15:29 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: Type: TEXT/PLAIN, Size: 1119 bytes --]
On Wed, 30 May 2012, Pekka Enberg wrote:
> > /*
> > + * Common fields provided in kmem_cache by all slab allocators
> > + */
> > +#define SLAB_COMMON \
> > + unsigned int size, align; \
> > + unsigned long flags; \
> > + const char *name; \
> > + int refcount; \
> > + void (*ctor)(void *); \
> > + struct list_head list;
> > +
>
> I don't like this at all - it obscures the actual "kmem_cache"
> structures. If we can't come up with a reasonable solution that makes
> this a proper struct that's embedded in allocator-specific
> "kmem_cache" structures, it's best that we rename the fields but keep
> them inlined and drop this macro..
Actually that is a good idea. We can keep a fake struct in comments around
in slab.h to document what all slab allocators have to support and then at
some point we may be able to integrate the struct.
* Re: Common 06/22] Extract common fields from struct kmem_cache
2012-05-30 15:29 ` Christoph Lameter
@ 2012-05-30 18:10 ` Christoph Lameter
2012-05-31 7:52 ` Pekka Enberg
1 sibling, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-30 18:10 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
Tried using an anonymous struct, but that does not seem to be supported in
the kernel. C11 supports it.
* Re: Common 06/22] Extract common fields from struct kmem_cache
2012-05-30 15:29 ` Christoph Lameter
2012-05-30 18:10 ` Christoph Lameter
@ 2012-05-31 7:52 ` Pekka Enberg
1 sibling, 0 replies; 32+ messages in thread
From: Pekka Enberg @ 2012-05-31 7:52 UTC (permalink / raw)
To: Christoph Lameter
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: Type: TEXT/PLAIN, Size: 1213 bytes --]
On Wed, 30 May 2012, Christoph Lameter wrote:
> On Wed, 30 May 2012, Pekka Enberg wrote:
>
> > > /*
> > > + * Common fields provided in kmem_cache by all slab allocators
> > > + */
> > > +#define SLAB_COMMON \
> > > + unsigned int size, align; \
> > > + unsigned long flags; \
> > > + const char *name; \
> > > + int refcount; \
> > > + void (*ctor)(void *); \
> > > + struct list_head list;
> > > +
> >
> > I don't like this at all - it obscures the actual "kmem_cache"
> > structures. If we can't come up with a reasonable solution that makes
> > this a proper struct that's embedded in allocator-specific
> > "kmem_cache" structures, it's best that we rename the fields but keep
> > them inlined and drop this macro..
>
> Actually that is a good idea. We can keep a fake struct in comments around
> in slab.h to document what all slab allocators have to support and then at
> some point we may be able to integrate the struct.
Works for me.
Pekka
* Common 07/22] Extract common code for kmem_cache_create()
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (3 preceding siblings ...)
2012-05-23 20:34 ` Common 06/22] Extract common fields from struct kmem_cache Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 08/22] Common definition for boot state of the slab allocators Christoph Lameter
` (14 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: common_kmem_cache_checks --]
[-- Type: text/plain, Size: 9751 bytes --]
Kmem_cache_create() does a variety of sanity checks but those
vary depending on the allocator. Use the strictest tests and put them into
a slab_common file. Make the tests conditional on CONFIG_DEBUG_VM.
This patch has the effect of adding sanity checks for SLUB and SLOB
under CONFIG_DEBUG_VM and removes the checks in SLAB for !CONFIG_DEBUG_VM.
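As a usage note (the call below is illustrative, not taken from the patch):
with CONFIG_DEBUG_VM enabled, a request like this is now rejected by the common
code for all three allocators rather than only by SLAB:

	#include <linux/init.h>
	#include <linux/slab.h>

	static int __init bad_cache_demo(void)
	{
		struct kmem_cache *c;

		/* size below sizeof(void *) trips the common integrity check;
		 * without SLAB_PANIC the result is simply NULL. */
		c = kmem_cache_create("bad-cache", sizeof(void *) - 1, 0, 0, NULL);

		return c ? 0 : -EINVAL;
	}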
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/slab.h | 4 +++
mm/Makefile | 2 -
mm/slab.c | 24 ++++++------------
mm/slab_common.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++
mm/slob.c | 8 ++----
mm/slub.c | 11 --------
6 files changed, 85 insertions(+), 31 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 04:23:21.427024939 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 04:23:27.567024810 -0500
@@ -1566,7 +1566,7 @@ void __init kmem_cache_init(void)
* bug.
*/
- sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
+ sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1574,7 +1574,7 @@ void __init kmem_cache_init(void)
if (INDEX_AC != INDEX_L3) {
sizes[INDEX_L3].cs_cachep =
- kmem_cache_create(names[INDEX_L3].name,
+ __kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1592,14 +1592,14 @@ void __init kmem_cache_init(void)
* allow tighter packing of the smaller caches.
*/
if (!sizes->cs_cachep) {
- sizes->cs_cachep = kmem_cache_create(names->name,
+ sizes->cs_cachep = __kmem_cache_create(names->name,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL);
}
#ifdef CONFIG_ZONE_DMA
- sizes->cs_dmacachep = kmem_cache_create(
+ sizes->cs_dmacachep = __kmem_cache_create(
names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -2228,7 +2228,7 @@ static int __init_refok setup_cpu_cache(
}
/**
- * kmem_cache_create - Create a cache.
+ * __kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @align: The required alignment for the objects.
@@ -2255,7 +2255,7 @@ static int __init_refok setup_cpu_cache(
* as davem.
*/
struct kmem_cache *
-kmem_cache_create (const char *name, size_t size, size_t align,
+__kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *))
{
size_t left_over, slab_size, ralign;
@@ -2396,7 +2396,7 @@ kmem_cache_create (const char *name, siz
/* Get cache's description obj. */
cachep = kmem_cache_zalloc(&cache_cache, gfp);
if (!cachep)
- goto oops;
+ return NULL;
cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
#if DEBUG
@@ -2452,8 +2452,7 @@ kmem_cache_create (const char *name, siz
printk(KERN_ERR
"kmem_cache_create: couldn't create cache %s.\n", name);
kmem_cache_free(&cache_cache, cachep);
- cachep = NULL;
- goto oops;
+ return NULL;
}
slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+ sizeof(struct slab), align);
@@ -2511,8 +2510,7 @@ kmem_cache_create (const char *name, siz
if (setup_cpu_cache(cachep, gfp)) {
__kmem_cache_destroy(cachep);
- cachep = NULL;
- goto oops;
+ return NULL;
}
if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2528,16 +2526,12 @@ kmem_cache_create (const char *name, siz
/* cache setup completed, link it into the list */
list_add(&cachep->list, &cache_chain);
oops:
- if (!cachep && (flags & SLAB_PANIC))
- panic("kmem_cache_create(): failed to create slab `%s'\n",
- name);
if (slab_is_available()) {
mutex_unlock(&cache_chain_mutex);
put_online_cpus();
}
return cachep;
}
-EXPORT_SYMBOL(kmem_cache_create);
#if DEBUG
static void check_irq_off(void)
Index: linux-2.6/mm/slab_common.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/mm/slab_common.c 2012-05-23 04:23:27.567024810 -0500
@@ -0,0 +1,67 @@
+/*
+ * Slab allocator functions that are independent of the allocator strategy
+ *
+ * (C) 2012 Christoph Lameter <cl@linux.com>
+ */
+#include <linux/slab.h>
+
+#include <linux/mm.h>
+#include <linux/poison.h>
+#include <linux/interrupt.h>
+#include <linux/memory.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+
+/*
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ *
+ * Returns a ptr to the cache on success, NULL on failure.
+ * Cannot be called within a interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ */
+
+struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *))
+{
+ struct kmem_cache *s = NULL;
+
+#ifdef CONFIG_DEBUG_VM
+ if (!name || in_interrupt() || size < sizeof(void *) ||
+ size > KMALLOC_MAX_SIZE) {
+ printk(KERN_ERR "kmem_cache_create(%s) integrity check"
+ " failed\n", name);
+ goto out;
+ }
+#endif
+
+ s = __kmem_cache_create(name, size, align, flags, ctor);
+
+out:
+ if (!s && (flags & SLAB_PANIC))
+ panic("kmem_cache_create: Failed to create slab '%s'\n", name);
+
+ return s;
+}
+EXPORT_SYMBOL(kmem_cache_create);
+
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-22 09:05:48.368464051 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 04:23:27.571024809 -0500
@@ -3921,15 +3921,12 @@ static struct kmem_cache *find_mergeable
return NULL;
}
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
char *n;
- if (WARN_ON(!name))
- return NULL;
-
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
@@ -3973,14 +3970,8 @@ struct kmem_cache *kmem_cache_create(con
}
err:
up_write(&slub_lock);
-
- if (flags & SLAB_PANIC)
- panic("Cannot create slabcache %s\n", name);
- else
- s = NULL;
return s;
}
-EXPORT_SYMBOL(kmem_cache_create);
#ifdef CONFIG_SMP
/*
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-23 04:23:21.423024939 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 04:23:27.571024809 -0500
@@ -506,7 +506,7 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *c;
@@ -529,13 +529,11 @@ struct kmem_cache *kmem_cache_create(con
c->align = ARCH_SLAB_MINALIGN;
if (c->align < align)
c->align = align;
- } else if (flags & SLAB_PANIC)
- panic("Cannot create slab cache %s\n", name);
- kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+ kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+ }
return c;
}
-EXPORT_SYMBOL(kmem_cache_create);
void kmem_cache_destroy(struct kmem_cache *c)
{
Index: linux-2.6/mm/Makefile
===================================================================
--- linux-2.6.orig/mm/Makefile 2012-05-22 09:05:48.384464050 -0500
+++ linux-2.6/mm/Makefile 2012-05-23 04:23:27.571024809 -0500
@@ -13,7 +13,7 @@ obj-y := filemap.o mempool.o oom_kill.
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
page_isolation.o mm_init.o mmu_context.o percpu.o \
- $(mmu-y)
+ slab_common.o $(mmu-y)
obj-y += init-mm.o
ifdef CONFIG_NO_BOOTMEM
Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h 2012-05-23 04:23:21.423024939 -0500
+++ linux-2.6/include/linux/slab.h 2012-05-23 04:23:27.571024809 -0500
@@ -117,6 +117,10 @@ int kmem_cache_shrink(struct kmem_cache
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
+/* Slab internal function */
+struct kmem_cache *__kmem_cache_create(const char *, size_t, size_t,
+ unsigned long,
+ void (*)(void *));
/*
* Please use this macro to create slab caches. Simply specify the
* name of the structure and maybe some flags that are listed above.
* Common 08/22] Common definition for boot state of the slab allocators
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (4 preceding siblings ...)
2012-05-23 20:34 ` Common 07/22] Extract common code for kmem_cache_create() Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 09/22] Use a common mutex definition Christoph Lameter
` (13 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: slab_internal --]
[-- Type: text/plain, Size: 9158 bytes --]
All allocators have some sort of support for the bootstrap status.
Set up a common definition for the boot states and make all slab
allocators use that definition.
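Condensed from the hunks below (SLOB shown; SLAB and SLUB additionally pass
through the intermediate PARTIAL* states), the shared bootstrap state is
defined once and each allocator simply advances it:

	/* mm/slab_common.c */
	enum slab_state slab_state;

	int slab_is_available(void)
	{
		return slab_state >= UP;
	}

	/* mm/slob.c */
	void __init kmem_cache_init(void)
	{
		slab_state = UP;
	}

	void __init kmem_cache_init_late(void)
	{
		slab_state = FULL;
	}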
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/slab.h | 4 ----
mm/slab.c | 42 +++++++++++-------------------------------
mm/slab.h | 30 ++++++++++++++++++++++++++++++
mm/slab_common.c | 9 +++++++++
mm/slob.c | 14 +++++---------
mm/slub.c | 21 +++++----------------
6 files changed, 60 insertions(+), 60 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 04:23:27.567024810 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 04:23:44.379024464 -0500
@@ -87,6 +87,7 @@
*/
#include <linux/slab.h>
+#include "slab.h"
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
@@ -571,27 +572,6 @@ static struct kmem_cache cache_cache = {
#define BAD_ALIEN_MAGIC 0x01020304ul
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
- NONE,
- PARTIAL_AC,
- PARTIAL_L3,
- EARLY,
- LATE,
- FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
- return g_cpucache_up >= EARLY;
-}
-
#ifdef CONFIG_LOCKDEP
/*
@@ -657,7 +637,7 @@ static void init_node_lock_keys(int q)
{
struct cache_sizes *s = malloc_sizes;
- if (g_cpucache_up < LATE)
+ if (slab_state < UP)
return;
for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1657,14 +1637,14 @@ void __init kmem_cache_init(void)
}
}
- g_cpucache_up = EARLY;
+ slab_state = UP;
}
void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
- g_cpucache_up = LATE;
+ slab_state = UP;
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
@@ -1677,7 +1657,7 @@ void __init kmem_cache_init_late(void)
mutex_unlock(&cache_chain_mutex);
/* Done! */
- g_cpucache_up = FULL;
+ slab_state = FULL;
/*
* Register a cpu startup notifier callback that initializes
@@ -2175,10 +2155,10 @@ static size_t calculate_slab_order(struc
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
- if (g_cpucache_up == FULL)
+ if (slab_state == FULL)
return enable_cpucache(cachep, gfp);
- if (g_cpucache_up == NONE) {
+ if (slab_state == DOWN) {
/*
* Note: the first kmem_cache_create must create the cache
* that's used by kmalloc(24), otherwise the creation of
@@ -2193,16 +2173,16 @@ static int __init_refok setup_cpu_cache(
*/
set_up_list3s(cachep, SIZE_AC);
if (INDEX_AC == INDEX_L3)
- g_cpucache_up = PARTIAL_L3;
+ slab_state = PARTIAL_L3;
else
- g_cpucache_up = PARTIAL_AC;
+ slab_state = PARTIAL_ARRAYCACHE;
} else {
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init), gfp);
- if (g_cpucache_up == PARTIAL_AC) {
+ if (slab_state == PARTIAL_ARRAYCACHE) {
set_up_list3s(cachep, SIZE_L3);
- g_cpucache_up = PARTIAL_L3;
+ slab_state = PARTIAL_L3;
} else {
int node;
for_each_online_node(node) {
Index: linux-2.6/mm/slab.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/mm/slab.h 2012-05-23 04:23:44.379024464 -0500
@@ -0,0 +1,30 @@
+#ifndef MM_SLAB_H
+#define MM_SLAB_H
+/*
+ * Internal slab definitions
+ */
+
+/*
+ * State of the slab allocator.
+ *
+ * This is used to describe the states of the allocator during bootup.
+ * Allocators use this to gradually bootstrap themselves. Most allocators
+ * have the problem that the structures used for managing slab caches are
+ * allocated from slab caches themselves.
+ */
+enum slab_state {
+ DOWN, /* No slab functionality yet */
+ PARTIAL, /* SLUB: kmem_cache_node available */
+ PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */
+ PARTIAL_L3, /* SLAB: kmalloc size for l3 struct available */
+ UP, /* Slab caches usable but not all extras yet */
+ FULL /* Everything is working */
+};
+
+extern enum slab_state slab_state;
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *));
+
+#endif
+
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-23 04:23:27.571024809 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 04:23:44.379024464 -0500
@@ -59,6 +59,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include "slab.h"
+
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
@@ -531,6 +533,7 @@ struct kmem_cache *__kmem_cache_create(c
c->align = align;
kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+ c->refcount = 1;
}
return c;
}
@@ -616,19 +619,12 @@ int kmem_cache_shrink(struct kmem_cache
}
EXPORT_SYMBOL(kmem_cache_shrink);
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
- return slob_ready;
-}
-
void __init kmem_cache_init(void)
{
- slob_ready = 1;
+ slab_state = UP;
}
void __init kmem_cache_init_late(void)
{
- /* Nothing to do */
+ slab_state = FULL;
}
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 04:23:27.571024809 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 04:23:44.383024464 -0500
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
@@ -182,13 +183,6 @@ static int kmem_size = sizeof(struct kme
static struct notifier_block slab_notifier;
#endif
-static enum {
- DOWN, /* No slab functionality available */
- PARTIAL, /* Kmem_cache_node works */
- UP, /* Everything works but does not show up in sysfs */
- SYSFS /* Sysfs up */
-} slab_state = DOWN;
-
/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);
@@ -237,11 +231,6 @@ static inline void stat(const struct kme
* Core slab cache functions
*******************************************************************/
-int slab_is_available(void)
-{
- return slab_state >= UP;
-}
-
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
return s->node[node];
@@ -5274,7 +5263,7 @@ static int sysfs_slab_add(struct kmem_ca
const char *name;
int unmergeable;
- if (slab_state < SYSFS)
+ if (slab_state < FULL)
/* Defer until later */
return 0;
@@ -5319,7 +5308,7 @@ static int sysfs_slab_add(struct kmem_ca
static void sysfs_slab_remove(struct kmem_cache *s)
{
- if (slab_state < SYSFS)
+ if (slab_state < FULL)
/*
* Sysfs has not been setup yet so no need to remove the
* cache from sysfs.
@@ -5347,7 +5336,7 @@ static int sysfs_slab_alias(struct kmem_
{
struct saved_alias *al;
- if (slab_state == SYSFS) {
+ if (slab_state == FULL) {
/*
* If we have a leftover link then remove it.
*/
@@ -5380,7 +5369,7 @@ static int __init slab_sysfs_init(void)
return -ENOSYS;
}
- slab_state = SYSFS;
+ slab_state = FULL;
list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 04:23:27.567024810 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 04:23:44.383024464 -0500
@@ -16,6 +16,10 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
+#include "slab.h"
+
+enum slab_state slab_state;
+
/*
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -65,3 +69,8 @@ out:
}
EXPORT_SYMBOL(kmem_cache_create);
+int slab_is_available(void)
+{
+ return slab_state >= UP;
+}
+
Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h 2012-05-23 04:23:27.571024809 -0500
+++ linux-2.6/include/linux/slab.h 2012-05-23 04:23:44.383024464 -0500
@@ -117,10 +117,6 @@ int kmem_cache_shrink(struct kmem_cache
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
-/* Slab internal function */
-struct kmem_cache *__kmem_cache_create(const char *, size_t, size_t,
- unsigned long,
- void (*)(void *));
/*
* Please use this macro to create slab caches. Simply specify the
* name of the structure and maybe some flags that are listed above.
--
* Common 09/22] Use a common mutex definition
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (5 preceding siblings ...)
2012-05-23 20:34 ` Common 08/22] Common definition for boot state of the slab allocators Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 10/22] Move kmem_cache_create mutex handling to common code Christoph Lameter
` (12 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: common_mutex --]
[-- Type: text/plain, Size: 18773 bytes --]
Use the mutex definition from SLAB and make it the common way to take a sleeping lock.
This has the effect of using a mutex instead of a rw semaphore for SLUB.
SLOB gains the use of a mutex for kmem_cache_create serialization.
SLOB does not need this yet, but it may gain more features later (like slabinfo
/ sysfs support) through the expansion of the common code, and that common code
will need the mutex.
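For illustration, the locking convention that all three allocators end up sharing
looks roughly like this (a sketch of the common pattern, not a verbatim hunk):

	struct kmem_cache *s;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		/* inspect or modify cache metadata under the mutex */
	}
	mutex_unlock(&slab_mutex);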
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 104 +++++++++++++++++++++++++------------------------------
mm/slab.h | 4 ++
mm/slab_common.c | 2 +
mm/slub.c | 54 +++++++++++++---------------
4 files changed, 80 insertions(+), 84 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 04:23:44.379024464 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 04:23:50.943024328 -0500
@@ -68,7 +68,7 @@
* Further notes from the original documentation:
*
* 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ * The global cache-chain is protected by the mutex 'slab_mutex'.
* The sem is only needed when accessing/extending the cache-chain, which
* can never happen inside an interrupt (kmem_cache_create(),
* kmem_cache_shrink() and kmem_cache_reap()).
@@ -677,12 +677,6 @@ static void slab_set_debugobj_lock_class
}
#endif
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1106,7 +1100,7 @@ static inline int cache_free_alien(struc
* When hotplugging memory or a cpu, existing nodelists are not replaced if
* already in use.
*
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
*/
static int init_cache_nodelists_node(int node)
{
@@ -1114,7 +1108,7 @@ static int init_cache_nodelists_node(int
struct kmem_list3 *l3;
const int memsize = sizeof(struct kmem_list3);
- list_for_each_entry(cachep, &cache_chain, list) {
+ list_for_each_entry(cachep, &slab_caches, list) {
/*
* Set up the size64 kmemlist for cpu before we can
* begin anything. Make sure some other cpu on this
@@ -1130,7 +1124,7 @@ static int init_cache_nodelists_node(int
/*
* The l3s don't come and go as CPUs come and
- * go. cache_chain_mutex is sufficient
+ * go. slab_mutex is sufficient
* protection here.
*/
cachep->nodelists[node] = l3;
@@ -1152,7 +1146,7 @@ static void __cpuinit cpuup_canceled(lon
int node = cpu_to_mem(cpu);
const struct cpumask *mask = cpumask_of_node(node);
- list_for_each_entry(cachep, &cache_chain, list) {
+ list_for_each_entry(cachep, &slab_caches, list) {
struct array_cache *nc;
struct array_cache *shared;
struct array_cache **alien;
@@ -1202,7 +1196,7 @@ free_array_cache:
* the respective cache's slabs, now we can go ahead and
* shrink each nodelist to its limit.
*/
- list_for_each_entry(cachep, &cache_chain, list) {
+ list_for_each_entry(cachep, &slab_caches, list) {
l3 = cachep->nodelists[node];
if (!l3)
continue;
@@ -1231,7 +1225,7 @@ static int __cpuinit cpuup_prepare(long
* Now we can go ahead with allocating the shared arrays and
* array caches
*/
- list_for_each_entry(cachep, &cache_chain, list) {
+ list_for_each_entry(cachep, &slab_caches, list) {
struct array_cache *nc;
struct array_cache *shared = NULL;
struct array_cache **alien = NULL;
@@ -1299,9 +1293,9 @@ static int __cpuinit cpuup_callback(stru
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
err = cpuup_prepare(cpu);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -1311,7 +1305,7 @@ static int __cpuinit cpuup_callback(stru
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
/*
- * Shutdown cache reaper. Note that the cache_chain_mutex is
+ * Shutdown cache reaper. Note that the slab_mutex is
* held so that if cache_reap() is invoked it cannot do
* anything expensive but will only modify reap_work
* and reschedule the timer.
@@ -1338,9 +1332,9 @@ static int __cpuinit cpuup_callback(stru
#endif
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
cpuup_canceled(cpu);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
break;
}
return notifier_from_errno(err);
@@ -1356,14 +1350,14 @@ static struct notifier_block __cpuinitda
* Returns -EBUSY if all objects cannot be drained so that the node is not
* removed.
*
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
*/
static int __meminit drain_cache_nodelists_node(int node)
{
struct kmem_cache *cachep;
int ret = 0;
- list_for_each_entry(cachep, &cache_chain, list) {
+ list_for_each_entry(cachep, &slab_caches, list) {
struct kmem_list3 *l3;
l3 = cachep->nodelists[node];
@@ -1394,14 +1388,14 @@ static int __meminit slab_memory_callbac
switch (action) {
case MEM_GOING_ONLINE:
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
ret = init_cache_nodelists_node(nid);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
break;
case MEM_GOING_OFFLINE:
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
ret = drain_cache_nodelists_node(nid);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
break;
case MEM_ONLINE:
case MEM_OFFLINE:
@@ -1505,8 +1499,8 @@ void __init kmem_cache_init(void)
node = numa_mem_id();
/* 1) create the cache_cache */
- INIT_LIST_HEAD(&cache_chain);
- list_add(&cache_cache.list, &cache_chain);
+ INIT_LIST_HEAD(&slab_caches);
+ list_add(&cache_cache.list, &slab_caches);
cache_cache.colour_off = cache_line_size();
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1650,11 +1644,11 @@ void __init kmem_cache_init_late(void)
init_lock_keys();
/* 6) resize the head arrays to their final sizes */
- mutex_lock(&cache_chain_mutex);
- list_for_each_entry(cachep, &cache_chain, list)
+ mutex_lock(&slab_mutex);
+ list_for_each_entry(cachep, &slab_caches, list)
if (enable_cpucache(cachep, GFP_NOWAIT))
BUG();
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
/* Done! */
slab_state = FULL;
@@ -2504,10 +2498,10 @@ __kmem_cache_create (const char *name, s
}
/* cache setup completed, link it into the list */
- list_add(&cachep->list, &cache_chain);
+ list_add(&cachep->list, &slab_caches);
oops:
if (slab_is_available()) {
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
put_online_cpus();
}
return cachep;
@@ -2626,7 +2620,7 @@ out:
return nr_freed;
}
-/* Called with cache_chain_mutex held to protect against cpu hotplug */
+/* Called with slab_mutex held to protect against cpu hotplug */
static int __cache_shrink(struct kmem_cache *cachep)
{
int ret = 0, i = 0;
@@ -2661,9 +2655,9 @@ int kmem_cache_shrink(struct kmem_cache
BUG_ON(!cachep || in_interrupt());
get_online_cpus();
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
ret = __cache_shrink(cachep);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
put_online_cpus();
return ret;
}
@@ -2691,15 +2685,15 @@ void kmem_cache_destroy(struct kmem_cach
/* Find the cache in the chain of caches. */
get_online_cpus();
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
/*
* the chain is never empty, cache_cache is never destroyed
*/
list_del(&cachep->list);
if (__cache_shrink(cachep)) {
slab_error(cachep, "Can't free all objects");
- list_add(&cachep->list, &cache_chain);
- mutex_unlock(&cache_chain_mutex);
+ list_add(&cachep->list, &slab_caches);
+ mutex_unlock(&slab_mutex);
put_online_cpus();
return;
}
@@ -2708,7 +2702,7 @@ void kmem_cache_destroy(struct kmem_cach
rcu_barrier();
__kmem_cache_destroy(cachep);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -4020,7 +4014,7 @@ static void do_ccupdate_local(void *info
new->new[smp_processor_id()] = old;
}
-/* Always called with the cache_chain_mutex held */
+/* Always called with the slab_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared, gfp_t gfp)
{
@@ -4064,7 +4058,7 @@ static int do_tune_cpucache(struct kmem_
return alloc_kmemlist(cachep, gfp);
}
-/* Called with cache_chain_mutex held always */
+/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
int err;
@@ -4166,11 +4160,11 @@ static void cache_reap(struct work_struc
int node = numa_mem_id();
struct delayed_work *work = to_delayed_work(w);
- if (!mutex_trylock(&cache_chain_mutex))
+ if (!mutex_trylock(&slab_mutex))
/* Give up. Setup the next iteration. */
goto out;
- list_for_each_entry(searchp, &cache_chain, list) {
+ list_for_each_entry(searchp, &slab_caches, list) {
check_irq_on();
/*
@@ -4208,7 +4202,7 @@ next:
cond_resched();
}
check_irq_on();
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
next_reap_node();
out:
/* Set up the next iteration */
@@ -4244,21 +4238,21 @@ static void *s_start(struct seq_file *m,
{
loff_t n = *pos;
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
if (!n)
print_slabinfo_header(m);
- return seq_list_start(&cache_chain, *pos);
+ return seq_list_start(&slab_caches, *pos);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
- return seq_list_next(p, &cache_chain, pos);
+ return seq_list_next(p, &slab_caches, pos);
}
static void s_stop(struct seq_file *m, void *p)
{
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
}
static int s_show(struct seq_file *m, void *p)
@@ -4409,9 +4403,9 @@ static ssize_t slabinfo_write(struct fil
return -EINVAL;
/* Find the cache in the chain of caches. */
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
res = -EINVAL;
- list_for_each_entry(cachep, &cache_chain, list) {
+ list_for_each_entry(cachep, &slab_caches, list) {
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
@@ -4424,7 +4418,7 @@ static ssize_t slabinfo_write(struct fil
break;
}
}
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
if (res >= 0)
res = count;
return res;
@@ -4447,8 +4441,8 @@ static const struct file_operations proc
static void *leaks_start(struct seq_file *m, loff_t *pos)
{
- mutex_lock(&cache_chain_mutex);
- return seq_list_start(&cache_chain, *pos);
+ mutex_lock(&slab_mutex);
+ return seq_list_start(&slab_caches, *pos);
}
static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4547,17 +4541,17 @@ static int leaks_show(struct seq_file *m
name = cachep->name;
if (n[0] == n[1]) {
/* Increase the buffer size */
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
if (!m->private) {
/* Too bad, we are really out */
m->private = n;
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
return -ENOMEM;
}
*(unsigned long *)m->private = n[0] * 2;
kfree(n);
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
/* Now make sure this entry will be retried */
m->count = m->size;
return 0;
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-05-23 04:23:44.379024464 -0500
+++ linux-2.6/mm/slab.h 2012-05-23 04:23:50.947024328 -0500
@@ -23,6 +23,10 @@ enum slab_state {
extern enum slab_state slab_state;
+/* The slab cache mutex protects the management structures during changes */
+extern struct mutex slab_mutex;
+extern struct list_head slab_caches;
+
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *));
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 04:23:44.383024464 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 04:23:50.947024328 -0500
@@ -36,13 +36,13 @@
/*
* Lock order:
- * 1. slub_lock (Global Semaphore)
+ * 1. slab_mutex (Global Mutex)
* 2. node->list_lock
* 3. slab_lock(page) (Only on some arches and for debugging)
*
- * slub_lock
+ * slab_mutex
*
- * The role of the slub_lock is to protect the list of all the slabs
+ * The role of the slab_mutex is to protect the list of all the slabs
* and to synchronize major metadata changes to slab cache structures.
*
* The slab_lock is only used for debugging and on arches that do not
@@ -183,10 +183,6 @@ static int kmem_size = sizeof(struct kme
static struct notifier_block slab_notifier;
#endif
-/* A list of all slab caches on the system */
-static DECLARE_RWSEM(slub_lock);
-static LIST_HEAD(slab_caches);
-
/*
* Tracking user of a slab.
*/
@@ -3178,11 +3174,11 @@ static inline int kmem_cache_close(struc
*/
void kmem_cache_destroy(struct kmem_cache *s)
{
- down_write(&slub_lock);
+ mutex_lock(&slab_mutex);
s->refcount--;
if (!s->refcount) {
list_del(&s->list);
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
if (kmem_cache_close(s)) {
printk(KERN_ERR "SLUB %s: %s called for cache that "
"still has objects.\n", s->name, __func__);
@@ -3192,7 +3188,7 @@ void kmem_cache_destroy(struct kmem_cach
rcu_barrier();
sysfs_slab_remove(s);
} else
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -3254,7 +3250,7 @@ static struct kmem_cache *__init create_
/*
* This function is called with IRQs disabled during early-boot on
- * single CPU so there's no need to take slub_lock here.
+ * single CPU so there's no need to take slab_mutex here.
*/
if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
flags, NULL))
@@ -3539,10 +3535,10 @@ static int slab_mem_going_offline_callba
{
struct kmem_cache *s;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list)
kmem_cache_shrink(s);
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
return 0;
}
@@ -3563,7 +3559,7 @@ static void slab_mem_offline_callback(vo
if (offline_node < 0)
return;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
n = get_node(s, offline_node);
if (n) {
@@ -3579,7 +3575,7 @@ static void slab_mem_offline_callback(vo
kmem_cache_free(kmem_cache_node, n);
}
}
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
}
static int slab_mem_going_online_callback(void *arg)
@@ -3602,7 +3598,7 @@ static int slab_mem_going_online_callbac
* allocate a kmem_cache_node structure in order to bring the node
* online.
*/
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
/*
* XXX: kmem_cache_alloc_node will fallback to other nodes
@@ -3618,7 +3614,7 @@ static int slab_mem_going_online_callbac
s->node[nid] = n;
}
out:
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
return ret;
}
@@ -3916,7 +3912,7 @@ struct kmem_cache *__kmem_cache_create(c
struct kmem_cache *s;
char *n;
- down_write(&slub_lock);
+ mutex_lock(&slab_mutex);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
s->refcount++;
@@ -3931,7 +3927,7 @@ struct kmem_cache *__kmem_cache_create(c
s->refcount--;
goto err;
}
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
return s;
}
@@ -3944,9 +3940,9 @@ struct kmem_cache *__kmem_cache_create(c
if (kmem_cache_open(s, n,
size, align, flags, ctor)) {
list_add(&s->list, &slab_caches);
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
if (sysfs_slab_add(s)) {
- down_write(&slub_lock);
+ mutex_lock(&slab_mutex);
list_del(&s->list);
kfree(n);
kfree(s);
@@ -3958,7 +3954,7 @@ struct kmem_cache *__kmem_cache_create(c
kfree(s);
}
err:
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
return s;
}
@@ -3979,13 +3975,13 @@ static int __cpuinit slab_cpuup_callback
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
local_irq_save(flags);
__flush_cpu_slab(s, cpu);
local_irq_restore(flags);
}
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
break;
default:
break;
@@ -5360,11 +5356,11 @@ static int __init slab_sysfs_init(void)
struct kmem_cache *s;
int err;
- down_write(&slub_lock);
+ mutex_lock(&slab_mutex);
slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
if (!slab_kset) {
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
printk(KERN_ERR "Cannot register slab subsystem.\n");
return -ENOSYS;
}
@@ -5389,7 +5385,7 @@ static int __init slab_sysfs_init(void)
kfree(al);
}
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
resiliency_test();
return 0;
}
@@ -5415,7 +5411,7 @@ static void *s_start(struct seq_file *m,
{
loff_t n = *pos;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
if (!n)
print_slabinfo_header(m);
@@ -5429,7 +5425,7 @@ static void *s_next(struct seq_file *m,
static void s_stop(struct seq_file *m, void *p)
{
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
}
static int s_show(struct seq_file *m, void *p)
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 04:23:44.383024464 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 04:23:50.947024328 -0500
@@ -19,6 +19,8 @@
#include "slab.h"
enum slab_state slab_state;
+LIST_HEAD(slab_caches);
+DEFINE_MUTEX(slab_mutex);
/*
* kmem_cache_create - Create a cache.
--
* Common 10/22] Move kmem_cache_create mutex handling to common code
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (6 preceding siblings ...)
2012-05-23 20:34 ` Common 09/22] Use a common mutex definition Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 12/22] Extract a common function for kmem_cache_destroy Christoph Lameter
` (11 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: move_mutex_to_common --]
[-- Type: text/plain, Size: 5658 bytes --]
Move the mutex handling into the common kmem_cache_create()
function.
Then we can also move more checks out of SLAB's kmem_cache_create()
into the common code.
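The resulting shape of the common entry point is roughly (a sketch; the sanity
checks and error handling are abbreviated):

	get_online_cpus();
	mutex_lock(&slab_mutex);

	/* common sanity checks (duplicate names, embedded spaces, ...) */

	s = __kmem_cache_create(name, size, align, flags, ctor);

	mutex_unlock(&slab_mutex);
	put_online_cpus();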
Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 52 +---------------------------------------------------
mm/slab_common.c | 41 ++++++++++++++++++++++++++++++++++++++++-
mm/slub.c | 30 ++++++++++++++----------------
3 files changed, 55 insertions(+), 68 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 04:23:50.943024328 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 04:23:54.363024253 -0500
@@ -2233,55 +2233,10 @@ __kmem_cache_create (const char *name, s
unsigned long flags, void (*ctor)(void *))
{
size_t left_over, slab_size, ralign;
- struct kmem_cache *cachep = NULL, *pc;
+ struct kmem_cache *cachep = NULL;
gfp_t gfp;
- /*
- * Sanity checks... these are all serious usage bugs.
- */
- if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
- size > KMALLOC_MAX_SIZE) {
- printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
- name);
- BUG();
- }
-
- /*
- * We use cache_chain_mutex to ensure a consistent view of
- * cpu_online_mask as well. Please see cpuup_callback
- */
- if (slab_is_available()) {
- get_online_cpus();
- mutex_lock(&cache_chain_mutex);
- }
-
- list_for_each_entry(pc, &cache_chain, list) {
- char tmp;
- int res;
-
- /*
- * This happens when the module gets unloaded and doesn't
- * destroy its slab cache and no-one else reuses the vmalloc
- * area of the module. Print a warning.
- */
- res = probe_kernel_address(pc->name, tmp);
- if (res) {
- printk(KERN_ERR
- "SLAB: cache with size %d has lost its name\n",
- pc->buffer_size);
- continue;
- }
-
- if (!strcmp(pc->name, name)) {
- printk(KERN_ERR
- "kmem_cache_create: duplicate cache %s\n", name);
- dump_stack();
- goto oops;
- }
- }
-
#if DEBUG
- WARN_ON(strchr(name, ' ')); /* It confuses parsers */
#if FORCED_DEBUG
/*
* Enable redzoning and last user accounting, except for caches with
@@ -2499,11 +2454,6 @@ __kmem_cache_create (const char *name, s
/* cache setup completed, link it into the list */
list_add(&cachep->list, &slab_caches);
-oops:
- if (slab_is_available()) {
- mutex_unlock(&slab_mutex);
- put_online_cpus();
- }
return cachep;
}
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 04:23:50.947024328 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 04:23:54.367024253 -0500
@@ -11,7 +11,8 @@
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
-
+#include <linux/cpu.h>
+#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
@@ -61,8 +62,46 @@ struct kmem_cache *kmem_cache_create(con
}
#endif
+ get_online_cpus();
+ mutex_lock(&slab_mutex);
+
+#ifdef CONFIG_DEBUG_VM
+ list_for_each_entry(s, &slab_caches, list) {
+ char tmp;
+ int res;
+
+ /*
+ * This happens when the module gets unloaded and doesn't
+ * destroy its slab cache and no-one else reuses the vmalloc
+ * area of the module. Print a warning.
+ */
+ res = probe_kernel_address(s->name, tmp);
+ if (res) {
+ printk(KERN_ERR
+ "Slab cache with size %d has lost its name\n",
+ s->size);
+ continue;
+ }
+
+ if (!strcmp(s->name, name)) {
+ printk(KERN_ERR "kmem_cache_create(%s): Cache name"
+ " already exists.\n",
+ name);
+ dump_stack();
+ s = NULL;
+ goto oops;
+ }
+ }
+
+ WARN_ON(strchr(name, ' ')); /* It confuses parsers */
+#endif
+
s = __kmem_cache_create(name, size, align, flags, ctor);
+oops:
+ mutex_unlock(&slab_mutex);
+ put_online_cpus();
+
out:
if (!s && (flags & SLAB_PANIC))
panic("kmem_cache_create: Failed to create slab '%s'\n", name);
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 04:23:50.947024328 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 04:23:54.367024253 -0500
@@ -3912,7 +3912,6 @@ struct kmem_cache *__kmem_cache_create(c
struct kmem_cache *s;
char *n;
- mutex_lock(&slab_mutex);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
s->refcount++;
@@ -3925,37 +3924,36 @@ struct kmem_cache *__kmem_cache_create(c
if (sysfs_slab_alias(s, name)) {
s->refcount--;
- goto err;
+ return NULL;
}
- mutex_unlock(&slab_mutex);
return s;
}
n = kstrdup(name, GFP_KERNEL);
if (!n)
- goto err;
+ return NULL;
s = kmalloc(kmem_size, GFP_KERNEL);
if (s) {
if (kmem_cache_open(s, n,
size, align, flags, ctor)) {
+ int r;
+
list_add(&s->list, &slab_caches);
mutex_unlock(&slab_mutex);
- if (sysfs_slab_add(s)) {
- mutex_lock(&slab_mutex);
- list_del(&s->list);
- kfree(n);
- kfree(s);
- goto err;
- }
- return s;
+ r = sysfs_slab_add(s);
+ mutex_lock(&slab_mutex);
+
+ if (!r)
+ return s;
+
+ list_del(&s->list);
+ kmem_cache_close(s);
}
- kfree(n);
kfree(s);
}
-err:
- mutex_unlock(&slab_mutex);
- return s;
+ kfree(n);
+ return NULL;
}
#ifdef CONFIG_SMP
--
* Common 12/22] Extract a common function for kmem_cache_destroy
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (7 preceding siblings ...)
2012-05-23 20:34 ` Common 10/22] Move kmem_cache_create mutex handling to common code Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 13/22] Always use the name "kmem_cache" for the slab cache with the kmem_cache structure Christoph Lameter
` (10 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: kmem_cache_destroy --]
[-- Type: text/plain, Size: 6830 bytes --]
kmem_cache_destroy() does basically the same thing in all allocators.
Extract the common code, which is easy now that we already have common mutex handling.
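After this patch each allocator only has to provide the two hooks declared in
mm/slab.h; the generic kmem_cache_destroy() drives them (a sketch of the contract):

	/* drain the cache; return non-zero if objects are still in use */
	int __kmem_cache_shutdown(struct kmem_cache *);

	/* free allocator-private metadata once shutdown succeeded */
	void __kmem_cache_destroy(struct kmem_cache *);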
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 55 +++----------------------------------------------------
mm/slab.h | 4 +++-
mm/slab_common.c | 22 ++++++++++++++++++++++
mm/slob.c | 11 +++++++----
mm/slub.c | 29 ++++++++---------------------
5 files changed, 43 insertions(+), 78 deletions(-)
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 04:23:58.415024173 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 04:41:35.819002263 -0500
@@ -117,6 +117,28 @@ out:
}
EXPORT_SYMBOL(kmem_cache_create);
+void kmem_cache_destroy(struct kmem_cache *s)
+{
+ get_online_cpus();
+ mutex_lock(&slab_mutex);
+ list_del(&s->list);
+
+ if (!__kmem_cache_shutdown(s)) {
+ if (s->flags & SLAB_DESTROY_BY_RCU)
+ rcu_barrier();
+
+ __kmem_cache_destroy(s);
+ } else {
+ list_add(&s->list, &slab_caches);
+ printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
+ s->name);
+ dump_stack();
+ }
+ mutex_unlock(&slab_mutex);
+ put_online_cpus();
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
int slab_is_available(void)
{
return slab_state >= UP;
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 04:23:58.419024174 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 04:41:35.751002262 -0500
@@ -785,16 +785,6 @@ static void cache_estimate(unsigned long
*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
-#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
-
-static void __slab_error(const char *function, struct kmem_cache *cachep,
- char *msg)
-{
- printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
- function, cachep->name, msg);
- dump_stack();
-}
-
/*
* By default on NUMA we use alien caches to stage the freeing of
* objects allocated from other nodes. This causes massive memory
@@ -2060,7 +2050,7 @@ static void slab_destroy(struct kmem_cac
}
}
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
+void __kmem_cache_destroy(struct kmem_cache *cachep)
{
int i;
struct kmem_list3 *l3;
@@ -2616,49 +2606,10 @@ int kmem_cache_shrink(struct kmem_cache
}
EXPORT_SYMBOL(kmem_cache_shrink);
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
- BUG_ON(!cachep || in_interrupt());
-
- /* Find the cache in the chain of caches. */
- get_online_cpus();
- mutex_lock(&slab_mutex);
- /*
- * the chain is never empty, cache_cache is never destroyed
- */
- list_del(&cachep->list);
- if (__cache_shrink(cachep)) {
- slab_error(cachep, "Can't free all objects");
- list_add(&cachep->list, &slab_caches);
- mutex_unlock(&slab_mutex);
- put_online_cpus();
- return;
- }
-
- if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
- rcu_barrier();
-
- __kmem_cache_destroy(cachep);
- mutex_unlock(&slab_mutex);
- put_online_cpus();
+ return __cache_shrink(cachep);
}
-EXPORT_SYMBOL(kmem_cache_destroy);
/*
* Get the memory for a slab management obj.
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-05-23 04:23:50.947024328 -0500
+++ linux-2.6/mm/slab.h 2012-05-23 04:41:35.791002263 -0500
@@ -30,5 +30,7 @@ extern struct list_head slab_caches;
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *));
-#endif
+int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_destroy(struct kmem_cache *);
+#endif
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-23 04:23:44.379024464 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 04:41:35.727002265 -0500
@@ -538,14 +538,11 @@ struct kmem_cache *__kmem_cache_create(c
return c;
}
-void kmem_cache_destroy(struct kmem_cache *c)
+void __kmem_cache_destroy(struct kmem_cache *c)
{
kmemleak_free(c);
- if (c->flags & SLAB_DESTROY_BY_RCU)
- rcu_barrier();
slob_free(c, sizeof(struct kmem_cache));
}
-EXPORT_SYMBOL(kmem_cache_destroy);
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
@@ -613,6 +610,12 @@ unsigned int kmem_cache_size(struct kmem
}
EXPORT_SYMBOL(kmem_cache_size);
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+ /* No way to check for remaining objects */
+ return 0;
+}
+
int kmem_cache_shrink(struct kmem_cache *d)
{
return 0;
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 04:23:58.423024174 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 04:42:00.323001759 -0500
@@ -3168,29 +3168,16 @@ static inline int kmem_cache_close(struc
return 0;
}
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
{
- mutex_lock(&slab_mutex);
- s->refcount--;
- if (!s->refcount) {
- list_del(&s->list);
- mutex_unlock(&slab_mutex);
- if (kmem_cache_close(s)) {
- printk(KERN_ERR "SLUB %s: %s called for cache that "
- "still has objects.\n", s->name, __func__);
- dump_stack();
- }
- if (s->flags & SLAB_DESTROY_BY_RCU)
- rcu_barrier();
- sysfs_slab_remove(s);
- } else
- mutex_unlock(&slab_mutex);
+ return kmem_cache_close(s);
+}
+
+void __kmem_cache_destroy(struct kmem_cache *s)
+{
+ sysfs_slab_remove(s);
+ kfree(s);
}
-EXPORT_SYMBOL(kmem_cache_destroy);
/********************************************************************
* Kmalloc subsystem
--
* Common 13/22] Always use the name "kmem_cache" for the slab cache with the kmem_cache structure.
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (8 preceding siblings ...)
2012-05-23 20:34 ` Common 12/22] Extract a common function for kmem_cache_destroy Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 15/22] Get rid of __kmem_cache_destroy Christoph Lameter
` (9 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: common_kmem_cache_name --]
[-- Type: text/plain, Size: 9877 bytes --]
Make all allocators use the slab name "kmem_cache" for the slab cache that holds the kmem_cache structures.
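Each allocator keeps a statically allocated boot instance and points the shared
kmem_cache pointer at it in kmem_cache_init(); roughly (a sketch condensed from
the hunks below):

	static struct kmem_cache kmem_cache_boot = {
		.name	= "kmem_cache",
		.size	= sizeof(struct kmem_cache),
		.flags	= SLAB_PANIC,
	};

	void __init kmem_cache_init(void)
	{
		kmem_cache = &kmem_cache_boot;
		slab_state = UP;
	}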
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 72 ++++++++++++++++++++++++++++---------------------------
mm/slab.h | 6 ++++
mm/slab_common.c | 1
mm/slob.c | 9 ++++++
mm/slub.c | 2 -
5 files changed, 52 insertions(+), 38 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 08:51:01.998692154 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 08:52:58.454689743 -0500
@@ -560,9 +560,9 @@ static struct arraycache_init initarray_
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
- .nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+ .nodelists = kmem_cache_nodelists,
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
@@ -1448,15 +1448,17 @@ void __init kmem_cache_init(void)
int order;
int node;
+ kmem_cache = &kmem_cache_boot;
+
if (num_possible_nodes() == 1)
use_alien_caches = 0;
for (i = 0; i < NUM_INIT_LISTS; i++) {
kmem_list3_init(&initkmem_list3[i]);
if (i < MAX_NUMNODES)
- cache_cache.nodelists[i] = NULL;
+ kmem_cache->nodelists[i] = NULL;
}
- set_up_list3s(&cache_cache, CACHE_CACHE);
+ set_up_list3s(kmem_cache, CACHE_CACHE);
/*
* Fragmentation resistance on low memory - only use bigger
@@ -1468,9 +1470,9 @@ void __init kmem_cache_init(void)
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
- * 1) initialize the cache_cache cache: it contains the struct
- * kmem_cache structures of all caches, except cache_cache itself:
- * cache_cache is statically allocated.
+ * 1) initialize the kmem_cache cache: it contains the struct
+ * kmem_cache structures of all caches, except kmem_cache itself:
+ * kmem_cache is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
@@ -1479,45 +1481,45 @@ void __init kmem_cache_init(void)
* An __init data area is used for the head array.
* 3) Create the remaining kmalloc caches, with minimally sized
* head arrays.
- * 4) Replace the __init data head arrays for cache_cache and the first
+ * 4) Replace the __init data head arrays for kmem_cache and the first
* kmalloc cache with kmalloc allocated arrays.
- * 5) Replace the __init data for kmem_list3 for cache_cache and
+ * 5) Replace the __init data for kmem_list3 for kmem_cache and
* the other cache's with kmalloc allocated memory.
* 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/
node = numa_mem_id();
- /* 1) create the cache_cache */
+ /* 1) create the kmem_cache */
INIT_LIST_HEAD(&slab_caches);
- list_add(&cache_cache.list, &slab_caches);
- cache_cache.colour_off = cache_line_size();
- cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
- cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+ list_add(&kmem_cache->list, &slab_caches);
+ kmem_cache->colour_off = cache_line_size();
+ kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+ kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
/*
* struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
*/
- cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+ kmem_cache->buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
nr_node_ids * sizeof(struct kmem_list3 *);
#if DEBUG
- cache_cache.obj_size = cache_cache.buffer_size;
+ kmem_cache->obj_size = kmem_cache->buffer_size;
#endif
- cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
+ kmem_cache->buffer_size = ALIGN(kmem_cache->buffer_size,
cache_line_size());
- cache_cache.reciprocal_buffer_size =
- reciprocal_value(cache_cache.buffer_size);
+ kmem_cache->reciprocal_buffer_size =
+ reciprocal_value(kmem_cache->buffer_size);
for (order = 0; order < MAX_ORDER; order++) {
- cache_estimate(order, cache_cache.buffer_size,
- cache_line_size(), 0, &left_over, &cache_cache.num);
- if (cache_cache.num)
+ cache_estimate(order, kmem_cache->buffer_size,
+ cache_line_size(), 0, &left_over, &kmem_cache->num);
+ if (kmem_cache->num)
break;
}
- BUG_ON(!cache_cache.num);
- cache_cache.gfporder = order;
- cache_cache.colour = left_over / cache_cache.colour_off;
- cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+ BUG_ON(!kmem_cache->num);
+ kmem_cache->gfporder = order;
+ kmem_cache->colour = left_over / kmem_cache->colour_off;
+ kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
sizeof(struct slab), cache_line_size());
/* 2+3) create the kmalloc caches */
@@ -1584,15 +1586,15 @@ void __init kmem_cache_init(void)
ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
- BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
- memcpy(ptr, cpu_cache_get(&cache_cache),
+ BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+ memcpy(ptr, cpu_cache_get(kmem_cache),
sizeof(struct arraycache_init));
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
spin_lock_init(&ptr->lock);
- cache_cache.array[smp_processor_id()] = ptr;
+ kmem_cache->array[smp_processor_id()] = ptr;
ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
@@ -1613,7 +1615,7 @@ void __init kmem_cache_init(void)
int nid;
for_each_online_node(nid) {
- init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+ init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
init_list(malloc_sizes[INDEX_AC].cs_cachep,
&initkmem_list3[SIZE_AC + nid], nid);
@@ -2067,7 +2069,7 @@ void __kmem_cache_destroy(struct kmem_ca
kfree(l3);
}
}
- kmem_cache_free(&cache_cache, cachep);
+ kmem_cache_free(kmem_cache, cachep);
}
@@ -2317,7 +2319,7 @@ __kmem_cache_create (const char *name, s
gfp = GFP_NOWAIT;
/* Get cache's description obj. */
- cachep = kmem_cache_zalloc(&cache_cache, gfp);
+ cachep = kmem_cache_zalloc(kmem_cache, gfp);
if (!cachep)
return NULL;
@@ -2374,7 +2376,7 @@ __kmem_cache_create (const char *name, s
if (!cachep->num) {
printk(KERN_ERR
"kmem_cache_create: couldn't create cache %s.\n", name);
- kmem_cache_free(&cache_cache, cachep);
+ kmem_cache_free(kmem_cache, cachep);
return NULL;
}
slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
@@ -3135,7 +3137,7 @@ static void *cache_alloc_debugcheck_afte
static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
{
- if (cachep == &cache_cache)
+ if (cachep == kmem_cache)
return false;
return should_failslab(obj_size(cachep), flags, cachep->flags);
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-05-23 08:51:01.998692154 -0500
+++ linux-2.6/mm/slab.h 2012-05-23 08:51:02.778692137 -0500
@@ -25,8 +25,14 @@ extern enum slab_state slab_state;
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;
+
+/* The list of all slab caches on the system */
extern struct list_head slab_caches;
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
+
+/* Functions provided by the slab allocators */
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *));
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 08:51:01.998692154 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 08:52:58.470689737 -0500
@@ -22,6 +22,7 @@
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
+struct kmem_cache *kmem_cache;
/*
* kmem_cache_create - Create a cache.
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 08:51:02.002692152 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 08:53:47.226688758 -0500
@@ -3186,8 +3186,6 @@ void __kmem_cache_destroy(struct kmem_ca
struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
EXPORT_SYMBOL(kmalloc_caches);
-static struct kmem_cache *kmem_cache;
-
#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
#endif
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-23 08:51:02.002692152 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 08:52:58.430689740 -0500
@@ -622,12 +622,19 @@ int kmem_cache_shrink(struct kmem_cache
}
EXPORT_SYMBOL(kmem_cache_shrink);
+struct kmem_cache kmem_cache_boot = {
+ .name = "kmem_cache",
+ .size = sizeof(struct kmem_cache),
+ .flags = SLAB_PANIC,
+ .align = ARCH_KMALLOC_MINALIGN,
+};
+
void __init kmem_cache_init(void)
{
+ kmem_cache = &kmem_cache_boot;
slab_state = UP;
}
void __init kmem_cache_init_late(void)
{
- slab_state = FULL;
}
--
* Common 15/22] Get rid of __kmem_cache_destroy
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (9 preceding siblings ...)
2012-05-23 20:34 ` Common 13/22] Always use the name "kmem_cache" for the slab cache with the kmem_cache structure Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 16/22] Move duping of slab name to slab_common.c Christoph Lameter
` (8 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: no_slab_specific_kmem_cache_destroy --]
[-- Type: text/plain, Size: 4424 bytes --]
The actions done in __kmem_cache_destroy() can be done in __kmem_cache_shutdown() instead.
This affects RCU handling somewhat: on RCU free, no slab allocator refers to
management structures other than the kmem_cache structure itself. Therefore
those other structures can be freed before the RCU-deferred free to the page
allocator occurs.
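The resulting ordering in the common kmem_cache_destroy() is roughly (sketch):

	if (!__kmem_cache_shutdown(s)) {
		/* per-cpu / per-node metadata is already gone at this point */
		if (s->flags & SLAB_DESTROY_BY_RCU)
			rcu_barrier();
		/* only the kmem_cache structure itself had to survive until here */
		kmem_cache_free(kmem_cache, s);
	}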
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 43 +++++++++++++++++++++----------------------
mm/slab.h | 1 -
mm/slab_common.c | 1 -
mm/slob.c | 4 ----
mm/slub.c | 10 +++++-----
5 files changed, 26 insertions(+), 33 deletions(-)
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-23 04:24:10.463023921 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 04:24:13.703023854 -0500
@@ -538,10 +538,6 @@ struct kmem_cache *__kmem_cache_create(c
return c;
}
-void __kmem_cache_destroy(struct kmem_cache *c)
-{
-}
-
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
void *b;
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 04:24:10.463023921 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 04:24:13.703023854 -0500
@@ -3170,12 +3170,12 @@ static inline int kmem_cache_close(struc
int __kmem_cache_shutdown(struct kmem_cache *s)
{
- return kmem_cache_close(s);
-}
+ int rc = kmem_cache_close(s);
-void __kmem_cache_destroy(struct kmem_cache *s)
-{
- sysfs_slab_remove(s);
+ if (!rc)
+ sysfs_slab_remove(s);
+
+ return rc;
}
/********************************************************************
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 04:24:10.467023921 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 04:24:13.707023854 -0500
@@ -2052,26 +2052,6 @@ static void slab_destroy(struct kmem_cac
}
}
-void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
- int i;
- struct kmem_list3 *l3;
-
- for_each_online_cpu(i)
- kfree(cachep->array[i]);
-
- /* NUMA: free the list3 structures */
- for_each_online_node(i) {
- l3 = cachep->nodelists[i];
- if (l3) {
- kfree(l3->shared);
- free_alien_cache(l3->alien);
- kfree(l3);
- }
- }
-}
-
-
/**
* calculate_slab_order - calculate size (page order) of slabs
* @cachep: pointer to the cache that is being created
@@ -2434,7 +2414,7 @@ __kmem_cache_create (const char *name, s
cachep->refcount = 1;
if (setup_cpu_cache(cachep, gfp)) {
- __kmem_cache_destroy(cachep);
+ __kmem_cache_shutdown(cachep);
return NULL;
}
@@ -2609,7 +2589,26 @@ EXPORT_SYMBOL(kmem_cache_shrink);
int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
- return __cache_shrink(cachep);
+ int i;
+ struct kmem_list3 *l3;
+ int rc = __cache_shrink(cachep);
+
+ if (rc)
+ return rc;
+
+ for_each_online_cpu(i)
+ kfree(cachep->array[i]);
+
+ /* NUMA: free the list3 structures */
+ for_each_online_node(i) {
+ l3 = cachep->nodelists[i];
+ if (l3) {
+ kfree(l3->shared);
+ free_alien_cache(l3->alien);
+ kfree(l3);
+ }
+ }
+ return 0;
}
/*
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-05-23 04:24:07.027023992 -0500
+++ linux-2.6/mm/slab.h 2012-05-23 04:24:13.707023854 -0500
@@ -37,6 +37,5 @@ struct kmem_cache *__kmem_cache_create(c
size_t align, unsigned long flags, void (*ctor)(void *));
int __kmem_cache_shutdown(struct kmem_cache *);
-void __kmem_cache_destroy(struct kmem_cache *);
#endif
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 04:24:10.459023921 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 04:24:13.707023854 -0500
@@ -128,7 +128,6 @@ void kmem_cache_destroy(struct kmem_cach
if (s->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
- __kmem_cache_destroy(s);
kmem_cache_free(kmem_cache, s);
} else {
list_add(&s->list, &slab_caches);
--
* Common 16/22] Move duping of slab name to slab_common.c
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (10 preceding siblings ...)
2012-05-23 20:34 ` Common 15/22] Get rid of __kmem_cache_destroy Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 17/22] Do slab aliasing call from common code Christoph Lameter
` (7 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: dup_name_in_common --]
[-- Type: text/plain, Size: 3064 bytes --]
Duping of the slab name currently has to be done by each allocator. Moving this
code to slab_common.c avoids duplicate implementations.
With this patch we have common string handling for all slab allocators:
strings passed to kmem_cache_create() are copied internally, so subsystems
can pass temporary strings when creating slab caches.
Caches created in the early stages of bootstrap are never freed (they cannot
be, since they are essential to slab allocator operation), so during bootstrap
we do not have to worry about duping names.
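A hypothetical caller, for illustration only (cache, object_size and instance
are caller-defined, invented here):

	char name[32];

	/* the stack buffer may go away after the call; the slab code keeps a copy */
	snprintf(name, sizeof(name), "foo_cache-%d", instance);
	cache = kmem_cache_create(name, object_size, 0, SLAB_HWCACHE_ALIGN, NULL);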
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab_common.c | 24 +++++++++++++++++-------
mm/slub.c | 5 -----
2 files changed, 17 insertions(+), 12 deletions(-)
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-18 07:24:55.804077432 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-18 07:34:32.288065496 -0500
@@ -53,6 +53,7 @@ struct kmem_cache *kmem_cache_create(con
unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s = NULL;
+ char *n;
#ifdef CONFIG_DEBUG_VM
if (!name || in_interrupt() || size < sizeof(void *) ||
@@ -97,14 +98,22 @@ struct kmem_cache *kmem_cache_create(con
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
#endif
- s = __kmem_cache_create(name, size, align, flags, ctor);
+ n = kstrdup(name, GFP_KERNEL);
+ if (!n)
+ goto oops;
- /*
- * Check if the slab has actually been created and if it was a
- * real instatiation. Aliases do not belong on the list
- */
- if (s && s->refcount == 1)
- list_add(&s->list, &slab_caches);
+ s = __kmem_cache_create(n, size, align, flags, ctor);
+
+ if (s) {
+ /*
+ * Check if the slab has actually been created and if it was a
+ * real instatiation. Aliases do not belong on the list
+ */
+ if (s->refcount == 1)
+ list_add(&s->list, &slab_caches);
+
+ } else
+ kfree(n);
oops:
mutex_unlock(&slab_mutex);
@@ -128,6 +137,7 @@ void kmem_cache_destroy(struct kmem_cach
if (s->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
+ kfree(s->name);
kmem_cache_free(kmem_cache, s);
} else {
list_add(&s->list, &slab_caches);
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-18 07:24:55.804077432 -0500
+++ linux-2.6/mm/slub.c 2012-05-18 07:29:07.284072212 -0500
@@ -3913,10 +3913,6 @@ struct kmem_cache *__kmem_cache_create(c
return s;
}
- n = kstrdup(name, GFP_KERNEL);
- if (!n)
- return NULL;
-
s = kmalloc(kmem_size, GFP_KERNEL);
if (s) {
if (kmem_cache_open(s, n,
@@ -3934,7 +3930,6 @@ struct kmem_cache *__kmem_cache_create(c
}
kfree(s);
}
- kfree(n);
return NULL;
}
--
* Common 17/22] Do slab aliasing call from common code
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (11 preceding siblings ...)
2012-05-23 20:34 ` Common 16/22] Move duping of slab name to slab_common.c Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 19/22] Do not pass ctor to __kmem_cache_create() Christoph Lameter
` (6 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: slab_alias_common --]
[-- Type: text/plain, Size: 3647 bytes --]
The slab aliasing logic causes some strange contortions in
SLUB. So add a call to deal with aliases to slab_common.c,
but disable it for the other slab allocators by providing stubs
that never create aliases.
Full general support for aliases will require additional
cleanup passes and more standardization of fields in
kmem_cache.
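From the common code's point of view the call sequence becomes roughly (sketch):

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)		/* SLUB merged into an existing cache */
		goto oops;

	/* SLAB and SLOB stubs always return NULL, so a new cache is created */
	s = __kmem_cache_create(n, size, align, flags, ctor);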
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.h | 10 ++++++++++
mm/slab_common.c | 16 +++++++---------
mm/slub.c | 16 +++++++++++-----
3 files changed, 28 insertions(+), 14 deletions(-)
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-05-23 06:54:33.934836948 -0500
+++ linux-2.6/mm/slab.h 2012-05-23 08:00:46.210754648 -0500
@@ -36,6 +36,16 @@ extern struct kmem_cache *kmem_cache;
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *));
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+
int __kmem_cache_shutdown(struct kmem_cache *);
#endif
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 06:54:33.954836948 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 07:59:58.346755634 -0500
@@ -98,21 +98,19 @@ struct kmem_cache *kmem_cache_create(con
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
#endif
+ s = __kmem_cache_alias(name, size, align, flags, ctor);
+ if (s)
+ goto oops;
+
n = kstrdup(name, GFP_KERNEL);
if (!n)
goto oops;
s = __kmem_cache_create(n, size, align, flags, ctor);
- if (s) {
- /*
- * Check if the slab has actually been created and if it was a
- * real instatiation. Aliases do not belong on the list
- */
- if (s->refcount == 1)
- list_add(&s->list, &slab_caches);
-
- } else
+ if (s)
+ list_add(&s->list, &slab_caches);
+ else
kfree(n);
oops:
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 06:54:33.922836951 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 07:59:58.290755636 -0500
@@ -3890,11 +3890,10 @@ static struct kmem_cache *find_mergeable
return NULL;
}
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
- char *n;
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
@@ -3908,14 +3907,21 @@ struct kmem_cache *__kmem_cache_create(c
if (sysfs_slab_alias(s, name)) {
s->refcount--;
- return NULL;
+ s = NULL;
}
- return s;
}
+ return s;
+}
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *))
+{
+ struct kmem_cache *s;
+
s = kmalloc(kmem_size, GFP_KERNEL);
if (s) {
- if (kmem_cache_open(s, n,
+ if (kmem_cache_open(s, name,
size, align, flags, ctor)) {
int r;
--
* Common 19/22] Do not pass ctor to __kmem_cache_create()
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (12 preceding siblings ...)
2012-05-23 20:34 ` Common 17/22] Do slab aliasing call from common code Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 20/22] Set parameters on kmem_cache instead of passing them to functions Christoph Lameter
` (5 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: no_passing_of_ctor --]
[-- Type: text/plain, Size: 5067 bytes --]
Set the ctor field, like the name field, directly in the kmem_cache
structure after allocation and before calling the allocator-specific portion.
Also extract refcount handling into the common code.
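The common code now fills in the fields and the refcount around the allocator
hook, roughly (a sketch mirroring the slab_common.c hunk below):

	s->name = n;
	s->ctor = ctor;
	r = __kmem_cache_create(s, size, align, flags);
	if (!r) {
		s->refcount = 1;
		list_add(&s->list, &slab_caches);
	}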
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.h | 2 +-
mm/slab_common.c | 9 +++++----
mm/slob.c | 4 +---
mm/slub.c | 17 +++++++----------
4 files changed, 14 insertions(+), 18 deletions(-)
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 08:54:34.202687757 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 08:54:35.234687733 -0500
@@ -115,12 +115,13 @@ struct kmem_cache *kmem_cache_create(con
}
s->name = n;
+ s->ctor = ctor;
+ r = __kmem_cache_create(s, size, align, flags);
- r = __kmem_cache_create(s, size, align, flags, ctor);
-
- if (!r)
+ if (!r) {
+ s->refcount = 1;
list_add(&s->list, &slab_caches);
- else {
+ } else {
kmem_cache_free(kmem_cache, s);
kfree(n);
s = NULL;
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-05-23 08:54:34.202687757 -0500
+++ linux-2.6/mm/slab.h 2012-05-23 08:54:35.234687733 -0500
@@ -34,7 +34,7 @@ extern struct kmem_cache *kmem_cache;
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *s, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *));
+ size_t align, unsigned long flags);
#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-23 08:54:34.210687755 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 08:55:12.074686972 -0500
@@ -509,7 +509,7 @@ size_t ksize(const void *block)
EXPORT_SYMBOL(ksize);
int __kmem_cache_create(struct kmem_cache *c, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+ size_t align, unsigned long flags)
{
c->size = size;
if (flags & SLAB_DESTROY_BY_RCU) {
@@ -517,7 +517,6 @@ int __kmem_cache_create(struct kmem_cach
c->size += sizeof(struct slob_rcu);
}
c->flags = flags;
- c->ctor = ctor;
/* ignore alignment unless it's forced */
c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
if (c->align < ARCH_SLAB_MINALIGN)
@@ -526,7 +525,6 @@ int __kmem_cache_create(struct kmem_cach
c->align = align;
kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
- c->refcount = 1;
return 0;
}
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 08:54:34.206687757 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 08:54:35.238687733 -0500
@@ -3000,13 +3000,11 @@ static int calculate_sizes(struct kmem_c
}
static int kmem_cache_open(struct kmem_cache *s, size_t size,
- size_t align, unsigned long flags,
- void (*ctor)(void *))
+ size_t align, unsigned long flags)
{
- s->ctor = ctor;
s->objsize = size;
s->align = align;
- s->flags = kmem_cache_flags(size, flags, s->name, ctor);
+ s->flags = kmem_cache_flags(size, flags, s->name, s->ctor);
s->reserved = 0;
if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3069,7 +3067,6 @@ static int kmem_cache_open(struct kmem_c
else
s->cpu_partial = 30;
- s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
#endif
@@ -3230,7 +3227,7 @@ static struct kmem_cache *__init create_
* This function is called with IRQs disabled during early-boot on
* single CPU so there's no need to take slab_mutex here.
*/
- r = kmem_cache_open(s, size, ARCH_KMALLOC_MINALIGN, flags, NULL);
+ r = kmem_cache_open(s, size, ARCH_KMALLOC_MINALIGN, flags);
if (r)
panic("Creation of kmalloc slab %s size=%d failed. Code %d\n",
name, size, r);
@@ -3684,7 +3681,7 @@ void __init kmem_cache_init(void)
kmem_cache_node->name = "kmem_cache_node";
r = kmem_cache_open(kmem_cache_node, sizeof(struct kmem_cache_node),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ 0, SLAB_HWCACHE_ALIGN);
if (r)
goto panic;
@@ -3697,7 +3694,7 @@ void __init kmem_cache_init(void)
kmem_cache->name = "kmem_cache";
r = kmem_cache_open(kmem_cache, kmem_size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ SLAB_HWCACHE_ALIGN);
if (r)
goto panic;
@@ -3918,9 +3915,9 @@ struct kmem_cache *__kmem_cache_alias(co
}
int __kmem_cache_create(struct kmem_cache *s, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+ size_t align, unsigned long flags)
{
- int r = kmem_cache_open(s, size, align, flags, ctor);
+ int r = kmem_cache_open(s, size, align, flags);
if (r)
return r;
--
* Common 20/22] Set parameters on kmem_cache instead of passing them to functions
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (13 preceding siblings ...)
2012-05-23 20:34 ` Common 19/22] Do not pass ctor to __kmem_cache_create() Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 21/22] Common alignment code Christoph Lameter
` (4 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: parameters_in_kmem_cache --]
[-- Type: text/plain, Size: 6582 bytes --]
Numerous parameters are passed repeatedly to the kmem_cache creation
functions. Simplify things by having the common code set these fields in
the kmem_cache structure. That way the parameter lists get much simpler,
and so does the code that uses them. It then also becomes possible to put
more handling into the common code.
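Roughly, the allocator-specific setup then takes only the kmem_cache
pointer and reads everything from the structure. The following is an
illustrative userspace sketch, not the actual SLUB code; the real layout
calculation is stubbed out:

#include <stddef.h>
#include <stdio.h>

struct kmem_cache {
        const char *name;
        size_t size;            /* object size, set by common code */
        size_t objsize;
        size_t align;           /* set by common code */
        unsigned long flags;    /* set by common code */
        void (*ctor)(void *);
};

/* Before: kmem_cache_open(s, size, align, flags)
 * After:  kmem_cache_open(s) -- everything comes from the structure. */
static int kmem_cache_open(struct kmem_cache *s)
{
        s->objsize = s->size;
        /* a real allocator would now derive the slab layout from
         * s->size, s->align, s->flags and s->ctor */
        return 0;
}

int main(void)
{
        struct kmem_cache s = { .name = "example", .size = 24,
                                .align = 8, .flags = 0 };

        if (kmem_cache_open(&s))
                return 1;
        printf("%s: objsize=%zu align=%zu\n", s.name, s.objsize, s.align);
        return 0;
}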
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 16 ++++++++++------
mm/slab.h | 3 +--
mm/slab_common.c | 5 ++++-
mm/slob.c | 8 ++++----
mm/slub.c | 28 +++++++++++++++-------------
5 files changed, 34 insertions(+), 26 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 08:54:35.000000000 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 08:55:22.514686758 -0500
@@ -2999,12 +2999,10 @@ static int calculate_sizes(struct kmem_c
}
-static int kmem_cache_open(struct kmem_cache *s, size_t size,
- size_t align, unsigned long flags)
+static int kmem_cache_open(struct kmem_cache *s)
{
- s->objsize = size;
- s->align = align;
- s->flags = kmem_cache_flags(size, flags, s->name, s->ctor);
+ s->objsize = s->size;
+ s->flags = kmem_cache_flags(s->size, s->flags, s->name, s->ctor);
s->reserved = 0;
if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3222,12 +3220,15 @@ static struct kmem_cache *__init create_
s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
s->name = name;
+ s->size = size;
+ s->align = ARCH_KMALLOC_MINALIGN;
+ s->flags = flags;
/*
* This function is called with IRQs disabled during early-boot on
* single CPU so there's no need to take slab_mutex here.
*/
- r = kmem_cache_open(s, size, ARCH_KMALLOC_MINALIGN, flags);
+ r = kmem_cache_open(s);
if (r)
panic("Creation of kmalloc slab %s size=%d failed. Code %d\n",
name, size, r);
@@ -3679,9 +3680,10 @@ void __init kmem_cache_init(void)
*/
kmem_cache_node = (void *)kmem_cache + kmalloc_size;
kmem_cache_node->name = "kmem_cache_node";
+ kmem_cache_node->size = sizeof(struct kmem_cache_node);
+ kmem_cache_node->flags = SLAB_HWCACHE_ALIGN;
- r = kmem_cache_open(kmem_cache_node, sizeof(struct kmem_cache_node),
- 0, SLAB_HWCACHE_ALIGN);
+ r = kmem_cache_open(kmem_cache_node);
if (r)
goto panic;
@@ -3692,9 +3694,10 @@ void __init kmem_cache_init(void)
temp_kmem_cache = kmem_cache;
kmem_cache->name = "kmem_cache";
+ kmem_cache->size = kmem_size;
+ kmem_cache->flags = SLAB_HWCACHE_ALIGN;
- r = kmem_cache_open(kmem_cache, kmem_size, 0,
- SLAB_HWCACHE_ALIGN);
+ r = kmem_cache_open(kmem_cache);
if (r)
goto panic;
@@ -3914,10 +3917,9 @@ struct kmem_cache *__kmem_cache_alias(co
return s;
}
-int __kmem_cache_create(struct kmem_cache *s, size_t size,
- size_t align, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *s)
{
- int r = kmem_cache_open(s, size, align, flags);
+ int r = kmem_cache_open(s);
if (r)
return r;
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-05-23 08:54:35.000000000 -0500
+++ linux-2.6/mm/slab.h 2012-05-23 08:55:22.514686758 -0500
@@ -33,8 +33,7 @@ extern struct list_head slab_caches;
extern struct kmem_cache *kmem_cache;
/* Functions provided by the slab allocators */
-int __kmem_cache_create(struct kmem_cache *s, size_t size,
- size_t align, unsigned long flags);
+int __kmem_cache_create(struct kmem_cache *s);
#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 08:54:35.000000000 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 08:55:22.518686752 -0500
@@ -116,7 +116,10 @@ struct kmem_cache *kmem_cache_create(con
s->name = n;
s->ctor = ctor;
- r = __kmem_cache_create(s, size, align, flags);
+ s->size = size;
+ s->align = align;
+ s->flags = flags;
+ r = __kmem_cache_create(s);
if (!r) {
s->refcount = 1;
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 08:54:34.000000000 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 08:55:22.518686752 -0500
@@ -1444,9 +1444,11 @@ struct kmem_cache *create_kmalloc_cache(
goto panic;
s->name = name;
+ s->size = size;
+ s->align = ARCH_KMALLOC_MINALIGN;
+ s->flags = flags | ARCH_KMALLOC_FLAGS;
- r = __kmem_cache_create(s, size, ARCH_KMALLOC_MINALIGN,
- flags | ARCH_KMALLOC_FLAGS);
+ r = __kmem_cache_create(s);
if (r)
goto panic;
@@ -2206,11 +2208,13 @@ static int __init_refok setup_cpu_cache(
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-int __kmem_cache_create(struct kmem_cache *cachep, size_t size, size_t align,
- unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *cachep)
{
size_t left_over, slab_size, ralign;
gfp_t gfp;
+ int flags = cachep->flags;
+ int size = cachep->size;
+ int align = cachep->align;
#if DEBUG
#if FORCED_DEBUG
@@ -2282,8 +2286,8 @@ int __kmem_cache_create(struct kmem_cach
ralign = ARCH_SLAB_MINALIGN;
}
/* 3) caller mandated alignment */
- if (ralign < align) {
- ralign = align;
+ if (ralign < cachep->align) {
+ ralign = cachep->align;
}
/* disable debug if necessary */
if (ralign > __alignof__(unsigned long long))
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-23 08:55:12.000000000 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 08:55:53.834686358 -0500
@@ -508,15 +508,15 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);
-int __kmem_cache_create(struct kmem_cache *c, size_t size,
- size_t align, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *c)
{
- c->size = size;
+ int flags = c->flags;
+ int align = c->align;
+
if (flags & SLAB_DESTROY_BY_RCU) {
/* leave room for rcu footer at the end of object */
c->size += sizeof(struct slob_rcu);
}
- c->flags = flags;
/* ignore alignment unless it's forced */
c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
if (c->align < ARCH_SLAB_MINALIGN)
--
* Common 21/22] Common alignment code
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (14 preceding siblings ...)
2012-05-23 20:34 ` Common 20/22] Set parameters on kmem_cache instead of passing them to functions Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-23 20:34 ` Common 22/22] Common object size alignment Christoph Lameter
` (3 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: common_alignment --]
[-- Type: text/plain, Size: 8661 bytes --]
Extract the code to do object alignment from the allocators.
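The helper moved into mm/slab_common.c is visible in the diff below. For
reference, here is a standalone userspace rendition of the same logic,
with cache_line_size() and ARCH_SLAB_MINALIGN stubbed to assumed values
(64 and 8), the SLAB_HWCACHE_ALIGN value used only as a placeholder bit,
and max() written out. With SLAB_HWCACHE_ALIGN, a 24-byte object and a
requested alignment of 8 it yields 32:

#include <stdio.h>

/* Stand-ins for the kernel environment -- values assumed for illustration. */
#define SLAB_HWCACHE_ALIGN      0x00002000UL
#define ARCH_SLAB_MINALIGN      8UL
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((a) - 1))
static unsigned long cache_line_size(void) { return 64; }

/* Same logic as the calculate_alignment() moved into mm/slab_common.c. */
static unsigned long calculate_alignment(unsigned long flags,
                                         unsigned long align, unsigned long size)
{
        if (flags & SLAB_HWCACHE_ALIGN) {
                unsigned long ralign = cache_line_size();

                /* halve the cache line until the object no longer fits twice */
                while (size <= ralign / 2)
                        ralign /= 2;
                align = (align > ralign) ? align : ralign;      /* max() */
        }

        if (align < ARCH_SLAB_MINALIGN)
                align = ARCH_SLAB_MINALIGN;

        return ALIGN(align, sizeof(void *));
}

int main(void)
{
        /* 24-byte objects, caller asks for 8, HWCACHE_ALIGN set:
         * the 64-byte line is halved once (24 <= 32), so the result is 32. */
        printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 8, 24));
        return 0;
}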
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 22 +---------------------
mm/slab.h | 3 +++
mm/slab_common.c | 30 +++++++++++++++++++++++++++++-
mm/slob.c | 13 +------------
mm/slub.c | 47 +++++++++--------------------------------------
5 files changed, 43 insertions(+), 72 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 08:12:53.750739568 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 08:37:31.602708946 -0500
@@ -1445,7 +1445,7 @@ struct kmem_cache *create_kmalloc_cache(
s->name = name;
s->size = size;
- s->align = ARCH_KMALLOC_MINALIGN;
+ s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
s->flags = flags | ARCH_KMALLOC_FLAGS;
r = __kmem_cache_create(s);
@@ -2249,22 +2249,6 @@ int __kmem_cache_create(struct kmem_cach
size &= ~(BYTES_PER_WORD - 1);
}
- /* calculate the final buffer alignment: */
-
- /* 1) arch recommendation: can be overridden for debug */
- if (flags & SLAB_HWCACHE_ALIGN) {
- /*
- * Default alignment: as specified by the arch code. Except if
- * an object is really small, then squeeze multiple objects into
- * one cacheline.
- */
- ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- } else {
- ralign = BYTES_PER_WORD;
- }
-
/*
* Redzoning and user store require word alignment or possibly larger.
* Note this will be overridden by architecture or caller mandated
@@ -2281,10 +2265,6 @@ int __kmem_cache_create(struct kmem_cach
size &= ~(REDZONE_ALIGN - 1);
}
- /* 2) arch mandated alignment */
- if (ralign < ARCH_SLAB_MINALIGN) {
- ralign = ARCH_SLAB_MINALIGN;
- }
/* 3) caller mandated alignment */
if (ralign < cachep->align) {
ralign = cachep->align;
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 08:12:53.810739566 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 08:20:10.614730515 -0500
@@ -25,6 +25,34 @@ DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size)
+{
+ /*
+ * If the user wants hardware cache aligned objects then follow that
+ * suggestion if the object is sufficiently large.
+ *
+ * The hardware cache alignment cannot override the specified
+ * alignment though. If that is greater then use it.
+ */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ unsigned long ralign = cache_line_size();
+ while (size <= ralign / 2)
+ ralign /= 2;
+ align = max(align, ralign);
+ }
+
+ if (align < ARCH_SLAB_MINALIGN)
+ align = ARCH_SLAB_MINALIGN;
+
+ return ALIGN(align, sizeof(void *));
+}
+
+
+/*
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
@@ -117,7 +145,7 @@ struct kmem_cache *kmem_cache_create(con
s->name = n;
s->ctor = ctor;
s->size = size;
- s->align = align;
+ s->align = calculate_alignment(flags, align, size);
s->flags = flags;
r = __kmem_cache_create(s);
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-05-23 08:12:53.734739565 -0500
+++ linux-2.6/mm/slob.c 2012-05-23 08:13:42.226738561 -0500
@@ -124,7 +124,6 @@ static inline void clear_slob_page_free(
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
-#define SLOB_ALIGN L1_CACHE_BYTES
/*
* struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -510,21 +509,11 @@ EXPORT_SYMBOL(ksize);
int __kmem_cache_create(struct kmem_cache *c)
{
- int flags = c->flags;
- int align = c->align;
-
- if (flags & SLAB_DESTROY_BY_RCU) {
+ if (c->flags & SLAB_DESTROY_BY_RCU) {
/* leave room for rcu footer at the end of object */
c->size += sizeof(struct slob_rcu);
}
- /* ignore alignment unless it's forced */
- c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
- if (c->align < ARCH_SLAB_MINALIGN)
- c->align = ARCH_SLAB_MINALIGN;
- if (c->align < align)
- c->align = align;
- kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
return 0;
}
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 08:12:53.774739565 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 08:35:32.002711423 -0500
@@ -2724,32 +2724,6 @@ static inline int calculate_order(int si
return -ENOSYS;
}
-/*
- * Figure out what the alignment of the objects will be.
- */
-static unsigned long calculate_alignment(unsigned long flags,
- unsigned long align, unsigned long size)
-{
- /*
- * If the user wants hardware cache aligned objects then follow that
- * suggestion if the object is sufficiently large.
- *
- * The hardware cache alignment cannot override the specified
- * alignment though. If that is greater then use it.
- */
- if (flags & SLAB_HWCACHE_ALIGN) {
- unsigned long ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- align = max(align, ralign);
- }
-
- if (align < ARCH_SLAB_MINALIGN)
- align = ARCH_SLAB_MINALIGN;
-
- return ALIGN(align, sizeof(void *));
-}
-
static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
@@ -2882,7 +2856,7 @@ static void set_min_partial(struct kmem_
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
unsigned long flags = s->flags;
- unsigned long size = s->objsize;
+ unsigned long size = s->size;
unsigned long align = s->align;
int order;
@@ -2955,14 +2929,6 @@ static int calculate_sizes(struct kmem_c
#endif
/*
- * Determine the alignment based on various parameters that the
- * user specified and the dynamic determination of cache line size
- * on bootup.
- */
- align = calculate_alignment(flags, align, s->objsize);
- s->align = align;
-
- /*
* SLUB stores one object immediately after another beginning from
* offset 0. In order to align the objects we have to simply size
* each object to conform to the alignment.
@@ -2996,7 +2962,6 @@ static int calculate_sizes(struct kmem_c
s->max = s->oo;
return !!oo_objects(s->oo);
-
}
static int kmem_cache_open(struct kmem_cache *s)
@@ -3221,7 +3186,7 @@ static struct kmem_cache *__init create_
s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
s->name = name;
s->size = size;
- s->align = ARCH_KMALLOC_MINALIGN;
+ s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
s->flags = flags;
/*
@@ -3682,6 +3647,8 @@ void __init kmem_cache_init(void)
kmem_cache_node->name = "kmem_cache_node";
kmem_cache_node->size = sizeof(struct kmem_cache_node);
kmem_cache_node->flags = SLAB_HWCACHE_ALIGN;
+ kmem_cache_node->align = calculate_alignment(SLAB_HWCACHE_ALIGN,
+ 0, sizeof(struct kmem_cache_node));
r = kmem_cache_open(kmem_cache_node);
if (r)
@@ -3696,6 +3663,8 @@ void __init kmem_cache_init(void)
kmem_cache->name = "kmem_cache";
kmem_cache->size = kmem_size;
kmem_cache->flags = SLAB_HWCACHE_ALIGN;
+ kmem_cache->align = calculate_alignment(SLAB_HWCACHE_ALIGN,
+ 0, sizeof(struct kmem_cache));
r = kmem_cache_open(kmem_cache);
if (r)
@@ -3919,7 +3888,9 @@ struct kmem_cache *__kmem_cache_alias(co
int __kmem_cache_create(struct kmem_cache *s)
{
- int r = kmem_cache_open(s);
+ int r;
+
+ r = kmem_cache_open(s);
if (r)
return r;
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-05-23 08:12:53.790739566 -0500
+++ linux-2.6/mm/slab.h 2012-05-23 08:13:42.226738561 -0500
@@ -32,6 +32,9 @@ extern struct list_head slab_caches;
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
+unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size);
+
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *s);
--
* Common 22/22] Common object size alignment
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (15 preceding siblings ...)
2012-05-23 20:34 ` Common 21/22] Common alignment code Christoph Lameter
@ 2012-05-23 20:34 ` Christoph Lameter
2012-05-30 6:28 ` Common 00/22] Sl[auo]b: Common functionality V3 Pekka Enberg
` (2 subsequent siblings)
19 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-05-23 20:34 UTC (permalink / raw)
To: Pekka Enberg
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
[-- Attachment #1: align_size --]
[-- Type: text/plain, Size: 2841 bytes --]
All allocators align the objects to a word boundary. Put that into
common code.
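Concretely, the common kmem_cache_create() now rounds the object size up
to the next word boundary before the allocator sees it. A tiny
illustrative sketch (plain userspace C; ALIGN is the usual round-up
macro, and the example size is made up):

#include <stddef.h>
#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        size_t size = 13;       /* example object size */

        /* On a 64-bit machine sizeof(void *) is 8, so 13 becomes 16. */
        size = ALIGN(size, sizeof(void *));
        printf("%zu\n", size);
        return 0;
}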
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 10 ----------
mm/slab_common.c | 3 ++-
mm/slub.c | 7 -------
3 files changed, 2 insertions(+), 18 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-05-23 09:13:05.938664721 -0500
+++ linux-2.6/mm/slab.c 2012-05-23 09:13:30.758664204 -0500
@@ -2240,16 +2240,6 @@ int __kmem_cache_create(struct kmem_cach
BUG_ON(flags & ~CREATE_MASK);
/*
- * Check that size is in terms of words. This is needed to avoid
- * unaligned accesses for some archs when redzoning is used, and makes
- * sure any on-slab bufctl's are also correctly aligned.
- */
- if (size & (BYTES_PER_WORD - 1)) {
- size += (BYTES_PER_WORD - 1);
- size &= ~(BYTES_PER_WORD - 1);
- }
-
- /*
* Redzoning and user store require word alignment or possibly larger.
* Note this will be overridden by architecture or caller mandated
* alignment if either is greater than BYTES_PER_WORD.
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-05-23 09:13:05.974664718 -0500
+++ linux-2.6/mm/slab_common.c 2012-05-23 09:14:49.634662589 -0500
@@ -127,6 +127,7 @@ struct kmem_cache *kmem_cache_create(con
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
#endif
+ /* Align size to a word boundary */
s = __kmem_cache_alias(name, size, align, flags, ctor);
if (s)
goto oops;
@@ -144,7 +145,7 @@ struct kmem_cache *kmem_cache_create(con
s->name = n;
s->ctor = ctor;
- s->size = size;
+	s->size = ALIGN(size, sizeof(void *));
s->align = calculate_alignment(flags, align, size);
s->flags = flags;
r = __kmem_cache_create(s);
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-05-23 09:13:05.954664718 -0500
+++ linux-2.6/mm/slub.c 2012-05-23 09:13:30.762664204 -0500
@@ -2860,13 +2860,6 @@ static int calculate_sizes(struct kmem_c
unsigned long align = s->align;
int order;
- /*
- * Round up object size to the next word boundary. We can only
- * place the free pointer at word boundaries and this determines
- * the possible location of the free pointer.
- */
- size = ALIGN(size, sizeof(void *));
-
#ifdef CONFIG_SLUB_DEBUG
/*
* Determine if we can poison the object itself. If the user of
--
* Re: Common 00/22] Sl[auo]b: Common functionality V3
2012-05-23 20:34 Common 00/22] Sl[auo]b: Common functionality V3 Christoph Lameter
` (16 preceding siblings ...)
2012-05-23 20:34 ` Common 22/22] Common object size alignment Christoph Lameter
@ 2012-05-30 6:28 ` Pekka Enberg
[not found] ` <20120523203506.170219003@linux.com>
[not found] ` <20120523203507.324764286@linux.com>
19 siblings, 0 replies; 32+ messages in thread
From: Pekka Enberg @ 2012-05-30 6:28 UTC (permalink / raw)
To: Christoph Lameter
Cc: linux-mm, David Rientjes, Matt Mackall, Glauber Costa,
Joonsoo Kim
On Wed, May 23, 2012 at 11:34 PM, Christoph Lameter <cl@linux.com> wrote:
> This is a series of patches that extracts common functionality from
> slab allocators into a common code base. The intent is to standardize
> as much of the allocator behavior as possible while keeping the
> distinctive features of each allocator, which are mostly due to their
> storage format and serialization approaches.
Matt, any comments on the SLOB changes?
--
[parent not found: <20120523203506.170219003@linux.com>]
[parent not found: <20120523203507.324764286@linux.com>]
* Re: Common 04/22] [slab] Use page struct fields instead of casting
[not found] ` <20120523203507.324764286@linux.com>
@ 2012-05-31 21:23 ` David Rientjes
2012-06-01 13:17 ` JoonSoo Kim
2012-06-01 14:00 ` Christoph Lameter
0 siblings, 2 replies; 32+ messages in thread
From: David Rientjes @ 2012-05-31 21:23 UTC (permalink / raw)
To: Christoph Lameter
Cc: Pekka Enberg, linux-mm, Matt Mackall, Glauber Costa, Joonsoo Kim
On Wed, 23 May 2012, Christoph Lameter wrote:
> Add fields to the page struct so that it is properly documented that
> slab overlays the lru fields.
>
> This cleans up some casts in slab.
>
Sounds good, but...
> Index: linux-2.6/include/linux/mm_types.h
> ===================================================================
> --- linux-2.6.orig/include/linux/mm_types.h 2012-05-22 09:05:49.716464025 -0500
> +++ linux-2.6/include/linux/mm_types.h 2012-05-22 09:21:28.532444572 -0500
> @@ -90,6 +90,10 @@ struct page {
> atomic_t _count; /* Usage count, see below. */
> };
> };
> + struct { /* SLAB */
> + struct kmem_cache *slab_cache;
> + struct slab *slab_page;
> + };
> };
>
> /* Third double word block */
The lru fields are in the third double word block.
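For context, a reduced sketch of where the overlay would have to live (a
hypothetical simplification, not the real include/linux/mm_types.h; only
the third double word block is spelled out). slab_cache and slab_page
only document reality if they share the union with lru:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
struct kmem_cache;      /* opaque for this sketch */
struct slab;            /* opaque for this sketch */

/* Heavily reduced struct page: only the piece under discussion. */
struct page {
        unsigned long flags;
        /* ... first and second double word blocks omitted ... */

        /* Third double word block */
        union {
                struct list_head lru;           /* pageout list, slob free list */
                struct {                        /* SLAB */
                        struct kmem_cache *slab_cache;
                        struct slab *slab_page;
                };
        };
};

int main(void)
{
        /* The overlay only documents reality if both share the same offset. */
        printf("lru at %zu, slab_cache at %zu\n",
               offsetof(struct page, lru),
               offsetof(struct page, slab_cache));
        return 0;
}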
--
* Re: Common 04/22] [slab] Use page struct fields instead of casting
2012-05-31 21:23 ` Common 04/22] [slab] Use page struct fields instead of casting David Rientjes
@ 2012-06-01 13:17 ` JoonSoo Kim
2012-06-01 14:00 ` Christoph Lameter
1 sibling, 0 replies; 32+ messages in thread
From: JoonSoo Kim @ 2012-06-01 13:17 UTC (permalink / raw)
To: David Rientjes
Cc: Christoph Lameter, Pekka Enberg, linux-mm, Matt Mackall,
Glauber Costa
>> Index: linux-2.6/include/linux/mm_types.h
>> ===================================================================
>> --- linux-2.6.orig/include/linux/mm_types.h 2012-05-22 09:05:49.716464025 -0500
>> +++ linux-2.6/include/linux/mm_types.h 2012-05-22 09:21:28.532444572 -0500
>> @@ -90,6 +90,10 @@ struct page {
>> atomic_t _count; /* Usage count, see below. */
>> };
>> };
>> + struct { /* SLAB */
>> + struct kmem_cache *slab_cache;
>> + struct slab *slab_page;
>> + };
>> };
>>
>> /* Third double word block */
>
> The lru fields are in the third double word block.
Yes.
This patch is different from "Common functionality V2 - [2/12]", which I reviewed.
I think a fix is needed.
* Re: Common 04/22] [slab] Use page struct fields instead of casting
2012-05-31 21:23 ` Common 04/22] [slab] Use page struct fields instead of casting David Rientjes
2012-06-01 13:17 ` JoonSoo Kim
@ 2012-06-01 14:00 ` Christoph Lameter
1 sibling, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2012-06-01 14:00 UTC (permalink / raw)
To: David Rientjes
Cc: Pekka Enberg, linux-mm, Matt Mackall, Glauber Costa, Joonsoo Kim
On Thu, 31 May 2012, David Rientjes wrote:
> On Wed, 23 May 2012, Christoph Lameter wrote:
>
> > Add fields to the page struct so that it is properly documented that
> > slab overlays the lru fields.
> >
> > This cleans up some casts in slab.
> >
>
> Sounds good, but...
>
> > Index: linux-2.6/include/linux/mm_types.h
> > ===================================================================
> > --- linux-2.6.orig/include/linux/mm_types.h 2012-05-22 09:05:49.716464025 -0500
> > +++ linux-2.6/include/linux/mm_types.h 2012-05-22 09:21:28.532444572 -0500
> > @@ -90,6 +90,10 @@ struct page {
> > atomic_t _count; /* Usage count, see below. */
> > };
> > };
> > + struct { /* SLAB */
> > + struct kmem_cache *slab_cache;
> > + struct slab *slab_page;
> > + };
> > };
> >
> > /* Third double word block */
>
> The lru fields are in the third double word block.
Right. This somehow slipped into an earlier double word block. The next
patchset fixes that.
--