* [slub p1 0/4] SLUB: [RFC] Per cpu partial lists V1
From: Christoph Lameter @ 2011-05-26 19:03 UTC
To: Pekka Enberg; +Cc: David Rientjes, Andi Kleen, linux-mm
The following patchset applies on top of the lockless patchset V6 and
introduces per cpu partial lists. These lists help to avoid per node
locking overhead. The approach is not fully developed yet. Allocator
latency could be further reduced by making these operations work without
disabling interrupts (as the fastpath and the free slowpath already do) and
by implementing better ways of handling the per cpu array of partial pages.
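As a rough sketch of the idea (illustrative only; the real structure layout,
locking and overflow handling are in patch 4/4 below), each kmem_cache_cpu
gains a small array of frozen partial pages that the allocation slowpath can
consume without touching the per node list_lock:

struct kmem_cache_cpu_sketch {		/* hypothetical, simplified layout */
	void **freelist;		/* lockless per cpu freelist */
	struct page *page;		/* current cpu slab */
	struct page *partial[4];	/* cached frozen partial pages */
};

static struct page *take_cpu_partial(struct kmem_cache_cpu_sketch *c)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct page *page = c->partial[i];

		if (page) {			/* no per node lock taken */
			c->partial[i] = NULL;
			return page;
		}
	}
	return NULL;	/* fall back to get_partial() and the node lists */
}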
* [slub p1 1/4] slub: Prepare inuse field in new_slab()
From: Christoph Lameter @ 2011-05-26 19:03 UTC
To: Pekka Enberg; +Cc: David Rientjes, Andi Kleen, linux-mm
inuse will always be set to page->objects. There is no point in
initializing the field to zero in new_slab() and then overwriting
the value in __slab_alloc().
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-05-24 09:41:15.454874919 -0500
+++ linux-2.6/mm/slub.c 2011-05-24 09:41:20.854874883 -0500
@@ -1332,7 +1332,7 @@ static struct page *new_slab(struct kmem
set_freepointer(s, last, NULL);
page->freelist = start;
- page->inuse = 0;
+ page->inuse = page->objects;
page->frozen = 1;
out:
return page;
@@ -2022,7 +2022,6 @@ new_slab:
*/
object = page->freelist;
page->freelist = NULL;
- page->inuse = page->objects;
stat(s, ALLOC_SLAB);
c->node = page_to_nid(page);
@@ -2564,7 +2563,7 @@ static void early_kmem_cache_node_alloc(
n = page->freelist;
BUG_ON(!n);
page->freelist = get_freepointer(kmem_cache_node, n);
- page->inuse++;
+ page->inuse = 1;
page->frozen = 0;
kmem_cache_node->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
* [slub p1 2/4] slub: pass kmem_cache_cpu pointer to get_partial()
From: Christoph Lameter @ 2011-05-26 19:03 UTC
To: Pekka Enberg; +Cc: David Rientjes, Andi Kleen, linux-mm
Pass the kmem_cache_cpu pointer to get_partial(). That way
we can avoid the this_cpu_write() statements.
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-05-24 09:41:20.854874883 -0500
+++ linux-2.6/mm/slub.c 2011-05-24 09:41:23.624874864 -0500
@@ -1437,7 +1437,8 @@ static inline void remove_partial(struct
* Must hold list_lock.
*/
static inline int acquire_slab(struct kmem_cache *s,
- struct kmem_cache_node *n, struct page *page)
+ struct kmem_cache_node *n, struct page *page,
+ struct kmem_cache_cpu *c)
{
void *freelist;
unsigned long counters;
@@ -1466,9 +1467,9 @@ static inline int acquire_slab(struct km
if (freelist) {
/* Populate the per cpu freelist */
- this_cpu_write(s->cpu_slab->freelist, freelist);
- this_cpu_write(s->cpu_slab->page, page);
- this_cpu_write(s->cpu_slab->node, page_to_nid(page));
+ c->freelist = freelist;
+ c->page = page;
+ c->node = page_to_nid(page);
return 1;
} else {
/*
@@ -1486,7 +1487,7 @@ static inline int acquire_slab(struct km
* Try to allocate a partial slab from a specific node.
*/
static struct page *get_partial_node(struct kmem_cache *s,
- struct kmem_cache_node *n)
+ struct kmem_cache_node *n, struct kmem_cache_cpu *c)
{
struct page *page;
@@ -1501,7 +1502,7 @@ static struct page *get_partial_node(str
spin_lock(&n->list_lock);
list_for_each_entry(page, &n->partial, lru)
- if (acquire_slab(s, n, page))
+ if (acquire_slab(s, n, page, c))
goto out;
page = NULL;
out:
@@ -1512,7 +1513,8 @@ out:
/*
* Get a page from somewhere. Search in increasing NUMA distances.
*/
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
+static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ struct kmem_cache_cpu *c)
{
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
@@ -1552,7 +1554,7 @@ static struct page *get_any_partial(stru
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > s->min_partial) {
- page = get_partial_node(s, n);
+ page = get_partial_node(s, n, c);
if (page) {
put_mems_allowed();
return page;
@@ -1567,16 +1569,17 @@ static struct page *get_any_partial(stru
/*
* Get a partial page, lock it and return it.
*/
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
+static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+ struct kmem_cache_cpu *c)
{
struct page *page;
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
- page = get_partial_node(s, get_node(s, searchnode));
+ page = get_partial_node(s, get_node(s, searchnode), c);
if (page || node != NUMA_NO_NODE)
return page;
- return get_any_partial(s, flags);
+ return get_any_partial(s, flags, c);
}
#ifdef CONFIG_PREEMPT
@@ -1645,9 +1648,6 @@ void init_kmem_cache_cpus(struct kmem_ca
for_each_possible_cpu(cpu)
per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
}
-/*
- * Remove the cpu slab
- */
/*
* Remove the cpu slab
@@ -1999,7 +1999,7 @@ load_freelist:
return object;
new_slab:
- page = get_partial(s, gfpflags, node);
+ page = get_partial(s, gfpflags, node, c);
if (page) {
stat(s, ALLOC_FROM_PARTIAL);
object = c->freelist;
* [slub p1 3/4] slub: return object pointer from get_partial() / new_slab().
From: Christoph Lameter @ 2011-05-26 19:03 UTC
To: Pekka Enberg; +Cc: David Rientjes, Andi Kleen, linux-mm
There is no longer any need to return the pointer to a slab page from
get_partial() since the page can be assigned directly to the kmem_cache_cpu
structure's "page" field. Return an object pointer instead.
That in turn allows a simplification of the spaghetti code in __slab_alloc().
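For readability, the reworked tail of __slab_alloc() can be summarized as
follows (consolidated from the added lines in the diff below; a sketch, not a
verbatim excerpt):

new_slab:
	/* Try the per node partial lists first. */
	object = get_partial(s, gfpflags, node, c);

	if (unlikely(!object)) {
		/* Allocate a fresh slab and take over its whole freelist. */
		object = new_slab_objects(s, gfpflags, node, &c);

		if (unlikely(!object)) {
			if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
				slab_out_of_memory(s, gfpflags, node);
			local_irq_restore(flags);
			return NULL;
		}
	}

	if (likely(!kmem_cache_debug(s)))
		goto load_freelist;

	/* Only entered in the debug case: failed checks need the next slab. */
	if (!alloc_debug_processing(s, c->page, object, addr))
		goto new_slab;

	c->freelist = get_freepointer(s, object);
	deactivate_slab(s, c);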
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 130 ++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 73 insertions(+), 57 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-05-24 09:41:23.624874864 -0500
+++ linux-2.6/mm/slub.c 2011-05-24 09:41:26.764874847 -0500
@@ -1434,9 +1434,11 @@ static inline void remove_partial(struct
* Lock slab, remove from the partial list and put the object into the
* per cpu freelist.
*
+ * Returns a list of objects or NULL if it fails.
+ *
* Must hold list_lock.
*/
-static inline int acquire_slab(struct kmem_cache *s,
+static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
struct kmem_cache_cpu *c)
{
@@ -1467,10 +1469,11 @@ static inline int acquire_slab(struct km
if (freelist) {
/* Populate the per cpu freelist */
- c->freelist = freelist;
c->page = page;
c->node = page_to_nid(page);
- return 1;
+ stat(s, ALLOC_FROM_PARTIAL);
+
+ return freelist;
} else {
/*
* Slab page came from the wrong list. No object to allocate
@@ -1479,17 +1482,18 @@ static inline int acquire_slab(struct km
*/
printk(KERN_ERR "SLUB: %s : Page without available objects on"
" partial list\n", s->name);
- return 0;
+ return NULL;
}
}
/*
* Try to allocate a partial slab from a specific node.
*/
-static struct page *get_partial_node(struct kmem_cache *s,
+static void *get_partial_node(struct kmem_cache *s,
struct kmem_cache_node *n, struct kmem_cache_cpu *c)
{
struct page *page;
+ void *object;
/*
* Racy check. If we mistakenly see no partial slabs then we
@@ -1501,13 +1505,15 @@ static struct page *get_partial_node(str
return NULL;
spin_lock(&n->list_lock);
- list_for_each_entry(page, &n->partial, lru)
- if (acquire_slab(s, n, page, c))
+ list_for_each_entry(page, &n->partial, lru) {
+ object = acquire_slab(s, n, page, c);
+ if (object)
goto out;
- page = NULL;
+ }
+ object = NULL;
out:
spin_unlock(&n->list_lock);
- return page;
+ return object;
}
/*
@@ -1521,7 +1527,7 @@ static struct page *get_any_partial(stru
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(flags);
- struct page *page;
+ void *object;
/*
* The defrag ratio allows a configuration of the tradeoffs between
@@ -1554,10 +1560,10 @@ static struct page *get_any_partial(stru
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > s->min_partial) {
- page = get_partial_node(s, n, c);
- if (page) {
+ object = get_partial_node(s, n, c);
+ if (object) {
put_mems_allowed();
- return page;
+ return object;
}
}
}
@@ -1569,15 +1575,15 @@ static struct page *get_any_partial(stru
/*
* Get a partial page, lock it and return it.
*/
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
struct kmem_cache_cpu *c)
{
- struct page *page;
+ void *object;
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
- page = get_partial_node(s, get_node(s, searchnode), c);
- if (page || node != NUMA_NO_NODE)
- return page;
+ object = get_partial_node(s, get_node(s, searchnode), c);
+ if (object || node != NUMA_NO_NODE)
+ return object;
return get_any_partial(s, flags, c);
}
@@ -1907,6 +1913,35 @@ slab_out_of_memory(struct kmem_cache *s,
}
}
+static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
+ int node, struct kmem_cache_cpu **pc)
+{
+ void *object;
+ struct kmem_cache_cpu *c;
+ struct page *page = new_slab(s, flags, node);
+
+ if (page) {
+ c = __this_cpu_ptr(s->cpu_slab);
+ if (c->page)
+ flush_slab(s, c);
+
+ /*
+ * No other reference to the page yet so we can
+ * muck around with it freely without cmpxchg
+ */
+ object = page->freelist;
+ page->freelist = NULL;
+
+ stat(s, ALLOC_SLAB);
+ c->node = page_to_nid(page);
+ c->page = page;
+ *pc = c;
+ } else
+ object = NULL;
+
+ return object;
+}
+
/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
@@ -1929,7 +1964,6 @@ static void *__slab_alloc(struct kmem_ca
unsigned long addr, struct kmem_cache_cpu *c)
{
void **object;
- struct page *page;
unsigned long flags;
struct page new;
unsigned long counters;
@@ -1947,8 +1981,7 @@ static void *__slab_alloc(struct kmem_ca
/* We handle __GFP_ZERO in the caller */
gfpflags &= ~__GFP_ZERO;
- page = c->page;
- if (!page)
+ if (!c->page)
goto new_slab;
if (unlikely(!node_match(c, node))) {
@@ -1960,8 +1993,8 @@ static void *__slab_alloc(struct kmem_ca
stat(s, ALLOC_SLOWPATH);
do {
- object = page->freelist;
- counters = page->counters;
+ object = c->page->freelist;
+ counters = c->page->counters;
new.counters = counters;
VM_BUG_ON(!new.frozen);
@@ -1973,12 +2006,12 @@ static void *__slab_alloc(struct kmem_ca
*
* If there are objects left then we retrieve them
* and use them to refill the per cpu queue.
- */
+ */
- new.inuse = page->objects;
+ new.inuse = c->page->objects;
new.frozen = object != NULL;
- } while (!cmpxchg_double_slab(s, page,
+ } while (!cmpxchg_double_slab(s, c->page,
object, counters,
NULL, new.counters,
"__slab_alloc"));
@@ -1992,50 +2025,33 @@ static void *__slab_alloc(struct kmem_ca
stat(s, ALLOC_REFILL);
load_freelist:
- VM_BUG_ON(!page->frozen);
c->freelist = get_freepointer(s, object);
c->tid = next_tid(c->tid);
local_irq_restore(flags);
return object;
new_slab:
- page = get_partial(s, gfpflags, node, c);
- if (page) {
- stat(s, ALLOC_FROM_PARTIAL);
- object = c->freelist;
+ object = get_partial(s, gfpflags, node, c);
- if (kmem_cache_debug(s))
- goto debug;
- goto load_freelist;
- }
+ if (unlikely(!object)) {
- page = new_slab(s, gfpflags, node);
+ object = new_slab_objects(s, gfpflags, node, &c);
- if (page) {
- c = __this_cpu_ptr(s->cpu_slab);
- if (c->page)
- flush_slab(s, c);
+ if (unlikely(!object)) {
+ if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+ slab_out_of_memory(s, gfpflags, node);
- /*
- * No other reference to the page yet so we can
- * muck around with it freely without cmpxchg
- */
- object = page->freelist;
- page->freelist = NULL;
+ local_irq_restore(flags);
+ return NULL;
+ }
+ }
- stat(s, ALLOC_SLAB);
- c->node = page_to_nid(page);
- c->page = page;
+ if (likely(!kmem_cache_debug(s)))
goto load_freelist;
- }
- if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
- slab_out_of_memory(s, gfpflags, node);
- local_irq_restore(flags);
- return NULL;
-debug:
- if (!object || !alloc_debug_processing(s, page, object, addr))
- goto new_slab;
+ /* Only entered in the debug case */
+ if (!alloc_debug_processing(s, c->page, object, addr))
+ goto new_slab; /* Slab failed checks. Next slab needed */
c->freelist = get_freepointer(s, object);
deactivate_slab(s, c);
* [slub p1 4/4] slub: [RFC] per cpu cache for partial pages
From: Christoph Lameter @ 2011-05-26 19:03 UTC
To: Pekka Enberg; +Cc: David Rientjes, Andi Kleen, linux-mm
Allow filling out the rest of the kmem_cache_cpu cacheline with pointers to
partial pages. The partial page list is used in slab_free() to avoid taking
the per node lock. The list_lock is then taken for batches of partial pages
instead of for individual ones (a schematic sketch of this batching follows
the lists below).
We can then also use the partial list in slab_alloc() to avoid scanning
partial lists for pages with free objects.
This is only a first stab at this. There are some limitations:
1. We have to scan through a percpu array of page pointers. That is fast
since the array is kept within a cacheline.
2. The pickup in __slab_alloc() could consider NUMA locality instead of
blindly picking the first partial block.
3. The "unfreeze()" function should share common code with deactivate_slab().
Maybe the two can be unified.
Future enhancements:
1. The pickup from the partial list could perhaps be done without disabling
interrupts with some work. The free path already puts the page into the
per cpu partial list without disabling interrupts.
2. The size of the per cpu partial array could be configured dynamically,
like other aspects of slab operations.
3. __slab_free() likely has some code paths that are unnecessary now or
where code is duplicated.
4. We dump all partials if the per cpu array overflows. There must be a
better algorithm.
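The batching on the free path described above can be sketched as follows
(simplified, illustrative only; stash_partial() is a hypothetical helper
name, the actual logic lives inline in __slab_free() in the diff below):

static void stash_partial(struct kmem_cache *s, struct page *page)
{
	int i;

	/* Park the freshly frozen page in a free slot of the per cpu array. */
	for (i = 0; i < s->cpu_partial; i++)
		if (this_cpu_cmpxchg(s->cpu_slab->partial[i], NULL, page) == NULL)
			return;		/* parked without taking any list_lock */

	/* Array overflowed: drain the whole batch plus the new page. */
	unfreeze_partials(s, page);
}

unfreeze_partials() then takes each node's list_lock once per batch rather
than once per page.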
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/slub_def.h | 2
mm/slub.c | 175 ++++++++++++++++++++++++++++++++++++++++++-----
2 files changed, 159 insertions(+), 18 deletions(-)
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h 2011-05-26 09:12:26.305543189 -0500
+++ linux-2.6/include/linux/slub_def.h 2011-05-26 09:12:38.665543109 -0500
@@ -46,6 +46,7 @@ struct kmem_cache_cpu {
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
+ struct page *partial[]; /* Partially allocated frozen slabs */
};
struct kmem_cache_node {
@@ -79,6 +80,7 @@ struct kmem_cache {
int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
+ int cpu_partial; /* Number of per cpu partial pages to keep around */
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-05-26 09:12:26.285543189 -0500
+++ linux-2.6/mm/slub.c 2011-05-26 13:25:32.187867196 -0500
@@ -1806,6 +1806,97 @@ redo:
}
}
+/*
+ * Unfreeze a page. Page cannot be full. May be empty. If n is passed then the list lock on that
+ * node was taken. The function returns the pointer to the list_lock that was eventually taken in
+ * this function.
+ *
+ * Races are limited to __slab_free. Meaning that the number of free objects may increase but not
+ * decrease.
+ */
+struct kmem_cache_node *unfreeze(struct kmem_cache *s, struct page *page, struct kmem_cache_node *n)
+{
+ enum slab_modes { M_PARTIAL, M_FREE };
+ enum slab_modes l = M_FREE, m = M_FREE;
+ struct page new;
+ struct page old;
+
+ do {
+
+ old.freelist = page->freelist;
+ old.counters = page->counters;
+ VM_BUG_ON(!old.frozen);
+
+ new.counters = old.counters;
+ new.freelist = old.freelist;
+
+ new.frozen = 0;
+
+ if (!new.inuse && (!n || n->nr_partial < s->min_partial))
+ m = M_FREE;
+ else {
+ struct kmem_cache_node *n2 = get_node(s, page_to_nid(page));
+
+ m = M_PARTIAL;
+ if (n != n2) {
+ if (n)
+ spin_unlock(&n->list_lock);
+
+ n = n2;
+ spin_lock(&n->list_lock);
+ }
+ }
+
+ if (l != m) {
+ if (l == M_PARTIAL)
+ remove_partial(n, page);
+ else
+ add_partial(n, page, 1);
+
+ l = m;
+ }
+
+ } while (!cmpxchg_double_slab(s, page,
+ old.freelist, old.counters,
+ new.freelist, new.counters,
+ "unfreezing slab"));
+
+ if (m == M_FREE) {
+ stat(s, DEACTIVATE_EMPTY);
+ discard_slab(s, page);
+ stat(s, FREE_SLAB);
+ }
+ return n;
+}
+
+static void unfreeze_partials(struct kmem_cache *s, struct page *page)
+{
+ int i;
+ unsigned long flags;
+ struct kmem_cache_node *n;
+
+ /* Batch free the partial pages */
+ local_irq_save(flags);
+
+ n = unfreeze(s, page, NULL);
+
+ for (i = 0; i < s->cpu_partial; i++) {
+ page = this_cpu_read(s->cpu_slab->partial[i]);
+
+ if (page) {
+ this_cpu_write(s->cpu_slab->partial[i], NULL);
+ n = unfreeze(s, page, n);
+ }
+
+ }
+
+ if (n)
+ spin_unlock(&n->list_lock);
+
+ local_irq_restore(flags);
+}
+
+
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
stat(s, CPUSLAB_FLUSH);
@@ -1967,6 +2058,7 @@ static void *__slab_alloc(struct kmem_ca
unsigned long flags;
struct page new;
unsigned long counters;
+ int i;
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
@@ -1983,7 +2075,7 @@ static void *__slab_alloc(struct kmem_ca
if (!c->page)
goto new_slab;
-
+redo:
if (unlikely(!node_match(c, node))) {
stat(s, ALLOC_NODE_MISMATCH);
deactivate_slab(s, c);
@@ -2031,6 +2123,17 @@ load_freelist:
return object;
new_slab:
+ /* First try our cache of partially allocated pages */
+ for (i = 0; i < s->cpu_partial; i++)
+ if (c->partial[i]) {
+ c->page = c->partial[i];
+ c->freelist = NULL;
+ c->partial[i] = NULL;
+ c->node = page_to_nid(c->page);
+ goto redo;
+ }
+
+ /* Then do expensive stuff like retrieving pages from the partial lists */
object = get_partial(s, gfpflags, node, c);
if (unlikely(!object)) {
@@ -2225,16 +2328,29 @@ static void __slab_free(struct kmem_cach
was_frozen = new.frozen;
new.inuse--;
if ((!new.inuse || !prior) && !was_frozen && !n) {
- n = get_node(s, page_to_nid(page));
- /*
- * Speculatively acquire the list_lock.
- * If the cmpxchg does not succeed then we may
- * drop the list_lock without any processing.
- *
- * Otherwise the list_lock will synchronize with
- * other processors updating the list of slabs.
- */
- spin_lock_irqsave(&n->list_lock, flags);
+
+ if (!kmem_cache_debug(s) && !prior)
+
+ /*
+ * Slab was on no list before and will be partially empty.
+ * We can defer the list move and freeze it easily.
+ */
+ new.frozen = 1;
+
+ else { /* Needs to be taken off a list */
+
+ n = get_node(s, page_to_nid(page));
+ /*
+ * Speculatively acquire the list_lock.
+ * If the cmpxchg does not succeed then we may
+ * drop the list_lock without any processing.
+ *
+ * Otherwise the list_lock will synchronize with
+ * other processors updating the list of slabs.
+ */
+ spin_lock_irqsave(&n->list_lock, flags);
+
+ }
}
inuse = new.inuse;
@@ -2244,7 +2360,21 @@ static void __slab_free(struct kmem_cach
"__slab_free"));
if (likely(!n)) {
- /*
+ if (new.frozen && !was_frozen) {
+ int i;
+
+ for (i = 0; i < s->cpu_partial; i++)
+ if (this_cpu_cmpxchg(s->cpu_slab->partial[i], NULL, page) == NULL)
+ return;
+
+ /*
+ * partial array is overflowing. Drop them all as well as the one we just
+ * froze.
+ */
+ unfreeze_partials(s, page);
+ }
+
+ /*
* The list lock was not taken therefore no list
* activity can be necessary.
*/
@@ -2311,7 +2441,6 @@ static __always_inline void slab_free(st
slab_free_hook(s, x);
redo:
-
/*
* Determine the currently cpus per cpu slab.
* The cpu may change afterward. However that does not matter since
@@ -2526,6 +2655,9 @@ init_kmem_cache_node(struct kmem_cache_n
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
+ int size = sizeof(struct kmem_cache_cpu) + s->cpu_partial * sizeof(void *);
+ int align = sizeof(void *);
+
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
@@ -2534,12 +2666,10 @@ static inline int alloc_kmem_cache_cpus(
* Must align to double word boundary for the double cmpxchg instructions
* to work.
*/
- s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
-#else
- /* Regular alignment is sufficient */
- s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
+ align = 2 * sizeof(void *);
#endif
+ s->cpu_slab = __alloc_percpu(size, align);
if (!s->cpu_slab)
return 0;
@@ -2805,7 +2935,9 @@ static int kmem_cache_open(struct kmem_c
* The larger the object size is, the more pages we want on the partial
* list to avoid pounding the page allocator excessively.
*/
- set_min_partial(s, ilog2(s->size));
+ set_min_partial(s, ilog2(s->size) / 2);
+ s->cpu_partial = min((cache_line_size() - sizeof(struct kmem_cache_cpu)) / sizeof(void *),
+ s->min_partial);
s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
@@ -4343,6 +4475,12 @@ static ssize_t min_partial_store(struct
}
SLAB_ATTR(min_partial);
+static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%u\n", s->cpu_partial);
+}
+SLAB_ATTR_RO(cpu_partial);
+
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (!s->ctor)
@@ -4701,6 +4839,7 @@ static struct attribute *slab_attrs[] =
&objs_per_slab_attr.attr,
&order_attr.attr,
&min_partial_attr.attr,
+ &cpu_partial_attr.attr,
&objects_attr.attr,
&objects_partial_attr.attr,
&partial_attr.attr,