* [Slub cleanup 1/9] slub: Use freelist instead of "object" in __slab_alloc
From: Christoph Lameter @ 2012-01-23 20:16 UTC
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
The variable "object" really refers to a list of objects that we
are handling.
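Background gloss (mine, not part of the changelog): in SLUB a free object
stores the pointer to the next free object inside itself, so a single
pointer names an entire chain. A minimal sketch, with simplified stand-in
types rather than the kernel's:

	#include <stddef.h>

	/* Simplified stand-in for the kernel's struct kmem_cache. */
	struct kmem_cache {
		size_t offset;	/* where the free pointer lives in each object */
	};

	/* Read the link embedded in a free object, as get_freepointer() does. */
	static void *get_freepointer(struct kmem_cache *s, void *object)
	{
		return *(void **)((char *)object + s->offset);
	}

	/*
	 * Pop one object off the chain. The variable tracks the whole
	 * remaining list -- hence "freelist" rather than "object".
	 */
	static void *pop_object(struct kmem_cache *s, void **freelist)
	{
		void *object = *freelist;

		if (object)
			*freelist = get_freepointer(s, object);
		return object;
	}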
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 38 ++++++++++++++++++++------------------
1 file changed, 20 insertions(+), 18 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-01-13 04:05:02.045806519 -0600
+++ linux-2.6/mm/slub.c 2012-01-13 08:47:07.498748874 -0600
@@ -2102,7 +2102,7 @@ slab_out_of_memory(struct kmem_cache *s,
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
int node, struct kmem_cache_cpu **pc)
{
- void *object;
+ void *freelist;
struct kmem_cache_cpu *c;
struct page *page = new_slab(s, flags, node);
@@ -2115,7 +2115,7 @@ static inline void *new_slab_objects(str
* No other reference to the page yet so we can
* muck around with it freely without cmpxchg
*/
- object = page->freelist;
+ freelist = page->freelist;
page->freelist = NULL;
stat(s, ALLOC_SLAB);
@@ -2123,9 +2123,9 @@ static inline void *new_slab_objects(str
c->page = page;
*pc = c;
} else
- object = NULL;
+ freelist = NULL;
- return object;
+ return freelist;
}
/*
@@ -2145,6 +2145,7 @@ static inline void *get_freelist(struct
do {
freelist = page->freelist;
counters = page->counters;
+
new.counters = counters;
VM_BUG_ON(!new.frozen);
@@ -2178,7 +2179,7 @@ static inline void *get_freelist(struct
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
- void **object;
+ void *freelist;
unsigned long flags;
local_irq_save(flags);
@@ -2194,6 +2195,7 @@ static void *__slab_alloc(struct kmem_ca
if (!c->page)
goto new_slab;
redo:
+
if (unlikely(!node_match(c, node))) {
stat(s, ALLOC_NODE_MISMATCH);
deactivate_slab(s, c);
@@ -2201,15 +2203,15 @@ redo:
}
/* must check again c->freelist in case of cpu migration or IRQ */
- object = c->freelist;
- if (object)
+ freelist = c->freelist;
+ if (freelist)
goto load_freelist;
stat(s, ALLOC_SLOWPATH);
- object = get_freelist(s, c->page);
+ freelist = get_freelist(s, c->page);
- if (!object) {
+ if (!freelist) {
c->page = NULL;
stat(s, DEACTIVATE_BYPASS);
goto new_slab;
@@ -2218,10 +2220,10 @@ redo:
stat(s, ALLOC_REFILL);
load_freelist:
- c->freelist = get_freepointer(s, object);
+ c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
local_irq_restore(flags);
- return object;
+ return freelist;
new_slab:
@@ -2235,13 +2237,13 @@ new_slab:
}
/* Then do expensive stuff like retrieving pages from the partial lists */
- object = get_partial(s, gfpflags, node, c);
+ freelist = get_partial(s, gfpflags, node, c);
- if (unlikely(!object)) {
+ if (unlikely(!freelist)) {
- object = new_slab_objects(s, gfpflags, node, &c);
+ freelist = new_slab_objects(s, gfpflags, node, &c);
- if (unlikely(!object)) {
+ if (unlikely(!freelist)) {
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
@@ -2254,14 +2256,14 @@ new_slab:
goto load_freelist;
/* Only entered in the debug case */
- if (!alloc_debug_processing(s, c->page, object, addr))
+ if (!alloc_debug_processing(s, c->page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
- c->freelist = get_freepointer(s, object);
+ c->freelist = get_freepointer(s, freelist);
deactivate_slab(s, c);
c->node = NUMA_NO_NODE;
local_irq_restore(flags);
- return object;
+ return freelist;
}
/*
* [Slub cleanup 2/9] slub: Add frozen check in __slab_alloc
From: Christoph Lameter @ 2012-01-23 20:16 UTC
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
Verify that objects returned from __slab_alloc come from slab pages
in the correct state.
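Background gloss (mine, not the changelog's): a slab page is "frozen" while
one cpu owns it, and only the owning cpu may manipulate its freelist without
locking; the VM_BUG_ON added below asserts that ownership before the fast
path relies on it. A toy model of the invariant, with made-up types:

	/* Toy model of the frozen invariant; types are made up. */
	struct page_model {
		unsigned frozen:1;	/* set while one cpu owns this slab */
		void *freelist;
	};

	static void check_cpu_slab(struct page_model *page)
	{
		/*
		 * Per-cpu allocation may only refill from a frozen slab,
		 * since only then is lockless freelist access safe.
		 * Stand-in for VM_BUG_ON(!c->page->frozen).
		 */
		if (!page->frozen)
			__builtin_trap();
	}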
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 6 ++++++
1 file changed, 6 insertions(+)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-01-13 08:47:07.498748874 -0600
+++ linux-2.6/mm/slub.c 2012-01-13 08:47:10.938748802 -0600
@@ -2220,6 +2220,12 @@ redo:
stat(s, ALLOC_REFILL);
load_freelist:
+ /*
+ * freelist is pointing to the list of objects to be used.
+ * page is pointing to the page from which the objects are obtained.
+ * That page must be frozen for per cpu allocations to work.
+ */
+ VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
local_irq_restore(flags);
* [Slub cleanup 3/9] slub: Acquire_slab() avoid loop
From: Christoph Lameter @ 2012-01-23 20:16 UTC
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
Avoid the retry loop in acquire_slab() and simply fail if there is a
conflict on the cmpxchg. The caller will then consider the next page
on the partial list.
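The shape of the change, sketched with C11 atomics as a stand-in for the
kernel's __cmpxchg_double_slab() (the real operation also swaps the
counters word in the same step):

	#include <stdatomic.h>

	struct slab_model { _Atomic(void *) freelist; };

	/* Before: retry the compare-exchange until it wins. */
	static void *acquire_retrying(struct slab_model *page)
	{
		void *old;

		do {
			old = atomic_load(&page->freelist);
		} while (!atomic_compare_exchange_weak(&page->freelist,
						       &old, NULL));
		return old;
	}

	/* After: one attempt; on a conflict return NULL and let the
	 * caller move on to the next page on the partial list. */
	static void *acquire_once(struct slab_model *page)
	{
		void *old = atomic_load(&page->freelist);

		if (!atomic_compare_exchange_strong(&page->freelist,
						    &old, NULL))
			return NULL;
		return old;
	}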
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 28 +++++++++++++++-------------
1 file changed, 15 insertions(+), 13 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-01-13 08:47:10.938748802 -0600
+++ linux-2.6/mm/slub.c 2012-01-13 08:47:17.158748674 -0600
@@ -1484,12 +1484,12 @@ static inline void remove_partial(struct
}
/*
- * Lock slab, remove from the partial list and put the object into the
- * per cpu freelist.
+ * Remove slab from the partial list, freeze it and
+ * return the pointer to the freelist.
*
* Returns a list of objects or NULL if it fails.
*
- * Must hold list_lock.
+ * Must hold list_lock since we modify the partial list.
*/
static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
@@ -1504,22 +1504,24 @@ static inline void *acquire_slab(struct
* The old freelist is the list of objects for the
* per cpu allocation list.
*/
- do {
- freelist = page->freelist;
- counters = page->counters;
- new.counters = counters;
- if (mode)
- new.inuse = page->objects;
+ freelist = page->freelist;
+ counters = page->counters;
+ new.counters = counters;
+ if (mode)
+ new.inuse = page->objects;
- VM_BUG_ON(new.frozen);
- new.frozen = 1;
+ VM_BUG_ON(new.frozen);
+ new.frozen = 1;
- } while (!__cmpxchg_double_slab(s, page,
+ if (!__cmpxchg_double_slab(s, page,
freelist, counters,
NULL, new.counters,
- "lock and freeze"));
+ "acquire_slab"))
+
+ return NULL;
remove_partial(n, page);
+ WARN_ON(!freelist);
return freelist;
}
* [Slub cleanup 4/9] slub: Simplify control flow in __slab_alloc()
From: Christoph Lameter @ 2012-01-23 20:16 UTC
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
Simplify the control flow a bit by avoiding nesting.
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-01-13 08:47:17.158748674 -0600
+++ linux-2.6/mm/slub.c 2012-01-13 08:47:20.490748604 -0600
@@ -2247,17 +2247,15 @@ new_slab:
/* Then do expensive stuff like retrieving pages from the partial lists */
freelist = get_partial(s, gfpflags, node, c);
- if (unlikely(!freelist)) {
-
+ if (!freelist)
freelist = new_slab_objects(s, gfpflags, node, &c);
- if (unlikely(!freelist)) {
- if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
- slab_out_of_memory(s, gfpflags, node);
+ if (unlikely(!freelist)) {
+ if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+ slab_out_of_memory(s, gfpflags, node);
- local_irq_restore(flags);
- return NULL;
- }
+ local_irq_restore(flags);
+ return NULL;
}
if (likely(!kmem_cache_debug(s)))
* [Slub cleanup 6/9] slub: Get rid of the node field
From: Christoph Lameter @ 2012-01-23 20:16 UTC
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
The node field is always page_to_nid(c->page), so it is rather easy to
replace. Note that there may be slightly more overhead in various hot paths
due to the need to shift the bits from page->flags. However, that is mostly
compensated for by the smaller footprint of the kmem_cache_cpu structure
(this patch reduces it to 3 words per cache), which allows better caching.
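The "shift the bits" overhead refers to page_to_nid() recovering the node
id from a bitfield packed into page->flags. A sketch with a made-up bit
layout (the real layout depends on the kernel configuration):

	/* Made-up bit layout; the real one varies with the config. */
	#define NODES_WIDTH	6
	#define NODES_PGSHIFT	54
	#define NODES_MASK	((1UL << NODES_WIDTH) - 1)

	struct page_model { unsigned long flags; };

	/* One shift and one mask per lookup -- the extra hot-path cost
	 * that dropping the cached c->node field trades for a smaller
	 * kmem_cache_cpu structure. */
	static int page_to_nid_model(const struct page_model *page)
	{
		return (int)((page->flags >> NODES_PGSHIFT) & NODES_MASK);
	}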
Signed-off-by: Christoph Lameter <cl@linux.com>
---
include/linux/slub_def.h | 1 -
mm/slub.c | 35 ++++++++++++++++-------------------
2 files changed, 16 insertions(+), 20 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-01-20 07:30:03.810312824 -0600
+++ linux-2.6/mm/slub.c 2012-01-20 08:15:54.066255837 -0600
@@ -1555,7 +1555,6 @@ static void *get_partial_node(struct kme
if (!object) {
c->page = page;
- c->node = page_to_nid(page);
stat(s, ALLOC_FROM_PARTIAL);
object = t;
available = page->objects - page->inuse;
@@ -2032,7 +2031,7 @@ static void flush_all(struct kmem_cache
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
- if (node != NUMA_NO_NODE && c->node != node)
+ if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
return 0;
#endif
return 1;
@@ -2127,7 +2126,6 @@ static inline void *new_slab_objects(str
page->freelist = NULL;
stat(s, ALLOC_SLAB);
- c->node = page_to_nid(page);
c->page = page;
*pc = c;
} else
@@ -2244,7 +2242,6 @@ new_slab:
if (c->partial) {
c->page = c->partial;
c->partial = c->page->next;
- c->node = page_to_nid(c->page);
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
@@ -2269,7 +2266,6 @@ new_slab:
c->freelist = get_freepointer(s, freelist);
deactivate_slab(s, c);
- c->node = NUMA_NO_NODE;
local_irq_restore(flags);
return freelist;
}
@@ -4474,30 +4470,31 @@ static ssize_t show_slab_objects(struct
for_each_possible_cpu(cpu) {
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
- int node = ACCESS_ONCE(c->node);
+ int node;
struct page *page;
- if (node < 0)
- continue;
page = ACCESS_ONCE(c->page);
- if (page) {
- if (flags & SO_TOTAL)
- x = page->objects;
- else if (flags & SO_OBJECTS)
- x = page->inuse;
- else
- x = 1;
+ if (!page)
+ continue;
- total += x;
- nodes[node] += x;
- }
- page = c->partial;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ x = page->objects;
+ else if (flags & SO_OBJECTS)
+ x = page->inuse;
+ else
+ x = 1;
+ total += x;
+ nodes[node] += x;
+
+ page = ACCESS_ONCE(c->partial);
if (page) {
x = page->pobjects;
total += x;
nodes[node] += x;
}
+
per_cpu[node]++;
}
}
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h 2012-01-20 05:06:45.478490987 -0600
+++ linux-2.6/include/linux/slub_def.h 2012-01-20 08:15:54.066255837 -0600
@@ -45,7 +45,6 @@ struct kmem_cache_cpu {
unsigned long tid; /* Globally unique transaction id */
struct page *page; /* The slab from which we are allocating */
struct page *partial; /* Partially allocated frozen slabs */
- int node; /* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
* Re: [Slub cleanup 6/9] slub: Get rid of the node field
From: David Rientjes @ 2012-02-01 22:11 UTC
To: Christoph Lameter; +Cc: Pekka Enberg, linux-mm
On Mon, 23 Jan 2012, Christoph Lameter wrote:
> The node field is always page_to_nid(c->page). So its rather easy to
> replace. Note that there maybe slightly more overhead in various hot paths
> due to the need to shift the bits from page->flags. However, that is mostly
> compensated for by a smaller footprint of the kmem_cache_cpu structure (this
> patch reduces that to 3 words per cache) which allows better caching.
>
s/3 words per cache/4 words per cache/
> Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
* [Slub cleanup 7/9] slub: Separate out kmem_cache_cpu processing from deactivate_slab
From: Christoph Lameter @ 2012-01-23 20:16 UTC
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
The handling of the kmem_cache_cpu fields is cleaner if the code that
updates them is taken out of deactivate_slab() and moved into its callers.
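In outline (hypothetical stubs; the real code is in the diff below): the
worker function now receives the page and freelist explicitly, and each
caller resets its own per-cpu state afterwards:

	struct cpu_model { void *page; void *freelist; unsigned long tid; };

	/* Works only on what it is handed; knows nothing about per-cpu
	 * state. */
	static void deactivate_slab_model(void *page, void *freelist)
	{
		/* ... return the objects on "freelist" to "page" ... */
		(void)page;
		(void)freelist;
	}

	static void flush_slab_model(struct cpu_model *c)
	{
		deactivate_slab_model(c->page, c->freelist);

		/* The caller, not deactivate_slab(), now owns the
		 * per-cpu fields. */
		c->tid++;
		c->page = NULL;
		c->freelist = NULL;
	}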
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-01-13 08:47:28.506748438 -0600
+++ linux-2.6/mm/slub.c 2012-01-13 08:47:31.930748367 -0600
@@ -1712,14 +1712,12 @@ void init_kmem_cache_cpus(struct kmem_ca
/*
* Remove the cpu slab
*/
-static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
+static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
{
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
- struct page *page = c->page;
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
int lock = 0;
enum slab_modes l = M_NONE, m = M_NONE;
- void *freelist;
void *nextfree;
int tail = DEACTIVATE_TO_HEAD;
struct page new;
@@ -1730,11 +1728,6 @@ static void deactivate_slab(struct kmem_
tail = DEACTIVATE_TO_TAIL;
}
- c->tid = next_tid(c->tid);
- c->page = NULL;
- freelist = c->freelist;
- c->freelist = NULL;
-
/*
* Stage one: Free all available per cpu objects back
* to the page freelist while it is still frozen. Leave the
@@ -1992,7 +1985,11 @@ int put_cpu_partial(struct kmem_cache *s
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
stat(s, CPUSLAB_FLUSH);
- deactivate_slab(s, c);
+ deactivate_slab(s, c->page, c->freelist);
+
+ c->tid = next_tid(c->tid);
+ c->page = NULL;
+ c->freelist = NULL;
}
/*
@@ -2204,7 +2201,9 @@ redo:
if (unlikely(!node_match(c, node))) {
stat(s, ALLOC_NODE_MISMATCH);
- deactivate_slab(s, c);
+ deactivate_slab(s, c->page, c->freelist);
+ c->page = NULL;
+ c->freelist = NULL;
goto new_slab;
}
@@ -2264,8 +2263,9 @@ new_slab:
if (!alloc_debug_processing(s, c->page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
- c->freelist = get_freepointer(s, freelist);
- deactivate_slab(s, c);
+ deactivate_slab(s, c->page, get_freepointer(s, freelist));
+ c->page = NULL;
+ c->freelist = NULL;
local_irq_restore(flags);
return freelist;
}
* [Slub cleanup 8/9] slub: Use page variable instead of c->page.
From: Christoph Lameter @ 2012-01-23 20:16 UTC
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
Store the value of c->page in a local variable to avoid additional
fetches from per-cpu data.
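The point, sketched with a hypothetical accessor: reading c->page once
into a local lets the compiler keep the value in a register instead of
re-fetching it through the per-cpu pointer at every use:

	struct cpu_model { void *page; void *freelist; };

	extern struct cpu_model *this_cpu_slab(void);	/* hypothetical */

	static void *slow_path_model(void)
	{
		struct cpu_model *c = this_cpu_slab();
		void *page = c->page;	/* one fetch from per-cpu data */

		if (!page)
			return NULL;
		/* ... later uses read the local "page", not c->page ... */
		return page;
	}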
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-01-13 08:47:31.930748367 -0600
+++ linux-2.6/mm/slub.c 2012-01-13 08:47:35.018748303 -0600
@@ -2183,6 +2183,7 @@ static void *__slab_alloc(struct kmem_ca
unsigned long addr, struct kmem_cache_cpu *c)
{
void *freelist;
+ struct page *page;
unsigned long flags;
local_irq_save(flags);
@@ -2195,13 +2196,14 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
- if (!c->page)
+ page = c->page;
+ if (!page)
goto new_slab;
redo:
if (unlikely(!node_match(c, node))) {
stat(s, ALLOC_NODE_MISMATCH);
- deactivate_slab(s, c->page, c->freelist);
+ deactivate_slab(s, page, c->freelist);
c->page = NULL;
c->freelist = NULL;
goto new_slab;
@@ -2214,7 +2216,7 @@ redo:
stat(s, ALLOC_SLOWPATH);
- freelist = get_freelist(s, c->page);
+ freelist = get_freelist(s, page);
if (!freelist) {
c->page = NULL;
@@ -2239,8 +2241,8 @@ load_freelist:
new_slab:
if (c->partial) {
- c->page = c->partial;
- c->partial = c->page->next;
+ page = c->page = c->partial;
+ c->partial = page->next;
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
@@ -2256,14 +2258,15 @@ new_slab:
return NULL;
}
+ page = c->page;
if (likely(!kmem_cache_debug(s)))
goto load_freelist;
/* Only entered in the debug case */
- if (!alloc_debug_processing(s, c->page, freelist, addr))
+ if (!alloc_debug_processing(s, page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
- deactivate_slab(s, c->page, get_freepointer(s, freelist));
+ deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
local_irq_restore(flags);
* [Slub cleanup 9/9] slub: pass page to node_match() instead of kmem_cache_cpu structure
From: Christoph Lameter @ 2012-01-23 20:16 UTC
To: Pekka Enberg; +Cc: linux-mm, David Rientjes
Avoid passing the kmem_cache_cpu pointer to node_match(); pass the page
instead. This makes node_match() more generic and easier to understand.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-01-13 08:47:35.018748303 -0600
+++ linux-2.6/mm/slub.c 2012-01-13 08:47:37.898748244 -0600
@@ -2025,10 +2025,10 @@ static void flush_all(struct kmem_cache
* Check if the objects in a per cpu structure fit numa
* locality expectations.
*/
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct page *page, int node)
{
#ifdef CONFIG_NUMA
- if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
+ if (node != NUMA_NO_NODE && page_to_nid(page) != node)
return 0;
#endif
return 1;
@@ -2201,7 +2201,7 @@ static void *__slab_alloc(struct kmem_ca
goto new_slab;
redo:
- if (unlikely(!node_match(c, node))) {
+ if (unlikely(!node_match(page, node))) {
stat(s, ALLOC_NODE_MISMATCH);
deactivate_slab(s, page, c->freelist);
c->page = NULL;
@@ -2288,6 +2288,7 @@ static __always_inline void *slab_alloc(
{
void **object;
struct kmem_cache_cpu *c;
+ struct page *page;
unsigned long tid;
if (slab_pre_alloc_hook(s, gfpflags))
@@ -2313,7 +2314,8 @@ redo:
barrier();
object = c->freelist;
- if (unlikely(!object || !node_match(c, node)))
+ page = c->page;
+ if (unlikely(!object || !node_match(page, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);