From: Christoph Lameter <cl@linux.com>
To: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: David Rientjes <rientjes@google.com>,
Eric Dumazet <eric.dumazet@gmail.com>,
"H. Peter Anvin" <hpa@zytor.com>,
linux-mm@kvack.org, Thomas Gleixner <tglx@linutronix.de>
Subject: [slubllv5 23/25] slub: return object pointer from get_partial() / new_slab().
Date: Mon, 16 May 2011 15:26:28 -0500
Message-ID: <20110516202634.597471664@linux.com>
In-Reply-To: <20110516202605.274023469@linux.com>
There is no longer any need to return a pointer to the slab page from get_partial(),
since the page can be assigned directly to the "page" field of the kmem_cache_cpu
structure. Instead return an object pointer.
That in turn allows a simplification of the spaghetti code in __slab_alloc().
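For illustration, the reworked slow path in __slab_alloc() then reduces to
roughly the following (a simplified sketch of the hunks below, with locking,
statistics, the debug path and the OOM warning elided):

	/* Try to refill from a partial slab; returns first object or NULL */
	object = get_partial(s, gfpflags, node, c);
	if (unlikely(!object)) {
		/* Fall back to allocating a new slab page */
		object = new_slab_objects(s, gfpflags, node, &c);
		if (unlikely(!object))
			return NULL;	/* out of memory */
	}
	/* c->page and c->node were already set up by the callee */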
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 130 ++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 73 insertions(+), 57 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-05-16 14:11:37.531452935 -0500
+++ linux-2.6/mm/slub.c 2011-05-16 14:24:19.781452046 -0500
@@ -1434,9 +1434,11 @@ static inline void remove_partial(struct
* Lock slab, remove from the partial list and put the object into the
* per cpu freelist.
*
+ * Returns a list of objects or NULL if it fails.
+ *
* Must hold list_lock.
*/
-static inline int acquire_slab(struct kmem_cache *s,
+static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
struct kmem_cache_cpu *c)
{
@@ -1467,10 +1469,11 @@ static inline int acquire_slab(struct km
if (freelist) {
/* Populate the per cpu freelist */
- c->freelist = freelist;
c->page = page;
c->node = page_to_nid(page);
- return 1;
+ stat(s, ALLOC_FROM_PARTIAL);
+
+ return freelist;
} else {
/*
* Slab page came from the wrong list. No object to allocate
@@ -1479,17 +1482,18 @@ static inline int acquire_slab(struct km
*/
printk(KERN_ERR "SLUB: %s : Page without available objects on"
" partial list\n", s->name);
- return 0;
+ return NULL;
}
}
/*
* Try to allocate a partial slab from a specific node.
*/
-static struct page *get_partial_node(struct kmem_cache *s,
+static void *get_partial_node(struct kmem_cache *s,
struct kmem_cache_node *n, struct kmem_cache_cpu *c)
{
struct page *page;
+ void *object;
/*
* Racy check. If we mistakenly see no partial slabs then we
@@ -1501,13 +1505,15 @@ static struct page *get_partial_node(str
return NULL;
spin_lock(&n->list_lock);
- list_for_each_entry(page, &n->partial, lru)
- if (acquire_slab(s, n, page, c))
+ list_for_each_entry(page, &n->partial, lru) {
+ object = acquire_slab(s, n, page, c);
+ if (object)
goto out;
- page = NULL;
+ }
+ object = NULL;
out:
spin_unlock(&n->list_lock);
- return page;
+ return object;
}
/*
@@ -1521,7 +1527,7 @@ static struct page *get_any_partial(stru
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(flags);
- struct page *page;
+ void *object;
/*
* The defrag ratio allows a configuration of the tradeoffs between
@@ -1554,10 +1560,10 @@ static struct page *get_any_partial(stru
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > s->min_partial) {
- page = get_partial_node(s, n, c);
- if (page) {
+ object = get_partial_node(s, n, c);
+ if (object) {
put_mems_allowed();
- return page;
+ return object;
}
}
}
@@ -1569,15 +1575,15 @@ static struct page *get_any_partial(stru
/*
* Get a partial page, lock it and return it.
*/
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
struct kmem_cache_cpu *c)
{
- struct page *page;
+ void *object;
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
- page = get_partial_node(s, get_node(s, searchnode), c);
- if (page || node != NUMA_NO_NODE)
- return page;
+ object = get_partial_node(s, get_node(s, searchnode), c);
+ if (object || node != NUMA_NO_NODE)
+ return object;
return get_any_partial(s, flags, c);
}
@@ -1907,6 +1913,35 @@ slab_out_of_memory(struct kmem_cache *s,
}
}
+static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
+ int node, struct kmem_cache_cpu **pc)
+{
+ void *object;
+ struct kmem_cache_cpu *c;
+ struct page *page = new_slab(s, flags, node);
+
+ if (page) {
+ c = __this_cpu_ptr(s->cpu_slab);
+ if (c->page)
+ flush_slab(s, c);
+
+ /*
+ * No other reference to the page yet so we can
+ * muck around with it freely without cmpxchg
+ */
+ object = page->freelist;
+ page->freelist = NULL;
+
+ stat(s, ALLOC_SLAB);
+ c->node = page_to_nid(page);
+ c->page = page;
+ *pc = c;
+ } else
+ object = NULL;
+
+ return object;
+}
+
/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
@@ -1929,7 +1964,6 @@ static void *__slab_alloc(struct kmem_ca
unsigned long addr, struct kmem_cache_cpu *c)
{
void **object;
- struct page *page;
unsigned long flags;
struct page new;
unsigned long counters;
@@ -1947,8 +1981,7 @@ static void *__slab_alloc(struct kmem_ca
/* We handle __GFP_ZERO in the caller */
gfpflags &= ~__GFP_ZERO;
- page = c->page;
- if (!page)
+ if (!c->page)
goto new_slab;
if (unlikely(!node_match(c, node))) {
@@ -1960,8 +1993,8 @@ static void *__slab_alloc(struct kmem_ca
stat(s, ALLOC_SLOWPATH);
do {
- object = page->freelist;
- counters = page->counters;
+ object = c->page->freelist;
+ counters = c->page->counters;
new.counters = counters;
VM_BUG_ON(!new.frozen);
@@ -1973,12 +2006,12 @@ static void *__slab_alloc(struct kmem_ca
*
* If there are objects left then we retrieve them
* and use them to refill the per cpu queue.
- */
+ */
- new.inuse = page->objects;
+ new.inuse = c->page->objects;
new.frozen = object != NULL;
- } while (!cmpxchg_double_slab(s, page,
+ } while (!cmpxchg_double_slab(s, c->page,
object, counters,
NULL, new.counters,
"__slab_alloc"));
@@ -1992,50 +2025,33 @@ static void *__slab_alloc(struct kmem_ca
stat(s, ALLOC_REFILL);
load_freelist:
- VM_BUG_ON(!page->frozen);
c->freelist = get_freepointer(s, object);
c->tid = next_tid(c->tid);
local_irq_restore(flags);
return object;
new_slab:
- page = get_partial(s, gfpflags, node, c);
- if (page) {
- stat(s, ALLOC_FROM_PARTIAL);
- object = c->freelist;
+ object = get_partial(s, gfpflags, node, c);
- if (kmem_cache_debug(s))
- goto debug;
- goto load_freelist;
- }
+ if (unlikely(!object)) {
- page = new_slab(s, gfpflags, node);
+ object = new_slab_objects(s, gfpflags, node, &c);
- if (page) {
- c = __this_cpu_ptr(s->cpu_slab);
- if (c->page)
- flush_slab(s, c);
+ if (unlikely(!object)) {
+ if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+ slab_out_of_memory(s, gfpflags, node);
- /*
- * No other reference to the page yet so we can
- * muck around with it freely without cmpxchg
- */
- object = page->freelist;
- page->freelist = NULL;
+ local_irq_restore(flags);
+ return NULL;
+ }
+ }
- stat(s, ALLOC_SLAB);
- c->node = page_to_nid(page);
- c->page = page;
+ if (likely(!kmem_cache_debug(s)))
goto load_freelist;
- }
- if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
- slab_out_of_memory(s, gfpflags, node);
- local_irq_restore(flags);
- return NULL;
-debug:
- if (!object || !alloc_debug_processing(s, page, object, addr))
- goto new_slab;
+ /* Only entered in the debug case */
+ if (!alloc_debug_processing(s, c->page, object, addr))
+ goto new_slab; /* Slab failed checks. Next slab needed */
c->freelist = get_freepointer(s, object);
deactivate_slab(s, c);
--