From: Christoph Lameter <cl@linux.com>
To: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: David Rientjes <rientjes@google.com>,
Andi Kleen <andi@firstfloor.org>,
tj@kernel.org, Metathronius Galabant <m.galabant@googlemail.com>,
Matt Mackall <mpm@selenic.com>,
Eric Dumazet <eric.dumazet@gmail.com>,
Adrian Drzewiecki <z@drze.net>, Shaohua Li <shaohua.li@intel.com>,
Alex Shi <alex.shi@intel.com>,
linux-mm@kvack.org
Subject: [rfc 12/18] slub: Remove kmem_cache_cpu dependency from acquire slab
Date: Fri, 11 Nov 2011 14:07:23 -0600 [thread overview]
Message-ID: <20111111200733.112660970@linux.com> (raw)
In-Reply-To: <20111111200711.156817886@linux.com>
[-- Attachment #1: remove_kmem_cache_cpu_dependency_from_acquire_slab --]
[-- Type: text/plain, Size: 3520 bytes --]
Acquiring a partial slab no longer needs to store the page pointer in the
kmem_cache_cpu structure: the slab page can be determined later from the
freelist pointer via virt_to_head_page(). Drop the kmem_cache_cpu parameter
from get_partial_node(), get_any_partial(), get_partial() and
new_slab_objects(), and let __slab_alloc() derive c->page from the freelist
it gets back.
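
For illustration only (not part of the patch; the helper name slab_page_of()
is made up for exposition): the mapping that makes dropping c->page from
these paths safe is that any address inside a slab resolves back to the
slab's head page:

	#include <linux/mm.h>	/* virt_to_head_page() */

	/*
	 * All objects of a slab live within one (possibly compound) page,
	 * so the struct page can be recovered from any object address.
	 * This is what __slab_alloc() does after get_partial() or
	 * new_slab_objects() hand back a freelist pointer.
	 */
	static inline struct page *slab_page_of(const void *object)
	{
		return virt_to_head_page(object);
	}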
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 26 +++++++++++---------------
1 file changed, 11 insertions(+), 15 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-11-10 13:46:55.809479604 -0600
+++ linux-2.6/mm/slub.c 2011-11-10 14:33:56.815359070 -0600
@@ -1531,7 +1531,7 @@ static int put_cpu_partial(struct kmem_c
* Try to allocate a partial slab from a specific node.
*/
static void *get_partial_node(struct kmem_cache *s,
- struct kmem_cache_node *n, struct kmem_cache_cpu *c)
+ struct kmem_cache_node *n)
{
struct page *page, *page2;
void *object = NULL;
@@ -1555,7 +1555,6 @@ static void *get_partial_node(struct kme
break;
if (!object) {
- c->page = page;
stat(s, ALLOC_FROM_PARTIAL);
object = t;
available = page->objects - page->inuse;
@@ -1574,8 +1573,7 @@ static void *get_partial_node(struct kme
/*
* Get a page from somewhere. Search in increasing NUMA distances.
*/
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
- struct kmem_cache_cpu *c)
+static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
@@ -1615,7 +1613,7 @@ static struct page *get_any_partial(stru
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > s->min_partial) {
- object = get_partial_node(s, n, c);
+ object = get_partial_node(s, n);
if (object) {
put_mems_allowed();
return object;
@@ -1630,17 +1628,16 @@ static struct page *get_any_partial(stru
/*
* Get a partial page, lock it and return it.
*/
-static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
- struct kmem_cache_cpu *c)
+static void *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
void *object;
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
- object = get_partial_node(s, get_node(s, searchnode), c);
+ object = get_partial_node(s, get_node(s, searchnode));
if (object || node != NUMA_NO_NODE)
return object;
- return get_any_partial(s, flags, c);
+ return get_any_partial(s, flags);
}
#ifdef CONFIG_PREEMPT
@@ -2088,7 +2085,7 @@ slab_out_of_memory(struct kmem_cache *s,
}
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
- int node, struct kmem_cache_cpu **pc)
+ int node)
{
void *freelist;
struct kmem_cache_cpu *c;
@@ -2107,8 +2104,6 @@ static inline void *new_slab_objects(str
page->freelist = NULL;
stat(s, ALLOC_SLAB);
- c->page = page;
- *pc = c;
} else
freelist = NULL;
@@ -2225,10 +2220,10 @@ new_slab:
goto redo;
}
- freelist = get_partial(s, gfpflags, node, c);
+ freelist = get_partial(s, gfpflags, node);
if (!freelist)
- freelist = new_slab_objects(s, gfpflags, node, &c);
+ freelist = new_slab_objects(s, gfpflags, node);
if (unlikely(!freelist)) {
@@ -2239,7 +2234,8 @@ new_slab:
return NULL;
}
- page = c->page;
+ page = c->page = virt_to_head_page(freelist);
+
if (likely(!kmem_cache_debug(s)))
goto load_freelist;
--
Thread overview: 39+ messages
2011-11-11 20:07 [rfc 00/18] slub: irqless/lockless slow allocation paths Christoph Lameter
2011-11-11 20:07 ` [rfc 01/18] slub: Get rid of the node field Christoph Lameter
2011-11-14 21:42 ` Pekka Enberg
2011-11-15 16:07 ` Christoph Lameter
2011-11-20 23:01 ` David Rientjes
2011-11-21 17:17 ` Christoph Lameter
2011-11-11 20:07 ` [rfc 02/18] slub: Separate out kmem_cache_cpu processing from deactivate_slab Christoph Lameter
2011-11-20 23:10 ` David Rientjes
2011-11-11 20:07 ` [rfc 03/18] slub: Extract get_freelist from __slab_alloc Christoph Lameter
2011-11-14 21:43 ` Pekka Enberg
2011-11-15 16:08 ` Christoph Lameter
2011-12-13 20:31 ` Pekka Enberg
2011-11-20 23:18 ` David Rientjes
2011-11-11 20:07 ` [rfc 04/18] slub: Use freelist instead of "object" in __slab_alloc Christoph Lameter
2011-11-14 21:44 ` Pekka Enberg
2011-11-20 23:22 ` David Rientjes
2011-11-11 20:07 ` [rfc 05/18] slub: Simplify control flow in __slab_alloc() Christoph Lameter
2011-11-14 21:45 ` Pekka Enberg
2011-11-20 23:24 ` David Rientjes
2011-11-11 20:07 ` [rfc 06/18] slub: Use page variable instead of c->page Christoph Lameter
2011-11-14 21:46 ` Pekka Enberg
2011-11-20 23:27 ` David Rientjes
2011-11-11 20:07 ` [rfc 07/18] slub: pass page to node_match() instead of kmem_cache_cpu structure Christoph Lameter
2011-11-20 23:28 ` David Rientjes
2011-11-11 20:07 ` [rfc 08/18] slub: enable use of deactivate_slab with interrupts on Christoph Lameter
2011-11-11 20:07 ` [rfc 09/18] slub: Run deactivate_slab with interrupts enabled Christoph Lameter
2011-11-11 20:07 ` [rfc 10/18] slub: Enable use of get_partial " Christoph Lameter
2011-11-11 20:07 ` [rfc 11/18] slub: Acquire_slab() avoid loop Christoph Lameter
2011-11-11 20:07 ` Christoph Lameter [this message]
2011-11-11 20:07 ` [rfc 13/18] slub: Add functions to manage per cpu freelists Christoph Lameter
2011-11-11 20:07 ` [rfc 14/18] slub: Decomplicate the get_pointer_safe call and fixup statistics Christoph Lameter
2011-11-11 20:07 ` [rfc 15/18] slub: new_slab_objects() can also get objects from partial list Christoph Lameter
2011-11-11 20:07 ` [rfc 16/18] slub: Drop page field from kmem_cache_cpu Christoph Lameter
2011-11-11 20:07 ` [rfc 17/18] slub: Move __slab_free() into slab_free() Christoph Lameter
2011-11-11 20:07 ` [rfc 18/18] slub: Move __slab_alloc() into slab_alloc() Christoph Lameter
2011-11-16 17:39 ` [rfc 00/18] slub: irqless/lockless slow allocation paths Eric Dumazet
2011-11-16 17:45 ` Eric Dumazet
2011-11-20 23:32 ` David Rientjes
2011-11-20 23:30 ` David Rientjes