From: Christoph Lameter <cl@linux.com>
To: akpm@linux-foundation.org
Cc: rostedt@goodmis.org, linux-kernel@vger.kernel.org,
Thomas Gleixner <tglx@linutronix.de>,
linux-mm@kvack.org, penberg@kernel.org, iamjoonsoo.kim@lge.com
Subject: [RFC 3/4] slub: Drop ->page field from kmem_cache_cpu
Date: Wed, 22 Oct 2014 10:55:20 -0500
Message-ID: <20141022155527.158407162@linux.com>
In-Reply-To: <20141022155517.560385718@linux.com>
The ->page field can be dropped because the struct page address of a
slab can now always be calculated from an object or freelist pointer
via virt_to_head_page(). Since freelists are now terminated by an end
token instead of NULL, no valid freelist pointer is ever NULL, so a
NULL c->freelist is used to signify that the current cpu has no
percpu slab attached to it.
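
To illustrate the invariant (a sketch only, not part of the patch):
every value c->freelist can hold, including the end token, is an
address inside the slab page itself, so the owning page can be
recovered on demand. The helper name cpu_slab_page() below is
hypothetical; the patch open-codes virt_to_head_page(c->freelist) at
each call site instead:

	/* assumes linux/mm.h for virt_to_head_page() */
	static inline struct page *cpu_slab_page(struct kmem_cache_cpu *c)
	{
		/* NULL now means: no percpu slab attached to this cpu */
		if (!c->freelist)
			return NULL;
		/* virt_to_head_page() maps any address within a
		 * (possibly compound) slab page to its head page */
		return virt_to_head_page(c->freelist);
	}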
Signed-off-by: Christoph Lameter <cl@linux.com>
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -40,7 +40,6 @@ enum stat_item {
struct kmem_cache_cpu {
void **freelist; /* Pointer to next available object */
unsigned long tid; /* Globally unique transaction id */
- struct page *page; /* The slab from which we are allocating */
struct page *partial; /* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1611,7 +1611,6 @@ static void *get_partial_node(struct kme
available += objects;
if (!object) {
- c->page = page;
stat(s, ALLOC_FROM_PARTIAL);
object = t;
} else {
@@ -2049,10 +2048,9 @@ static void put_cpu_partial(struct kmem_
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
stat(s, CPUSLAB_FLUSH);
- deactivate_slab(s, c->page, c->freelist);
+ deactivate_slab(s, virt_to_head_page(c->freelist), c->freelist);
c->tid = next_tid(c->tid);
- c->page = NULL;
c->freelist = NULL;
}
@@ -2066,7 +2064,7 @@ static inline void __flush_cpu_slab(stru
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
if (likely(c)) {
- if (c->page)
+ if (c->freelist)
flush_slab(s, c);
unfreeze_partials(s, c);
@@ -2085,7 +2083,7 @@ static bool has_cpu_slab(int cpu, void *
struct kmem_cache *s = info;
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
- return c->page || c->partial;
+ return c->freelist || c->partial;
}
static void flush_all(struct kmem_cache *s)
@@ -2186,7 +2184,7 @@ static inline void *new_slab_objects(str
page = new_slab(s, flags, node);
if (page) {
c = raw_cpu_ptr(s->cpu_slab);
- if (c->page)
+ if (c->freelist)
flush_slab(s, c);
/*
@@ -2197,7 +2195,6 @@ static inline void *new_slab_objects(str
page->freelist = end_token(freelist);
stat(s, ALLOC_SLAB);
- c->page = page;
*pc = c;
} else
freelist = NULL;
@@ -2280,9 +2277,10 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
- page = c->page;
- if (!page)
+ if (!c->freelist || is_end_token(c->freelist))
goto new_slab;
+
+ page = virt_to_head_page(c->freelist);
redo:
if (unlikely(!node_match(page, node))) {
@@ -2311,7 +2309,7 @@ redo:
freelist = get_freelist(s, page);
if (!freelist || is_end_token(freelist)) {
- c->page = NULL;
+ c->freelist = NULL;
stat(s, DEACTIVATE_BYPASS);
goto new_slab;
}
@@ -2324,7 +2322,7 @@ load_freelist:
* page is pointing to the page from which the objects are obtained.
* That page must be frozen for per cpu allocations to work.
*/
- VM_BUG_ON(!c->page->frozen);
+ VM_BUG_ON(!virt_to_head_page(freelist)->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
local_irq_restore(flags);
@@ -2332,16 +2330,15 @@ load_freelist:
deactivate:
deactivate_slab(s, page, c->freelist);
- c->page = NULL;
c->freelist = NULL;
new_slab:
if (c->partial) {
- page = c->page = c->partial;
+ page = c->partial;
c->partial = page->next;
stat(s, CPU_PARTIAL_ALLOC);
- c->freelist = NULL;
+ c->freelist = end_token(page_address(page));
goto redo;
}
@@ -2353,7 +2350,7 @@ new_slab:
return NULL;
}
- page = c->page;
+ page = virt_to_head_page(freelist);
if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
goto load_freelist;
@@ -2363,7 +2360,6 @@ new_slab:
goto new_slab; /* Slab failed checks. Next slab needed */
deactivate_slab(s, page, get_freepointer(s, freelist));
- c->page = NULL;
c->freelist = NULL;
local_irq_restore(flags);
return freelist;
@@ -2384,7 +2380,6 @@ static __always_inline void *slab_alloc_
{
void **object;
struct kmem_cache_cpu *c;
- struct page *page;
unsigned long tid;
if (slab_pre_alloc_hook(s, gfpflags))
@@ -2416,8 +2411,7 @@ redo:
preempt_enable();
object = c->freelist;
- page = c->page;
- if (unlikely(!object || is_end_token(object) || !node_match(page, node))) {
+ if (unlikely(!object || is_end_token(object) || !node_match(virt_to_head_page(object), node))) {
object = __slab_alloc(s, gfpflags, node, addr, c);
stat(s, ALLOC_SLOWPATH);
} else {
@@ -2665,7 +2659,7 @@ redo:
tid = c->tid;
preempt_enable();
- if (likely(page == c->page)) {
+ if (likely(c->freelist && page == virt_to_head_page(c->freelist))) {
set_freepointer(s, object, c->freelist);
if (unlikely(!this_cpu_cmpxchg_double(
@@ -4191,10 +4185,10 @@ static ssize_t show_slab_objects(struct
int node;
struct page *page;
- page = ACCESS_ONCE(c->page);
- if (!page)
+ if (!c->freelist)
continue;
+ page = virt_to_head_page(c->freelist);
node = page_to_nid(page);
if (flags & SO_TOTAL)
x = page->objects;