From mboxrd@z Thu Jan 1 00:00:00 1970
From: Steven Rostedt
Subject: [PATCH RT 2/6] mm/slub: move slab initialization into irq enabled region
Date: Fri, 07 Aug 2015 10:01:11 -0400
Message-ID: <20150807140121.297225820@goodmis.org>
References: <20150807140109.299360883@goodmis.org>
Mime-Version: 1.0
Content-Type: text/plain; charset=ISO-8859-15
Cc: Thomas Gleixner, Carsten Emde, Sebastian Andrzej Siewior, John Kacur,
	Paul Gortmaker, Christoph Lameter, Pekka Enberg, David Rientjes,
	Joonsoo Kim, Peter Zijlstra, Andrew Morton
To: linux-kernel@vger.kernel.org, linux-rt-users
Return-path:
Content-Disposition: inline; filename=0002-mm-slub-move-slab-initialization-into-irq-enabled-re.patch
Sender: linux-kernel-owner@vger.kernel.org
List-Id: linux-rt-users.vger.kernel.org

3.12.44-rt62-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Thomas Gleixner

Initializing a new slab can introduce rather large latencies because most
of the initialization runs always with interrupts disabled. There is no
point in doing so. The newly allocated slab is not visible yet, so there
is no reason to protect it against concurrent alloc/free.

Move the expensive parts of the initialization into allocate_slab(), so
for all allocations with GFP_WAIT set, interrupts are enabled.

Signed-off-by: Thomas Gleixner
Acked-by: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Cc: Sebastian Andrzej Siewior
Cc: Steven Rostedt
Cc: Peter Zijlstra
Signed-off-by: Andrew Morton
Signed-off-by: Steven Rostedt
---
 mm/slub.c | 80 +++++++++++++++++++++++++++++++--------------------------------
 1 file changed, 39 insertions(+), 41 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index b982cc52d454..3a5d54e75ffe 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1276,6 +1276,14 @@ struct slub_free_list {
 };
 static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+				void *object)
+{
+	setup_object_debug(s, page, object);
+	if (unlikely(s->ctor))
+		s->ctor(object);
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1298,6 +1306,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	bool enableirqs;
+	void *start, *last, *p;
+	int idx, order;
 
 	flags &= gfp_allowed_mask;
 
@@ -1324,13 +1334,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Try a lower order alloc if possible
 		 */
 		page = alloc_slab_page(flags, node, oo);
-
-		if (page)
-			stat(s, ORDER_FALLBACK);
+		if (unlikely(!page))
+			goto out;
+		stat(s, ORDER_FALLBACK);
 	}
 
-	if (kmemcheck_enabled && page
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+	if (kmemcheck_enabled &&
+	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -1345,45 +1355,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
-	if (enableirqs)
-		local_irq_disable();
-	if (!page)
-		return NULL;
-
 	page->objects = oo_objects(oo);
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
-	return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-				void *object)
-{
-	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor))
-		s->ctor(object);
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page *page;
-	void *start;
-	void *last;
-	void *p;
-	int order;
-
-	BUG_ON(flags & GFP_SLAB_BUG_MASK);
-
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
 
 	order = compound_order(page);
-	inc_slabs_node(s, page_to_nid(page), page->objects);
 	memcg_bind_pages(s, order);
 	page->slab_cache = s;
 	__SetPageSlab(page);
@@ -1407,10 +1381,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->freelist = start;
 	page->inuse = page->objects;
 	page->frozen = 1;
+
 out:
+	if (enableirqs)
+		local_irq_disable();
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		1 << oo_order(oo));
+
+	inc_slabs_node(s, page_to_nid(page), page->objects);
+
+	return page;
+}
+
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
+
+	return allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int order = compound_order(page);
-- 
2.1.4
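
[Editor's illustration, not part of the patch.] The shape of the change is: run the
expensive per-object initialization while interrupts are still enabled, because the
fresh slab is not visible to any other context yet, and only disable interrupts again
right before handing the result back to the caller. Below is a minimal, self-contained
user-space C sketch of that pattern under stated assumptions: irq_disable()/irq_enable(),
may_block and this struct slab are hypothetical stand-ins for local_irq_disable()/
local_irq_enable(), the GFP_WAIT test and the real slab, not kernel APIs.

/* Hypothetical user-space model of the pattern, not kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_OBJECTS 16

static bool irqs_off;                        /* stands in for the CPU irq flag */

static void irq_disable(void) { irqs_off = true;  }  /* ~ local_irq_disable() */
static void irq_enable(void)  { irqs_off = false; }  /* ~ local_irq_enable()  */

struct slab {
	int nr;
	long objects[NR_OBJECTS];
};

/* Called with "interrupts disabled"; mirrors the reworked allocate_slab(). */
static struct slab *allocate_slab(bool may_block)
{
	struct slab *slab;

	if (may_block)
		irq_enable();             /* open the window for the slow part  */

	slab = malloc(sizeof(*slab));     /* page allocation stand-in           */
	if (slab) {
		slab->nr = NR_OBJECTS;
		/* Expensive "ctor" loop: nobody else can see this slab yet,
		 * so it does not need to run with interrupts disabled.       */
		for (int i = 0; i < NR_OBJECTS; i++)
			slab->objects[i] = i * i;
	}

	if (may_block)
		irq_disable();            /* like the out: label in the patch   */
	return slab;                      /* caller sees irqs disabled again    */
}

int main(void)
{
	irq_disable();                    /* caller's atomic context            */
	struct slab *slab = allocate_slab(true);
	irq_enable();

	printf("%s\n", slab ? "slab initialized with interrupts enabled"
			    : "allocation failed");
	free(slab);
	return 0;
}

The caller's view is unchanged: it enters and leaves with interrupts disabled. Only the
window in which the expensive work runs moves, which is what keeps the initialization
latencies out of the irq-disabled region.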