From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>,
	Christoph Lameter <cl@linux.com>,
	Pekka Enberg <penberg@kernel.org>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	Andrey Ryabinin <ryabinin.a.a@gmail.com>,
	Alexander Potapenko <glider@google.com>,
	Andrey Konovalov <andreyknvl@gmail.com>,
	Dmitry Vyukov <dvyukov@google.com>,
	Vincenzo Frascino <vincenzo.frascino@arm.com>,
	Marco Elver <elver@google.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Michal Hocko <mhocko@kernel.org>,
	Shakeel Butt <shakeelb@google.com>,
	Muchun Song <muchun.song@linux.dev>,
	Kees Cook <keescook@chromium.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	kasan-dev@googlegroups.com, cgroups@vger.kernel.org,
	linux-hardening@vger.kernel.org
Subject: Re: [PATCH v2 18/21] mm/slab: move kmalloc() functions from slab_common.c to slub.c
Date: Thu, 7 Dec 2023 10:30:57 +0900
Message-ID: <ZXEgUVrUuIHlgsec@localhost.localdomain>
In-Reply-To: <20231120-slab-remove-slab-v2-18-9c9c70177183@suse.cz>

On Mon, Nov 20, 2023 at 07:34:29PM +0100, Vlastimil Babka wrote:
> This eliminates a cross-compilation-unit call through
> __kmem_cache_alloc_node() and allows better inlining of the allocation
> fast path.
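
To make the inlining point concrete: before this patch, __kmalloc_node()
lived in slab_common.c and had to reach the allocator through the extern
__kmem_cache_alloc_node() in slub.c, so the compiler could not inline the
fast path across the file boundary. A rough sketch of the two shapes,
illustrative only:

	/* before: two compilation units, the call cannot be inlined */
	/* slab_common.c */
	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
	/* slub.c */
	void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
				      int node, size_t orig_size,
				      unsigned long caller)
	{
		return slab_alloc_node(s, NULL, gfpflags, node, caller, orig_size);
	}

	/* after: one compilation unit; slab_alloc_node() is static
	 * __always_inline in slub.c and folds into __do_kmalloc_node() */
	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
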
> 
> Reviewed-by: Kees Cook <keescook@chromium.org>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slab.h        |   3 --
>  mm/slab_common.c | 119 ----------------------------------------------------
>  mm/slub.c        | 126 +++++++++++++++++++++++++++++++++++++++++++++++++++----
>  3 files changed, 118 insertions(+), 130 deletions(-)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index 7d7cc7af614e..54deeb0428c6 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -416,9 +416,6 @@ kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
>  	return kmalloc_caches[kmalloc_type(flags, caller)][index];
>  }
>  
> -void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
> -			      int node, size_t orig_size,
> -			      unsigned long caller);
>  gfp_t kmalloc_fix_flags(gfp_t flags);
>  
>  /* Functions provided by the slab allocators */
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 31ade17a7ad9..238293b1dbe1 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -936,50 +936,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
>  	slab_state = UP;
>  }
>  
> -static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
> -static __always_inline
> -void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
> -{
> -	struct kmem_cache *s;
> -	void *ret;
> -
> -	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
> -		ret = __kmalloc_large_node(size, flags, node);
> -		trace_kmalloc(caller, ret, size,
> -			      PAGE_SIZE << get_order(size), flags, node);
> -		return ret;
> -	}
> -
> -	if (unlikely(!size))
> -		return ZERO_SIZE_PTR;
> -
> -	s = kmalloc_slab(size, flags, caller);
> -
> -	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
> -	ret = kasan_kmalloc(s, ret, size, flags);
> -	trace_kmalloc(caller, ret, size, s->size, flags, node);
> -	return ret;
> -}
> -
> -void *__kmalloc_node(size_t size, gfp_t flags, int node)
> -{
> -	return __do_kmalloc_node(size, flags, node, _RET_IP_);
> -}
> -EXPORT_SYMBOL(__kmalloc_node);
> -
> -void *__kmalloc(size_t size, gfp_t flags)
> -{
> -	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
> -}
> -EXPORT_SYMBOL(__kmalloc);
> -
> -void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
> -				  int node, unsigned long caller)
> -{
> -	return __do_kmalloc_node(size, flags, node, caller);
> -}
> -EXPORT_SYMBOL(__kmalloc_node_track_caller);
> -
>  /**
>   * __ksize -- Report full size of underlying allocation
>   * @object: pointer to the object
> @@ -1016,30 +972,6 @@ size_t __ksize(const void *object)
>  	return slab_ksize(folio_slab(folio)->slab_cache);
>  }
>  
> -void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
> -{
> -	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
> -					    size, _RET_IP_);
> -
> -	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
> -
> -	ret = kasan_kmalloc(s, ret, size, gfpflags);
> -	return ret;
> -}
> -EXPORT_SYMBOL(kmalloc_trace);
> -
> -void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
> -			 int node, size_t size)
> -{
> -	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
> -
> -	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
> -
> -	ret = kasan_kmalloc(s, ret, size, gfpflags);
> -	return ret;
> -}
> -EXPORT_SYMBOL(kmalloc_node_trace);
> -
>  gfp_t kmalloc_fix_flags(gfp_t flags)
>  {
>  	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
> @@ -1052,57 +984,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
>  	return flags;
>  }
>  
> -/*
> - * To avoid unnecessary overhead, we pass through large allocation requests
> - * directly to the page allocator. We use __GFP_COMP, because we will need to
> - * know the allocation order to free the pages properly in kfree.
> - */
> -
> -static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
> -{
> -	struct page *page;
> -	void *ptr = NULL;
> -	unsigned int order = get_order(size);
> -
> -	if (unlikely(flags & GFP_SLAB_BUG_MASK))
> -		flags = kmalloc_fix_flags(flags);
> -
> -	flags |= __GFP_COMP;
> -	page = alloc_pages_node(node, flags, order);
> -	if (page) {
> -		ptr = page_address(page);
> -		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
> -				      PAGE_SIZE << order);
> -	}
> -
> -	ptr = kasan_kmalloc_large(ptr, size, flags);
> -	/* As ptr might get tagged, call kmemleak hook after KASAN. */
> -	kmemleak_alloc(ptr, size, 1, flags);
> -	kmsan_kmalloc_large(ptr, size, flags);
> -
> -	return ptr;
> -}
> -
> -void *kmalloc_large(size_t size, gfp_t flags)
> -{
> -	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
> -
> -	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
> -		      flags, NUMA_NO_NODE);
> -	return ret;
> -}
> -EXPORT_SYMBOL(kmalloc_large);
> -
> -void *kmalloc_large_node(size_t size, gfp_t flags, int node)
> -{
> -	void *ret = __kmalloc_large_node(size, flags, node);
> -
> -	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
> -		      flags, node);
> -	return ret;
> -}
> -EXPORT_SYMBOL(kmalloc_large_node);
> -
>  #ifdef CONFIG_SLAB_FREELIST_RANDOM
>  /* Randomize a generic freelist */
>  static void freelist_randomize(unsigned int *list,
> diff --git a/mm/slub.c b/mm/slub.c
> index 2baa9e94d9df..d6bc15929d22 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3851,14 +3851,6 @@ void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_lru);
>  
> -void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
> -			      int node, size_t orig_size,
> -			      unsigned long caller)
> -{
> -	return slab_alloc_node(s, NULL, gfpflags, node,
> -			       caller, orig_size);
> -}
> -
>  /**
>   * kmem_cache_alloc_node - Allocate an object on the specified node
>   * @s: The cache to allocate from.
> @@ -3882,6 +3874,124 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_node);
>  
> +/*
> + * To avoid unnecessary overhead, we pass through large allocation requests
> + * directly to the page allocator. We use __GFP_COMP, because we will need to
> + * know the allocation order to free the pages properly in kfree.
> + */
> +static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
> +{
> +	struct page *page;
> +	void *ptr = NULL;
> +	unsigned int order = get_order(size);
> +
> +	if (unlikely(flags & GFP_SLAB_BUG_MASK))
> +		flags = kmalloc_fix_flags(flags);
> +
> +	flags |= __GFP_COMP;
> +	page = alloc_pages_node(node, flags, order);
> +	if (page) {
> +		ptr = page_address(page);
> +		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
> +				      PAGE_SIZE << order);
> +	}
> +
> +	ptr = kasan_kmalloc_large(ptr, size, flags);
> +	/* As ptr might get tagged, call kmemleak hook after KASAN. */
> +	kmemleak_alloc(ptr, size, 1, flags);
> +	kmsan_kmalloc_large(ptr, size, flags);
> +
> +	return ptr;
> +}
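
For context on the __GFP_COMP comment above: allocating the pages as a
compound page records the order in the head page, so kfree() can recover
it later without kmalloc storing the size anywhere. Roughly, the
large-object free path looks like this (a hedged sketch, not the verbatim
upstream code):

	struct folio *folio = virt_to_folio(object);

	if (!folio_test_slab(folio)) {
		/* large kmalloc: the order comes back from the compound head */
		unsigned int order = folio_order(folio);

		mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
				      -(PAGE_SIZE << order));
		__free_pages(folio_page(folio, 0), order);
	}
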
> +
> +void *kmalloc_large(size_t size, gfp_t flags)
> +{
> +	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
> +
> +	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
> +		      flags, NUMA_NO_NODE);
> +	return ret;
> +}
> +EXPORT_SYMBOL(kmalloc_large);
> +
> +void *kmalloc_large_node(size_t size, gfp_t flags, int node)
> +{
> +	void *ret = __kmalloc_large_node(size, flags, node);
> +
> +	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
> +		      flags, node);
> +	return ret;
> +}
> +EXPORT_SYMBOL(kmalloc_large_node);
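
Also worth noting why kmalloc_large() and kmalloc_large_node() stay
exported: if I read the header right, the inline kmalloc() in
<linux/slab.h> calls kmalloc_large() directly when a compile-time-constant
size is known to exceed KMALLOC_MAX_CACHE_SIZE, so modules need the
symbols. Simplified:

	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
		return kmalloc_large(size, flags);
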
> +
> +static __always_inline
> +void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
> +			unsigned long caller)
> +{
> +	struct kmem_cache *s;
> +	void *ret;
> +
> +	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
> +		ret = __kmalloc_large_node(size, flags, node);
> +		trace_kmalloc(caller, ret, size,
> +			      PAGE_SIZE << get_order(size), flags, node);
> +		return ret;
> +	}
> +
> +	if (unlikely(!size))
> +		return ZERO_SIZE_PTR;
> +
> +	s = kmalloc_slab(size, flags, caller);
> +
> +	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
> +	ret = kasan_kmalloc(s, ret, size, flags);
> +	trace_kmalloc(caller, ret, size, s->size, flags, node);
> +	return ret;
> +}
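
A quick illustration of the dispatch in __do_kmalloc_node() (sizes are
examples; KMALLOC_MAX_CACHE_SIZE is config-dependent, typically
2 * PAGE_SIZE under SLUB):

	void *a = kmalloc(64, GFP_KERNEL);        /* kmalloc-64 slab cache */
	void *b = kmalloc(32 * 1024, GFP_KERNEL); /* above KMALLOC_MAX_CACHE_SIZE:
						     __kmalloc_large_node() */
	void *c = kmalloc(0, GFP_KERNEL);         /* ZERO_SIZE_PTR, no allocation */

	kfree(a);
	kfree(b); /* order recovered from the compound page */
	kfree(c); /* kfree(ZERO_SIZE_PTR) is a no-op */
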
> +
> +void *__kmalloc_node(size_t size, gfp_t flags, int node)
> +{
> +	return __do_kmalloc_node(size, flags, node, _RET_IP_);
> +}
> +EXPORT_SYMBOL(__kmalloc_node);
> +
> +void *__kmalloc(size_t size, gfp_t flags)
> +{
> +	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
> +}
> +EXPORT_SYMBOL(__kmalloc);
> +
> +void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
> +				  int node, unsigned long caller)
> +{
> +	return __do_kmalloc_node(size, flags, node, caller);
> +}
> +EXPORT_SYMBOL(__kmalloc_node_track_caller);
> +
> +void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
> +{
> +	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
> +					    _RET_IP_, size);
> +
> +	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
> +
> +	ret = kasan_kmalloc(s, ret, size, gfpflags);
> +	return ret;
> +}
> +EXPORT_SYMBOL(kmalloc_trace);
> +
> +void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
> +			 int node, size_t size)
> +{
> +	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
> +
> +	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
> +
> +	ret = kasan_kmalloc(s, ret, size, gfpflags);
> +	return ret;
> +}
> +EXPORT_SYMBOL(kmalloc_node_trace);
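
And for completeness, these two trace entry points back the header-side
constant-size fast path: when the size is a compile-time constant, the
inline kmalloc() resolves the cache itself and calls kmalloc_trace()
directly, skipping the size-to-cache lookup. A simplified sketch of the
caller side, not the exact upstream text (the large-size constant branch
shown earlier is omitted):

	static __always_inline void *kmalloc(size_t size, gfp_t flags)
	{
		if (__builtin_constant_p(size) && size <= KMALLOC_MAX_CACHE_SIZE) {
			unsigned int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;
			return kmalloc_trace(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
		}
		return __kmalloc(size, flags);
	}
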
> +
>  static noinline void free_to_partial_list(
>  	struct kmem_cache *s, struct slab *slab,
>  	void *head, void *tail, int bulk_cnt,
> 
> -- 
> 2.42.1

Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
