From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>,
Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Andrew Morton <akpm@linux-foundation.org>,
Roman Gushchin <roman.gushchin@linux.dev>,
Andrey Ryabinin <ryabinin.a.a@gmail.com>,
Alexander Potapenko <glider@google.com>,
Andrey Konovalov <andreyknvl@gmail.com>,
Dmitry Vyukov <dvyukov@google.com>,
Vincenzo Frascino <vincenzo.frascino@arm.com>,
Marco Elver <elver@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@kernel.org>,
Shakeel Butt <shakeelb@google.com>,
Muchun Song <muchun.song@linux.dev>,
Kees Cook <keescook@chromium.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
kasan-dev@googlegroups.com, cgroups@vger.kernel.org,
linux-hardening@vger.kernel.org
Subject: Re: [PATCH v2 07/21] mm/slab: remove CONFIG_SLAB code from slab common code
Date: Wed, 6 Dec 2023 18:05:38 +0900
Message-ID: <ZXA5YqZGAfNUQiIC@localhost.localdomain>
In-Reply-To: <20231120-slab-remove-slab-v2-7-9c9c70177183@suse.cz>
On Mon, Nov 20, 2023 at 07:34:18PM +0100, Vlastimil Babka wrote:
> In slab_common.c and slab.h headers, we can now remove all code behind
> CONFIG_SLAB and CONFIG_DEBUG_SLAB ifdefs, and remove all CONFIG_SLUB
> ifdefs.
>
> Reviewed-by: Kees Cook <keescook@chromium.org>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
> include/linux/slab.h | 14 ++---------
> mm/slab.h | 69 ++++------------------------------------------------
> mm/slab_common.c | 22 ++---------------
> 3 files changed, 9 insertions(+), 96 deletions(-)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index 34e43cddc520..b2015d0e01ad 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -24,7 +24,7 @@
>
> /*
> * Flags to pass to kmem_cache_create().
> - * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
> + * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
> */
> /* DEBUG: Perform (expensive) checks on alloc/free */
> #define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
> @@ -302,25 +302,15 @@ static inline unsigned int arch_slab_minalign(void)
> * Kmalloc array related definitions
> */
>
> -#ifdef CONFIG_SLAB
> /*
> - * SLAB and SLUB directly allocates requests fitting in to an order-1 page
> + * SLUB directly allocates requests fitting in to an order-1 page
> * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
> */
> #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
> #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
> #ifndef KMALLOC_SHIFT_LOW
> -#define KMALLOC_SHIFT_LOW 5
> -#endif
> -#endif
> -
> -#ifdef CONFIG_SLUB
> -#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
> -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
> -#ifndef KMALLOC_SHIFT_LOW
> #define KMALLOC_SHIFT_LOW 3
> #endif
> -#endif
>
> /* Maximum allocatable size */
> #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
> diff --git a/mm/slab.h b/mm/slab.h
> index 3d07fb428393..014c36ea51fa 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -42,21 +42,6 @@ typedef union {
> struct slab {
> unsigned long __page_flags;
>
> -#if defined(CONFIG_SLAB)
> -
> - struct kmem_cache *slab_cache;
> - union {
> - struct {
> - struct list_head slab_list;
> - void *freelist; /* array of free object indexes */
> - void *s_mem; /* first object */
> - };
> - struct rcu_head rcu_head;
> - };
> - unsigned int active;
> -
> -#elif defined(CONFIG_SLUB)
> -
> struct kmem_cache *slab_cache;
> union {
> struct {
> @@ -91,10 +76,6 @@ struct slab {
> };
> unsigned int __unused;
>
> -#else
> -#error "Unexpected slab allocator configured"
> -#endif
> -
> atomic_t __page_refcount;
> #ifdef CONFIG_MEMCG
> unsigned long memcg_data;
> @@ -111,7 +92,7 @@ SLAB_MATCH(memcg_data, memcg_data);
> #endif
> #undef SLAB_MATCH
> static_assert(sizeof(struct slab) <= sizeof(struct page));
> -#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
> +#if defined(system_has_freelist_aba)
> static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
> #endif
>
> @@ -228,13 +209,7 @@ static inline size_t slab_size(const struct slab *slab)
> return PAGE_SIZE << slab_order(slab);
> }
>
> -#ifdef CONFIG_SLAB
> -#include <linux/slab_def.h>
> -#endif
> -
> -#ifdef CONFIG_SLUB
> #include <linux/slub_def.h>
> -#endif
>
> #include <linux/memcontrol.h>
> #include <linux/fault-inject.h>
> @@ -320,26 +295,16 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
> SLAB_CACHE_DMA32 | SLAB_PANIC | \
> SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
>
> -#if defined(CONFIG_DEBUG_SLAB)
> -#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
> -#elif defined(CONFIG_SLUB_DEBUG)
> +#ifdef CONFIG_SLUB_DEBUG
> #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
> SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
> #else
> #define SLAB_DEBUG_FLAGS (0)
> #endif
>
> -#if defined(CONFIG_SLAB)
> -#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
> - SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
> - SLAB_ACCOUNT | SLAB_NO_MERGE)
> -#elif defined(CONFIG_SLUB)
> #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
> SLAB_TEMPORARY | SLAB_ACCOUNT | \
> SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
> -#else
> -#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
> -#endif
>
> /* Common flags available with current configuration */
> #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
> @@ -672,18 +637,14 @@ size_t __ksize(const void *objp);
>
> static inline size_t slab_ksize(const struct kmem_cache *s)
> {
> -#ifndef CONFIG_SLUB
> - return s->object_size;
> -
> -#else /* CONFIG_SLUB */
> -# ifdef CONFIG_SLUB_DEBUG
> +#ifdef CONFIG_SLUB_DEBUG
> /*
> * Debugging requires use of the padding between object
> * and whatever may come after it.
> */
> if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
> return s->object_size;
> -# endif
> +#endif
> if (s->flags & SLAB_KASAN)
> return s->object_size;
> /*
> @@ -697,7 +658,6 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
> * Else we can use all the padding etc for the allocation
> */
> return s->size;
> -#endif
> }
>
> static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
> @@ -775,23 +735,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
> * The slab lists for all objects.
> */
> struct kmem_cache_node {
> -#ifdef CONFIG_SLAB
> - raw_spinlock_t list_lock;
> - struct list_head slabs_partial; /* partial list first, better asm code */
> - struct list_head slabs_full;
> - struct list_head slabs_free;
> - unsigned long total_slabs; /* length of all slab lists */
> - unsigned long free_slabs; /* length of free slab list only */
> - unsigned long free_objects;
> - unsigned int free_limit;
> - unsigned int colour_next; /* Per-node cache coloring */
> - struct array_cache *shared; /* shared per node */
> - struct alien_cache **alien; /* on other nodes */
> - unsigned long next_reap; /* updated without locking */
> - int free_touched; /* updated without locking */
> -#endif
> -
> -#ifdef CONFIG_SLUB
> spinlock_t list_lock;
> unsigned long nr_partial;
> struct list_head partial;
> @@ -800,8 +743,6 @@ struct kmem_cache_node {
> atomic_long_t total_objects;
> struct list_head full;
> #endif
> -#endif
> -
> };
>
> static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
> @@ -818,7 +759,7 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
> if ((__n = get_node(__s, __node)))
>
>
> -#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
> +#ifdef CONFIG_SLUB_DEBUG
> void dump_unreclaimable_slab(void);
> #else
> static inline void dump_unreclaimable_slab(void)
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8d431193c273..63b8411db7ce 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -71,10 +71,8 @@ static int __init setup_slab_merge(char *str)
> return 1;
> }
>
> -#ifdef CONFIG_SLUB
> __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
> __setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
> -#endif
>
> __setup("slab_nomerge", setup_slab_nomerge);
> __setup("slab_merge", setup_slab_merge);
> @@ -197,10 +195,6 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
> if (s->size - size >= sizeof(void *))
> continue;
>
> - if (IS_ENABLED(CONFIG_SLAB) && align &&
> - (align > s->align || s->align % align))
> - continue;
> -
> return s;
> }
> return NULL;
> @@ -1222,12 +1216,8 @@ void cache_random_seq_destroy(struct kmem_cache *cachep)
> }
> #endif /* CONFIG_SLAB_FREELIST_RANDOM */
>
> -#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
> -#ifdef CONFIG_SLAB
> -#define SLABINFO_RIGHTS (0600)
> -#else
> +#ifdef CONFIG_SLUB_DEBUG
> #define SLABINFO_RIGHTS (0400)
> -#endif
>
> static void print_slabinfo_header(struct seq_file *m)
> {
> @@ -1235,18 +1225,10 @@ static void print_slabinfo_header(struct seq_file *m)
> * Output format version, so at least we can change it
> * without _too_ many complaints.
> */
> -#ifdef CONFIG_DEBUG_SLAB
> - seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
> -#else
> seq_puts(m, "slabinfo - version: 2.1\n");
> -#endif
> seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
> seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
> seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
> -#ifdef CONFIG_DEBUG_SLAB
> - seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
> - seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
> -#endif
> seq_putc(m, '\n');
> }
>
> @@ -1370,7 +1352,7 @@ static int __init slab_proc_init(void)
> }
> module_init(slab_proc_init);
>
> -#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
> +#endif /* CONFIG_SLUB_DEBUG */
>
> static __always_inline __realloc_size(2) void *
> __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>
> --
Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> 2.42.1
>
>
Thread overview: 56+ messages
2023-11-20 18:34 [PATCH v2 00/21] remove the SLAB allocator Vlastimil Babka
2023-11-20 18:34 ` [PATCH v2 01/21] mm/slab, docs: switch mm-api docs generation from slab.c to slub.c Vlastimil Babka
2023-11-24 0:46 ` David Rientjes
2023-12-05 3:53 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 02/21] mm/slab: remove CONFIG_SLAB from all Kconfig and Makefile Vlastimil Babka
2023-12-05 4:15 ` Hyeonggon Yoo
2023-12-05 10:14 ` Vlastimil Babka
2023-12-06 0:08 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 03/21] KASAN: remove code paths guarded by CONFIG_SLAB Vlastimil Babka
2023-11-21 8:23 ` Hyeonggon Yoo
2023-11-21 16:47 ` Andrey Konovalov
2023-12-05 4:26 ` Hyeonggon Yoo
2023-12-05 4:48 ` Hyeonggon Yoo
2023-12-05 10:16 ` Vlastimil Babka
2023-11-20 18:34 ` [PATCH v2 04/21] KFENCE: cleanup kfence_guarded_alloc() after CONFIG_SLAB removal Vlastimil Babka
2023-12-06 8:01 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 05/21] mm/memcontrol: remove CONFIG_SLAB #ifdef guards Vlastimil Babka
2023-12-06 8:12 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 06/21] cpu/hotplug: remove CPUHP_SLAB_PREPARE hooks Vlastimil Babka
2023-12-01 11:28 ` Thomas Gleixner
2023-12-06 8:28 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 07/21] mm/slab: remove CONFIG_SLAB code from slab common code Vlastimil Babka
2023-12-06 9:05 ` Hyeonggon Yoo [this message]
2023-11-20 18:34 ` [PATCH v2 08/21] mm/mempool/dmapool: remove CONFIG_DEBUG_SLAB ifdefs Vlastimil Babka
2023-12-06 9:10 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 09/21] mm/slab: remove mm/slab.c and slab_def.h Vlastimil Babka
2023-11-22 20:07 ` Christoph Lameter
2023-12-06 9:31 ` Hyeonggon Yoo
2023-12-06 9:37 ` Vlastimil Babka
2023-11-20 18:34 ` [PATCH v2 10/21] mm/slab: move struct kmem_cache_cpu declaration to slub.c Vlastimil Babka
2023-12-06 9:35 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 11/21] mm/slab: move the rest of slub_def.h to mm/slab.h Vlastimil Babka
2023-12-06 9:45 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 12/21] mm/slab: consolidate includes in the internal mm/slab.h Vlastimil Babka
2023-12-07 0:30 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 13/21] mm/slab: move pre/post-alloc hooks from slab.h to slub.c Vlastimil Babka
2023-12-07 0:43 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 14/21] mm/slab: move memcg related functions " Vlastimil Babka
2023-12-07 0:59 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 15/21] mm/slab: move struct kmem_cache_node " Vlastimil Babka
2023-12-07 1:11 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 16/21] mm/slab: move kfree() from slab_common.c " Vlastimil Babka
2023-12-05 4:38 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 17/21] mm/slab: move kmalloc_slab() to mm/slab.h Vlastimil Babka
2023-12-07 1:28 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 18/21] mm/slab: move kmalloc() functions from slab_common.c to slub.c Vlastimil Babka
2023-12-07 1:30 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 19/21] mm/slub: remove slab_alloc() and __kmem_cache_alloc_lru() wrappers Vlastimil Babka
2023-12-07 1:35 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 20/21] mm/slub: optimize alloc fastpath code layout Vlastimil Babka
2023-12-07 2:32 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 21/21] mm/slub: optimize free fast path " Vlastimil Babka
2023-12-07 2:40 ` Hyeonggon Yoo
2023-11-24 0:45 ` [PATCH v2 00/21] remove the SLAB allocator David Rientjes
2023-11-24 9:26 ` Vlastimil Babka
2023-12-07 2:45 ` Hyeonggon Yoo