From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>,
David Rientjes <rientjes@google.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Pekka Enberg <penberg@kernel.org>,
Roman Gushchin <roman.gushchin@linux.dev>,
Andrew Morton <akpm@linux-foundation.org>,
Linus Torvalds <torvalds@linux-foundation.org>,
Matthew Wilcox <willy@infradead.org>,
patches@lists.linux.dev, linux-mm@kvack.org,
linux-kernel@vger.kernel.org
Subject: Re: [PATCH 08/12] mm, slub: refactor free debug processing
Date: Sun, 27 Nov 2022 19:18:21 +0900
Message-ID: <Y4M5bWgjSWKZEXnO@hyeyoo>
In-Reply-To: <20221121171202.22080-9-vbabka@suse.cz>
On Mon, Nov 21, 2022 at 06:11:58PM +0100, Vlastimil Babka wrote:
> Since commit c7323a5ad078 ("mm/slub: restrict sysfs validation to debug
> caches and make it safe"), caches with debugging enabled use the
> free_debug_processing() function to do both the freeing checks and the
> actual freeing to the partial list under list_lock, bypassing the fast paths.
>
> We will want to use the same path for CONFIG_SLUB_TINY, but without the
> debugging checks, so refactor the code so that free_debug_processing()
> does only the checks, while the freeing is handled by a new function
> free_to_partial_list().
>
> For consistency, change the return parameter of alloc_debug_processing()
> from int to bool and correct the !SLUB_DEBUG variant to return true and
> not false. This didn't matter until now, but will in the following changes.
>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slub.c | 154 +++++++++++++++++++++++++++++-------------------------
>  1 file changed, 83 insertions(+), 71 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index bf726dd00f7d..fd56d7cca9c2 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1368,7 +1368,7 @@ static inline int alloc_consistency_checks(struct kmem_cache *s,
>  	return 1;
>  }
>
> -static noinline int alloc_debug_processing(struct kmem_cache *s,
> +static noinline bool alloc_debug_processing(struct kmem_cache *s,
>  					struct slab *slab, void *object, int orig_size)
>  {
>  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
> @@ -1380,7 +1380,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s,
>  	trace(s, slab, object, 1);
>  	set_orig_size(s, object, orig_size);
>  	init_object(s, object, SLUB_RED_ACTIVE);
> -	return 1;
> +	return true;
>
>  bad:
>  	if (folio_test_slab(slab_folio(slab))) {
> @@ -1393,7 +1393,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s,
>  		slab->inuse = slab->objects;
>  		slab->freelist = NULL;
>  	}
> -	return 0;
> +	return false;
>  }
>
>  static inline int free_consistency_checks(struct kmem_cache *s,
> @@ -1646,17 +1646,17 @@ static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
>  static inline
>  void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
>
> -static inline int alloc_debug_processing(struct kmem_cache *s,
> -	struct slab *slab, void *object, int orig_size) { return 0; }
> +static inline bool alloc_debug_processing(struct kmem_cache *s,
> +	struct slab *slab, void *object, int orig_size) { return true; }
>
> -static inline void free_debug_processing(
> -	struct kmem_cache *s, struct slab *slab,
> -	void *head, void *tail, int bulk_cnt,
> -	unsigned long addr) {}
> +static inline bool free_debug_processing(struct kmem_cache *s,
> +	struct slab *slab, void *head, void *tail, int *bulk_cnt,
> +	unsigned long addr, depot_stack_handle_t handle) { return true; }
>
>  static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
>  static inline int check_object(struct kmem_cache *s, struct slab *slab,
>  			void *object, u8 val) { return 1; }
> +static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
>  static inline void set_track(struct kmem_cache *s, void *object,
>  			     enum track_item alloc, unsigned long addr) {}
>  static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
> @@ -2833,38 +2833,28 @@ static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
>  }
>
>  /* Supports checking bulk free of a constructed freelist */
> -static noinline void free_debug_processing(
> -	struct kmem_cache *s, struct slab *slab,
> -	void *head, void *tail, int bulk_cnt,
> -	unsigned long addr)
> +static inline bool free_debug_processing(struct kmem_cache *s,
> +	struct slab *slab, void *head, void *tail, int *bulk_cnt,
> +	unsigned long addr, depot_stack_handle_t handle)
>  {
> -	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
> -	struct slab *slab_free = NULL;
> +	bool checks_ok = false;
>  	void *object = head;
>  	int cnt = 0;
> -	unsigned long flags;
> -	bool checks_ok = false;
> -	depot_stack_handle_t handle = 0;
> -
> -	if (s->flags & SLAB_STORE_USER)
> -		handle = set_track_prepare();
> -
> -	spin_lock_irqsave(&n->list_lock, flags);
>
>  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
>  		if (!check_slab(s, slab))
>  			goto out;
>  	}
>
> -	if (slab->inuse < bulk_cnt) {
> +	if (slab->inuse < *bulk_cnt) {
>  		slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
> -			 slab->inuse, bulk_cnt);
> +			 slab->inuse, *bulk_cnt);
>  		goto out;
>  	}
>
>  next_object:
>
> -	if (++cnt > bulk_cnt)
> +	if (++cnt > *bulk_cnt)
>  		goto out_cnt;
>
>  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
> @@ -2886,57 +2876,18 @@ static noinline void free_debug_processing(
>  	checks_ok = true;
>
>  out_cnt:
> -	if (cnt != bulk_cnt)
> +	if (cnt != *bulk_cnt) {
>  		slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
> -			 bulk_cnt, cnt);
> -
> -out:
> -	if (checks_ok) {
> -		void *prior = slab->freelist;
> -
> -		/* Perform the actual freeing while we still hold the locks */
> -		slab->inuse -= cnt;
> -		set_freepointer(s, tail, prior);
> -		slab->freelist = head;
> -
> -		/*
> -		 * If the slab is empty, and node's partial list is full,
> -		 * it should be discarded anyway no matter it's on full or
> -		 * partial list.
> -		 */
> -		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
> -			slab_free = slab;
> -
> -		if (!prior) {
> -			/* was on full list */
> -			remove_full(s, n, slab);
> -			if (!slab_free) {
> -				add_partial(n, slab, DEACTIVATE_TO_TAIL);
> -				stat(s, FREE_ADD_PARTIAL);
> -			}
> -		} else if (slab_free) {
> -			remove_partial(n, slab);
> -			stat(s, FREE_REMOVE_PARTIAL);
> -		}
> +			 *bulk_cnt, cnt);
> +		*bulk_cnt = cnt;
>  	}
>
> -	if (slab_free) {
> -		/*
> -		 * Update the counters while still holding n->list_lock to
> -		 * prevent spurious validation warnings
> -		 */
> -		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
> -	}
> -
> -	spin_unlock_irqrestore(&n->list_lock, flags);
> +out:
>
>  	if (!checks_ok)
>  		slab_fix(s, "Object at 0x%p not freed", object);
>
> -	if (slab_free) {
> -		stat(s, FREE_SLAB);
> -		free_slab(s, slab_free);
> -	}
> +	return checks_ok;
>  }
>  #endif /* CONFIG_SLUB_DEBUG */
>
> @@ -3453,6 +3404,67 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_node);
>
> +static noinline void free_to_partial_list(
> +	struct kmem_cache *s, struct slab *slab,
> +	void *head, void *tail, int bulk_cnt,
> +	unsigned long addr)
> +{
> +	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
> +	struct slab *slab_free = NULL;
> +	int cnt = bulk_cnt;
> +	unsigned long flags;
> +	depot_stack_handle_t handle = 0;
> +
> +	if (s->flags & SLAB_STORE_USER)
> +		handle = set_track_prepare();
> +
> +	spin_lock_irqsave(&n->list_lock, flags);
> +
> +	if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
> +		void *prior = slab->freelist;
> +
> +		/* Perform the actual freeing while we still hold the locks */
> +		slab->inuse -= cnt;
> +		set_freepointer(s, tail, prior);
> +		slab->freelist = head;
> +
> +		/*
> +		 * If the slab is empty, and node's partial list is full,
> +		 * it should be discarded anyway no matter it's on full or
> +		 * partial list.
> +		 */
> +		if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
> +			slab_free = slab;
> +
> +		if (!prior) {
> +			/* was on full list */
> +			remove_full(s, n, slab);
> +			if (!slab_free) {
> +				add_partial(n, slab, DEACTIVATE_TO_TAIL);
> +				stat(s, FREE_ADD_PARTIAL);
> +			}
> +		} else if (slab_free) {
> +			remove_partial(n, slab);
> +			stat(s, FREE_REMOVE_PARTIAL);
> +		}
> +	}
> +
> +	if (slab_free) {
> +		/*
> +		 * Update the counters while still holding n->list_lock to
> +		 * prevent spurious validation warnings
> +		 */
> +		dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
> +	}
> +
> +	spin_unlock_irqrestore(&n->list_lock, flags);
> +
> +	if (slab_free) {
> +		stat(s, FREE_SLAB);
> +		free_slab(s, slab_free);
> +	}
> +}
> +
>  /*
>   * Slow path handling. This may still be called frequently since objects
>   * have a longer lifetime than the cpu slabs in most processing loads.
> @@ -3479,7 +3491,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>  		return;
>
>  	if (kmem_cache_debug(s)) {
> -		free_debug_processing(s, slab, head, tail, cnt, addr);
> +		free_to_partial_list(s, slab, head, tail, cnt, addr);
>  		return;
>  	}
>
> --
> 2.38.1
>
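Just to summarize my reading of the split, since the diff is long: the
consistency checks stay in free_debug_processing(), which now only reports
whether the free is sane (clamping *bulk_cnt on a miscount), while
free_to_partial_list() owns the lock, the freelist update and the
partial/full list manipulation. A toy sketch of that control flow, with
made-up names (checks_only, free_objects) standing in for the real slub.c
symbols, and all locking and stat accounting elided:

#include <stdbool.h>
#include <stdio.h>

/* stands in for free_debug_processing(): checks only, no freeing */
static bool checks_only(int inuse, int *bulk_cnt)
{
	if (inuse < *bulk_cnt) {
		fprintf(stderr, "have %d allocated, %d to free\n",
			inuse, *bulk_cnt);
		return false;
	}
	return true;
}

/* stands in for free_to_partial_list(): lock, check, then free */
static void free_objects(int *inuse, int bulk_cnt)
{
	/* spin_lock_irqsave(&n->list_lock, flags) would go here */
	if (checks_only(*inuse, &bulk_cnt))
		*inuse -= bulk_cnt;	/* the actual freeing */
	/* spin_unlock_irqrestore(&n->list_lock, flags) */
}

int main(void)
{
	int inuse = 4;

	free_objects(&inuse, 2);	/* passes the check */
	free_objects(&inuse, 9);	/* rejected: 9 > 2 in use */
	printf("inuse now %d\n", inuse);	/* prints 2 */
	return 0;
}

With that split, a later patch can reuse the freeing half for
CONFIG_SLUB_TINY while the !SLUB_DEBUG stub compiles the checks away,
which matches the stated goal of the series.
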
Looks good to me.
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
--
Thanks,
Hyeonggon