From: Christian Brauner <brauner@kernel.org>
To: Vlastimil Babka <vbabka@suse.cz>, Jens Axboe <axboe@kernel.dk>,
	 Jann Horn <jannh@google.com>,
	 Linus Torvalds <torvalds@linux-foundation.org>,
	 Mike Rapoport <rppt@kernel.org>
Cc: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	 Christian Brauner <brauner@kernel.org>
Subject: [PATCH v2 07/15] slub: pull kmem_cache_open() into do_kmem_cache_create()
Date: Tue, 03 Sep 2024 16:20:48 +0200
Message-ID: <20240903-work-kmem_cache_args-v2-7-76f97e9a4560@kernel.org>
In-Reply-To: <20240903-work-kmem_cache_args-v2-0-76f97e9a4560@kernel.org>

do_kmem_cache_create() is the only caller of kmem_cache_open(), and a
follow-up patch will pass struct kmem_cache_args down to it, so pull the
body of kmem_cache_open() into do_kmem_cache_create().

Signed-off-by: Christian Brauner <brauner@kernel.org>
---
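The merge keeps a single cleanup path: err starts out as -EINVAL, every
failure jumps to the out label, and the cache is released there only when
err is non-zero. A minimal stand-alone sketch of that shape, using
hypothetical helper names (not the kernel code, just the pattern):

	#include <errno.h>

	struct cache;				/* stand-in for struct kmem_cache */
	int setup_sizes(struct cache *c);	/* hypothetical: returns 0 on failure */
	int setup_percpu(struct cache *c);	/* hypothetical: returns 0 on failure */
	int register_sysfs(struct cache *c);	/* hypothetical: returns 0 or -errno */
	void release(struct cache *c);		/* hypothetical: safe on partial setup */

	static int create(struct cache *c)
	{
		int err = -EINVAL;

		if (!setup_sizes(c))
			goto out;
		if (!setup_percpu(c))
			goto out;

		err = register_sysfs(c);
		if (err)
			goto out;
	out:
		if (err)
			release(c);	/* one release point covers every error path */
		return err;
	}
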
 mm/slub.c | 132 +++++++++++++++++++++++++++++---------------------------------
 1 file changed, 62 insertions(+), 70 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 23d9d783ff26..30f4ca6335c7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5290,65 +5290,6 @@ static int calculate_sizes(struct kmem_cache *s)
 	return !!oo_objects(s->oo);
 }
 
-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
-{
-	s->flags = kmem_cache_flags(flags, s->name);
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
-	s->random = get_random_long();
-#endif
-
-	if (!calculate_sizes(s))
-		goto error;
-	if (disable_higher_order_debug) {
-		/*
-		 * Disable debugging flags that store metadata if the min slab
-		 * order increased.
-		 */
-		if (get_order(s->size) > get_order(s->object_size)) {
-			s->flags &= ~DEBUG_METADATA_FLAGS;
-			s->offset = 0;
-			if (!calculate_sizes(s))
-				goto error;
-		}
-	}
-
-#ifdef system_has_freelist_aba
-	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
-		/* Enable fast mode */
-		s->flags |= __CMPXCHG_DOUBLE;
-	}
-#endif
-
-	/*
-	 * The larger the object size is, the more slabs we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
-	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
-
-	set_cpu_partial(s);
-
-#ifdef CONFIG_NUMA
-	s->remote_node_defrag_ratio = 1000;
-#endif
-
-	/* Initialize the pre-computed randomized freelist if slab is up */
-	if (slab_state >= UP) {
-		if (init_cache_random_seq(s))
-			goto error;
-	}
-
-	if (!init_kmem_cache_nodes(s))
-		goto error;
-
-	if (alloc_kmem_cache_cpus(s))
-		return 0;
-
-error:
-	__kmem_cache_release(s);
-	return -EINVAL;
-}
-
 static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 			      const char *text)
 {
@@ -5904,26 +5845,77 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 
 int do_kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
 {
-	int err;
+	int err = -EINVAL;
 
-	err = kmem_cache_open(s, flags);
-	if (err)
-		return err;
+	s->flags = kmem_cache_flags(flags, s->name);
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+	s->random = get_random_long();
+#endif
+
+	if (!calculate_sizes(s))
+		goto out;
+	if (disable_higher_order_debug) {
+		/*
+		 * Disable debugging flags that store metadata if the min slab
+		 * order increased.
+		 */
+		if (get_order(s->size) > get_order(s->object_size)) {
+			s->flags &= ~DEBUG_METADATA_FLAGS;
+			s->offset = 0;
+			if (!calculate_sizes(s))
+				goto out;
+		}
+	}
+
+#ifdef system_has_freelist_aba
+	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
+		/* Enable fast mode */
+		s->flags |= __CMPXCHG_DOUBLE;
+	}
+#endif
+
+	/*
+	 * The larger the object size is, the more slabs we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
+
+	set_cpu_partial(s);
+
+#ifdef CONFIG_NUMA
+	s->remote_node_defrag_ratio = 1000;
+#endif
+
+	/* Initialize the pre-computed randomized freelist if slab is up */
+	if (slab_state >= UP) {
+		if (init_cache_random_seq(s))
+			goto out;
+	}
+
+	if (!init_kmem_cache_nodes(s))
+		goto out;
+
+	if (!alloc_kmem_cache_cpus(s))
+		goto out;
 
 	/* Mutex is not taken during early boot */
-	if (slab_state <= UP)
-		return 0;
+	if (slab_state <= UP) {
+		err = 0;
+		goto out;
+	}
 
 	err = sysfs_slab_add(s);
-	if (err) {
-		__kmem_cache_release(s);
-		return err;
-	}
+	if (err)
+		goto out;
 
 	if (s->flags & SLAB_STORE_USER)
 		debugfs_slab_add(s);
 
-	return 0;
+out:
+	if (err)
+		__kmem_cache_release(s);
+	return err;
 }
 
 #ifdef SLAB_SUPPORTS_SYSFS

-- 
2.45.2



Thread overview: 67+ messages
2024-09-03 14:20 [PATCH v2 00/15] slab: add struct kmem_cache_args Christian Brauner
2024-09-03 14:20 ` [PATCH v2 01/15] sl*b: s/__kmem_cache_create/do_kmem_cache_create/g Christian Brauner
2024-09-04  4:52   ` Mike Rapoport
2024-09-03 14:20 ` [PATCH v2 02/15] slab: add struct kmem_cache_args Christian Brauner
2024-09-04  4:54   ` Mike Rapoport
2024-09-04  8:13   ` Vlastimil Babka
2024-09-04  9:06     ` Christian Brauner
2024-09-04 15:16   ` Mike Rapoport
2024-09-04 15:48     ` Christian Brauner
2024-09-04 16:16       ` Mike Rapoport
2024-09-04 16:53         ` Christian Brauner
2024-09-04 15:49     ` Vlastimil Babka
2024-09-04 16:16       ` Mike Rapoport
2024-09-04 16:22         ` Vlastimil Babka
2024-09-04 18:21           ` Christian Brauner
2024-09-04 18:53             ` Linus Torvalds
2024-09-04 20:10               ` Christian Brauner
2024-09-03 14:20 ` [PATCH v2 03/15] slab: port kmem_cache_create() to " Christian Brauner
2024-09-04  4:55   ` Mike Rapoport
2024-09-03 14:20 ` [PATCH v2 04/15] slab: port kmem_cache_create_rcu() " Christian Brauner
2024-09-04  4:55   ` Mike Rapoport
2024-09-03 14:20 ` [PATCH v2 05/15] slab: port kmem_cache_create_usercopy() " Christian Brauner
2024-09-04  4:56   ` Mike Rapoport
2024-09-04  8:14   ` Vlastimil Babka
2024-09-04  8:59     ` Christian Brauner
2024-09-03 14:20 ` [PATCH v2 06/15] slab: pass struct kmem_cache_args to create_cache() Christian Brauner
2024-09-04  4:59   ` Mike Rapoport
2024-09-03 14:20 ` Christian Brauner [this message]
2024-09-04  5:02   ` [PATCH v2 07/15] slub: pull kmem_cache_open() into do_kmem_cache_create() Mike Rapoport
2024-09-03 14:20 ` [PATCH v2 08/15] slab: pass struct kmem_cache_args to do_kmem_cache_create() Christian Brauner
2024-09-04  5:04   ` Mike Rapoport
2024-09-03 14:20 ` [PATCH v2 09/15] sl*b: remove rcu_freeptr_offset from struct kmem_cache Christian Brauner
2024-09-04  5:08   ` Mike Rapoport
2024-09-04  8:16   ` Vlastimil Babka
2024-09-04  8:58     ` Christian Brauner
2024-09-03 14:20 ` [PATCH v2 10/15] slab: port KMEM_CACHE() to struct kmem_cache_args Christian Brauner
2024-09-04  5:08   ` Mike Rapoport
2024-09-03 14:20 ` [PATCH v2 11/15] slab: port KMEM_CACHE_USERCOPY() " Christian Brauner
2024-09-04  5:09   ` Mike Rapoport
2024-09-03 14:20 ` [PATCH v2 12/15] slab: create kmem_cache_create() compatibility layer Christian Brauner
2024-09-04  5:14   ` Mike Rapoport
2024-09-04  9:44     ` [PATCH 17/16] slab: make kmem_cache_create_usercopy() static inline Christian Brauner
2024-09-04  9:44     ` [PATCH 18/16] slab: make __kmem_cache_create() " Christian Brauner
2024-09-04  9:45     ` [PATCH v2 12/15] slab: create kmem_cache_create() compatibility layer Christian Brauner
2024-09-04 10:50       ` Vlastimil Babka
2024-09-04 11:38         ` Christian Brauner
2024-09-04 13:33           ` Vlastimil Babka
2024-09-04 14:44             ` Christian Brauner
2024-09-04 15:11               ` Mike Rapoport
2024-09-04 15:38                 ` Christian Brauner
2024-09-04 15:40                 ` Vlastimil Babka
2024-09-03 14:20 ` [PATCH v2 13/15] file: port to struct kmem_cache_args Christian Brauner
2024-09-04  5:15   ` Mike Rapoport
2024-09-03 14:20 ` [PATCH v2 14/15] slab: remove kmem_cache_create_rcu() Christian Brauner
2024-09-04  5:15   ` Mike Rapoport
2024-09-04  8:18   ` Vlastimil Babka
2024-09-04  8:55     ` Christian Brauner
2024-09-03 14:20 ` [PATCH v2 15/15] io_uring: port to struct kmem_cache_args Christian Brauner
2024-09-04  5:16   ` Mike Rapoport
2024-09-04  8:20   ` Vlastimil Babka
2024-09-04  8:50     ` Christian Brauner
2024-09-03 19:22 ` [PATCH v2 00/15] slab: add " Kees Cook
2024-09-03 19:25 ` Jens Axboe
2024-09-06  6:49   ` Christian Brauner
2024-09-04  8:25 ` Vlastimil Babka
2024-09-04  8:42   ` Christian Brauner
2024-09-04  9:05     ` Vlastimil Babka
