From: "Harry Yoo (Oracle)" <harry@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>,
	Vlastimil Babka <vbabka@kernel.org>
Cc: Christoph Lameter <cl@gentwo.org>,
	David Rientjes <rientjes@google.com>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	Hao Li <hao.li@linux.dev>, Alexei Starovoitov <ast@kernel.org>,
	Uladzislau Rezki <urezki@gmail.com>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Frederic Weisbecker <frederic@kernel.org>,
	Neeraj Upadhyay <neeraj.upadhyay@kernel.org>,
	Joel Fernandes <joelagnelf@nvidia.com>,
	Josh Triplett <josh@joshtriplett.org>,
	Boqun Feng <boqun@kernel.org>, Zqiang <qiang.zhang@linux.dev>,
	Steven Rostedt <rostedt@goodmis.org>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	Lai Jiangshan <jiangshanlai@gmail.com>,
	rcu@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 6/8] mm/slab: wrap rcu sheaf handling with ifdef
Date: Thu, 16 Apr 2026 18:10:20 +0900	[thread overview]
Message-ID: <20260416091022.36823-7-harry@kernel.org> (raw)
In-Reply-To: <20260416091022.36823-1-harry@kernel.org>

Freeing objects via rcu sheaves only happens when
CONFIG_KVFREE_RCU_BATCHED is enabled. Wrap the related functions and
struct fields in #ifdef CONFIG_KVFREE_RCU_BATCHED to make this
dependency explicit.

Also remove a TODO about implementing __kvfree_rcu_barrier_on_cache()
for a specific slab cache, as there doesn't seem to be a simple and
effective way to do so.
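
For illustration only (not part of this patch): once the declarations
in mm/slab.h are guarded, any call site has to sit under the same
config option. The caller below is hypothetical, a minimal sketch of
the resulting pattern rather than code from this series:

	#ifdef CONFIG_KVFREE_RCU_BATCHED
		/* Try to queue the object on the per-CPU rcu_free sheaf. */
		if (__kfree_rcu_sheaf(s, obj, true))
			return;
	#endif
		/* Otherwise take the regular kvfree_rcu() batching path. */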

Signed-off-by: Harry Yoo (Oracle) <harry@kernel.org>
---
 mm/slab.h        |  3 +++
 mm/slab_common.c |  4 ----
 mm/slub.c        | 27 +++++++++++++++++++++++++--
 3 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index d7fd7626e9fe..bdad5f389490 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -409,9 +409,12 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
 	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
 }
 
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj, bool allow_spin);
 void flush_all_rcu_sheaves(void);
 void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
+#endif
+
 void defer_kvfree_rcu_barrier(void);
 
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 46a2bee1662b..347e52f1538c 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -2289,10 +2289,6 @@ void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
 		rcu_barrier();
 	}
 
-	/*
-	 * TODO: Introduce a version of __kvfree_rcu_barrier() that works
-	 * on a specific slab cache.
-	 */
 	__kvfree_rcu_barrier();
 }
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
diff --git a/mm/slub.c b/mm/slub.c
index d0db8d070570..91b8827d65da 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -421,7 +421,9 @@ struct slub_percpu_sheaves {
 	local_trylock_t lock;
 	struct slab_sheaf *main; /* never NULL when unlocked */
 	struct slab_sheaf *spare; /* empty or full, may be NULL */
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 	struct slab_sheaf *rcu_free; /* for batching kfree_rcu() */
+#endif
 };
 
 /*
@@ -2923,6 +2925,7 @@ static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf)
 	sheaf->size = 0;
 }
 
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 static bool __rcu_free_sheaf_prepare(struct kmem_cache *s,
 				     struct slab_sheaf *sheaf)
 {
@@ -2965,6 +2968,7 @@ static void rcu_free_sheaf_nobarn(struct rcu_head *head)
 
 	free_empty_sheaf(s, sheaf);
 }
+#endif
 
 /*
  * Caller needs to make sure migration is disabled in order to fully flush
@@ -2978,7 +2982,10 @@ static void rcu_free_sheaf_nobarn(struct rcu_head *head)
 static void pcs_flush_all(struct kmem_cache *s)
 {
 	struct slub_percpu_sheaves *pcs;
-	struct slab_sheaf *spare, *rcu_free;
+	struct slab_sheaf *spare;
+#ifdef CONFIG_KVFREE_RCU_BATCHED
+	struct slab_sheaf *rcu_free;
+#endif
 
 	local_lock(&s->cpu_sheaves->lock);
 	pcs = this_cpu_ptr(s->cpu_sheaves);
@@ -2986,8 +2993,10 @@ static void pcs_flush_all(struct kmem_cache *s)
 	spare = pcs->spare;
 	pcs->spare = NULL;
 
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 	rcu_free = pcs->rcu_free;
 	pcs->rcu_free = NULL;
+#endif
 
 	local_unlock(&s->cpu_sheaves->lock);
 
@@ -2996,8 +3005,10 @@ static void pcs_flush_all(struct kmem_cache *s)
 		free_empty_sheaf(s, spare);
 	}
 
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 	if (rcu_free)
 		call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
+#endif
 
 	sheaf_flush_main(s);
 }
@@ -3016,10 +3027,12 @@ static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu)
 		pcs->spare = NULL;
 	}
 
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 	if (pcs->rcu_free) {
 		call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn);
 		pcs->rcu_free = NULL;
 	}
+#endif
 }
 
 static void pcs_destroy(struct kmem_cache *s)
@@ -3056,7 +3069,9 @@ static void pcs_destroy(struct kmem_cache *s)
 		 */
 
 		WARN_ON(pcs->spare);
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 		WARN_ON(pcs->rcu_free);
+#endif
 
 		if (!WARN_ON(pcs->main->size)) {
 			free_empty_sheaf(s, pcs->main);
@@ -3937,7 +3952,11 @@ static bool has_pcs_used(int cpu, struct kmem_cache *s)
 
 	pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
 
-	return (pcs->spare || pcs->rcu_free || pcs->main->size);
+#ifdef CONFIG_KVFREE_RCU_BATCHED
+	if (pcs->rcu_free)
+		return true;
+#endif
+	return (pcs->spare || pcs->main->size);
 }
 
 /*
@@ -3995,6 +4014,7 @@ static void flush_all(struct kmem_cache *s)
 	cpus_read_unlock();
 }
 
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 static void flush_rcu_sheaf(struct work_struct *w)
 {
 	struct slub_percpu_sheaves *pcs;
@@ -4071,6 +4091,7 @@ void flush_all_rcu_sheaves(void)
 
 	rcu_barrier();
 }
+#endif /* CONFIG_KVFREE_RCU_BATCHED */
 
 static int slub_cpu_setup(unsigned int cpu)
 {
@@ -5825,6 +5846,7 @@ bool free_to_pcs(struct kmem_cache *s, void *object, bool allow_spin)
 	return true;
 }
 
+#ifdef CONFIG_KVFREE_RCU_BATCHED
 static void rcu_free_sheaf(struct rcu_head *head)
 {
 	struct slab_sheaf *sheaf;
@@ -6005,6 +6027,7 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj, bool allow_spin)
 	lock_map_release(&kfree_rcu_sheaf_map);
 	return false;
 }
+#endif /* CONFIG_KVFREE_RCU_BATCHED */
 
 static __always_inline bool can_free_to_pcs(struct slab *slab)
 {
-- 
2.43.0


