From: "Harry Yoo (Oracle)" <harry@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>,
Vlastimil Babka <vbabka@kernel.org>
Cc: Christoph Lameter <cl@gentwo.org>,
David Rientjes <rientjes@google.com>,
Roman Gushchin <roman.gushchin@linux.dev>,
Hao Li <hao.li@linux.dev>, Alexei Starovoitov <ast@kernel.org>,
Uladzislau Rezki <urezki@gmail.com>,
"Paul E . McKenney" <paulmck@kernel.org>,
Frederic Weisbecker <frederic@kernel.org>,
Neeraj Upadhyay <neeraj.upadhyay@kernel.org>,
Joel Fernandes <joelagnelf@nvidia.com>,
Josh Triplett <josh@joshtriplett.org>,
Boqun Feng <boqun@kernel.org>, Zqiang <qiang.zhang@linux.dev>,
Steven Rostedt <rostedt@goodmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
Lai Jiangshan <jiangshanlai@gmail.com>,
rcu@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 5/8] mm/slab: make kfree_rcu_nolock() work with sheaves
Date: Thu, 16 Apr 2026 18:10:19 +0900 [thread overview]
Message-ID: <20260416091022.36823-6-harry@kernel.org> (raw)
In-Reply-To: <20260416091022.36823-1-harry@kernel.org>
Teach kfree_rcu_sheaf() how to handle the !allow_spin case. Similar to
__pcs_replace_full_main(), try to get an empty sheaf from pcs->spare or
the barn, but don't add !allow_spin support for alloc_empty_sheaf() and
fail early instead.
Since call_rcu() cannot be invoked from NMI context, kfree_rcu_sheaf()
fails in the !allow_spin case when the rcu sheaf becomes full.
Signed-off-by: Harry Yoo (Oracle) <harry@kernel.org>
---
mm/slab.h | 2 +-
mm/slab_common.c | 7 +++----
mm/slub.c | 14 ++++++++++++--
3 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index ae2e990e8dc2..d7fd7626e9fe 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -409,7 +409,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}
-bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
+bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj, bool allow_spin);
void flush_all_rcu_sheaves(void);
void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
void defer_kvfree_rcu_barrier(void);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e840956233dd..46a2bee1662b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1716,7 +1716,7 @@ static void kfree_rcu_work(struct work_struct *work)
kvfree_rcu_list(head);
}
-static bool kfree_rcu_sheaf(void *obj)
+static bool kfree_rcu_sheaf(void *obj, bool allow_spin)
{
struct kmem_cache *s;
struct slab *slab;
@@ -1730,7 +1730,7 @@ static bool kfree_rcu_sheaf(void *obj)
s = slab->slab_cache;
if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id()))
- return __kfree_rcu_sheaf(s, obj);
+ return __kfree_rcu_sheaf(s, obj, allow_spin);
return false;
}
@@ -2111,8 +2111,7 @@ void kvfree_call_rcu_ptr(struct rcu_ptr *head, void *ptr, bool allow_spin)
IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)))
goto defer_free;
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
- (allow_spin && kfree_rcu_sheaf(ptr)))
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kfree_rcu_sheaf(ptr, allow_spin))
return;
// Queue the object but don't yet schedule the batch.
diff --git a/mm/slub.c b/mm/slub.c
index 6f658ec00751..d0db8d070570 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5895,7 +5895,7 @@ static void rcu_free_sheaf(struct rcu_head *head)
*/
static DEFINE_WAIT_OVERRIDE_MAP(kfree_rcu_sheaf_map, LD_WAIT_CONFIG);
-bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
+bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj, bool allow_spin)
{
struct slub_percpu_sheaves *pcs;
struct slab_sheaf *rcu_sheaf;
@@ -5933,7 +5933,7 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
goto fail;
}
- empty = barn_get_empty_sheaf(barn, true);
+ empty = barn_get_empty_sheaf(barn, allow_spin);
if (empty) {
pcs->rcu_free = empty;
@@ -5942,6 +5942,10 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
local_unlock(&s->cpu_sheaves->lock);
+ /* It's easier to fall back than trying harder with !allow_spin */
+ if (!allow_spin)
+ goto fail;
+
empty = alloc_empty_sheaf(s, GFP_NOWAIT);
if (!empty)
@@ -5973,6 +5977,12 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
rcu_sheaf = NULL;
} else {
+ if (unlikely(!allow_spin)) {
+ /* call_rcu() cannot be called in an unknown context */
+ rcu_sheaf->size--;
+ local_unlock(&s->cpu_sheaves->lock);
+ goto fail;
+ }
pcs->rcu_free = NULL;
rcu_sheaf->node = numa_node_id();
}
--
2.43.0
next prev parent reply other threads:[~2026-04-16 9:10 UTC|newest]
Thread overview: 40+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-16 9:10 [RFC PATCH v2 0/8] kvfree_rcu() improvements Harry Yoo (Oracle)
2026-04-16 9:10 ` [PATCH 1/8] mm/slab: introduce k[v]free_rcu() with struct rcu_ptr Harry Yoo (Oracle)
2026-04-22 14:41 ` Vlastimil Babka (SUSE)
2026-04-23 1:36 ` Harry Yoo (Oracle)
2026-04-16 9:10 ` [PATCH 2/8] fs/dcache: use rcu_ptr instead of rcu_head for external names Harry Yoo (Oracle)
2026-04-21 20:21 ` Al Viro
2026-04-22 1:16 ` Harry Yoo (Oracle)
2026-04-16 9:10 ` [PATCH 3/8] mm/slab: move kfree_rcu_cpu[_work] definitions Harry Yoo (Oracle)
2026-04-16 9:10 ` [PATCH 4/8] mm/slab: introduce kfree_rcu_nolock() Harry Yoo (Oracle)
2026-04-21 22:46 ` Alexei Starovoitov
2026-04-21 23:10 ` Paul E. McKenney
2026-04-21 23:14 ` Alexei Starovoitov
2026-04-22 3:02 ` Harry Yoo (Oracle)
2026-04-22 14:42 ` Uladzislau Rezki
2026-04-23 1:08 ` Harry Yoo (Oracle)
2026-04-23 1:56 ` Harry Yoo (Oracle)
2026-04-27 18:08 ` Vlastimil Babka (SUSE)
2026-04-27 18:51 ` Paul E. McKenney
2026-04-23 2:14 ` Harry Yoo (Oracle)
2026-04-23 4:23 ` Harry Yoo (Oracle)
2026-04-23 11:35 ` Uladzislau Rezki
2026-04-28 13:12 ` Harry Yoo (Oracle)
2026-04-30 12:10 ` Uladzislau Rezki
2026-04-27 13:08 ` Vlastimil Babka (SUSE)
2026-04-16 9:10 ` Harry Yoo (Oracle) [this message]
2026-04-27 13:32 ` [PATCH 5/8] mm/slab: make kfree_rcu_nolock() work with sheaves Vlastimil Babka (SUSE)
2026-04-27 13:53 ` Vlastimil Babka (SUSE)
2026-04-27 14:45 ` Alexei Starovoitov
2026-04-27 15:08 ` Vlastimil Babka (SUSE)
2026-04-27 15:11 ` Alexei Starovoitov
2026-04-16 9:10 ` [PATCH 6/8] mm/slab: wrap rcu sheaf handling with ifdef Harry Yoo (Oracle)
2026-04-27 15:47 ` Vlastimil Babka (SUSE)
2026-04-16 9:10 ` [PATCH 7/8] mm/slab: introduce deferred submission of rcu sheaves Harry Yoo (Oracle)
2026-04-21 22:51 ` Alexei Starovoitov
2026-04-22 3:11 ` Harry Yoo (Oracle)
2026-04-27 15:55 ` Vlastimil Babka (SUSE)
2026-04-16 9:10 ` [PATCH 8/8] lib/tests/slub_kunit: add a test case for kfree_rcu_nolock() Harry Yoo (Oracle)
2026-04-22 14:30 ` [RFC PATCH v2 0/8] kvfree_rcu() improvements Vlastimil Babka (SUSE)
2026-04-22 22:41 ` Paul E. McKenney
2026-04-23 1:31 ` Harry Yoo (Oracle)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260416091022.36823-6-harry@kernel.org \
--to=harry@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=ast@kernel.org \
--cc=boqun@kernel.org \
--cc=cl@gentwo.org \
--cc=frederic@kernel.org \
--cc=hao.li@linux.dev \
--cc=jiangshanlai@gmail.com \
--cc=joelagnelf@nvidia.com \
--cc=josh@joshtriplett.org \
--cc=linux-mm@kvack.org \
--cc=mathieu.desnoyers@efficios.com \
--cc=neeraj.upadhyay@kernel.org \
--cc=paulmck@kernel.org \
--cc=qiang.zhang@linux.dev \
--cc=rcu@vger.kernel.org \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=rostedt@goodmis.org \
--cc=urezki@gmail.com \
--cc=vbabka@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.