From: "Harry Yoo (Oracle)" <harry@kernel.org>
To: Vlastimil Babka <vbabka@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
Hao Li <hao.li@linux.dev>, Christoph Lameter <cl@gentwo.org>,
David Rientjes <rientjes@google.com>,
Roman Gushchin <roman.gushchin@linux.dev>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Suren Baghdasaryan <surenb@google.com>,
"Liam R. Howlett" <liam@infradead.org>
Subject: [PATCH RFC 7/8] mm/slab: add pcs->lock lockdep assert when accessing the barn
Date: Sat, 16 May 2026 01:24:31 +0900 [thread overview]
Message-ID: <20260516-sheaves-tuning-v1-7-221aa3e1d829@kernel.org> (raw)
In-Reply-To: <20260516-sheaves-tuning-v1-0-221aa3e1d829@kernel.org>
If the cache's capacity changes while a CPU is getting/putting
a sheaf from/to the barn, the writer performing the capacity change
is responsible for flushing and freeing those stale sheaves.
However, that can be done only if CPUs hold pcs->lock when accessing
the barn.
Add lockdep_assert_held() on the pcs lock whenever moving a sheaf
to/from the barn. Since struct slab_sheaf no longer stores a pointer
to its cache, pass the kmem_cache as a new parameter to the barn
helpers so the assertion can reach the per-CPU sheaves lock.
When lockdep is disabled, the assert is a no-op and the compiler can
optimize away the unused parameter (since these helpers are static).
Signed-off-by: Harry Yoo (Oracle) <harry@kernel.org>
---
mm/slub.c | 70 +++++++++++++++++++++++++++++++++++++++------------------------
1 file changed, 43 insertions(+), 27 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7def24fdfae6..856639d3d3f0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3142,12 +3142,15 @@ static void pcs_destroy(struct kmem_cache *s)
s->cpu_sheaves = NULL;
}
-static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn,
+static struct slab_sheaf *barn_get_empty_sheaf(struct kmem_cache *s,
+ struct node_barn *barn,
bool allow_spin)
{
struct slab_sheaf *empty = NULL;
unsigned long flags;
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
+
if (!data_race(barn->nr_empty))
return NULL;
@@ -3174,10 +3177,13 @@ static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn,
* empty or full sheaf limits for simplicity.
*/
-static void barn_put_empty_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
+static void barn_put_empty_sheaf(struct kmem_cache *s, struct node_barn *barn,
+ struct slab_sheaf *sheaf)
{
unsigned long flags;
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
+
spin_lock_irqsave(&barn->lock, flags);
list_add(&sheaf->barn_list, &barn->sheaves_empty);
@@ -3186,10 +3192,13 @@ static void barn_put_empty_sheaf(struct node_barn *barn, struct slab_sheaf *shea
spin_unlock_irqrestore(&barn->lock, flags);
}
-static void barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf)
+static void barn_put_full_sheaf(struct kmem_cache *s, struct node_barn *barn,
+ struct slab_sheaf *sheaf)
{
unsigned long flags;
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
+
spin_lock_irqsave(&barn->lock, flags);
list_add(&sheaf->barn_list, &barn->sheaves_full);
@@ -3198,11 +3207,14 @@ static void barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf
spin_unlock_irqrestore(&barn->lock, flags);
}
-static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
+static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct kmem_cache *s,
+ struct node_barn *barn)
{
struct slab_sheaf *sheaf = NULL;
unsigned long flags;
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
+
if (!data_race(barn->nr_full) && !data_race(barn->nr_empty))
return NULL;
@@ -3231,12 +3243,14 @@ static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
* change.
*/
static struct slab_sheaf *
-barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty,
- bool allow_spin)
+barn_replace_empty_sheaf(struct kmem_cache *s, struct node_barn *barn,
+ struct slab_sheaf *empty, bool allow_spin)
{
struct slab_sheaf *full = NULL;
unsigned long flags;
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
+
if (!data_race(barn->nr_full))
return NULL;
@@ -3264,12 +3278,14 @@ barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty,
* barn. But if there are too many full sheaves, reject this with -E2BIG.
*/
static struct slab_sheaf *
-barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full,
- bool allow_spin)
+barn_replace_full_sheaf(struct kmem_cache *s, struct node_barn *barn,
+ struct slab_sheaf *full, bool allow_spin)
{
struct slab_sheaf *empty;
unsigned long flags;
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
+
/* we don't repeat this check under barn->lock as it's not critical */
if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES)
return ERR_PTR(-E2BIG);
@@ -4732,7 +4748,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
allow_spin = gfpflags_allow_spinning(gfp);
- full = barn_replace_empty_sheaf(barn, pcs->main, allow_spin);
+ full = barn_replace_empty_sheaf(s, barn, pcs->main, allow_spin);
if (full) {
stat(s, BARN_GET);
@@ -4747,7 +4763,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
empty = pcs->spare;
pcs->spare = NULL;
} else {
- empty = barn_get_empty_sheaf(barn, true);
+ empty = barn_get_empty_sheaf(s, barn, true);
}
}
@@ -4803,7 +4819,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
if (!pcs->spare)
pcs->spare = pcs->main;
else
- barn_put_empty_sheaf(barn, pcs->main);
+ barn_put_empty_sheaf(s, barn, pcs->main);
pcs->main = full;
return pcs;
}
@@ -4814,12 +4830,12 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
}
if (pcs->spare->size == 0) {
- barn_put_empty_sheaf(barn, pcs->spare);
+ barn_put_empty_sheaf(s, barn, pcs->spare);
pcs->spare = full;
return pcs;
}
- barn_put_full_sheaf(barn, full);
+ barn_put_full_sheaf(s, barn, full);
stat(s, BARN_PUT);
return pcs;
@@ -4936,7 +4952,7 @@ unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, gfp_t gfp, size_t size,
return allocated;
}
- full = barn_replace_empty_sheaf(barn, pcs->main,
+ full = barn_replace_empty_sheaf(s, barn, pcs->main,
gfpflags_allow_spinning(gfp));
if (full) {
@@ -5139,7 +5155,7 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned short size)
stat(s, SHEAF_PREFILL_SLOW);
if (barn)
- sheaf = barn_get_full_or_empty_sheaf(barn);
+ sheaf = barn_get_full_or_empty_sheaf(s, barn);
if (sheaf && sheaf->size)
stat(s, BARN_GET);
else
@@ -5253,7 +5269,7 @@ void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
goto free_sheaf;
}
- barn_put_full_sheaf(barn, sheaf);
+ barn_put_full_sheaf(s, barn, sheaf);
local_unlock(&s->cpu_sheaves->lock);
stat(s, BARN_PUT);
return;
@@ -5820,14 +5836,14 @@ static void __pcs_install_empty_sheaf(struct kmem_cache *s,
* freed to it. Get rid of our empty sheaf.
*/
if (pcs->main->size < pcs->main->capacity) {
- barn_put_empty_sheaf(barn, empty);
+ barn_put_empty_sheaf(s, barn, empty);
return;
}
/* Also unlikely for the same reason */
if (pcs->spare->size < pcs->spare->capacity) {
swap(pcs->main, pcs->spare);
- barn_put_empty_sheaf(barn, empty);
+ barn_put_empty_sheaf(s, barn, empty);
return;
}
@@ -5835,7 +5851,7 @@ static void __pcs_install_empty_sheaf(struct kmem_cache *s,
* We probably failed barn_replace_full_sheaf() due to no empty sheaf
* available there, but we allocated one, so finish the job.
*/
- barn_put_full_sheaf(barn, pcs->main);
+ barn_put_full_sheaf(s, barn, pcs->main);
stat(s, BARN_PUT);
pcs->main = empty;
}
@@ -5874,7 +5890,7 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
put_fail = false;
if (!pcs->spare) {
- empty = barn_get_empty_sheaf(barn, allow_spin);
+ empty = barn_get_empty_sheaf(s, barn, allow_spin);
if (empty) {
pcs->spare = pcs->main;
pcs->main = empty;
@@ -5888,7 +5904,7 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
return pcs;
}
- empty = barn_replace_full_sheaf(barn, pcs->main, allow_spin);
+ empty = barn_replace_full_sheaf(s, barn, pcs->main, allow_spin);
if (!IS_ERR(empty)) {
stat(s, BARN_PUT);
@@ -6058,7 +6074,7 @@ static void rcu_free_sheaf(struct rcu_head *head)
if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) {
stat(s, BARN_PUT);
- barn_put_full_sheaf(barn, sheaf);
+ barn_put_full_sheaf(s, barn, sheaf);
local_unlock(&s->cpu_sheaves->lock);
return;
}
@@ -6068,7 +6084,7 @@ static void rcu_free_sheaf(struct rcu_head *head)
empty:
if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
- barn_put_empty_sheaf(barn, sheaf);
+ barn_put_empty_sheaf(s, barn, sheaf);
local_unlock(&s->cpu_sheaves->lock);
return;
}
@@ -6134,7 +6150,7 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
goto fail;
}
- empty = barn_get_empty_sheaf(barn, true);
+ empty = barn_get_empty_sheaf(s, barn, true);
if (empty) {
pcs->rcu_free = empty;
@@ -6162,7 +6178,7 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
}
if (unlikely(pcs->rcu_free))
- barn_put_empty_sheaf(barn, empty);
+ barn_put_empty_sheaf(s, barn, empty);
else
pcs->rcu_free = empty;
}
@@ -6314,7 +6330,7 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
goto no_empty;
if (!pcs->spare) {
- empty = barn_get_empty_sheaf(barn, true);
+ empty = barn_get_empty_sheaf(s, barn, true);
if (!empty)
goto no_empty;
@@ -6328,7 +6344,7 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
goto do_free;
}
- empty = barn_replace_full_sheaf(barn, pcs->main, true);
+ empty = barn_replace_full_sheaf(s, barn, pcs->main, true);
if (IS_ERR(empty)) {
stat(s, BARN_PUT_FAIL);
goto no_empty;
--
2.43.0
next prev parent reply other threads:[~2026-05-15 16:25 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-15 16:24 [PATCH RFC 0/8] mm/slab: enable runtime sheaves tuning Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 1/8] mm/slab: do not store cache pointer in struct slab_sheaf Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 2/8] mm/slab: change sheaf_capacity type to unsigned short Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 3/8] mm/slab: track capacity per sheaf Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 4/8] mm/slab: allow bootstrap_cache_sheaves() to fail Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 5/8] mm/slab: rework cache_has_sheaves() to check immutable properties only Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 6/8] mm/slab: allow changing sheaf_capacity at runtime Harry Yoo (Oracle)
2026-05-15 16:24 ` Harry Yoo (Oracle) [this message]
2026-05-15 16:24 ` [PATCH RFC 8/8] mm/slab: allow changing max_{full,empty}_sheaves " Harry Yoo (Oracle)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260516-sheaves-tuning-v1-7-221aa3e1d829@kernel.org \
--to=harry@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=cl@gentwo.org \
--cc=hao.li@linux.dev \
--cc=liam@infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=surenb@google.com \
--cc=vbabka@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox