Linux-mm Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "Harry Yoo (Oracle)" <harry@kernel.org>
To: Vlastimil Babka <vbabka@kernel.org>,
	 Andrew Morton <akpm@linux-foundation.org>,
	Hao Li <hao.li@linux.dev>,  Christoph Lameter <cl@gentwo.org>,
	David Rientjes <rientjes@google.com>,
	 Roman Gushchin <roman.gushchin@linux.dev>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	 Suren Baghdasaryan <surenb@google.com>,
	 "Liam R. Howlett" <liam@infradead.org>
Subject: [PATCH RFC 3/8] mm/slab: track capacity per sheaf
Date: Sat, 16 May 2026 01:24:27 +0900	[thread overview]
Message-ID: <20260516-sheaves-tuning-v1-3-221aa3e1d829@kernel.org> (raw)
In-Reply-To: <20260516-sheaves-tuning-v1-0-221aa3e1d829@kernel.org>

Currently, only prefilled sheaves have a capacity field, which is used
to record the requested (possibly oversized) capacity. To allow
changing the sheaf capacity at runtime, track the capacity in each
sheaf, so that checking whether a sheaf is full works correctly even
when the cache's capacity is being changed concurrently.

Signed-off-by: Harry Yoo (Oracle) <harry@kernel.org>
---
 mm/slub.c | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index a1974523bba9..44f36ae32570 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -417,11 +417,9 @@ struct slab_sheaf {
 		struct rcu_head rcu_head;
 		struct list_head barn_list;
 		/* only used for prefilled sheafs */
-		struct {
-			unsigned short capacity;
-			bool pfmemalloc;
-		};
+		bool pfmemalloc;
 	};
+	unsigned short capacity;
 	unsigned short size;
 	int node; /* only used for rcu_sheaf */
 	void *objects[];
@@ -2780,6 +2778,8 @@ static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp,
 	if (unlikely(!sheaf))
 		return NULL;
 
+	sheaf->capacity = capacity;
+
 	stat(s, SHEAF_ALLOC);
 
 	return sheaf;
@@ -2816,7 +2816,7 @@ refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
 static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf,
 			 gfp_t gfp)
 {
-	int to_fill = s->sheaf_capacity - sheaf->size;
+	int to_fill = sheaf->capacity - sheaf->size;
 	int filled;
 
 	if (!to_fill)
@@ -5063,7 +5063,6 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned short size)
 		sheaf = alloc_empty_sheaf(s, gfp);
 
 	if (sheaf) {
-		sheaf->capacity = s->sheaf_capacity;
 		sheaf->pfmemalloc = false;
 
 		if (sheaf->size < size &&
@@ -5688,13 +5687,13 @@ static void __pcs_install_empty_sheaf(struct kmem_cache *s,
 	 * Unlikely because if the main sheaf had space, we would have just
 	 * freed to it. Get rid of our empty sheaf.
 	 */
-	if (pcs->main->size < s->sheaf_capacity) {
+	if (pcs->main->size < pcs->main->capacity) {
 		barn_put_empty_sheaf(barn, empty);
 		return;
 	}
 
 	/* Also unlikely for the same reason */
-	if (pcs->spare->size < s->sheaf_capacity) {
+	if (pcs->spare->size < pcs->spare->capacity) {
 		swap(pcs->main, pcs->spare);
 		barn_put_empty_sheaf(barn, empty);
 		return;
@@ -5752,7 +5751,7 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
 		goto alloc_empty;
 	}
 
-	if (pcs->spare->size < s->sheaf_capacity) {
+	if (pcs->spare->size < pcs->spare->capacity) {
 		swap(pcs->main, pcs->spare);
 		return pcs;
 	}
@@ -5819,7 +5818,7 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
 	 * but in case we got preempted or migrated, we need to
 	 * check again
 	 */
-	if (pcs->main->size == s->sheaf_capacity)
+	if (pcs->main->size == pcs->main->capacity)
 		goto restart;
 
 	return pcs;
@@ -5850,7 +5849,7 @@ bool free_to_pcs(struct kmem_cache *s, void *object, bool allow_spin)
 
 	pcs = this_cpu_ptr(s->cpu_sheaves);
 
-	if (unlikely(pcs->main->size == s->sheaf_capacity)) {
+	if (unlikely(pcs->main->size == pcs->main->capacity)) {
 
 		pcs = __pcs_replace_full_main(s, pcs, allow_spin);
 		if (unlikely(!pcs))
@@ -6015,7 +6014,7 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
 	 */
 	rcu_sheaf->objects[rcu_sheaf->size++] = obj;
 
-	if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
+	if (likely(rcu_sheaf->size < rcu_sheaf->capacity)) {
 		rcu_sheaf = NULL;
 	} else {
 		pcs->rcu_free = NULL;
@@ -6139,7 +6138,7 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
 
 	pcs = this_cpu_ptr(s->cpu_sheaves);
 
-	if (likely(pcs->main->size < s->sheaf_capacity))
+	if (likely(pcs->main->size < pcs->main->capacity))
 		goto do_free;
 
 	barn = get_barn(s);
@@ -6156,7 +6155,7 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
 		goto do_free;
 	}
 
-	if (pcs->spare->size < s->sheaf_capacity) {
+	if (pcs->spare->size < pcs->spare->capacity) {
 		swap(pcs->main, pcs->spare);
 		goto do_free;
 	}
@@ -6172,7 +6171,7 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
 
 do_free:
 	main = pcs->main;
-	batch = min_t(size_t, size, s->sheaf_capacity - main->size);
+	batch = min_t(size_t, size, main->capacity - main->size);
 
 	memcpy(main->objects + main->size, p, batch * sizeof(void *));
 	main->size += batch;
@@ -7613,7 +7612,7 @@ static int init_percpu_sheaves(struct kmem_cache *s)
 
 		/*
 		 * Bootstrap sheaf has zero size so fast-path allocation fails.
-		 * It has also size == s->sheaf_capacity, so fast-path free
+		 * It has also size == sheaf->capacity, so fast-path free
 		 * fails. In the slow paths we recognize the situation by
 		 * checking s->sheaf_capacity. This allows fast paths to assume
 		 * s->cpu_sheaves and pcs->main always exists and are valid.

-- 
2.43.0



  parent reply	other threads:[~2026-05-15 16:24 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-15 16:24 [PATCH RFC 0/8] mm/slab: enable runtime sheaves tuning Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 1/8] mm/slab: do not store cache pointer in struct slab_sheaf Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 2/8] mm/slab: change sheaf_capacity type to unsigned short Harry Yoo (Oracle)
2026-05-15 16:24 ` Harry Yoo (Oracle) [this message]
2026-05-15 16:24 ` [PATCH RFC 4/8] mm/slab: allow bootstrap_cache_sheaves() to fail Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 5/8] mm/slab: rework cache_has_sheaves() to check immutable properties only Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 6/8] mm/slab: allow changing sheaf_capacity at runtime Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 7/8] mm/slab: add pcs->lock lockdep assert when accessing the barn Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 8/8] mm/slab: allow changing max_{full,empty}_sheaves at runtime Harry Yoo (Oracle)

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260516-sheaves-tuning-v1-3-221aa3e1d829@kernel.org \
    --to=harry@kernel.org \
    --cc=akpm@linux-foundation.org \
    --cc=cl@gentwo.org \
    --cc=hao.li@linux.dev \
    --cc=liam@infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=rientjes@google.com \
    --cc=roman.gushchin@linux.dev \
    --cc=surenb@google.com \
    --cc=vbabka@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox