From: "Harry Yoo (Oracle)" <harry@kernel.org>
To: Vlastimil Babka <vbabka@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
Hao Li <hao.li@linux.dev>, Christoph Lameter <cl@gentwo.org>,
David Rientjes <rientjes@google.com>,
Roman Gushchin <roman.gushchin@linux.dev>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Suren Baghdasaryan <surenb@google.com>,
"Liam R. Howlett" <liam@infradead.org>
Subject: [PATCH RFC 2/8] mm/slab: change sheaf_capacity type to unsigned short
Date: Sat, 16 May 2026 01:24:26 +0900 [thread overview]
Message-ID: <20260516-sheaves-tuning-v1-2-221aa3e1d829@kernel.org> (raw)
In-Reply-To: <20260516-sheaves-tuning-v1-0-221aa3e1d829@kernel.org>
Change struct kmem_cache.sheaf_capacity and the matching
kmem_cache_args field from unsigned int to unsigned short, so that
we can add a new field later without growing the struct size.
unsigned short is a reasonable size for any realistic configuration.
Signed-off-by: Harry Yoo (Oracle) <harry@kernel.org>
---
include/linux/slab.h | 8 ++++----
mm/slab.h | 2 +-
mm/slub.c | 34 +++++++++++++++++-----------------
tools/include/linux/slab.h | 14 +++++++-------
tools/testing/shared/linux.c | 4 ++--
5 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2b5ab488e96b..6f023f04763a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -371,7 +371,7 @@ struct kmem_cache_args {
*
* %0 means no sheaves will be created.
*/
- unsigned int sheaf_capacity;
+ unsigned short sheaf_capacity;
};
struct kmem_cache *__kmem_cache_create_args(const char *name,
@@ -828,10 +828,10 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
struct slab_sheaf *
-kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned short size);
int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
- struct slab_sheaf **sheafp, unsigned int size);
+ struct slab_sheaf **sheafp, unsigned short size);
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
struct slab_sheaf *sheaf);
@@ -841,7 +841,7 @@ void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
#define kmem_cache_alloc_from_sheaf(...) \
alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
-unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
+unsigned short kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
/*
* These macros allow declaring a kmem_buckets * parameter alongside size, which
diff --git a/mm/slab.h b/mm/slab.h
index bf2f87acf5e3..dfbe73011cb8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -204,7 +204,7 @@ struct kmem_cache {
unsigned int object_size; /* Object size without metadata */
struct reciprocal_value reciprocal_size;
unsigned int offset; /* Free pointer offset */
- unsigned int sheaf_capacity;
+ unsigned short sheaf_capacity;
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
diff --git a/mm/slub.c b/mm/slub.c
index 75281eb802de..a1974523bba9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -418,11 +418,11 @@ struct slab_sheaf {
struct list_head barn_list;
/* only used for prefilled sheafs */
struct {
- unsigned int capacity;
+ unsigned short capacity;
bool pfmemalloc;
};
};
- unsigned int size;
+ unsigned short size;
int node; /* only used for rcu_sheaf */
void *objects[];
};
@@ -2756,7 +2756,7 @@ static inline void *setup_object(struct kmem_cache *s, void *object)
}
static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp,
- unsigned int capacity)
+ unsigned short capacity)
{
struct slab_sheaf *sheaf;
size_t sheaf_size;
@@ -2854,10 +2854,10 @@ static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
*
* Returns how many objects are remaining to be flushed
*/
-static unsigned int __sheaf_flush_main_batch(struct kmem_cache *s)
+static unsigned short __sheaf_flush_main_batch(struct kmem_cache *s)
{
struct slub_percpu_sheaves *pcs;
- unsigned int batch, remaining;
+ unsigned short batch, remaining;
void *objects[PCS_BATCH_MAX];
struct slab_sheaf *sheaf;
@@ -2884,7 +2884,7 @@ static unsigned int __sheaf_flush_main_batch(struct kmem_cache *s)
static void sheaf_flush_main(struct kmem_cache *s)
{
- unsigned int remaining;
+ unsigned short remaining;
do {
local_lock(&s->cpu_sheaves->lock);
@@ -2899,7 +2899,7 @@ static void sheaf_flush_main(struct kmem_cache *s)
*/
static bool sheaf_try_flush_main(struct kmem_cache *s)
{
- unsigned int remaining;
+ unsigned short remaining;
bool ret = false;
do {
@@ -4849,7 +4849,7 @@ unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, gfp_t gfp, size_t size,
do_alloc:
main = pcs->main;
- batch = min(size, main->size);
+ batch = min_t(size_t, size, main->size);
main->size -= batch;
memcpy(p, main->objects + main->size, batch * sizeof(void *));
@@ -5004,7 +5004,7 @@ static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
* return NULL if sheaf allocation or prefilling failed
*/
struct slab_sheaf *
-kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned short size)
{
struct slub_percpu_sheaves *pcs;
struct slab_sheaf *sheaf = NULL;
@@ -5146,7 +5146,7 @@ void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
* In practice we always refill to full sheaf's capacity.
*/
int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
- struct slab_sheaf **sheafp, unsigned int size)
+ struct slab_sheaf **sheafp, unsigned short size)
{
struct slab_sheaf *sheaf;
@@ -5225,7 +5225,7 @@ kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
return ret;
}
-unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
+unsigned short kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
{
return sheaf->size;
}
@@ -6172,7 +6172,7 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
do_free:
main = pcs->main;
- batch = min(size, s->sheaf_capacity - main->size);
+ batch = min_t(size_t, size, s->sheaf_capacity - main->size);
memcpy(main->objects + main->size, p, batch * sizeof(void *));
main->size += batch;
@@ -7759,11 +7759,11 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
return 1;
}
-static unsigned int calculate_sheaf_capacity(struct kmem_cache *s,
- struct kmem_cache_args *args)
+static unsigned short calculate_sheaf_capacity(struct kmem_cache *s,
+ struct kmem_cache_args *args)
{
- unsigned int capacity;
+ unsigned short capacity;
size_t size;
@@ -8466,7 +8466,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
static void __init bootstrap_cache_sheaves(struct kmem_cache *s)
{
struct kmem_cache_args empty_args = {};
- unsigned int capacity;
+ unsigned short capacity;
bool failed = false;
int node, cpu;
@@ -9091,7 +9091,7 @@ SLAB_ATTR_RO(order);
static ssize_t sheaf_capacity_show(struct kmem_cache *s, char *buf)
{
- return sysfs_emit(buf, "%u\n", s->sheaf_capacity);
+ return sysfs_emit(buf, "%hu\n", s->sheaf_capacity);
}
SLAB_ATTR_RO(sheaf_capacity);
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index 6d8e9413d5a4..76d0b9da6cfe 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -47,7 +47,7 @@ struct kmem_cache {
pthread_mutex_t lock;
unsigned int size;
unsigned int align;
- unsigned int sheaf_capacity;
+ unsigned short sheaf_capacity;
int nr_objs;
void *objs;
void (*ctor)(void *);
@@ -70,7 +70,7 @@ struct kmem_cache_args {
/**
* @sheaf_capacity: The maximum size of the sheaf.
*/
- unsigned int sheaf_capacity;
+ unsigned short sheaf_capacity;
/**
* @useroffset: Usercopy region offset.
*
@@ -127,10 +127,10 @@ struct slab_sheaf {
union {
struct list_head barn_list;
/* only used for prefilled sheafs */
- unsigned int capacity;
+ unsigned short capacity;
};
struct kmem_cache *cache;
- unsigned int size;
+ unsigned short size;
int node; /* only used for rcu_sheaf */
void *objects[];
};
@@ -186,7 +186,7 @@ void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
void **list);
struct slab_sheaf *
-kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned short size);
void *
kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
@@ -195,9 +195,9 @@ kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
struct slab_sheaf *sheaf);
int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
- struct slab_sheaf **sheafp, unsigned int size);
+ struct slab_sheaf **sheafp, unsigned short size);
-static inline unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
+static inline unsigned short kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
{
return sheaf->size;
}
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index 8c7257155958..2da3a6617d87 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -252,7 +252,7 @@ __kmem_cache_create_args(const char *name, unsigned int size,
}
struct slab_sheaf *
-kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned short size)
{
struct slab_sheaf *sheaf;
unsigned int capacity;
@@ -281,7 +281,7 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
}
int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
- struct slab_sheaf **sheafp, unsigned int size)
+ struct slab_sheaf **sheafp, unsigned short size)
{
struct slab_sheaf *sheaf = *sheafp;
int refill;
--
2.43.0
next prev parent reply other threads:[~2026-05-15 16:24 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-15 16:24 [PATCH RFC 0/8] mm/slab: enable runtime sheaves tuning Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 1/8] mm/slab: do not store cache pointer in struct slab_sheaf Harry Yoo (Oracle)
2026-05-15 16:24 ` Harry Yoo (Oracle) [this message]
2026-05-15 16:24 ` [PATCH RFC 3/8] mm/slab: track capacity per sheaf Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 4/8] mm/slab: allow bootstrap_cache_sheaves() to fail Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 5/8] mm/slab: rework cache_has_sheaves() to check immutable properties only Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 6/8] mm/slab: allow changing sheaf_capacity at runtime Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 7/8] mm/slab: add pcs->lock lockdep assert when accessing the barn Harry Yoo (Oracle)
2026-05-15 16:24 ` [PATCH RFC 8/8] mm/slab: allow changing max_{full,empty}_sheaves at runtime Harry Yoo (Oracle)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260516-sheaves-tuning-v1-2-221aa3e1d829@kernel.org \
--to=harry@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=cl@gentwo.org \
--cc=hao.li@linux.dev \
--cc=liam@infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=surenb@google.com \
--cc=vbabka@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox