* [RFC PATCH] mm/slab: save memory by allocating slabobj_ext array from leftover
@ 2025-06-13  6:33 Harry Yoo
From: Harry Yoo @ 2025-06-13  6:33 UTC (permalink / raw)
  To: Vlastimil Babka, David Rientjes, Christoph Lameter, Andrew Morton
  Cc: Johannes Weiner, Michal Hocko, Roman Gushchin, Shakeel Butt,
	Muchun Song, Suren Baghdasaryan, Kent Overstreet, Andrey Ryabinin,
	Alexander Potapenko, Andrey Konovalov, Dmitry Vyukov,
	Vincenzo Frascino, linux-mm, Harry Yoo

The leftover space in a slab is always smaller than s->size, and kmem
caches for large objects whose size is not a power of two tend to leave a
relatively large amount of leftover space per slab. In some cases, that
leftover is larger than the slabobj_ext array for the slab.

An excellent example of such a cache is ext4_inode_cache. On my system,
the object size is 1144 bytes, with a preferred order of 3, 28 objects per
slab, and 736 bytes of leftover space per slab.

Since the size of the slabobj_ext array is only 224 bytes (w/o mem
profiling) or 448 bytes (w/ mem profiling) per slab, the entire array
fits within the leftover space.
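
For reference, the back-of-the-envelope arithmetic for ext4_inode_cache
works out as follows (assuming 4 KiB pages; the per-entry sizes of
struct slabobj_ext follow from the two configs above):

  slab size         = PAGE_SIZE << 3                        = 32768 bytes
  used by objects   = 28 * 1144                             = 32032 bytes
  leftover          = 32768 - 32032                         =   736 bytes

  slabobj_ext array = 28 * sizeof(struct slabobj_ext)
                    = 28 * 8 bytes  (MEMCG only)            =   224 bytes
                    = 28 * 16 bytes (+ MEM_ALLOC_PROFILING) =   448 bytes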

Allocate the slabobj_ext array from this unused space instead of using
kcalloc(), when the leftover is large enough.
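
With this change, the slab layout (as computed by obj_exts_offset() in the
patch below) roughly becomes:

  | red_left_pad | objects: slab->objects * s->size | align pad |
  | slabobj_ext[slab->objects] | remaining leftover |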

[ MEMCG=y, MEM_ALLOC_PROFILING=y ]

Before patch (run updatedb):
  Slab:            5815196 kB
  SReclaimable:    5042824 kB
  SUnreclaim:       772372 kB

After patch (run updatedb):
  Slab:            5748664 kB
  SReclaimable:    5041608 kB
  SUnreclaim:       707084 kB (-63.75 MiB)

[ MEMCG=y, MEM_ALLOC_PROFILING=n ]

Before patch (run updatedb):
  Slab:            5637764 kB
  SReclaimable:    5042428 kB
  SUnreclaim:       595284 kB

After patch (run updatedb):
  Slab:            5598992 kB
  SReclaimable:    5042248 kB
  SUnreclaim:       560396 kB (-34.07 MiB)

This saves anywhere from hundreds of KiB up to several tens of MiB of
memory on my machine, depending on the config and on slab memory usage.

Enjoy the memory savings!

Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
KASAN folks: Should we also poison the array before freeing the slab?
If so, which API would be appropriate to use?

 mm/slub.c | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 87 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index cf3637324243..20f0f76f0c65 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -785,6 +785,49 @@ static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
 	return *(unsigned int *)p;
 }
 
+#ifdef CONFIG_SLAB_OBJ_EXT
+static inline unsigned int obj_exts_size(struct slab *slab)
+{
+	return sizeof(struct slabobj_ext) * slab->objects;
+}
+
+static unsigned long obj_exts_offset(struct kmem_cache *s,
+				    struct slab *slab)
+{
+	unsigned long objext_offset;
+
+	objext_offset = s->red_left_pad + s->size * slab->objects;
+	objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
+	return objext_offset;
+}
+
+static bool can_alloc_obj_exts_from_leftover(struct kmem_cache *s,
+					     struct slab *slab)
+{
+	unsigned long objext_offset = obj_exts_offset(s, slab);
+	unsigned long objext_size = obj_exts_size(slab);
+
+	return objext_offset + objext_size <= slab_size(slab);
+}
+#else
+static inline unsigned int obj_exts_size(struct slab *slab)
+{
+	return 0;
+}
+
+static unsigned long obj_exts_offset(struct kmem_cache *s,
+				    struct slab *slab)
+{
+	return 0;
+}
+
+static inline bool can_alloc_obj_exts_from_leftover(struct kmem_cache *s,
+						    struct slab *slab)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 static DEFINE_SPINLOCK(object_map_lock);
@@ -1307,7 +1350,15 @@ slab_pad_check(struct kmem_cache *s, struct slab *slab)
 	start = slab_address(slab);
 	length = slab_size(slab);
 	end = start + length;
-	remainder = length % s->size;
+
+	if (can_alloc_obj_exts_from_leftover(s, slab)) {
+		remainder = length;
+		remainder -= obj_exts_offset(s, slab);
+		remainder -= obj_exts_size(slab);
+	} else {
+		remainder = length % s->size;
+	}
+
 	if (!remainder)
 		return;
 
@@ -2049,6 +2100,21 @@ static noinline void free_slab_obj_exts(struct slab *slab)
 	slab->obj_exts = 0;
 }
 
+static void try_to_alloc_obj_exts_from_leftover(struct kmem_cache *s,
+						struct slab *slab)
+{
+	if (can_alloc_obj_exts_from_leftover(s, slab)) {
+		void *addr = slab_address(slab) + obj_exts_offset(s, slab);
+
+		slab->obj_exts = (unsigned long)addr;
+		kasan_unpoison_range(addr, obj_exts_size(slab));
+		memset(addr, 0, obj_exts_size(slab));
+#ifdef CONFIG_MEMCG
+		slab->obj_exts |= MEMCG_DATA_OBJEXTS;
+#endif
+	}
+}
+
 static inline bool need_slab_obj_ext(void)
 {
 	if (mem_alloc_profiling_enabled())
@@ -2077,6 +2143,11 @@ static inline void free_slab_obj_exts(struct slab *slab)
 {
 }
 
+static inline void try_to_alloc_obj_exts_from_leftover(struct kmem_cache *s,
+						       struct slab *slab)
+{
+}
+
 static inline bool need_slab_obj_ext(void)
 {
 	return false;
@@ -2592,7 +2663,9 @@ static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
 static __always_inline void account_slab(struct slab *slab, int order,
 					 struct kmem_cache *s, gfp_t gfp)
 {
-	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
+	if (memcg_kmem_online() &&
+			(s->flags & SLAB_ACCOUNT) &&
+			!slab_obj_exts(slab))
 		alloc_slab_obj_exts(slab, s, gfp, true);
 
 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
@@ -2602,11 +2675,16 @@ static __always_inline void account_slab(struct slab *slab, int order,
 static __always_inline void unaccount_slab(struct slab *slab, int order,
 					   struct kmem_cache *s)
 {
-	if (memcg_kmem_online() || need_slab_obj_ext())
-		free_slab_obj_exts(slab);
-
 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    -(PAGE_SIZE << order));
+
+	if (can_alloc_obj_exts_from_leftover(s, slab)) {
+		slab->obj_exts = 0;
+		return;
+	}
+
+	if (memcg_kmem_online() || need_slab_obj_ext())
+		free_slab_obj_exts(slab);
 }
 
 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -2647,9 +2725,6 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	slab->objects = oo_objects(oo);
 	slab->inuse = 0;
 	slab->frozen = 0;
-	init_slab_obj_exts(slab);
-
-	account_slab(slab, oo_order(oo), s, flags);
 
 	slab->slab_cache = s;
 
@@ -2658,6 +2733,10 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	start = slab_address(slab);
 
 	setup_slab_debug(s, slab, start);
+	init_slab_obj_exts(slab);
+	/* Initialize the slabobj_ext array after poisoning the slab */
+	try_to_alloc_obj_exts_from_leftover(s, slab);
+	account_slab(slab, oo_order(oo), s, flags);
 
 	shuffle = shuffle_freelist(s, slab);
 
-- 
2.43.0


