* [PATCH 1/2] mm/sl[au]b: use own bulk free function when bulk alloc failed
From: Hyeonggon Yoo @ 2022-06-14 15:26 UTC
To: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
Andrew Morton, Roman Gushchin, Hyeonggon Yoo, Matthew Wilcox
Cc: linux-mm
There is no benefit to calling the generic bulk free function when
kmem_cache_alloc_bulk() fails; use the caches' own kmem_cache_free_bulk()
instead of the generic one.

Note that when kmem_cache_alloc_bulk() fails to allocate even the first
object in SLUB, size is zero. So allow passing size == 0 to
kmem_cache_free_bulk(), as SLAB already does.
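
To illustrate the caller-visible contract, here is a minimal userspace
sketch, not kernel code: sketch_alloc_bulk() and sketch_free_bulk() are
hypothetical stand-ins for the kmem_cache_*_bulk() API. It shows why the
free path must tolerate size == 0 when the very first allocation fails.

#include <stdlib.h>

/* Hypothetical stand-in for kmem_cache_free_bulk(); must tolerate nr == 0,
 * which is what SLUB passes when even the first allocation failed. */
static void sketch_free_bulk(size_t nr, void **p)
{
	size_t i;

	if (!nr)
		return;

	for (i = 0; i < nr; i++)
		free(p[i]);
}

/* Hypothetical stand-in for kmem_cache_alloc_bulk(); on failure it frees
 * the i objects allocated so far (possibly zero) and returns 0. */
static int sketch_alloc_bulk(size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		p[i] = malloc(32);
		if (!p[i]) {
			sketch_free_bulk(i, p);	/* i == 0 if the first alloc failed */
			return 0;
		}
	}

	return (int)nr;
}

int main(void)
{
	void *objs[4];

	if (sketch_alloc_bulk(4, objs))
		sketch_free_bulk(4, objs);

	return 0;
}
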
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
mm/slab.c | 2 +-
mm/slub.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f8cd00f4ba13..ede27ce84326 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3553,7 +3553,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
local_irq_enable();
cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
slab_post_alloc_hook(s, objcg, flags, i, p, false);
- __kmem_cache_free_bulk(s, i, p);
+ kmem_cache_free_bulk(s, i, p);
return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
diff --git a/mm/slub.c b/mm/slub.c
index b1281b8654bd..ca9d42e7b579 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3669,7 +3669,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
/* Note that interrupts must be enabled when calling this function. */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
- if (WARN_ON(!size))
+ if (!size)
return;
memcg_slab_free_hook(s, p, size);
@@ -3760,7 +3760,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
error:
slub_put_cpu_ptr(s->cpu_slab);
slab_post_alloc_hook(s, objcg, flags, i, p, false);
- __kmem_cache_free_bulk(s, i, p);
+ kmem_cache_free_bulk(s, i, p);
return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
--
2.32.0
* [PATCH 2/2] mm/slab_common: move generic bulk alloc/free functions to SLOB
From: Hyeonggon Yoo @ 2022-06-14 15:26 UTC
To: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
Andrew Morton, Roman Gushchin, Hyeonggon Yoo, Matthew Wilcox
Cc: linux-mm
Now that only SLOB uses __kmem_cache_{alloc,free}_bulk(), move these
functions into SLOB. No functional change intended.
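
For reference, the distinctive piece of the generic bulk free that moves
into SLOB is its per-object dispatch, falling back to kfree() when no
cache is supplied. Below is a minimal userspace mock of that dispatch;
struct mock_cache, mock_cache_free() and mock_kfree() are made-up
stand-ins, not kernel APIs.

#include <stdlib.h>

struct mock_cache { size_t size; };	/* stand-in for struct kmem_cache */

static void mock_cache_free(struct mock_cache *s, void *x)
{
	(void)s;	/* a real cache would be consulted here */
	free(x);
}

static void mock_kfree(void *x)
{
	free(x);
}

/* Mirrors the generic bulk free now living in SLOB: per-object dispatch,
 * using the kfree()-style path when no cache is given. */
static void mock_free_bulk(struct mock_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			mock_cache_free(s, p[i]);
		else
			mock_kfree(p[i]);
	}
}

int main(void)
{
	void *p[2] = { malloc(8), malloc(8) };

	mock_free_bulk(NULL, 2, p);	/* exercises the kfree() fallback */
	return 0;
}
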
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
mm/slab.h | 9 ---------
mm/slab_common.c | 27 ---------------------------
mm/slob.c | 25 +++++++++++++++++++++----
3 files changed, 21 insertions(+), 40 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index db9fb5c8dae7..a6837605e4cc 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -380,15 +380,6 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos);
-/*
- * Generic implementation of bulk operations
- * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the object listed
- * may be allocated or freed using these operations.
- */
-void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
-
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 77c3adf40e50..a6787fd39aa4 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -105,33 +105,6 @@ static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
}
#endif
-void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
-{
- size_t i;
-
- for (i = 0; i < nr; i++) {
- if (s)
- kmem_cache_free(s, p[i]);
- else
- kfree(p[i]);
- }
-}
-
-int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
- void **p)
-{
- size_t i;
-
- for (i = 0; i < nr; i++) {
- void *x = p[i] = kmem_cache_alloc(s, flags);
- if (!x) {
- __kmem_cache_free_bulk(s, i, p);
- return 0;
- }
- }
- return i;
-}
-
/*
* Figure out what the alignment of the objects will be given a set of
* flags, a user specified alignment and the size of the objects.
diff --git a/mm/slob.c b/mm/slob.c
index f47811f09aca..f8babd0806ee 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -692,16 +692,33 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
}
EXPORT_SYMBOL(kmem_cache_free);
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
- __kmem_cache_free_bulk(s, size, p);
+ size_t i;
+
+ for (i = 0; i < nr; i++) {
+ if (s)
+ kmem_cache_free(s, p[i]);
+ else
+ kfree(p[i]);
+ }
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
void **p)
{
- return __kmem_cache_alloc_bulk(s, flags, size, p);
+ size_t i;
+
+ for (i = 0; i < nr; i++) {
+ void *x = p[i] = kmem_cache_alloc(s, flags);
+
+ if (!x) {
+ kmem_cache_free_bulk(s, i, p);
+ return 0;
+ }
+ }
+ return i;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
--
2.32.0
* Re: [PATCH 0/2] slab bulk alloc/free cleanups
From: Vlastimil Babka (SUSE) @ 2022-07-20 14:16 UTC
To: Hyeonggon Yoo, Christoph Lameter, Pekka Enberg, David Rientjes,
Joonsoo Kim, Andrew Morton, Roman Gushchin, Matthew Wilcox
Cc: linux-mm
On 6/14/22 17:26, Hyeonggon Yoo wrote:
> This makes SLUB and SLAB use their own kmem_cache_free_bulk()
> (instead of __kmem_cache_free_bulk()) when kmem_cache_alloc_bulk() fails.
> It then moves the generic __kmem_cache_{alloc,free}_bulk() implementations
> to SLOB.
>
> This was suggested by Matthew Wilcox.
>
> Hyeonggon Yoo (2):
> mm/sl[au]b: use own bulk free function when bulk alloc failed
> mm/slab_common: move generic bulk alloc/free functions to SLOB
Thanks, pushed to slab/for-5.20/bulk_cleanup
> mm/slab.c | 2 +-
> mm/slab.h | 9 ---------
> mm/slab_common.c | 27 ---------------------------
> mm/slob.c | 25 +++++++++++++++++++++----
> mm/slub.c | 4 ++--
> 5 files changed, 24 insertions(+), 43 deletions(-)
>