* [PATCH 1/1] mm, slab: move prepare_slab_obj_exts_hook under CONFIG_MEM_ALLOC_PROFILING
From: Suren Baghdasaryan @ 2024-07-11 17:02 UTC
To: akpm
Cc: vbabka, sxwjean, xiongwei.song, willy, nathan, cl, penberg,
rientjes, iamjoonsoo.kim, roman.gushchin, 42.hyeyoo, linux-mm,
linux-kernel, Suren Baghdasaryan, kernel test robot
The only place prepare_slab_obj_exts_hook() is currently being used is
from alloc_tagging_slab_alloc_hook() when CONFIG_MEM_ALLOC_PROFILING=y.
Move its definition under CONFIG_MEM_ALLOC_PROFILING to prevent unused
function warning for CONFIG_SLAB_OBJ_EXT=n case.
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202407050845.zNONqauD-lkp@intel.com/
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
---
Applied over slab/for-next because of the refactoring [1] in that branch.
[1] 302a3ea38aec ("mm, slab: move allocation tagging code in the alloc path into a hook")
mm/slub.c | 48 +++++++++++++++++++++---------------------------
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ce39544acf7c..829a1f08e8a2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2027,6 +2027,27 @@ static inline bool need_slab_obj_ext(void)
return false;
}
+#else /* CONFIG_SLAB_OBJ_EXT */
+
+static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+ gfp_t gfp, bool new_slab)
+{
+ return 0;
+}
+
+static inline void free_slab_obj_exts(struct slab *slab)
+{
+}
+
+static inline bool need_slab_obj_ext(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SLAB_OBJ_EXT */
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
static inline struct slabobj_ext *
prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
{
@@ -2051,33 +2072,6 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
return slab_obj_exts(slab) + obj_to_index(s, slab, p);
}
-#else /* CONFIG_SLAB_OBJ_EXT */
-
-static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
- gfp_t gfp, bool new_slab)
-{
- return 0;
-}
-
-static inline void free_slab_obj_exts(struct slab *slab)
-{
-}
-
-static inline bool need_slab_obj_ext(void)
-{
- return false;
-}
-
-static inline struct slabobj_ext *
-prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
-{
- return NULL;
-}
-
-#endif /* CONFIG_SLAB_OBJ_EXT */
-
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-
static inline void
alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
{
base-commit: fe4e761669ab4d8b388fdb6e9dbe9a110eed8009
--
2.45.2.993.g49e7a77208-goog
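To make the failure mode the commit message describes concrete, here is a minimal, self-contained sketch; it is not the actual mm/slub.c code. CONFIG_OBJ_EXT and CONFIG_PROFILING stand in for CONFIG_SLAB_OBJ_EXT and CONFIG_MEM_ALLOC_PROFILING, prepare_hook()/alloc_tagging_hook() are placeholder names, and plain `static` is used instead of the kernel's `static inline` so the warning reproduces with an ordinary -Wunused-function build (gcc or clang) instead of a full kbuild W=1 run.

/*
 * unused_stub.c - standalone illustration only; the config and function
 * names are placeholders, not the real kernel symbols.
 *
 *   cc -Wunused-function -DBEFORE_PATCH -c unused_stub.c   -> warns
 *   cc -Wunused-function -c unused_stub.c                  -> clean
 */
#include <stddef.h>

#ifdef BEFORE_PATCH
/* Old layout: the hook always has a definition ... */
# ifdef CONFIG_OBJ_EXT
static void *prepare_hook(void) { return (void *)1; }	/* "real" version */
# else
/* ... but its only caller (below) is compiled only when CONFIG_PROFILING
 * is set, so with CONFIG_OBJ_EXT=n and CONFIG_PROFILING=n this stub is
 * never referenced and the compiler reports -Wunused-function. */
static void *prepare_hook(void) { return NULL; }
# endif
#else
/* New layout: the hook is defined only next to its single caller. */
# ifdef CONFIG_PROFILING
static void *prepare_hook(void) { return NULL; }
# endif
#endif /* BEFORE_PATCH */

#ifdef CONFIG_PROFILING
void alloc_tagging_hook(void)
{
	(void)prepare_hook();	/* the only call site */
}
#endif

With -DBEFORE_PATCH and neither config macro defined, the stub is compiled but never called, which mirrors the CONFIG_SLAB_OBJ_EXT=n warning the patch silences; in the post-patch layout the hook simply does not exist unless its caller does.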
* Re: [PATCH 1/1] mm, slab: move prepare_slab_obj_exts_hook under CONFIG_MEM_ALLOC_PROFILING
From: Vlastimil Babka @ 2024-07-11 17:21 UTC
To: Suren Baghdasaryan, akpm
Cc: sxwjean, xiongwei.song, willy, nathan, cl, penberg, rientjes,
iamjoonsoo.kim, roman.gushchin, 42.hyeyoo, linux-mm, linux-kernel,
kernel test robot
On 7/11/24 7:02 PM, Suren Baghdasaryan wrote:
> The only place prepare_slab_obj_exts_hook() is currently being used is
> from alloc_tagging_slab_alloc_hook() when CONFIG_MEM_ALLOC_PROFILING=y.
> Move its definition under CONFIG_MEM_ALLOC_PROFILING to prevent unused
> function warning for CONFIG_SLAB_OBJ_EXT=n case.
>
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202407050845.zNONqauD-lkp@intel.com/
Hm, so that's a mainline report, but being a clang-only W=1 warning I think
it can just wait for the merge window; there's no need for a last-minute 6.10 fix.
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> ---
> Applied over slab/for-next because of the refactoring [1] in that branch.
And thus took it there. Thanks.
>
> [1] 302a3ea38aec ("mm, slab: move allocation tagging code in the alloc path into a hook")
>
> mm/slub.c | 48 +++++++++++++++++++++---------------------------
> 1 file changed, 21 insertions(+), 27 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index ce39544acf7c..829a1f08e8a2 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2027,6 +2027,27 @@ static inline bool need_slab_obj_ext(void)
> return false;
> }
>
> +#else /* CONFIG_SLAB_OBJ_EXT */
> +
> +static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> + gfp_t gfp, bool new_slab)
> +{
> + return 0;
> +}
> +
> +static inline void free_slab_obj_exts(struct slab *slab)
> +{
> +}
> +
> +static inline bool need_slab_obj_ext(void)
> +{
> + return false;
> +}
> +
> +#endif /* CONFIG_SLAB_OBJ_EXT */
> +
> +#ifdef CONFIG_MEM_ALLOC_PROFILING
> +
> static inline struct slabobj_ext *
> prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> {
> @@ -2051,33 +2072,6 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> }
>
> -#else /* CONFIG_SLAB_OBJ_EXT */
> -
> -static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> - gfp_t gfp, bool new_slab)
> -{
> - return 0;
> -}
> -
> -static inline void free_slab_obj_exts(struct slab *slab)
> -{
> -}
> -
> -static inline bool need_slab_obj_ext(void)
> -{
> - return false;
> -}
> -
> -static inline struct slabobj_ext *
> -prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> -{
> - return NULL;
> -}
> -
> -#endif /* CONFIG_SLAB_OBJ_EXT */
> -
> -#ifdef CONFIG_MEM_ALLOC_PROFILING
> -
> static inline void
> alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
> {
>
> base-commit: fe4e761669ab4d8b388fdb6e9dbe9a110eed8009
* Re: [PATCH 1/1] mm, slab: move prepare_slab_obj_exts_hook under CONFIG_MEM_ALLOC_PROFILING
From: Suren Baghdasaryan @ 2024-07-11 18:33 UTC
To: Vlastimil Babka
Cc: akpm, sxwjean, xiongwei.song, willy, nathan, cl, penberg,
rientjes, iamjoonsoo.kim, roman.gushchin, 42.hyeyoo, linux-mm,
linux-kernel, kernel test robot
On Thu, Jul 11, 2024 at 10:21 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 7/11/24 7:02 PM, Suren Baghdasaryan wrote:
> > The only place prepare_slab_obj_exts_hook() is currently being used is
> > from alloc_tagging_slab_alloc_hook() when CONFIG_MEM_ALLOC_PROFILING=y.
> > Move its definition under CONFIG_MEM_ALLOC_PROFILING to prevent unused
> > function warning for CONFIG_SLAB_OBJ_EXT=n case.
> >
> > Reported-by: kernel test robot <lkp@intel.com>
> > Closes: https://lore.kernel.org/oe-kbuild-all/202407050845.zNONqauD-lkp@intel.com/
>
> Hm so that's a mainline report, but being a clang-only W=1 warning I think
> it can just wait for the merge window and no need for last-minute 6.10 fix.
Correct. I was wrong in thinking that [1] caused this. The warning was
there even before, but adding this into slab/for-next simply avoids
merge conflicts.
>
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > Cc: Vlastimil Babka <vbabka@suse.cz>
> > ---
> > Applied over slab/for-next because of the refactoring [1] in that branch.
>
> And thus took it there. thanks.
Thanks!
>
> >
> > [1] 302a3ea38aec ("mm, slab: move allocation tagging code in the alloc path into a hook")
> >
> > mm/slub.c | 48 +++++++++++++++++++++---------------------------
> > 1 file changed, 21 insertions(+), 27 deletions(-)
> >
> > diff --git a/mm/slub.c b/mm/slub.c
> > index ce39544acf7c..829a1f08e8a2 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -2027,6 +2027,27 @@ static inline bool need_slab_obj_ext(void)
> > return false;
> > }
> >
> > +#else /* CONFIG_SLAB_OBJ_EXT */
> > +
> > +static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> > + gfp_t gfp, bool new_slab)
> > +{
> > + return 0;
> > +}
> > +
> > +static inline void free_slab_obj_exts(struct slab *slab)
> > +{
> > +}
> > +
> > +static inline bool need_slab_obj_ext(void)
> > +{
> > + return false;
> > +}
> > +
> > +#endif /* CONFIG_SLAB_OBJ_EXT */
> > +
> > +#ifdef CONFIG_MEM_ALLOC_PROFILING
> > +
> > static inline struct slabobj_ext *
> > prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> > {
> > @@ -2051,33 +2072,6 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> > return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> > }
> >
> > -#else /* CONFIG_SLAB_OBJ_EXT */
> > -
> > -static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> > - gfp_t gfp, bool new_slab)
> > -{
> > - return 0;
> > -}
> > -
> > -static inline void free_slab_obj_exts(struct slab *slab)
> > -{
> > -}
> > -
> > -static inline bool need_slab_obj_ext(void)
> > -{
> > - return false;
> > -}
> > -
> > -static inline struct slabobj_ext *
> > -prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> > -{
> > - return NULL;
> > -}
> > -
> > -#endif /* CONFIG_SLAB_OBJ_EXT */
> > -
> > -#ifdef CONFIG_MEM_ALLOC_PROFILING
> > -
> > static inline void
> > alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
> > {
> >
> > base-commit: fe4e761669ab4d8b388fdb6e9dbe9a110eed8009
>
* Re: [PATCH 1/1] mm, slab: move prepare_slab_obj_exts_hook under CONFIG_MEM_ALLOC_PROFILING
From: xiongwei.song @ 2024-07-12 3:51 UTC
To: Suren Baghdasaryan, akpm
Cc: vbabka, sxwjean, willy, nathan, cl, penberg, rientjes,
iamjoonsoo.kim, roman.gushchin, 42.hyeyoo, linux-mm, linux-kernel,
Suren Baghdasaryan, kernel test robot
July 12, 2024 at 1:02 AM, "Suren Baghdasaryan" <surenb@google.com> wrote:
> The only place prepare_slab_obj_exts_hook() is currently being used is
> from alloc_tagging_slab_alloc_hook() when CONFIG_MEM_ALLOC_PROFILING=y.
> Move its definition under CONFIG_MEM_ALLOC_PROFILING to prevent unused
> function warning for CONFIG_SLAB_OBJ_EXT=n case.
>
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202407050845.zNONqauD-lkp@intel.com/
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>

Reviewed-by: Xiongwei Song <xiongwei.song@linux.dev>

Thanks.

> ---
> Applied over slab/for-next because of the refactoring [1] in that branch.
>
> [1] 302a3ea38aec ("mm, slab: move allocation tagging code in the alloc path into a hook")
>
>  mm/slub.c | 48 +++++++++++++++++++++---------------------------
>  1 file changed, 21 insertions(+), 27 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index ce39544acf7c..829a1f08e8a2 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2027,6 +2027,27 @@ static inline bool need_slab_obj_ext(void)
> return false;
> }
>
> +#else /* CONFIG_SLAB_OBJ_EXT */
> +
> +static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> + gfp_t gfp, bool new_slab)
> +{
> + return 0;
> +}
> +
> +static inline void free_slab_obj_exts(struct slab *slab)
> +{
> +}
> +
> +static inline bool need_slab_obj_ext(void)
> +{
> + return false;
> +}
> +
> +#endif /* CONFIG_SLAB_OBJ_EXT */
> +
> +#ifdef CONFIG_MEM_ALLOC_PROFILING
> +
> static inline struct slabobj_ext *
> prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> {
> @@ -2051,33 +2072,6 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> }
>
> -#else /* CONFIG_SLAB_OBJ_EXT */
> -
> -static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> - gfp_t gfp, bool new_slab)
> -{
> - return 0;
> -}
> -
> -static inline void free_slab_obj_exts(struct slab *slab)
> -{
> -}
> -
> -static inline bool need_slab_obj_ext(void)
> -{
> - return false;
> -}
> -
> -static inline struct slabobj_ext *
> -prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> -{
> - return NULL;
> -}
> -
> -#endif /* CONFIG_SLAB_OBJ_EXT */
> -
> -#ifdef CONFIG_MEM_ALLOC_PROFILING
> -
> static inline void
> alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
> {
>
> base-commit: fe4e761669ab4d8b388fdb6e9dbe9a110eed8009
> --
> 2.45.2.993.g49e7a77208-goog