* [PATCH] tracing: simplify pages allocation
@ 2026-04-25 1:44 Rosen Penev
0 siblings, 0 replies; only message in thread
From: Rosen Penev @ 2026-04-25 1:44 UTC (permalink / raw)
To: linux-trace-kernel
Cc: Steven Rostedt, Masami Hiramatsu, Mathieu Desnoyers, Kees Cook,
Gustavo A. R. Silva, open list:TRACING,
open list:KERNEL HARDENING (not covered by other areas):Keyword:\b__counted_by(_le|_be)?\b
Change pages to a flexible array member so that it is allocated together
with the enclosing struct.
This simplifies the code slightly: the NULL checks on pages are no longer
needed (a flexible array member cannot be NULL), and the separate kfree()
calls go away.
Signed-off-by: Rosen Penev <rosenp@gmail.com>
---
kernel/trace/tracing_map.c | 32 +++++++++++---------------------
kernel/trace/tracing_map.h | 2 +-
2 files changed, 12 insertions(+), 22 deletions(-)
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index bf1a507695b6..627cc3fdf69e 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -288,9 +288,6 @@ static void tracing_map_array_clear(struct tracing_map_array *a)
{
unsigned int i;
- if (!a->pages)
- return;
-
for (i = 0; i < a->n_pages; i++)
memset(a->pages[i], 0, PAGE_SIZE);
}
@@ -302,44 +299,37 @@ static void tracing_map_array_free(struct tracing_map_array *a)
if (!a)
return;
- if (!a->pages)
- goto free;
-
for (i = 0; i < a->n_pages; i++) {
if (!a->pages[i])
break;
kmemleak_free(a->pages[i]);
free_page((unsigned long)a->pages[i]);
}
-
- kfree(a->pages);
-
- free:
- kfree(a);
}
static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
unsigned int entry_size)
{
struct tracing_map_array *a;
+ unsigned int entry_size_shift;
+ unsigned int entries_per_page;
+ unsigned int n_pages;
unsigned int i;
- a = kzalloc_obj(*a);
+ entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
+ entries_per_page = PAGE_SIZE / (1 << entry_size_shift);
+ n_pages = max(1, n_elts / entries_per_page);
+
+ a = kzalloc_flex(*a, pages, n_pages);
if (!a)
return NULL;
- a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
- a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
- a->n_pages = n_elts / a->entries_per_page;
- if (!a->n_pages)
- a->n_pages = 1;
+ a->entry_size_shift = entry_size_shift;
+ a->entries_per_page = entries_per_page;
+ a->n_pages = n_pages;
a->entry_shift = fls(a->entries_per_page) - 1;
a->entry_mask = (1 << a->entry_shift) - 1;
- a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
- if (!a->pages)
- goto free;
-
for (i = 0; i < a->n_pages; i++) {
a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
if (!a->pages[i])
diff --git a/kernel/trace/tracing_map.h b/kernel/trace/tracing_map.h
index 99c37eeebc16..18a02959d77b 100644
--- a/kernel/trace/tracing_map.h
+++ b/kernel/trace/tracing_map.h
@@ -167,7 +167,7 @@ struct tracing_map_array {
unsigned int entry_shift;
unsigned int entry_mask;
unsigned int n_pages;
- void **pages;
+ void *pages[] __counted_by(n_pages);
};
#define TRACING_MAP_ARRAY_ELT(array, idx) \
--
2.54.0
^ permalink raw reply related [flat|nested] only message in thread
only message in thread, other threads:[~2026-04-25 1:44 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-25 1:44 [PATCH] tracing: simplify pages allocation Rosen Penev
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox