From: Ian Rogers <irogers@google.com>
To: irogers@google.com, acme@kernel.org
Cc: adrian.hunter@intel.com, ajones@ventanamicro.com,
ak@linux.intel.com, alex@ghiti.fr,
alexander.shishkin@linux.intel.com, anup@brainfault.org,
aou@eecs.berkeley.edu, atrajeev@linux.ibm.com,
blakejones@google.com, ctshao@google.com,
dapeng1.mi@linux.intel.com, derek.foreman@collabora.com,
dvyukov@google.com, howardchu95@gmail.com,
hrishikesh123s@gmail.com, james.clark@linaro.org,
jolsa@kernel.org, krzysztof.m.lopatowski@gmail.com,
leo.yan@arm.com, linux-kernel@vger.kernel.org,
linux-perf-users@vger.kernel.org, linux@treblig.org,
mingo@redhat.com, namhyung@kernel.org, nichen@iscas.ac.cn,
palmer@dabbelt.com, peterz@infradead.org, pjw@kernel.org,
ravi.bangoria@amd.com, swapnil.sapkal@amd.com, tanze@kylinos.cn,
thomas.falcon@intel.com, tianyou.li@intel.com,
yujie.liu@intel.com, zhouquan@iscas.ac.cn
Subject: [PATCH v13 32/32] perf kwork: Fix memory management of kwork_work
Date: Tue, 12 May 2026 15:30:01 -0700 [thread overview]
Message-ID: <20260512223001.2952848-33-irogers@google.com> (raw)
In-Reply-To: <20260512223001.2952848-1-irogers@google.com>
Address several memory-management issues in builtin-kwork.c:
1. Implements a global cleanup function perf_kwork__exit to free all
kwork_work and kwork_atom_page objects at the end of the command.
2. Ensures every 'name' field in struct kwork_work is either NULL or
heap-allocated via strdup(), and is properly freed with zfree().
3. Fixes memory leaks in top_merge_tasks where kwork_work objects were
dropped without being freed.
4. Adds robustness with NULL checks for name fields.
5. Fixes workqueue_work_init to correctly resolve and strdup kernel
function names, preventing bad-free errors.
Signed-off-by: Ian Rogers <irogers@google.com>
---
tools/perf/builtin-kwork.c | 125 ++++++++++++++++++++++++++++--------
tools/perf/util/bpf_kwork.c | 14 ++--
tools/perf/util/kwork.h | 2 +
3 files changed, 112 insertions(+), 29 deletions(-)
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index a4604e152002..75c50073c350 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -323,8 +323,8 @@ static struct kwork_work *work_search(struct rb_root_cached *root,
else if (cmp < 0)
node = node->rb_right;
else {
- if (work->name == NULL)
- work->name = key->name;
+ if (work->name == NULL && key->name != NULL)
+ work->name = strdup(key->name);
return work;
}
}
@@ -371,11 +371,58 @@ static struct kwork_work *work_new(struct kwork_work *key)
work->id = key->id;
work->cpu = key->cpu;
- work->name = key->name;
+ work->name = key->name ? strdup(key->name) : NULL;
work->class = key->class;
return work;
}
+void work_exit(struct kwork_work *work)
+{
+ zfree(&work->name);
+}
+
+static void work_delete(struct kwork_work *work)
+{
+ if (work) {
+ work_exit(work);
+ free(work);
+ }
+}
+
+static void kwork_work__free_root(struct rb_root_cached *root)
+{
+ struct rb_node *next;
+ struct kwork_work *work;
+
+ while ((next = rb_first_cached(root))) {
+ work = rb_entry(next, struct kwork_work, node);
+ rb_erase_cached(next, root);
+ work_delete(work);
+ }
+}
+
+static void perf_kwork__exit(struct perf_kwork *kwork)
+{
+ struct kwork_class *class;
+ struct kwork_atom_page *page, *tmp_page;
+
+ list_for_each_entry(class, &kwork->class_list, list) {
+ kwork_work__free_root(&class->work_root);
+ }
+
+ kwork_work__free_root(&kwork->sorted_work_root);
+
+ list_for_each_entry_safe(page, tmp_page, &kwork->atom_page_list, list) {
+ list_del_init(&page->list);
+ free(page);
+ }
+
+ INIT_LIST_HEAD(&kwork->class_list);
+ INIT_LIST_HEAD(&kwork->atom_page_list);
+ INIT_LIST_HEAD(&kwork->sort_list);
+ INIT_LIST_HEAD(&kwork->cmp_id);
+}
+
static struct kwork_work *work_findnew(struct rb_root_cached *root,
struct kwork_work *key,
struct list_head *sort_list)
@@ -453,25 +500,29 @@ static int work_push_atom(struct perf_kwork *kwork,
struct kwork_work **ret_work,
bool overwrite)
{
- struct kwork_atom *atom, *dst_atom, *last_atom;
+ struct kwork_atom *atom = NULL, *dst_atom, *last_atom;
struct kwork_work *work, key;
+ int ret = 0;
BUG_ON(class->work_init == NULL);
class->work_init(kwork, class, &key, src_type, sample, machine);
atom = atom_new(kwork, sample);
- if (atom == NULL)
- return -1;
+ if (atom == NULL) {
+ ret = -1;
+ goto out;
+ }
work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
if (work == NULL) {
atom_free(atom);
- return -1;
+ ret = -1;
+ goto out;
}
if (!profile_event_match(kwork, work, sample)) {
atom_free(atom);
- return 0;
+ goto out;
}
if (dst_type < KWORK_TRACE_MAX) {
@@ -498,8 +549,9 @@ static int work_push_atom(struct perf_kwork *kwork,
}
list_add_tail(&atom->list, &work->atom_list[src_type]);
-
- return 0;
+out:
+ work_exit(&key);
+ return ret;
}
static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
@@ -510,7 +562,7 @@ static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
struct machine *machine,
struct kwork_work **ret_work)
{
- struct kwork_atom *atom, *src_atom;
+ struct kwork_atom *atom = NULL, *src_atom;
struct kwork_work *work, key;
BUG_ON(class->work_init == NULL);
@@ -521,15 +573,15 @@ static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
*ret_work = work;
if (work == NULL)
- return NULL;
+ goto out;
if (!profile_event_match(kwork, work, sample))
- return NULL;
+ goto out;
atom = list_last_entry_or_null(&work->atom_list[dst_type],
struct kwork_atom, list);
if (atom != NULL)
- return atom;
+ goto out;
src_atom = atom_new(kwork, sample);
if (src_atom != NULL)
@@ -538,8 +590,9 @@ static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
if (ret_work != NULL)
*ret_work = NULL;
}
-
- return NULL;
+out:
+ work_exit(&key);
+ return atom;
}
static struct kwork_work *find_work_by_id(struct rb_root_cached *root,
@@ -1002,13 +1055,16 @@ static void irq_work_init(struct perf_kwork *kwork,
work->name = NULL;
} else {
work->id = perf_sample__intval(sample, "irq");
- work->name = perf_sample__strval(sample, "name");
+ work->name = strdup(perf_sample__strval(sample, "name") ?: "<unknown>");
}
}
static void irq_work_name(struct kwork_work *work, char *buf, int len)
{
- snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
+ if (work->name != NULL)
+ snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
+ else
+ snprintf(buf, len, "%" PRIu64 "", work->id);
}
static struct kwork_class kwork_irq = {
@@ -1135,7 +1191,10 @@ static void softirq_work_init(struct perf_kwork *kwork,
static void softirq_work_name(struct kwork_work *work, char *buf, int len)
{
- snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
+ if (work->name != NULL)
+ snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
+ else
+ snprintf(buf, len, "(s)%" PRIu64 "", work->id);
}
static struct kwork_class kwork_softirq = {
@@ -1220,8 +1279,14 @@ static void workqueue_work_init(struct perf_kwork *kwork __maybe_unused,
work->class = class;
work->cpu = sample->cpu;
work->id = perf_sample__intval(sample, "work");
- work->name = function_addr == 0 ? NULL :
- machine__resolve_kernel_addr(machine, &function_addr, &modp);
+ work->name = NULL;
+
+ if (function_addr != 0) {
+ const char *name = machine__resolve_kernel_addr(machine, &function_addr, &modp);
+
+ if (name)
+ work->name = strdup(name);
+ }
}
static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
@@ -1284,16 +1349,16 @@ static void sched_work_init(struct perf_kwork *kwork __maybe_unused,
if (src_type == KWORK_TRACE_EXIT) {
work->id = perf_sample__intval(sample, "prev_pid");
- work->name = strdup(perf_sample__strval(sample, "prev_comm"));
+ work->name = strdup(perf_sample__strval(sample, "prev_comm") ?: "<unknown>");
} else if (src_type == KWORK_TRACE_ENTRY) {
work->id = perf_sample__intval(sample, "next_pid");
- work->name = strdup(perf_sample__strval(sample, "next_comm"));
+ work->name = strdup(perf_sample__strval(sample, "next_comm") ?: "<unknown>");
}
}
static void sched_work_name(struct kwork_work *work, char *buf, int len)
{
- snprintf(buf, len, "%s", work->name);
+ snprintf(buf, len, "%s", work->name ?: "");
}
static struct kwork_class kwork_sched = {
@@ -2100,8 +2165,10 @@ static void top_merge_tasks(struct perf_kwork *kwork)
rb_erase_cached(node, &class->work_root);
data = rb_entry(node, struct kwork_work, node);
- if (!profile_name_match(kwork, data))
+ if (!profile_name_match(kwork, data)) {
+ work_delete(data);
continue;
+ }
cpu = data->cpu;
merged_work = find_work_by_id(&merged_root, data->id,
@@ -2109,11 +2176,17 @@ static void top_merge_tasks(struct perf_kwork *kwork)
if (!merged_work) {
work_insert(&merged_root, data, &kwork->cmp_id);
} else {
+ if (merged_work->name == NULL && data->name != NULL)
+ merged_work->name = strdup(data->name);
+
merged_work->total_runtime += data->total_runtime;
merged_work->cpu_usage += data->cpu_usage;
}
top_calc_load_runtime(kwork, data);
+
+ if (merged_work)
+ work_delete(data);
}
work_sort(kwork, class, &merged_root);
@@ -2523,6 +2596,8 @@ int cmd_kwork(int argc, const char **argv)
} else
usage_with_options(kwork_usage, kwork_options);
+ perf_kwork__exit(&kwork);
+
/* free usage string allocated by parse_options_subcommand */
free((void *)kwork_usage[0]);
diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c
index d3a2e548f2b6..2248f462a847 100644
--- a/tools/perf/util/bpf_kwork.c
+++ b/tools/perf/util/bpf_kwork.c
@@ -273,6 +273,7 @@ static int add_work(struct perf_kwork *kwork,
.cpu = key->cpu,
};
enum kwork_class_type type = key->type;
+ int ret = 0;
if (!valid_kwork_class_type(type)) {
pr_debug("Invalid class type %d to add work\n", type);
@@ -287,8 +288,10 @@ static int add_work(struct perf_kwork *kwork,
return -1;
work = kwork->add_work(kwork, tmp.class, &tmp);
- if (work == NULL)
- return -1;
+ if (work == NULL) {
+ ret = -1;
+ goto out;
+ }
if (kwork->report == KWORK_REPORT_RUNTIME) {
work->nr_atoms = data->nr;
@@ -304,13 +307,16 @@ static int add_work(struct perf_kwork *kwork,
work->max_latency_end = data->max_time_end;
} else {
pr_debug("Invalid bpf report type %d\n", kwork->report);
- return -1;
+ ret = -1;
+ goto out;
}
kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;
- return 0;
+out:
+ work_exit(&tmp);
+ return ret;
}
int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
diff --git a/tools/perf/util/kwork.h b/tools/perf/util/kwork.h
index abf637d44794..c96f388b3159 100644
--- a/tools/perf/util/kwork.h
+++ b/tools/perf/util/kwork.h
@@ -164,6 +164,8 @@ struct kwork_class {
char *buf, int len);
};
+void work_exit(struct kwork_work *work);
+
struct trace_kwork_handler {
int (*raise_event)(struct perf_kwork *kwork,
struct kwork_class *class,
--
2.54.0.563.g4f69b47b94-goog
prev parent reply other threads:[~2026-05-12 22:31 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <20260413041143.1736055-1-irogers@google.com>
2026-05-12 22:29 ` [PATCH v13 00/32] perf tool: Add evsel to perf_sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 01/32] perf tool: Remove evsel from tool APIs that pass the sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 02/32] perf kvm: Don't pass evsel with sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 03/32] perf evsel: Refactor evsel tracepoint sample accessors perf_sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 04/32] perf trace: Don't pass evsel with sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 05/32] perf callchain: Don't pass evsel and sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 06/32] perf lock: Only pass sample to handlers Ian Rogers
2026-05-12 22:29 ` [PATCH v13 07/32] perf hist: Remove evsel parameter from inc samples functions Ian Rogers
2026-05-12 22:29 ` [PATCH v13 08/32] perf db-export: Remove evsel from struct export_sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 09/32] perf hist: Remove evsel from struct hist_entry_iter Ian Rogers
2026-05-12 22:29 ` [PATCH v13 10/32] perf report: Directly use sample->evsel to avoid computing from sample->id Ian Rogers
2026-05-12 22:29 ` [PATCH v13 11/32] perf annotate: Don't pass evsel to add_sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 12/32] perf inject: Don't pass evsel with sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 13/32] perf kmem: " Ian Rogers
2026-05-12 22:29 ` [PATCH v13 14/32] perf kwork: " Ian Rogers
2026-05-12 22:29 ` [PATCH v13 15/32] perf sched: " Ian Rogers
2026-05-12 22:29 ` [PATCH v13 16/32] perf timechart: " Ian Rogers
2026-05-12 22:29 ` [PATCH v13 17/32] perf trace: " Ian Rogers
2026-05-12 22:29 ` [PATCH v13 18/32] perf evlist: Try to avoid computing evsel from sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 19/32] perf script: Don't pass evsel with sample Ian Rogers
2026-05-12 22:29 ` [PATCH v13 20/32] perf s390-sample-raw: Don't pass evsel or its PMU " Ian Rogers
2026-05-12 22:29 ` [PATCH v13 21/32] perf evsel: Don't pass evsel " Ian Rogers
2026-05-12 22:29 ` [PATCH v13 22/32] perf lock: Constify trace_lock_handler variables Ian Rogers
2026-05-12 22:29 ` [PATCH v13 23/32] perf lock: Avoid segv if event is missing a callchain Ian Rogers
2026-05-12 22:29 ` [PATCH v13 24/32] perf timechart: Fix memory leaks Ian Rogers
2026-05-12 22:29 ` [PATCH v13 25/32] perf kmem: Fix memory leaks on error path and when skipping Ian Rogers
2026-05-12 22:29 ` [PATCH v13 26/32] perf synthetic-events: Bound check when synthesizing mmap2 and build_id events Ian Rogers
2026-05-12 22:29 ` [PATCH v13 27/32] perf kmem: Add bounds checks to tracepoint read values Ian Rogers
2026-05-12 22:29 ` [PATCH v13 28/32] perf sched: Bounds check CPU in sched switch events Ian Rogers
2026-05-12 22:29 ` [PATCH v13 29/32] perf timechart: Bounds check CPU Ian Rogers
2026-05-12 22:29 ` [PATCH v13 30/32] perf evsel: Add bounds checking to trace point raw data accessors Ian Rogers
2026-05-12 22:30 ` [PATCH v13 31/32] perf kwork: Fix address sanitizer issues Ian Rogers
2026-05-12 22:30 ` Ian Rogers [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260512223001.2952848-33-irogers@google.com \
--to=irogers@google.com \
--cc=acme@kernel.org \
--cc=adrian.hunter@intel.com \
--cc=ajones@ventanamicro.com \
--cc=ak@linux.intel.com \
--cc=alex@ghiti.fr \
--cc=alexander.shishkin@linux.intel.com \
--cc=anup@brainfault.org \
--cc=aou@eecs.berkeley.edu \
--cc=atrajeev@linux.ibm.com \
--cc=blakejones@google.com \
--cc=ctshao@google.com \
--cc=dapeng1.mi@linux.intel.com \
--cc=derek.foreman@collabora.com \
--cc=dvyukov@google.com \
--cc=howardchu95@gmail.com \
--cc=hrishikesh123s@gmail.com \
--cc=james.clark@linaro.org \
--cc=jolsa@kernel.org \
--cc=krzysztof.m.lopatowski@gmail.com \
--cc=leo.yan@arm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-perf-users@vger.kernel.org \
--cc=linux@treblig.org \
--cc=mingo@redhat.com \
--cc=namhyung@kernel.org \
--cc=nichen@iscas.ac.cn \
--cc=palmer@dabbelt.com \
--cc=peterz@infradead.org \
--cc=pjw@kernel.org \
--cc=ravi.bangoria@amd.com \
--cc=swapnil.sapkal@amd.com \
--cc=tanze@kylinos.cn \
--cc=thomas.falcon@intel.com \
--cc=tianyou.li@intel.com \
--cc=yujie.liu@intel.com \
--cc=zhouquan@iscas.ac.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox