* [PATCH 01/28] perf session: Add minimum event size validation table
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
@ 2026-05-10 3:33 ` Arnaldo Carvalho de Melo
2026-05-11 19:01 ` Ian Rogers
2026-05-10 3:33 ` [PATCH 02/28] perf tools: Fix event_contains() macro to verify full field extent Arnaldo Carvalho de Melo
` (26 subsequent siblings)
27 siblings, 1 reply; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:33 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>

Add a per-type minimum size table (perf_event__min_size[]) and
enforce it before swap and processing, so that both cross-endian
and native-endian paths are protected from accessing fields past
the event boundary.

The table uses offsetof() for types with trailing variable-length
fields (filenames, strings, msg arrays) and sizeof() for
fixed-size types. Zero entries mean no minimum beyond the 8-byte
header already enforced by the reader.

Undersized events are skipped with a warning in process_event
and rejected in peek_event — both checked before the swap
handler runs, preventing OOB access on crafted event fields.

Also guard event_swap() against crafted event types >=
PERF_RECORD_HEADER_MAX to prevent OOB reads on the
perf_event__swap_ops[] array.

Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/session.c | 121 ++++++++++++++++++++++++++++++++++++++
1 file changed, 121 insertions(+)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index fe0de2a0277f09f9..aae0651fb6f025a1 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1759,10 +1759,88 @@ int perf_session__deliver_synth_attr_event(struct perf_session *session,
return perf_session__deliver_synth_event(session, &ev.ev, NULL);
}

+/*
+ * Minimum event sizes indexed by type. Checked before swap and
+ * processing so that both cross-endian and native-endian paths
+ * are protected from accessing fields past the event boundary.
+ * Zero means no minimum beyond the 8-byte header (already
+ * enforced by the reader).
+ */
+static const u32 perf_event__min_size[PERF_RECORD_HEADER_MAX] = {
+ /*
+ * offsetof() for types with a trailing variable-length string
+ * (filename, comm, path, name, msg): sizeof() includes a
+ * PATH_MAX or fixed-size array, but valid events only need
+ * the fixed fields. Null-termination is checked separately.
+ *
+ * PERF_RECORD_SAMPLE is omitted: all64_swap is bounded by
+ * header.size, and the internal layout varies by sample_type
+ * so a fixed minimum is not meaningful.
+ */
+ [PERF_RECORD_MMAP] = offsetof(struct perf_record_mmap, filename),
+ [PERF_RECORD_LOST] = sizeof(struct perf_record_lost),
+ [PERF_RECORD_COMM] = offsetof(struct perf_record_comm, comm),
+ [PERF_RECORD_EXIT] = sizeof(struct perf_record_fork),
+ [PERF_RECORD_THROTTLE] = sizeof(struct perf_record_throttle),
+ [PERF_RECORD_UNTHROTTLE] = sizeof(struct perf_record_throttle),
+ [PERF_RECORD_FORK] = sizeof(struct perf_record_fork),
+ /*
+ * The kernel dynamically sizes PERF_RECORD_READ based on
+ * attr.read_format — the minimum has just pid + tid + value.
+ */
+ [PERF_RECORD_READ] = offsetof(struct perf_record_read, time_enabled),
+ [PERF_RECORD_MMAP2] = offsetof(struct perf_record_mmap2, filename),
+ [PERF_RECORD_LOST_SAMPLES] = sizeof(struct perf_record_lost_samples),
+ [PERF_RECORD_AUX] = sizeof(struct perf_record_aux),
+ [PERF_RECORD_ITRACE_START] = sizeof(struct perf_record_itrace_start),
+ [PERF_RECORD_SWITCH] = sizeof(struct perf_event_header),
+ [PERF_RECORD_SWITCH_CPU_WIDE] = sizeof(struct perf_record_switch),
+ [PERF_RECORD_NAMESPACES] = sizeof(struct perf_record_namespaces),
+ [PERF_RECORD_CGROUP] = offsetof(struct perf_record_cgroup, path),
+ [PERF_RECORD_TEXT_POKE] = sizeof(struct perf_record_text_poke_event),
+ [PERF_RECORD_KSYMBOL] = offsetof(struct perf_record_ksymbol, name),
+ [PERF_RECORD_BPF_EVENT] = sizeof(struct perf_record_bpf_event),
+ [PERF_RECORD_HEADER_ATTR] = sizeof(struct perf_event_header) + PERF_ATTR_SIZE_VER0,
+ [PERF_RECORD_HEADER_EVENT_TYPE] = sizeof(struct perf_record_header_event_type),
+ [PERF_RECORD_HEADER_TRACING_DATA] = sizeof(struct perf_record_header_tracing_data),
+ [PERF_RECORD_AUX_OUTPUT_HW_ID] = sizeof(struct perf_record_aux_output_hw_id),
+ [PERF_RECORD_AUXTRACE_INFO] = sizeof(struct perf_record_auxtrace_info),
+ [PERF_RECORD_AUXTRACE] = sizeof(struct perf_record_auxtrace),
+ [PERF_RECORD_AUXTRACE_ERROR] = offsetof(struct perf_record_auxtrace_error, msg),
+ [PERF_RECORD_THREAD_MAP] = sizeof(struct perf_record_thread_map),
+ /* Smallest valid variant is RANGE_CPUS: header(8) + type(2) + range(6) */
+ [PERF_RECORD_CPU_MAP] = sizeof(struct perf_event_header) +
+ sizeof(__u16) +
+ sizeof(struct perf_record_range_cpu_map),
+ [PERF_RECORD_STAT_CONFIG] = sizeof(struct perf_record_stat_config),
+ [PERF_RECORD_STAT] = sizeof(struct perf_record_stat),
+ [PERF_RECORD_STAT_ROUND] = sizeof(struct perf_record_stat_round),
+ /* Union inflates sizeof; use fixed header fields as minimum */
+ [PERF_RECORD_EVENT_UPDATE] = offsetof(struct perf_record_event_update, scale),
+ [PERF_RECORD_TIME_CONV] = offsetof(struct perf_record_time_conv, time_cycles),
+ [PERF_RECORD_ID_INDEX] = sizeof(struct perf_record_id_index),
+ [PERF_RECORD_HEADER_BUILD_ID] = sizeof(struct perf_record_header_build_id),
+ [PERF_RECORD_HEADER_FEATURE] = sizeof(struct perf_record_header_feature),
+ [PERF_RECORD_COMPRESSED2] = sizeof(struct perf_record_compressed2),
+ [PERF_RECORD_BPF_METADATA] = sizeof(struct perf_record_bpf_metadata),
+ [PERF_RECORD_CALLCHAIN_DEFERRED] = sizeof(struct perf_event_header) + sizeof(__u64),
+ /*
+ * SCHEDSTAT events have a version-dependent union after the
+ * fixed header fields; the minimum is the base (pre-union)
+ * portion so old and new versions both pass.
+ */
+ [PERF_RECORD_SCHEDSTAT_CPU] = offsetof(struct perf_record_schedstat_cpu, v15),
+ [PERF_RECORD_SCHEDSTAT_DOMAIN] = offsetof(struct perf_record_schedstat_domain, v15),
+};
+
static void event_swap(union perf_event *event, bool sample_id_all)
{
perf_event__swap_op swap;

+ /* Prevent OOB read on perf_event__swap_ops[] from crafted type */
+ if (event->header.type >= PERF_RECORD_HEADER_MAX)
+ return;
+
swap = perf_event__swap_ops[event->header.type];
if (swap)
swap(event, sample_id_all);
@@ -1780,6 +1858,20 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
if (session->one_mmap && !session->header.needs_swap) {
event = file_offset - session->one_mmap_offset +
session->one_mmap_addr;
+
+ /* Every event must at least contain its own header */
+ if (event->header.size < sizeof(struct perf_event_header))
+ return -1;
+
+ /* Reject undersized events on the native-endian fast path */
+ if (event->header.type < PERF_RECORD_HEADER_MAX) {
+ u32 min_sz = perf_event__min_size[event->header.type];
+
+ if (min_sz && event->header.size < min_sz) {
+ *event_ptr = event;
+ return -1;
+ }
+ }
goto out_parse_sample;
}

@@ -1810,6 +1902,20 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
if (readn(fd, buf, rest) != (ssize_t)rest)
return -1;

+ /* Reject undersized events before swapping */
+ if (event->header.type < PERF_RECORD_HEADER_MAX) {
+ u32 min_sz = perf_event__min_size[event->header.type];
+
+ if (min_sz && event->header.size < min_sz) {
+ pr_warning("WARNING: peek_event: %s event size %u too small (min %u)\n",
+ perf_event__name(event->header.type),
+ event->header.size, min_sz);
+ /* Expose so peek_events can advance past it */
+ *event_ptr = event;
+ return -1;
+ }
+ }
+
if (session->header.needs_swap)
event_swap(event, evlist__sample_id_all(session->evlist));

@@ -1860,6 +1966,21 @@ static s64 perf_session__process_event(struct perf_session *session,
const struct perf_tool *tool = session->tool;
int ret;

+ if (event->header.type < PERF_RECORD_HEADER_MAX) {
+ u32 min_sz = perf_event__min_size[event->header.type];
+
+ /*
+ * Skip rather than abort: a crafted file may have
+ * isolated bad events among otherwise valid data.
+ */
+ if (min_sz && event->header.size < min_sz) {
+ pr_warning("WARNING: %s event size %u too small (min %u), skipping\n",
+ perf_event__name(event->header.type),
+ event->header.size, min_sz);
+ return 0;
+ }
+ }
+
if (session->header.needs_swap)
event_swap(event, evlist__sample_id_all(evlist));

--
2.54.0
* Re: [PATCH 01/28] perf session: Add minimum event size validation table
2026-05-10 3:33 ` [PATCH 01/28] perf session: Add minimum event size validation table Arnaldo Carvalho de Melo
@ 2026-05-11 19:01 ` Ian Rogers
0 siblings, 0 replies; 30+ messages in thread
From: Ian Rogers @ 2026-05-11 19:01 UTC (permalink / raw)
To: Arnaldo Carvalho de Melo
Cc: Namhyung Kim, Ingo Molnar, Thomas Gleixner, James Clark,
Jiri Olsa, Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
On Sat, May 9, 2026 at 8:34 PM Arnaldo Carvalho de Melo <acme@kernel.org> wrote:
>
> From: Arnaldo Carvalho de Melo <acme@redhat.com>
>
> Add a per-type minimum size table (perf_event__min_size[]) and
> enforce it before swap and processing, so that both cross-endian
> and native-endian paths are protected from accessing fields past
> the event boundary.
>
> The table uses offsetof() for types with trailing variable-length
> fields (filenames, strings, msg arrays) and sizeof() for
> fixed-size types. Zero entries mean no minimum beyond the 8-byte
> header already enforced by the reader.
>
> Undersized events are skipped with a warning in process_event
> and rejected in peek_event — both checked before the swap
> handler runs, preventing OOB access on crafted event fields.
>
> Also guard event_swap() against crafted event types >=
> PERF_RECORD_HEADER_MAX to prevent OOB reads on the
> perf_event__swap_ops[] array.
>
> Reported-by: sashiko-bot@kernel.org # Running on a local machine
> Cc: Adrian Hunter <adrian.hunter@intel.com>
> Cc: Ian Rogers <irogers@google.com>
> Cc: Jiri Olsa <jolsa@kernel.org>
> Cc: Namhyung Kim <namhyung@kernel.org>
> Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
> Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
> ---
> tools/perf/util/session.c | 121 ++++++++++++++++++++++++++++++++++++++
> 1 file changed, 121 insertions(+)
>
> diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
> index fe0de2a0277f09f9..aae0651fb6f025a1 100644
> --- a/tools/perf/util/session.c
> +++ b/tools/perf/util/session.c
> @@ -1759,10 +1759,88 @@ int perf_session__deliver_synth_attr_event(struct perf_session *session,
> return perf_session__deliver_synth_event(session, &ev.ev, NULL);
> }
>
> +/*
> + * Minimum event sizes indexed by type. Checked before swap and
> + * processing so that both cross-endian and native-endian paths
> + * are protected from accessing fields past the event boundary.
> + * Zero means no minimum beyond the 8-byte header (already
> + * enforced by the reader).
> + */
> +static const u32 perf_event__min_size[PERF_RECORD_HEADER_MAX] = {
> + /*
> + * offsetof() for types with a trailing variable-length string
> + * (filename, comm, path, name, msg): sizeof() includes a
> + * PATH_MAX or fixed-size array, but valid events only need
> + * the fixed fields. Null-termination is checked separately.
> + *
> + * PERF_RECORD_SAMPLE is omitted: all64_swap is bounded by
> + * header.size, and the internal layout varies by sample_type
> + * so a fixed minimum is not meaningful.
> + */
> + [PERF_RECORD_MMAP] = offsetof(struct perf_record_mmap, filename),
> + [PERF_RECORD_LOST] = sizeof(struct perf_record_lost),
> + [PERF_RECORD_COMM] = offsetof(struct perf_record_comm, comm),
> + [PERF_RECORD_EXIT] = sizeof(struct perf_record_fork),
> + [PERF_RECORD_THROTTLE] = sizeof(struct perf_record_throttle),
> + [PERF_RECORD_UNTHROTTLE] = sizeof(struct perf_record_throttle),
> + [PERF_RECORD_FORK] = sizeof(struct perf_record_fork),
> + /*
> + * The kernel dynamically sizes PERF_RECORD_READ based on
> + * attr.read_format — the minimum has just pid + tid + value.
> + */
> + [PERF_RECORD_READ] = offsetof(struct perf_record_read, time_enabled),
> + [PERF_RECORD_MMAP2] = offsetof(struct perf_record_mmap2, filename),
Should this be +1 for the '\0', given the path is a string? Arguably,
for an mmap2 carrying just a build ID we don't need the path at all.
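
I.e. something like:

  [PERF_RECORD_MMAP2] = offsetof(struct perf_record_mmap2, filename) + 1,

so that at least the terminating NUL of an empty path has to fit.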
> + [PERF_RECORD_LOST_SAMPLES] = sizeof(struct perf_record_lost_samples),
> + [PERF_RECORD_AUX] = sizeof(struct perf_record_aux),
> + [PERF_RECORD_ITRACE_START] = sizeof(struct perf_record_itrace_start),
> + [PERF_RECORD_SWITCH] = sizeof(struct perf_event_header),
> + [PERF_RECORD_SWITCH_CPU_WIDE] = sizeof(struct perf_record_switch),
> + [PERF_RECORD_NAMESPACES] = sizeof(struct perf_record_namespaces),
> + [PERF_RECORD_CGROUP] = offsetof(struct perf_record_cgroup, path),
> + [PERF_RECORD_TEXT_POKE] = sizeof(struct perf_record_text_poke_event),
> + [PERF_RECORD_KSYMBOL] = offsetof(struct perf_record_ksymbol, name),
> + [PERF_RECORD_BPF_EVENT] = sizeof(struct perf_record_bpf_event),
> + [PERF_RECORD_HEADER_ATTR] = sizeof(struct perf_event_header) + PERF_ATTR_SIZE_VER0,
> + [PERF_RECORD_HEADER_EVENT_TYPE] = sizeof(struct perf_record_header_event_type),
> + [PERF_RECORD_HEADER_TRACING_DATA] = sizeof(struct perf_record_header_tracing_data),
> + [PERF_RECORD_AUX_OUTPUT_HW_ID] = sizeof(struct perf_record_aux_output_hw_id),
> + [PERF_RECORD_AUXTRACE_INFO] = sizeof(struct perf_record_auxtrace_info),
> + [PERF_RECORD_AUXTRACE] = sizeof(struct perf_record_auxtrace),
> + [PERF_RECORD_AUXTRACE_ERROR] = offsetof(struct perf_record_auxtrace_error, msg),
> + [PERF_RECORD_THREAD_MAP] = sizeof(struct perf_record_thread_map),
> + /* Smallest valid variant is RANGE_CPUS: header(8) + type(2) + range(6) */
> + [PERF_RECORD_CPU_MAP] = sizeof(struct perf_event_header) +
> + sizeof(__u16) +
> + sizeof(struct perf_record_range_cpu_map),
> + [PERF_RECORD_STAT_CONFIG] = sizeof(struct perf_record_stat_config),
> + [PERF_RECORD_STAT] = sizeof(struct perf_record_stat),
> + [PERF_RECORD_STAT_ROUND] = sizeof(struct perf_record_stat_round),
> + /* Union inflates sizeof; use fixed header fields as minimum */
> + [PERF_RECORD_EVENT_UPDATE] = offsetof(struct perf_record_event_update, scale),
> + [PERF_RECORD_TIME_CONV] = offsetof(struct perf_record_time_conv, time_cycles),
> + [PERF_RECORD_ID_INDEX] = sizeof(struct perf_record_id_index),
> + [PERF_RECORD_HEADER_BUILD_ID] = sizeof(struct perf_record_header_build_id),
> + [PERF_RECORD_HEADER_FEATURE] = sizeof(struct perf_record_header_feature),
> + [PERF_RECORD_COMPRESSED2] = sizeof(struct perf_record_compressed2),
> + [PERF_RECORD_BPF_METADATA] = sizeof(struct perf_record_bpf_metadata),
> + [PERF_RECORD_CALLCHAIN_DEFERRED] = sizeof(struct perf_event_header) + sizeof(__u64),
> + /*
> + * SCHEDSTAT events have a version-dependent union after the
> + * fixed header fields; the minimum is the base (pre-union)
> + * portion so old and new versions both pass.
> + */
> + [PERF_RECORD_SCHEDSTAT_CPU] = offsetof(struct perf_record_schedstat_cpu, v15),
> + [PERF_RECORD_SCHEDSTAT_DOMAIN] = offsetof(struct perf_record_schedstat_domain, v15),
> +};
> +
> static void event_swap(union perf_event *event, bool sample_id_all)
> {
> perf_event__swap_op swap;
>
> + /* Prevent OOB read on perf_event__swap_ops[] from crafted type */
> + if (event->header.type >= PERF_RECORD_HEADER_MAX)
> + return;
> +
> swap = perf_event__swap_ops[event->header.type];
> if (swap)
> swap(event, sample_id_all);
> @@ -1780,6 +1858,20 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
> if (session->one_mmap && !session->header.needs_swap) {
> event = file_offset - session->one_mmap_offset +
> session->one_mmap_addr;
> +
> + /* Every event must at least contain its own header */
> + if (event->header.size < sizeof(struct perf_event_header))
> + return -1;
> +
> + /* Reject undersized events on the native-endian fast path */
> + if (event->header.type < PERF_RECORD_HEADER_MAX) {
> + u32 min_sz = perf_event__min_size[event->header.type];
Using an array means we always need to bounds-check the accesses, which
is pretty repetitive. For `perf_event__sample_event_size`, we already
have a function. Would using a function here keep the code more
concise? A function could also be passed attr.sample_id_all and
sample_type so that the event size minimum can take these into
account.
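
Rough sketch of what I mean (untested, and ignoring the attr-aware
variant for now):

  static u32 perf_event__min_size(const union perf_event *event)
  {
          switch (event->header.type) {
          case PERF_RECORD_MMAP:
                  return offsetof(struct perf_record_mmap, filename);
          case PERF_RECORD_FORK:
          case PERF_RECORD_EXIT:
                  return sizeof(struct perf_record_fork);
          /* ... and so on for the remaining record types ... */
          default:
                  /* No minimum beyond the header. */
                  return sizeof(struct perf_event_header);
          }
  }

then callers just compare event->header.size against the return value,
with no array access to bounds-check.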

Note, this has come up in some of my posted patches, I think because
the LLMs are keen on bounds checking. Minimum size variable for python
event processing:
https://lore.kernel.org/lkml/20260428071903.1886173-16-irogers@google.com/
Branch stack sample event size fix:
https://lore.kernel.org/lkml/20260429181136.2712655-2-irogers@google.com/
Thanks,
Ian
> +
> + if (min_sz && event->header.size < min_sz) {
> + *event_ptr = event;
> + return -1;
> + }
> + }
> goto out_parse_sample;
> }
>
> @@ -1810,6 +1902,20 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
> if (readn(fd, buf, rest) != (ssize_t)rest)
> return -1;
>
> + /* Reject undersized events before swapping */
> + if (event->header.type < PERF_RECORD_HEADER_MAX) {
> + u32 min_sz = perf_event__min_size[event->header.type];
> +
> + if (min_sz && event->header.size < min_sz) {
> + pr_warning("WARNING: peek_event: %s event size %u too small (min %u)\n",
> + perf_event__name(event->header.type),
> + event->header.size, min_sz);
> + /* Expose so peek_events can advance past it */
> + *event_ptr = event;
> + return -1;
> + }
> + }
> +
> if (session->header.needs_swap)
> event_swap(event, evlist__sample_id_all(session->evlist));
>
> @@ -1860,6 +1966,21 @@ static s64 perf_session__process_event(struct perf_session *session,
> const struct perf_tool *tool = session->tool;
> int ret;
>
> + if (event->header.type < PERF_RECORD_HEADER_MAX) {
> + u32 min_sz = perf_event__min_size[event->header.type];
> +
> + /*
> + * Skip rather than abort: a crafted file may have
> + * isolated bad events among otherwise valid data.
> + */
> + if (min_sz && event->header.size < min_sz) {
> + pr_warning("WARNING: %s event size %u too small (min %u), skipping\n",
> + perf_event__name(event->header.type),
> + event->header.size, min_sz);
> + return 0;
> + }
> + }
> +
> if (session->header.needs_swap)
> event_swap(event, evlist__sample_id_all(evlist));
>
> --
> 2.54.0
>
* [PATCH 02/28] perf tools: Fix event_contains() macro to verify full field extent
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 01/28] perf session: Add minimum event size validation table Arnaldo Carvalho de Melo
@ 2026-05-10 3:33 ` Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 03/28] perf zstd: Fix compression error path in zstd_compress_stream_to_records() Arnaldo Carvalho de Melo
` (25 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:33 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>

event_contains() checked whether a field's start offset was within
the event (header.size > offsetof), but not whether the full field
fit. A crafted event with header.size = offsetof(field) + 1 would
pass the check, but an 8-byte access (bswap_64, direct read) would
overrun the event boundary by up to 7 bytes.

Fix the macro to verify the complete field:

  header.size >= offsetof(field) + sizeof(field)
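
For example, in struct perf_record_time_conv the time_cycles field
sits at offset 32 (the 8-byte header plus three u64 fields) and is 8
bytes wide:

  old: header.size >  32       a crafted size of 33 passes, yet
                               bswap_64(&tc->time_cycles) touches
                               bytes 32..39, 7 bytes past the event
  new: header.size >= 32 + 8   size 33 is rejected, 40 passes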

Also update all callers that check event_contains(time_cycles) but
access later fields (time_mask, cap_user_time_zero,
cap_user_time_short) to check for cap_user_time_short — the last
field accessed — so the entire extended block is verified:
tsc.c, arm-spe.c, cs-etm.c, jitdump.c.

Note: session.c's perf_event__time_conv_swap() also guards on
time_cycles but accesses time_mask — a pre-existing issue not
introduced by this macro change. It is fixed by a later patch
in this series ("perf session: Add validated swap
infrastructure with null-termination checks"), which changes
the guard to time_mask. The struct assignment overread
(session->time_conv = event->time_conv copies sizeof on a
potentially shorter event) is separately fixed by "perf
session: Use bounded copy for PERF_RECORD_TIME_CONV".

Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/lib/perf/include/perf/event.h | 4 +++-
tools/perf/util/arm-spe.c | 2 +-
tools/perf/util/cs-etm.c | 2 +-
tools/perf/util/jitdump.c | 2 +-
tools/perf/util/tsc.c | 2 +-
5 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 9043dc72b5d68d58..c821143e6e4938c2 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -8,7 +8,9 @@
#include <linux/bpf.h>
#include <sys/types.h> /* pid_t */

-#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
+/* Verify the full field fits within the event, not just its start offset */
+#define event_contains(obj, mem) \
+ ((obj).header.size >= offsetof(typeof(obj), mem) + sizeof((obj).mem))

struct perf_record_mmap {
struct perf_event_header header;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 2b31da231ef3ec84..6f87e8ef20880425 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -1982,7 +1982,7 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
spe->tc.time_mult = tc->time_mult;
spe->tc.time_zero = tc->time_zero;

- if (event_contains(*tc, time_cycles)) {
+ if (event_contains(*tc, cap_user_time_short)) {
spe->tc.time_cycles = tc->time_cycles;
spe->tc.time_mask = tc->time_mask;
spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 8a639d2e51a4c5bf..02b80389810e767d 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -3496,7 +3496,7 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
etm->tc.time_shift = tc->time_shift;
etm->tc.time_mult = tc->time_mult;
etm->tc.time_zero = tc->time_zero;
- if (event_contains(*tc, time_cycles)) {
+ if (event_contains(*tc, cap_user_time_short)) {
etm->tc.time_cycles = tc->time_cycles;
etm->tc.time_mask = tc->time_mask;
etm->tc.cap_user_time_zero = tc->cap_user_time_zero;
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index e0ce8b9047298362..e1e160cdec4cf2c2 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -409,7 +409,7 @@ static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
* checks the event size and assigns these extended fields if these
* fields are contained in the event.
*/
- if (event_contains(*time_conv, time_cycles)) {
+ if (event_contains(*time_conv, cap_user_time_short)) {
tc.time_cycles = time_conv->time_cycles;
tc.time_mask = time_conv->time_mask;
tc.cap_user_time_zero = time_conv->cap_user_time_zero;
diff --git a/tools/perf/util/tsc.c b/tools/perf/util/tsc.c
index 511a517ce613dff1..ebf289bf6b9d9add 100644
--- a/tools/perf/util/tsc.c
+++ b/tools/perf/util/tsc.c
@@ -127,7 +127,7 @@ size_t perf_event__fprintf_time_conv(union perf_event *event, FILE *fp)
* when supported cap_user_time_short, for backward compatibility,
* prints the extended fields only if they are contained in the event.
*/
- if (event_contains(*tc, time_cycles)) {
+ if (event_contains(*tc, cap_user_time_short)) {
ret += fprintf(fp, "... Time Cycles %" PRI_lu64 "\n",
tc->time_cycles);
ret += fprintf(fp, "... Time Mask %#" PRI_lx64 "\n",
--
2.54.0
* [PATCH 03/28] perf zstd: Fix compression error path in zstd_compress_stream_to_records()
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 01/28] perf session: Add minimum event size validation table Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 02/28] perf tools: Fix event_contains() macro to verify full field extent Arnaldo Carvalho de Melo
@ 2026-05-10 3:33 ` Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 04/28] perf zstd: Fix multi-iteration decompression and error handling Arnaldo Carvalho de Melo
` (24 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:33 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>

The error fallback does memcpy(dst, src, src_size) intending to store
uncompressed data when compression fails, but this has three bugs:

1. dst has been advanced past the record header (and potentially
   past earlier compressed records), so the copy writes to the
   wrong offset in the output buffer.

2. src still points to the start of the input, not to the
   remaining uncompressed data at src + input.pos. On a second
   or later iteration, previously compressed data would be
   duplicated.

3. No check that dst_size >= src_size — if the remaining output
   space is smaller, this is an out-of-bounds write.

Replace with return -1 after resetting the ZSTD compression
context via ZSTD_initCStream(), so the context is usable for
the flush retry in __cmd_record's out_child cleanup. The -1
propagates through zstd_compress() → record__pushfn() →
perf_mmap__push() to the recording loop, which breaks out and
runs the out_child flush with the reset context.

Also fix two pre-existing issues in the same function:

- Add a dst_size guard before subtracting the record header
  size: if the output buffer is nearly full, the unsigned
  dst_size -= size underflows to a huge value, causing
  ZSTD_compressStream to write past the buffer boundary (see
  the sketch below).

- Check the ZSTD_initCStream() return value and log an error
  if the context reset itself fails.
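
A minimal illustration of that underflow (sizes picked for the
example):

  size_t dst_size = 4;    /* remaining output space */
  size_t size = 9;        /* record header just written */

  dst_size -= size;       /* wraps to SIZE_MAX - 4, so the next
                           * ZSTD_compressStream() call is told it
                           * has nearly unlimited room */
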
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/zstd.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
index 57027e0ac7b658a8..fde9907cf4768eff 100644
--- a/tools/perf/util/zstd.c
+++ b/tools/perf/util/zstd.c
@@ -55,6 +55,9 @@ ssize_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_
while (input.pos < input.size) {
record = dst;
size = process_header(record, 0);
+ /* Output buffer full — cannot fit even the record header */
+ if (size > dst_size)
+ return -1;
compressed += size;
dst += size;
dst_size -= size;
@@ -65,8 +68,16 @@ ssize_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_
if (ZSTD_isError(ret)) {
pr_err("failed to compress %ld bytes: %s\n",
(long)src_size, ZSTD_getErrorName(ret));
- memcpy(dst, src, src_size);
- return src_size;
+ /*
+ * Reset so the context is usable for the flush
+ * retry in __cmd_record's out_child cleanup.
+ */
+ ret = ZSTD_initCStream(data->cstream, data->comp_level);
+ if (ZSTD_isError(ret)) {
+ pr_err("failed to reset compression context: %s\n",
+ ZSTD_getErrorName(ret));
+ }
+ return -1;
}
size = output.pos;
size = process_header(record, size);
--
2.54.0
* [PATCH 04/28] perf zstd: Fix multi-iteration decompression and error handling
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (2 preceding siblings ...)
2026-05-10 3:33 ` [PATCH 03/28] perf zstd: Fix compression error path in zstd_compress_stream_to_records() Arnaldo Carvalho de Melo
@ 2026-05-10 3:33 ` Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 05/28] perf session: Fix PERF_RECORD_READ swap and dump for variable-length events Arnaldo Carvalho de Melo
` (23 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:33 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>

zstd_decompress_stream() has two bugs in its multi-iteration loop:

1. After each ZSTD_decompressStream() call, the code advances
   output.dst by output.pos but doesn't reset output.pos to 0.
   ZSTD interprets output.pos relative to output.dst, so the
   next iteration writes at (dst + pos) + pos = dst + 2*pos,
   skipping a gap and potentially writing out of bounds.

2. On ZSTD_decompressStream() error, the loop executes break
   and returns output.pos (which is > 0 if some bytes were
   decompressed before the error). The caller checks
   !decomp_size and skips the error, silently accepting
   truncated or corrupted data.

Fix both by removing the output buffer adjustment — ZSTD
correctly accumulates output.pos across calls without it.
Return 0 on decompression error so the caller detects it.
Add a no-progress guard to prevent infinite loops if the
output buffer fills before all input is consumed.
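
For reference, a minimal sketch of the streaming contract (with
dstream standing in for data->dstream, error handling elided):

  ZSTD_inBuffer input = { src, src_size, 0 };
  ZSTD_outBuffer output = { dst, dst_size, 0 };

  while (input.pos < input.size) {
          /*
           * The call advances input.pos and output.pos itself;
           * dst and size must stay fixed so that pos remains an
           * offset from the start of the buffer.
           */
          if (ZSTD_isError(ZSTD_decompressStream(dstream, &output, &input)))
                  break;
  }
  /* output.pos == total bytes decompressed into dst */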

Note: the compressed event data_size is validated against
header.size by a subsequent patch in this series
("perf tools: Harden compressed event processing").

Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/zstd.c | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
index fde9907cf4768eff..377be0505e50a493 100644
--- a/tools/perf/util/zstd.c
+++ b/tools/perf/util/zstd.c
@@ -111,14 +111,26 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
}
}
while (input.pos < input.size) {
+ size_t prev_in = input.pos;
+ size_t prev_out = output.pos;
+
ret = ZSTD_decompressStream(data->dstream, &output, &input);
if (ZSTD_isError(ret)) {
pr_err("failed to decompress (B): %zd -> %zd, dst_size %zd : %s\n",
- src_size, output.size, dst_size, ZSTD_getErrorName(ret));
- break;
+ src_size, output.pos, dst_size, ZSTD_getErrorName(ret));
+ return 0;
}
- output.dst = dst + output.pos;
- output.size = dst_size - output.pos;
+ /*
+ * Neither stream advanced — decompression is stuck.
+ * Return 0 (error) rather than partial output: perf
+ * uses ZSTD_flushStream (not ZSTD_endStream), so the
+ * stream is continuous across compressed events.
+ * Discarding unconsumed input would desynchronize the
+ * decompressor, causing the next call to produce
+ * garbage that could be misinterpreted as valid events.
+ */
+ if (input.pos == prev_in && output.pos == prev_out)
+ return 0;
}
return output.pos;
--
2.54.0
* [PATCH 05/28] perf session: Fix PERF_RECORD_READ swap and dump for variable-length events
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (3 preceding siblings ...)
2026-05-10 3:33 ` [PATCH 04/28] perf zstd: Fix multi-iteration decompression and error handling Arnaldo Carvalho de Melo
@ 2026-05-10 3:33 ` Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 06/28] perf session: Align auxtrace_info priv size before byte-swapping Arnaldo Carvalho de Melo
` (22 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:33 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
The kernel dynamically sizes PERF_RECORD_READ based on
attr.read_format: only the fields enabled by PERF_FORMAT_TOTAL_TIME_ENABLED,
PERF_FORMAT_TOTAL_TIME_RUNNING, PERF_FORMAT_ID, and PERF_FORMAT_LOST
are emitted, packed with no gaps.
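
For example, with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_ID and no other bits set, the record body is laid out as:

  struct {
          struct perf_event_header header;
          __u32 pid, tid;
          __u64 value;
          __u64 time_enabled;   /* TOTAL_TIME_ENABLED */
          __u64 id;             /* ID; time_running and lost are
                                 * simply not emitted */
  };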

perf_event__read_swap() unconditionally byte-swapped time_enabled,
time_running, and id at their fixed struct offsets, causing
out-of-bounds access on smaller events and swapping the wrong
bytes when not all format fields are present. It also applied the
sample_id_all swap at a fixed offset (&event->read + 1), which is
wrong for any record smaller than the full struct.

Replace the individual field swaps with a single mem_bswap_64()
over the entire tail from value onward. Since every field after
pid/tid is u64 regardless of which combination is present, this
correctly handles any read_format combination and any trailing
sample_id_all fields.

Similarly, dump_read() accessed optional fields via fixed struct
offsets, displaying values from wrong positions when not all
format bits are set. Walk the packed u64 array sequentially
instead, with bounds checks against event->header.size.
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/session.c | 61 +++++++++++++++++++++++++++------------
1 file changed, 43 insertions(+), 18 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index aae0651fb6f025a1..20b70d6fb7cc8ed4 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -354,17 +354,22 @@ static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
swap_sample_id_all(event, &event->fork + 1);
}

-static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
+static void perf_event__read_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
- event->read.pid = bswap_32(event->read.pid);
- event->read.tid = bswap_32(event->read.tid);
- event->read.value = bswap_64(event->read.value);
- event->read.time_enabled = bswap_64(event->read.time_enabled);
- event->read.time_running = bswap_64(event->read.time_running);
- event->read.id = bswap_64(event->read.id);
+ size_t tail;
- if (sample_id_all)
- swap_sample_id_all(event, &event->read + 1);
+ event->read.pid = bswap_32(event->read.pid);
+ event->read.tid = bswap_32(event->read.tid);
+ /*
+ * Everything after pid/tid is u64: the read values (variable
+ * set determined by attr.read_format, which we don't have
+ * here) optionally followed by sample_id_all fields.
+ * Since all are u64, swap the entire remaining tail at once.
+ */
+ tail = event->header.size - offsetof(struct perf_record_read, value);
+ tail &= ~(size_t)(sizeof(__u64) - 1);
+ mem_bswap_64(&event->read.value, tail);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
@@ -1198,8 +1203,9 @@ static void dump_deferred_callchain(struct evsel *evsel, union perf_event *event
static void dump_read(struct evsel *evsel, union perf_event *event)
{
- struct perf_record_read *read_event = &event->read;
u64 read_format;
+ __u64 *array;
+ void *end;

if (!dump_trace)
return;
@@ -1211,18 +1217,37 @@ static void dump_read(struct evsel *evsel, union perf_event *event)
return;

read_format = evsel->core.attr.read_format;
+ /*
+ * The kernel packs only the enabled read_format fields
+ * after value, with no gaps. Walk the packed array
+ * instead of using fixed struct offsets.
+ */
+ array = &event->read.value + 1;
+ end = (void *)event + event->header.size;
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
- printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ if ((void *)(array + 1) > end)
+ return;
+ printf("... time enabled : %" PRI_lu64 "\n", *array++);
+ }
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
- printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ if ((void *)(array + 1) > end)
+ return;
+ printf("... time running : %" PRI_lu64 "\n", *array++);
+ }
- if (read_format & PERF_FORMAT_ID)
- printf("... id : %" PRI_lu64 "\n", read_event->id);
+ if (read_format & PERF_FORMAT_ID) {
+ if ((void *)(array + 1) > end)
+ return;
+ printf("... id : %" PRI_lu64 "\n", *array++);
+ }
- if (read_format & PERF_FORMAT_LOST)
- printf("... lost : %" PRI_lu64 "\n", read_event->lost);
+ if (read_format & PERF_FORMAT_LOST) {
+ if ((void *)(array + 1) > end)
+ return;
+ printf("... lost : %" PRI_lu64 "\n", *array++);
+ }
}
static struct machine *machines__find_for_cpumode(struct machines *machines,
--
2.54.0
* [PATCH 06/28] perf session: Align auxtrace_info priv size before byte-swapping
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (4 preceding siblings ...)
2026-05-10 3:33 ` [PATCH 05/28] perf session: Fix PERF_RECORD_READ swap and dump for variable-length events Arnaldo Carvalho de Melo
@ 2026-05-10 3:33 ` Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 07/28] perf session: Add validated swap infrastructure with null-termination checks Arnaldo Carvalho de Melo
` (21 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:33 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>

perf_event__auxtrace_info_swap() passes the raw remainder of
header.size to mem_bswap_64(), which swaps in 8-byte chunks.
If the size is not a multiple of 8, the last iteration reads and
writes 8 bytes from a shorter region, overrunning the event
buffer by up to 7 bytes.

Round down to a u64 boundary — priv[] is a u64 array, so any
unaligned tail is padding that doesn't need swapping.
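
For example, with 27 bytes left after the priv offset, the old code
performed four 8-byte swaps covering bytes 0..31, running 5 bytes
past the event; with the mask, 27 & ~7 == 24, so exactly three
complete u64s are swapped and the 3-byte tail is left alone.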

Also fix swap_sample_id_all(), which had the same issue — replace
the BUG_ON(size % sizeof(u64)) with rounding down, since crafted
events may have unaligned trailing data.

Note: the strlen calls in string-field swap handlers (comm,
mmap, mmap2, cgroup) are replaced with bounded strnlen by the
next patch in this series ("perf session: Add validated swap
infrastructure with null-termination checks").

Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/session.c | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 20b70d6fb7cc8ed4..b55c5168ee9f4aae 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -276,10 +276,16 @@ void perf_session__delete(struct perf_session *session)
static void swap_sample_id_all(union perf_event *event, void *data)
{
void *end = (void *) event + event->header.size;
- int size = end - data;
+ int size;
- BUG_ON(size % sizeof(u64));
- mem_bswap_64(data, size);
+ if (data >= end)
+ return;
+
+ size = end - data;
+ /* Only swap complete 8-byte elements */
+ size &= ~(int)(sizeof(u64) - 1);
+ if (size > 0)
+ mem_bswap_64(data, size);
}
static void perf_event__all64_swap(union perf_event *event,
@@ -585,6 +591,8 @@ static void perf_event__auxtrace_info_swap(union perf_event *event,
size = event->header.size;
size -= (void *)&event->auxtrace_info.priv - (void *)event;
+ /* priv[] is a u64 array; only swap complete 8-byte elements */
+ size &= ~(size_t)(sizeof(u64) - 1);
mem_bswap_64(event->auxtrace_info.priv, size);
}
--
2.54.0
* [PATCH 07/28] perf session: Add validated swap infrastructure with null-termination checks
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (5 preceding siblings ...)
2026-05-10 3:33 ` [PATCH 06/28] perf session: Align auxtrace_info priv size before byte-swapping Arnaldo Carvalho de Melo
@ 2026-05-10 3:33 ` Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 08/28] perf session: Use bounded copy for PERF_RECORD_TIME_CONV Arnaldo Carvalho de Melo
` (20 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:33 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>

Change swap callbacks from void to int return so handlers can
propagate errors. All 28 existing handlers are converted to
return 0 on success, -1 on error. Three new handlers (KSYMBOL,
BPF_EVENT, HEADER_FEATURE) are added returning int from the
start, with sample_id_all handling for the kernel event types.

event_swap() propagates the return to its callers (process_event
and peek_event), which skip events that fail to swap.
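
After the conversion the dispatcher is, roughly (a sketch, not the
literal hunk):

  static int event_swap(union perf_event *event, bool sample_id_all)
  {
          perf_event__swap_op swap;

          if (event->header.type >= PERF_RECORD_HEADER_MAX)
                  return -1;

          swap = perf_event__swap_ops[event->header.type];
          return swap ? swap(event, sample_id_all) : 0;
  }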

Add perf_event__check_nul() for null-termination enforcement
on the common event delivery path for MMAP, MMAP2, COMM,
CGROUP, and KSYMBOL events. Events with unterminated strings
are skipped — native-endian files are mapped read-only, so
writing a NUL byte in place would segfault.
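
perf_event__check_nul() amounts to a bounded scan along these lines
(a sketch; the real helper also has to know the per-type string
offset):

  static int perf_event__check_nul(const char *str,
                                   const union perf_event *event)
  {
          const char *end = (const char *)event + event->header.size;

          /* No NUL before the event boundary: reject the event. */
          return memchr(str, '\0', end - str) ? 0 : -1;
  }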

Swap handler hardening:

- Use strnlen bounded by event size (instead of strlen) in
  COMM/MMAP/MMAP2/CGROUP swap handlers, returning -1 on
  unterminated strings.

- Bounds check text_poke old_len+new_len before computing the
  sample_id offset, returning -1 on overflow. Use offsetof()
  for the native-path check in machines__deliver_event() since
  sizeof() includes struct padding past the flexible array.

- Fix PERF_RECORD_SWITCH sample_id_all: non-CPU_WIDE SWITCH
  events have sample_id immediately after the 8-byte header,
  not at sizeof(struct perf_record_switch), which is the
  CPU_WIDE variant size.

- Fix perf_event__time_conv_swap() guard: check time_mask
  (the last field accessed) instead of time_cycles, so a
  short event that fits time_cycles but not time_mask does
  not cause an out-of-bounds bswap_64.

- Handle ABI0 (attr.size == 0) in perf_event__attr_swap()
  by substituting PERF_ATTR_SIZE_VER0, so bswap_safe()
  correctly swaps VER0 fields instead of skipping everything.

- peek_events: initialize the event pointer to NULL to avoid
  dereferencing stack garbage on early peek_event() failure;
  on swap failure, advance past the malformed entry instead
  of aborting the loop.

Note: the nr-field bounds checks for namespaces, thread_map,
cpu_map, and stat_config arrays are added by a subsequent
patch ("perf session: Validate nr fields against event size
on both swap and common paths"). The HEADER_ATTR attr.size
validation is added by "perf session: Validate HEADER_ATTR
alignment and attr.size before swapping".

By establishing the int-returning swap infrastructure first,
all subsequent hardening patches can use direct error returns
from day one — no poison values, no workarounds for void return.

Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/session.c | 411 ++++++++++++++++++++++++++++++--------
1 file changed, 323 insertions(+), 88 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index b55c5168ee9f4aae..18e60ccf6829f05a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -288,28 +288,43 @@ static void swap_sample_id_all(union perf_event *event, void *data)
mem_bswap_64(data, size);
}
-static void perf_event__all64_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__all64_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
struct perf_event_header *hdr = &event->header;
- mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
+ size_t size = event->header.size - sizeof(*hdr);
+
+ /* Round down: mem_bswap_64() would overrun on unaligned tail */
+ size &= ~(size_t)(sizeof(u64) - 1);
+ mem_bswap_64(hdr + 1, size);
+ return 0;
}
-static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
+static int perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
event->comm.pid = bswap_32(event->comm.pid);
event->comm.tid = bswap_32(event->comm.tid);
if (sample_id_all) {
void *data = &event->comm.comm;
+ void *end = (void *)event + event->header.size;
+ size_t len = strnlen(data, end - data);
- data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
+ /*
+ * No NUL within the event boundary — can't locate where
+ * sample_id_all starts. Reject so the event is skipped
+ * rather than swapping garbage.
+ */
+ if (len == (size_t)(end - data))
+ return -1;
+ data += PERF_ALIGN(len + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
+ return 0;
}
-static void perf_event__mmap_swap(union perf_event *event,
- bool sample_id_all)
+static int perf_event__mmap_swap(union perf_event *event,
+ bool sample_id_all)
{
event->mmap.pid = bswap_32(event->mmap.pid);
event->mmap.tid = bswap_32(event->mmap.tid);
@@ -319,13 +334,19 @@ static void perf_event__mmap_swap(union perf_event *event,
if (sample_id_all) {
void *data = &event->mmap.filename;
+ void *end = (void *)event + event->header.size;
+ size_t len = strnlen(data, end - data);
- data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
+ /* See comment in perf_event__comm_swap() */
+ if (len == (size_t)(end - data))
+ return -1;
+ data += PERF_ALIGN(len + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
+ return 0;
}
-static void perf_event__mmap2_swap(union perf_event *event,
+static int perf_event__mmap2_swap(union perf_event *event,
bool sample_id_all)
{
event->mmap2.pid = bswap_32(event->mmap2.pid);
@@ -343,12 +364,19 @@ static void perf_event__mmap2_swap(union perf_event *event,
if (sample_id_all) {
void *data = &event->mmap2.filename;
+ void *end = (void *)event + event->header.size;
+ size_t len = strnlen(data, end - data);
- data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
+ /* See comment in perf_event__comm_swap() */
+ if (len == (size_t)(end - data))
+ return -1;
+ data += PERF_ALIGN(len + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
+ return 0;
}
-static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
+
+static int perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
event->fork.pid = bswap_32(event->fork.pid);
event->fork.tid = bswap_32(event->fork.tid);
@@ -358,10 +386,11 @@ static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
if (sample_id_all)
swap_sample_id_all(event, &event->fork + 1);
+ return 0;
}
-static void perf_event__read_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__read_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
size_t tail;
@@ -376,9 +405,10 @@ static void perf_event__read_swap(union perf_event *event,
tail = event->header.size - offsetof(struct perf_record_read, value);
tail &= ~(size_t)(sizeof(__u64) - 1);
mem_bswap_64(&event->read.value, tail);
+ return 0;
}
-static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
+static int perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
event->aux.aux_offset = bswap_64(event->aux.aux_offset);
event->aux.aux_size = bswap_64(event->aux.aux_size);
@@ -386,19 +416,21 @@ static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
if (sample_id_all)
swap_sample_id_all(event, &event->aux + 1);
+ return 0;
}
-static void perf_event__itrace_start_swap(union perf_event *event,
- bool sample_id_all)
+static int perf_event__itrace_start_swap(union perf_event *event,
+ bool sample_id_all)
{
event->itrace_start.pid = bswap_32(event->itrace_start.pid);
event->itrace_start.tid = bswap_32(event->itrace_start.tid);
if (sample_id_all)
swap_sample_id_all(event, &event->itrace_start + 1);
+ return 0;
}
-static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
+static int perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
event->context_switch.next_prev_pid =
@@ -407,30 +439,45 @@ static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
bswap_32(event->context_switch.next_prev_tid);
}
- if (sample_id_all)
- swap_sample_id_all(event, &event->context_switch + 1);
+ if (sample_id_all) {
+ /*
+ * PERF_RECORD_SWITCH has no fields beyond the header;
+ * SWITCH_CPU_WIDE adds pid/tid. Use the right offset
+ * so sample_id starts at the correct position.
+ */
+ if (event->header.type == PERF_RECORD_SWITCH)
+ swap_sample_id_all(event, (void *)event + sizeof(event->header));
+ else
+ swap_sample_id_all(event, &event->context_switch + 1);
+ }
+ return 0;
}
-static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
+static int perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
event->text_poke.addr = bswap_64(event->text_poke.addr);
event->text_poke.old_len = bswap_16(event->text_poke.old_len);
event->text_poke.new_len = bswap_16(event->text_poke.new_len);
if (sample_id_all) {
+ void *data = &event->text_poke.old_len;
+ void *end = (void *)event + event->header.size;
size_t len = sizeof(event->text_poke.old_len) +
sizeof(event->text_poke.new_len) +
event->text_poke.old_len +
event->text_poke.new_len;
- void *data = &event->text_poke.old_len;
+ /* old_len + new_len exceeds event — can't find sample_id_all */
+ if (data + len > end)
+ return -1;
data += PERF_ALIGN(len, sizeof(u64));
swap_sample_id_all(event, data);
}
+ return 0;
}
-static void perf_event__throttle_swap(union perf_event *event,
- bool sample_id_all)
+static int perf_event__throttle_swap(union perf_event *event,
+ bool sample_id_all)
{
event->throttle.time = bswap_64(event->throttle.time);
event->throttle.id = bswap_64(event->throttle.id);
@@ -438,10 +485,11 @@ static void perf_event__throttle_swap(union perf_event *event,
if (sample_id_all)
swap_sample_id_all(event, &event->throttle + 1);
+ return 0;
}
-static void perf_event__namespaces_swap(union perf_event *event,
- bool sample_id_all)
+static int perf_event__namespaces_swap(union perf_event *event,
+ bool sample_id_all)
{
u64 i;
@@ -458,18 +506,25 @@ static void perf_event__namespaces_swap(union perf_event *event,
if (sample_id_all)
swap_sample_id_all(event, &event->namespaces.link_info[i]);
+ return 0;
}
-static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
+static int perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
event->cgroup.id = bswap_64(event->cgroup.id);
if (sample_id_all) {
void *data = &event->cgroup.path;
+ void *end = (void *)event + event->header.size;
+ size_t len = strnlen(data, end - data);
- data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
+ /* See comment in perf_event__comm_swap() */
+ if (len == (size_t)(end - data))
+ return -1;
+ data += PERF_ALIGN(len + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
+ return 0;
}
static u8 revbyte(u8 b)
@@ -510,9 +565,19 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
attr->type = bswap_32(attr->type);
attr->size = bswap_32(attr->size);
-#define bswap_safe(f, n) \
- (attr->size > (offsetof(struct perf_event_attr, f) + \
- sizeof(attr->f) * (n)))
+ /*
+ * ABI0: size == 0 means the producer didn't set it.
+ * Assume PERF_ATTR_SIZE_VER0 so bswap_safe() below
+ * correctly swaps the VER0 fields instead of skipping
+ * everything. Same convention as read_attr().
+ */
+ if (!attr->size)
+ attr->size = PERF_ATTR_SIZE_VER0;
+
+/* Verify the full field extent fits, not just its start offset */
+#define bswap_safe(f, n) \
+ (attr->size >= (offsetof(struct perf_event_attr, f) + \
+ sizeof(attr->f) * ((n) + 1)))
#define bswap_field(f, sz) \
do { \
if (bswap_safe(f, 0)) \
@@ -550,8 +615,8 @@ do { \
#undef bswap_safe
}
-static void perf_event__hdr_attr_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__hdr_attr_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
size_t size;
@@ -560,30 +625,34 @@ static void perf_event__hdr_attr_swap(union perf_event *event,
size = event->header.size;
size -= perf_record_header_attr_id(event) - (void *)event;
mem_bswap_64(perf_record_header_attr_id(event), size);
+ return 0;
}
-static void perf_event__event_update_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__event_update_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
event->event_update.type = bswap_64(event->event_update.type);
event->event_update.id = bswap_64(event->event_update.id);
+ return 0;
}
-static void perf_event__event_type_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__event_type_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
event->event_type.event_type.event_id =
bswap_64(event->event_type.event_type.event_id);
+ return 0;
}
-static void perf_event__tracing_data_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__tracing_data_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
event->tracing_data.size = bswap_32(event->tracing_data.size);
+ return 0;
}
-static void perf_event__auxtrace_info_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__auxtrace_info_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
size_t size;
@@ -594,10 +663,11 @@ static void perf_event__auxtrace_info_swap(union perf_event *event,
/* priv[] is a u64 array; only swap complete 8-byte elements */
size &= ~(size_t)(sizeof(u64) - 1);
mem_bswap_64(event->auxtrace_info.priv, size);
+ return 0;
}
-static void perf_event__auxtrace_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__auxtrace_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
event->auxtrace.size = bswap_64(event->auxtrace.size);
event->auxtrace.offset = bswap_64(event->auxtrace.offset);
@@ -605,10 +675,11 @@ static void perf_event__auxtrace_swap(union perf_event *event,
event->auxtrace.idx = bswap_32(event->auxtrace.idx);
event->auxtrace.tid = bswap_32(event->auxtrace.tid);
event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
+ return 0;
}
-static void perf_event__auxtrace_error_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__auxtrace_error_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
@@ -623,10 +694,11 @@ static void perf_event__auxtrace_error_swap(union perf_event *event,
event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
}
+ return 0;
}
-static void perf_event__thread_map_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__thread_map_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
unsigned i;
@@ -634,10 +706,11 @@ static void perf_event__thread_map_swap(union perf_event *event,
for (i = 0; i < event->thread_map.nr; i++)
event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
+ return 0;
}
-static void perf_event__cpu_map_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__cpu_map_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
struct perf_record_cpu_map_data *data = &event->cpu_map.data;
@@ -675,20 +748,22 @@ static void perf_event__cpu_map_swap(union perf_event *event,
default:
break;
}
+ return 0;
}
-static void perf_event__stat_config_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__stat_config_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
u64 size;
size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
size += 1; /* nr item itself */
mem_bswap_64(&event->stat_config.nr, size);
+ return 0;
}
-static void perf_event__stat_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__stat_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
event->stat.id = bswap_64(event->stat.id);
event->stat.thread = bswap_32(event->stat.thread);
@@ -696,44 +771,90 @@ static void perf_event__stat_swap(union perf_event *event,
event->stat.val = bswap_64(event->stat.val);
event->stat.ena = bswap_64(event->stat.ena);
event->stat.run = bswap_64(event->stat.run);
+ return 0;
}
-static void perf_event__stat_round_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__stat_round_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
event->stat_round.type = bswap_64(event->stat_round.type);
event->stat_round.time = bswap_64(event->stat_round.time);
+ return 0;
}
-static void perf_event__time_conv_swap(union perf_event *event,
- bool sample_id_all __maybe_unused)
+static int perf_event__time_conv_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);
- if (event_contains(event->time_conv, time_cycles)) {
+ if (event_contains(event->time_conv, time_mask)) {
event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
}
+ return 0;
}
-static void
+static int
perf_event__schedstat_cpu_swap(union perf_event *event __maybe_unused,
bool sample_id_all __maybe_unused)
{
/* FIXME */
+ return 0;
}
-static void
+static int
perf_event__schedstat_domain_swap(union perf_event *event __maybe_unused,
bool sample_id_all __maybe_unused)
{
/* FIXME */
+ return 0;
+}
+
+static int perf_event__ksymbol_swap(union perf_event *event,
+ bool sample_id_all)
+{
+ event->ksymbol.addr = bswap_64(event->ksymbol.addr);
+ event->ksymbol.len = bswap_32(event->ksymbol.len);
+ event->ksymbol.ksym_type = bswap_16(event->ksymbol.ksym_type);
+ event->ksymbol.flags = bswap_16(event->ksymbol.flags);
+
+ if (sample_id_all) {
+ void *data = &event->ksymbol.name;
+ void *end = (void *)event + event->header.size;
+ size_t len = strnlen(data, end - data);
+
+ /* See comment in perf_event__comm_swap() */
+ if (len == (size_t)(end - data))
+ return -1;
+ data += PERF_ALIGN(len + 1, sizeof(u64));
+ swap_sample_id_all(event, data);
+ }
+ return 0;
}
-typedef void (*perf_event__swap_op)(union perf_event *event,
- bool sample_id_all);
+static int perf_event__bpf_event_swap(union perf_event *event,
+ bool sample_id_all)
+{
+ event->bpf.type = bswap_16(event->bpf.type);
+ event->bpf.flags = bswap_16(event->bpf.flags);
+ event->bpf.id = bswap_32(event->bpf.id);
+
+ if (sample_id_all)
+ swap_sample_id_all(event, &event->bpf + 1);
+ return 0;
+}
+
+static int perf_event__header_feature_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ event->feat.feat_id = bswap_64(event->feat.feat_id);
+ return 0;
+}
+
+typedef int (*perf_event__swap_op)(union perf_event *event,
+ bool sample_id_all);
static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_MMAP] = perf_event__mmap_swap,
@@ -753,6 +874,8 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
[PERF_RECORD_CGROUP] = perf_event__cgroup_swap,
+ [PERF_RECORD_KSYMBOL] = perf_event__ksymbol_swap,
+ [PERF_RECORD_BPF_EVENT] = perf_event__bpf_event_swap,
[PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap,
[PERF_RECORD_AUX_OUTPUT_HW_ID] = perf_event__all64_swap,
[PERF_RECORD_CALLCHAIN_DEFERRED] = perf_event__all64_swap,
@@ -760,6 +883,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
[PERF_RECORD_HEADER_BUILD_ID] = NULL,
+ [PERF_RECORD_HEADER_FEATURE] = perf_event__header_feature_swap,
[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
@@ -1484,6 +1608,25 @@ static int session__flush_deferred_samples(struct perf_session *session,
return ret;
}
+/*
+ * Return true if the string field is properly null-terminated
+ * within the event boundary. Native-endian files are mapped
+ * read-only (MAP_SHARED + PROT_READ) so we cannot write a
+ * null byte in place; skip the event instead.
+ */
+static bool perf_event__check_nul(const char *str, const void *end, const char *event_name)
+{
+ size_t max_len = (const char *)end - str;
+
+ if (max_len == 0 || strnlen(str, max_len) == max_len) {
+ pr_warning("WARNING: PERF_RECORD_%s: string not null-terminated, skipping event\n",
+ event_name);
+ return false;
+ }
+
+ return true;
+}
+
static int machines__deliver_event(struct machines *machines,
struct evlist *evlist,
union perf_event *event,
@@ -1534,16 +1677,32 @@ static int machines__deliver_event(struct machines *machines,
}
return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
+ if (!perf_event__check_nul(event->mmap.filename,
+ (void *)event + event->header.size,
+ "MMAP"))
+ return 0;
return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_MMAP2:
if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
++evlist->stats.nr_proc_map_timeout;
+ if (!perf_event__check_nul(event->mmap2.filename,
+ (void *)event + event->header.size,
+ "MMAP2"))
+ return 0;
return tool->mmap2(tool, event, sample, machine);
case PERF_RECORD_COMM:
+ if (!perf_event__check_nul(event->comm.comm,
+ (void *)event + event->header.size,
+ "COMM"))
+ return 0;
return tool->comm(tool, event, sample, machine);
case PERF_RECORD_NAMESPACES:
return tool->namespaces(tool, event, sample, machine);
case PERF_RECORD_CGROUP:
+ if (!perf_event__check_nul(event->cgroup.path,
+ (void *)event + event->header.size,
+ "CGROUP"))
+ return 0;
return tool->cgroup(tool, event, sample, machine);
case PERF_RECORD_FORK:
return tool->fork(tool, event, sample, machine);
@@ -1582,11 +1741,25 @@ static int machines__deliver_event(struct machines *machines,
case PERF_RECORD_SWITCH_CPU_WIDE:
return tool->context_switch(tool, event, sample, machine);
case PERF_RECORD_KSYMBOL:
+ if (!perf_event__check_nul(event->ksymbol.name,
+ (void *)event + event->header.size,
+ "KSYMBOL"))
+ return 0;
return tool->ksymbol(tool, event, sample, machine);
case PERF_RECORD_BPF_EVENT:
return tool->bpf(tool, event, sample, machine);
- case PERF_RECORD_TEXT_POKE:
+ case PERF_RECORD_TEXT_POKE: {
+ /* offsetof(bytes), not sizeof — sizeof includes padding past the flexible array */
+ size_t text_poke_len = offsetof(struct perf_record_text_poke_event, bytes) +
+ event->text_poke.old_len +
+ event->text_poke.new_len;
+
+ if (event->header.size < text_poke_len) {
+ pr_warning("WARNING: PERF_RECORD_TEXT_POKE: old_len+new_len exceeds event, skipping\n");
+ return 0;
+ }
return tool->text_poke(tool, event, sample, machine);
+ }
case PERF_RECORD_AUX_OUTPUT_HW_ID:
return tool->aux_output_hw_id(tool, event, sample, machine);
case PERF_RECORD_CALLCHAIN_DEFERRED:
@@ -1792,19 +1965,40 @@ int perf_session__deliver_synth_attr_event(struct perf_session *session,
return perf_session__deliver_synth_event(session, &ev.ev, NULL);
}
+static int event_swap(union perf_event *event, bool sample_id_all)
+{
+ perf_event__swap_op swap;
+
+ /* Prevent OOB read on perf_event__swap_ops[] from crafted type */
+ if (event->header.type >= PERF_RECORD_HEADER_MAX)
+ return 0;
+
+ swap = perf_event__swap_ops[event->header.type];
+ if (swap)
+ return swap(event, sample_id_all);
+ return 0;
+}
+
/*
* Minimum event sizes indexed by type. Checked before swap and
* processing so that both cross-endian and native-endian paths
* are protected from accessing fields past the event boundary.
* Zero means no minimum beyond the 8-byte header (already
* enforced by the reader).
+ *
+ * These values represent the smallest event the kernel has ever
+ * emitted for each type, so they do not reject legitimate legacy
+ * perf.data files from older kernels. Variable-length events
+ * use offsetof() to the first variable field; the variable
+ * content is validated separately (e.g., perf_event__check_nul).
*/
static const u32 perf_event__min_size[PERF_RECORD_HEADER_MAX] = {
/*
* offsetof() for types with a trailing variable-length string
* (filename, comm, path, name, msg): sizeof() includes a
* PATH_MAX or fixed-size array, but valid events only need
- * the fixed fields. Null-termination is checked separately.
+ * the fixed fields. Null-termination is checked separately
+ * by perf_event__check_nul().
*
* PERF_RECORD_SAMPLE is omitted: all64_swap is bounded by
* header.size, and the internal layout varies by sample_type
@@ -1812,6 +2006,7 @@ static const u32 perf_event__min_size[PERF_RECORD_HEADER_MAX] = {
*/
[PERF_RECORD_MMAP] = offsetof(struct perf_record_mmap, filename),
[PERF_RECORD_LOST] = sizeof(struct perf_record_lost),
+ /* comm[] is variable-length; kernel aligns to 8 bytes */
[PERF_RECORD_COMM] = offsetof(struct perf_record_comm, comm),
[PERF_RECORD_EXIT] = sizeof(struct perf_record_fork),
[PERF_RECORD_THROTTLE] = sizeof(struct perf_record_throttle),
@@ -1819,7 +2014,9 @@ static const u32 perf_event__min_size[PERF_RECORD_HEADER_MAX] = {
[PERF_RECORD_FORK] = sizeof(struct perf_record_fork),
/*
* The kernel dynamically sizes PERF_RECORD_READ based on
- * attr.read_format — the minimum has just pid + tid + value.
+ * attr.read_format — only the enabled fields are emitted,
+ * packed with no gaps. The minimum valid event has just
+ * pid + tid + one u64 value (no optional fields).
*/
[PERF_RECORD_READ] = offsetof(struct perf_record_read, time_enabled),
[PERF_RECORD_MMAP2] = offsetof(struct perf_record_mmap2, filename),
@@ -1841,14 +2038,25 @@ static const u32 perf_event__min_size[PERF_RECORD_HEADER_MAX] = {
[PERF_RECORD_AUXTRACE] = sizeof(struct perf_record_auxtrace),
[PERF_RECORD_AUXTRACE_ERROR] = offsetof(struct perf_record_auxtrace_error, msg),
[PERF_RECORD_THREAD_MAP] = sizeof(struct perf_record_thread_map),
- /* Smallest valid variant is RANGE_CPUS: header(8) + type(2) + range(6) */
+ /*
+ * sizeof(perf_record_cpu_map) is 20 because the outer struct
+ * isn't packed and GCC adds 2 bytes of trailing padding.
+ * The smallest valid variant (RANGE_CPUS) is only 16 bytes:
+ * header(8) + type(2) + range_cpu_data(6). Per-variant
+ * bounds are checked in the swap handler via payload.
+ */
[PERF_RECORD_CPU_MAP] = sizeof(struct perf_event_header) +
sizeof(__u16) +
sizeof(struct perf_record_range_cpu_map),
[PERF_RECORD_STAT_CONFIG] = sizeof(struct perf_record_stat_config),
[PERF_RECORD_STAT] = sizeof(struct perf_record_stat),
[PERF_RECORD_STAT_ROUND] = sizeof(struct perf_record_stat_round),
- /* Union inflates sizeof; use fixed header fields as minimum */
+ /*
+ * EVENT_UPDATE has a union whose largest member (cpus)
+ * inflates sizeof to 40, but SCALE events are only 32
+ * and UNIT/NAME events can be even smaller. Use the
+ * fixed header fields (header + type + id) as minimum.
+ */
[PERF_RECORD_EVENT_UPDATE] = offsetof(struct perf_record_event_update, scale),
[PERF_RECORD_TIME_CONV] = offsetof(struct perf_record_time_conv, time_cycles),
[PERF_RECORD_ID_INDEX] = sizeof(struct perf_record_id_index),
@@ -1866,19 +2074,6 @@ static const u32 perf_event__min_size[PERF_RECORD_HEADER_MAX] = {
[PERF_RECORD_SCHEDSTAT_DOMAIN] = offsetof(struct perf_record_schedstat_domain, v15),
};
-static void event_swap(union perf_event *event, bool sample_id_all)
-{
- perf_event__swap_op swap;
-
- /* Prevent OOB read on perf_event__swap_ops[] from crafted type */
- if (event->header.type >= PERF_RECORD_HEADER_MAX)
- return;
-
- swap = perf_event__swap_ops[event->header.type];
- if (swap)
- swap(event, sample_id_all);
-}
-
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
void *buf, size_t buf_sz,
union perf_event **event_ptr,
@@ -1896,7 +2091,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
if (event->header.size < sizeof(struct perf_event_header))
return -1;
- /* Reject undersized events on the native-endian fast path */
if (event->header.type < PERF_RECORD_HEADER_MAX) {
u32 min_sz = perf_event__min_size[event->header.type];
@@ -1935,7 +2129,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
if (readn(fd, buf, rest) != (ssize_t)rest)
return -1;
- /* Reject undersized events before swapping */
if (event->header.type < PERF_RECORD_HEADER_MAX) {
u32 min_sz = perf_event__min_size[event->header.type];
@@ -1949,8 +2142,16 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
}
}
- if (session->header.needs_swap)
- event_swap(event, evlist__sample_id_all(session->evlist));
+ if (session->header.needs_swap &&
+ event_swap(event, evlist__sample_id_all(session->evlist))) {
+ /*
+ * The header was already swapped so header.size is
+ * valid — expose the event so callers can advance
+ * past this malformed entry instead of aborting.
+ */
+ *event_ptr = event;
+ return -1;
+ }
out_parse_sample:
@@ -1968,15 +2169,34 @@ int perf_session__peek_events(struct perf_session *session, u64 offset,
{
u64 max_offset = offset + size;
char buf[PERF_SAMPLE_MAX_SIZE];
- union perf_event *event;
+ /*
+ * Initialized to NULL so the first-iteration error path
+ * doesn't dereference stack garbage. On subsequent failures
+ * event may point into buf from a prior read — peek_event
+ * sets *event_ptr on min_sz and swap failures so the header
+ * is from the current (failed) event, not a stale one.
+ */
+ union perf_event *event = NULL;
int err;
do {
+ event = NULL;
err = perf_session__peek_event(session, offset, buf,
PERF_SAMPLE_MAX_SIZE, &event,
NULL);
- if (err)
- return err;
+ if (err) {
+ /*
+ * peek_event sets event_ptr when it read enough
+ * to know the event size (min_sz and swap failures).
+ * If event is NULL or size is 0, we can't advance
+ * and must abort. Otherwise skip past this entry.
+ */
+ if (event && event->header.size)
+ offset += event->header.size;
+ else
+ return err;
+ continue;
+ }
err = cb(session, event, offset, data);
if (err)
@@ -2014,8 +2234,12 @@ static s64 perf_session__process_event(struct perf_session *session,
}
}
- if (session->header.needs_swap)
- event_swap(event, evlist__sample_id_all(evlist));
+ if (session->header.needs_swap &&
+ event_swap(event, evlist__sample_id_all(evlist))) {
+ pr_warning("WARNING: swap failed for %s event, skipping\n",
+ perf_event__name(event->header.type));
+ return 0;
+ }
if (event->header.type >= PERF_RECORD_HEADER_MAX) {
/* perf should not support unaligned event, stop here. */
@@ -2496,6 +2720,17 @@ reader__mmap(struct reader *rd, struct perf_session *session)
char *buf, **mmaps = rd->mmaps;
u64 page_offset;
+ /*
+ * Native-endian: MAP_SHARED + PROT_READ — the kernel
+ * guarantees page-level coherence but a concurrent writer
+ * could modify the file between validation and use. This
+ * is a theoretical TOCTOU that affects the entire perf.data
+ * processing pipeline; fixing it would require copying each
+ * event to a private buffer before processing.
+ *
+ * Cross-endian: MAP_PRIVATE + PROT_WRITE — swap handlers
+ * get a copy-on-write snapshot immune to concurrent writes.
+ */
mmap_prot = PROT_READ;
mmap_flags = MAP_SHARED;
--
2.54.0
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 08/28] perf session: Use bounded copy for PERF_RECORD_TIME_CONV
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (6 preceding siblings ...)
2026-05-10 3:33 ` [PATCH 07/28] perf session: Add validated swap infrastructure with null-termination checks Arnaldo Carvalho de Melo
@ 2026-05-10 3:33 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 09/28] perf session: Validate HEADER_ATTR alignment and attr.size before swapping Arnaldo Carvalho de Melo
` (19 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:33 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
session->time_conv = event->time_conv copies sizeof(struct
perf_record_time_conv) bytes unconditionally, but older kernels
emit shorter TIME_CONV events without the time_cycles, time_mask,
cap_user_time_zero, and cap_user_time_short fields.
For a 32-byte event (the original format), this reads 24 bytes
past the event boundary into adjacent mmap'd data. The garbage
values end up in session->time_conv and can cause incorrect TSC
conversion if cap_user_time_zero happens to be non-zero.
Replace the struct assignment with a bounded memcpy capped at
event->header.size, zeroing the remainder so extended fields
default to off when absent.
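For reference, the arithmetic behind the 24-byte over-read (assuming
the current struct layout in tools/lib/perf/include/perf/event.h):
	header(8) + time_shift(8) + time_mult(8) + time_zero(8) = 32 bytes
	sizeof(struct perf_record_time_conv)                    = 56 bytes
	extra bytes read by a plain struct assignment           = 24 bytes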
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/session.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 18e60ccf6829f05a..776061afd568858a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1894,7 +1894,14 @@ static s64 perf_session__process_user_event(struct perf_session *session,
err = tool->stat_round(tool, session, event);
break;
case PERF_RECORD_TIME_CONV:
- session->time_conv = event->time_conv;
+ /*
+ * Bounded copy: older kernels emit a shorter struct
+ * without time_cycles/time_mask/cap_user_time_*.
+ * Zero the rest so extended fields default to off.
+ */
+ memset(&session->time_conv, 0, sizeof(session->time_conv));
+ memcpy(&session->time_conv, &event->time_conv,
+ min((size_t)event->header.size, sizeof(session->time_conv)));
err = tool->time_conv(tool, session, event);
break;
case PERF_RECORD_HEADER_FEATURE:
--
2.54.0
^ permalink raw reply related [flat|nested] 30+ messages in thread* [PATCH 09/28] perf session: Validate HEADER_ATTR alignment and attr.size before swapping
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (7 preceding siblings ...)
2026-05-10 3:33 ` [PATCH 08/28] perf session: Use bounded copy for PERF_RECORD_TIME_CONV Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 10/28] perf session: Validate nr fields against event size on both swap and common paths Arnaldo Carvalho de Melo
` (18 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
Harden PERF_RECORD_HEADER_ATTR handling against crafted perf.data (the combined check is sketched after this list):
- Reject unaligned events (header.size not a multiple of u64).
- Validate attr.size: must be >= PERF_ATTR_SIZE_VER0, a multiple
of sizeof(u64), and fit within the event payload.
- Copy only min(attr.size, sizeof(struct perf_event_attr)) bytes
into a local attr, zeroing the rest so legacy files don't leak
adjacent event data into new fields.
- Keep the original attr.size so perf_event__synthesize_attr()
uses it for both allocation and ID-array placement.
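A minimal sketch of the combined constraint (helper name illustrative,
not part of the patch):
	static bool hdr_attr_sane(u32 header_size, u32 attr_size)
	{
		if (header_size < sizeof(struct perf_event_header) +
				  PERF_ATTR_SIZE_VER0 ||
		    header_size % sizeof(u64))
			return false;
		if (attr_size == 0)	/* ABI0 producer: assume VER0 */
			attr_size = PERF_ATTR_SIZE_VER0;
		return attr_size >= PERF_ATTR_SIZE_VER0 &&
		       attr_size % sizeof(u64) == 0 &&
		       attr_size <= header_size - sizeof(struct perf_event_header);
	}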
Fix perf_event__synthesize_attr() to use attr->size (not the
compiled sizeof) for event allocation and layout, so perf inject
correctly re-synthesizes attrs from files recorded by a different
perf version. Without this, the ID array destination pointer
(computed via perf_record_header_attr_id()) would be inconsistent
with the allocation when attr->size differs from sizeof.
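Concretely, both values must derive from the same attr->size (the
layout below mirrors the comment added in the diff; attr.size is
validated or defaulted to an 8-byte multiple, so PERF_ALIGN is a
no-op here):
	[header (8)] [attr (attr->size bytes)] [id0 id1 ... idN]
	event size = sizeof(header) + PERF_ALIGN(attr->size, 8) + ids * 8
	ID array   = (void *)&ev->attr.attr + attr->size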
Also fix the parse-no-sample-id-all test to set attr.size, which
is now validated, and improve error handling in read_attr() for
short reads and invalid attr sizes.
Handle ABI0 pipe/inject events where attr.size is 0: use a local
attr_size variable set to PERF_ATTR_SIZE_VER0 for both the bounded
copy and ID array position, instead of writing back to the event.
Native-endian files may be MAP_SHARED (read-only mmap), so writing
to the event buffer would SIGSEGV. The swap path handles ABI0 in
perf_event__attr_swap() which writes to the MAP_PRIVATE copy.
Also add header.size alignment check on the native-endian path
(the swap handler already checks this) to reject misaligned events
that would produce unaligned u64 ID reads.
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/tests/parse-no-sample-id-all.c | 6 ++
tools/perf/util/header.c | 94 +++++++++++++++++++++--
tools/perf/util/session.c | 33 ++++++++
tools/perf/util/synthetic-events.c | 25 +++++-
4 files changed, 149 insertions(+), 9 deletions(-)
diff --git a/tools/perf/tests/parse-no-sample-id-all.c b/tools/perf/tests/parse-no-sample-id-all.c
index 50e68b7d43aad030..8ac862c94879f3a3 100644
--- a/tools/perf/tests/parse-no-sample-id-all.c
+++ b/tools/perf/tests/parse-no-sample-id-all.c
@@ -82,6 +82,9 @@ static int test__parse_no_sample_id_all(struct test_suite *test __maybe_unused,
.type = PERF_RECORD_HEADER_ATTR,
.size = sizeof(struct test_attr_event),
},
+ .attr = {
+ .size = sizeof(struct perf_event_attr),
+ },
.id = 1,
};
struct test_attr_event event2 = {
@@ -89,6 +92,9 @@ static int test__parse_no_sample_id_all(struct test_suite *test __maybe_unused,
.type = PERF_RECORD_HEADER_ATTR,
.size = sizeof(struct test_attr_event),
},
+ .attr = {
+ .size = sizeof(struct perf_event_attr),
+ },
.id = 2,
};
struct perf_record_mmap event3 = {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f30e48eb3fc32da2..b263f83601842736 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -4770,9 +4770,15 @@ static int read_attr(int fd, struct perf_header *ph,
if (sz == 0) {
/* assume ABI0 */
sz = PERF_ATTR_SIZE_VER0;
+ } else if (sz < PERF_ATTR_SIZE_VER0) {
+ pr_debug("bad attr size %zu, expected at least %d\n",
+ sz, PERF_ATTR_SIZE_VER0);
+ errno = EINVAL;
+ return -1;
} else if (sz > our_sz) {
pr_debug("file uses a more recent and unsupported ABI"
" (%zu bytes extra)\n", sz - our_sz);
+ errno = EINVAL;
return -1;
}
/* what we have not yet read and that we know about */
@@ -4782,11 +4788,21 @@ static int read_attr(int fd, struct perf_header *ph,
ptr += PERF_ATTR_SIZE_VER0;
ret = readn(fd, ptr, left);
+ if (ret <= 0) {
+ if (ret == 0)
+ errno = EIO;
+ return -1;
+ }
}
/* read perf_file_section, ids are read in caller */
ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
+ if (ret <= 0) {
+ if (ret == 0)
+ errno = EIO;
+ return -1;
+ }
- return ret <= 0 ? -1 : 0;
+ return 0;
}
#ifdef HAVE_LIBTRACEEVENT
@@ -5094,11 +5110,40 @@ int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
- u32 i, n_ids;
+ struct perf_event_attr attr;
+ u32 i, n_ids, raw_attr_size;
u64 *ids;
+ size_t attr_size, copy_size;
struct evsel *evsel;
struct evlist *evlist = *pevlist;
+ /*
+ * HEADER_ATTR event layout (pipe/inject mode):
+ *
+ * [header (8 bytes)] [attr (attr_size bytes)] [id0 id1 ... idN]
+ * |<------------------ header.size --------------------------->|
+ *
+ * attr_size varies across perf versions: VER0 = 64 bytes,
+ * current sizeof(struct perf_event_attr) = larger. A newer
+ * producer may emit a larger attr than we understand.
+ *
+ * attr.size == 0 (ABI0) means the producer didn't set it
+ * (e.g., bench/inject-buildid, older perf). Treat as VER0.
+ *
+ * Require 8-byte alignment so the u64 ID array is aligned
+ * and attr.size fits cleanly within the payload.
+ *
+ * Read attr.size once — the event may be on a shared mmap
+ * and re-reading could yield a different value.
+ */
+ raw_attr_size = event->attr.attr.size;
+ if (event->header.size < sizeof(event->header) + PERF_ATTR_SIZE_VER0 ||
+ event->header.size % sizeof(u64) ||
+ (raw_attr_size && (raw_attr_size < PERF_ATTR_SIZE_VER0 ||
+ raw_attr_size % sizeof(u64) ||
+ raw_attr_size > event->header.size - sizeof(event->header))))
+ return -EINVAL;
+
if (dump_trace)
perf_event__fprintf_attr(event, stdout);
@@ -5108,13 +5153,46 @@ int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
return -ENOMEM;
}
- evsel = evsel__new(&event->attr.attr);
+ /*
+ * attr_size = footprint of the attr in the event — determines
+ * where the ID array starts. For ABI0, assume VER0 (64 bytes).
+ *
+ * copy_size = how much we copy into our local struct, capped at
+ * sizeof(attr) so a newer producer's larger attr doesn't
+ * overflow. Fields beyond copy_size are zeroed.
+ *
+ * Do NOT write attr_size back to the event — native-endian
+ * files use MAP_SHARED (read-only), writing would SIGSEGV.
+ * The swap path handles ABI0 in perf_event__attr_swap()
+ * which writes to the writable MAP_PRIVATE copy instead.
+ */
+ attr_size = raw_attr_size ?: PERF_ATTR_SIZE_VER0;
+ copy_size = min(attr_size, sizeof(attr));
+ memcpy(&attr, &event->attr.attr, copy_size);
+ if (copy_size < sizeof(attr))
+ memset((void *)&attr + copy_size, 0, sizeof(attr) - copy_size);
+
+ /*
+ * Normalize ABI0: the swap path sets attr.size = VER0 on the
+ * event, but the native path leaves it as 0. Set it on the
+ * local copy so perf inject re-synthesizes with consistent
+ * layout regardless of endianness.
+ */
+ attr.size = attr_size;
+
+ evsel = evsel__new(&attr);
if (evsel == NULL)
return -ENOMEM;
evlist__add(evlist, evsel);
- n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
+ /*
+ * IDs occupy the remainder after header + attr. Use attr_size
+ * (not copy_size) — even if the producer's attr is larger than
+ * our struct, the IDs start after attr_size bytes in the event.
+ * Validation above guarantees attr_size <= payload size.
+ */
+ n_ids = event->header.size - sizeof(event->header) - attr_size;
n_ids = n_ids / sizeof(u64);
/*
* We don't have the cpu and thread maps on the header, so
@@ -5124,7 +5202,13 @@ int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
return -ENOMEM;
- ids = perf_record_header_attr_id(event);
+ /*
+ * Locate IDs at attr_size bytes past the attr start in the
+ * event. Cannot use perf_record_header_attr_id() — that
+ * macro reads event->attr.attr.size, which is 0 for ABI0
+ * on the native-endian path (no swap handler to fix it up).
+ */
+ ids = (void *)&event->attr.attr + attr_size;
for (i = 0; i < n_ids; i++) {
perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 776061afd568858a..f0b716db75cef7bb 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -618,8 +618,41 @@ do { \
static int perf_event__hdr_attr_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
+ u32 attr_size, payload_size;
size_t size;
+ /*
+ * Validate alignment and attr.size (still foreign-endian)
+ * before calling perf_event__attr_swap(), which uses it via
+ * bswap_safe() to decide which fields to swap. A crafted
+ * attr.size larger than the event payload would swap past
+ * the event boundary and corrupt adjacent memory.
+ *
+ * The min_size table guarantees header.size >=
+ * sizeof(header) + PERF_ATTR_SIZE_VER0, so attr.size is
+ * safe to access.
+ */
+ if (event->header.size % sizeof(u64))
+ return -1;
+
+ attr_size = bswap_32(event->attr.attr.size);
+ /*
+ * ABI0: size field not set. This only happens in pipe/inject
+ * mode where HEADER_ATTR events carry their own attr. For
+ * regular perf.data files, read_attr() uses f_header.attr_size
+ * from the file header instead. Assume PERF_ATTR_SIZE_VER0.
+ */
+ if (!attr_size)
+ attr_size = PERF_ATTR_SIZE_VER0;
+ payload_size = event->header.size - sizeof(event->header);
+
+ if (attr_size < PERF_ATTR_SIZE_VER0 || attr_size % sizeof(u64) ||
+ attr_size > payload_size) {
+ pr_err("PERF_RECORD_HEADER_ATTR: invalid attr.size %u (min: %d, max: %u, 8-byte aligned)\n",
+ attr_size, PERF_ATTR_SIZE_VER0, payload_size);
+ return -1;
+ }
+
perf_event__attr_swap(&event->attr.attr);
size = event->header.size;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 85bee747f4cd2a73..86af854c27acb835 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -2170,11 +2170,21 @@ int perf_event__synthesize_attr(const struct perf_tool *tool, struct perf_event_
u32 ids, u64 *id, perf_event__handler_t process)
{
union perf_event *ev;
- size_t size;
+ size_t attr_size, size;
int err;
- size = sizeof(struct perf_event_attr);
- size = PERF_ALIGN(size, sizeof(u64));
+ /*
+ * Use attr->size for the event layout, not the compiled
+ * sizeof(struct perf_event_attr), so that synthesized events
+ * match the source perf.data layout. This matters for perf
+ * inject, which re-synthesizes attrs from a file that may
+ * have been recorded by a different version of perf.
+ * perf_record_header_attr_id() locates the ID array at
+ * attr->size bytes past the attr.
+ */
+ attr_size = attr->size ?: sizeof(struct perf_event_attr);
+
+ size = PERF_ALIGN(attr_size, sizeof(u64));
size += sizeof(struct perf_event_header);
size += ids * sizeof(u64);
@@ -2183,7 +2193,14 @@ int perf_event__synthesize_attr(const struct perf_tool *tool, struct perf_event_
if (ev == NULL)
return -ENOMEM;
- ev->attr.attr = *attr;
+ /*
+ * Copy only the bytes we understand; zalloc ensures that any
+ * extra bytes between sizeof(struct perf_event_attr) and
+ * attr_size are zero when the source file uses a newer, larger
+ * struct.
+ */
+ memcpy(&ev->attr.attr, attr, min(sizeof(struct perf_event_attr), attr_size));
+ ev->attr.attr.size = attr_size;
memcpy(perf_record_header_attr_id(ev), id, ids * sizeof(u64));
ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
--
2.54.0
^ permalink raw reply related [flat|nested] 30+ messages in thread* [PATCH 10/28] perf session: Validate nr fields against event size on both swap and common paths
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (8 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 09/28] perf session: Validate HEADER_ATTR alignment and attr.size before swapping Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 11/28] perf header: Byte-swap build ID event pid and bounds check section entries Arnaldo Carvalho de Melo
` (17 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
Several event types use an nr field to control iteration over
variable-length arrays. The swap handlers byte-swap and loop using
these fields without bounds checks, and the native processing path
trusts them as well.
Add bounds checks on both paths for:
- PERF_RECORD_THREAD_MAP: validate nr against payload, return -1
on the swap path. On the native path, reject with -EINVAL.
- PERF_RECORD_NAMESPACES: clamp nr on the swap path (safe because
each entry is indexed by type; missing entries just won't be
resolved). Skip the event on the native path.
- PERF_RECORD_CPU_MAP: clamp nr for CPUS and MASK sub-types on
the swap path. Add bounds checks for mask64 which previously
had no nr validation. Skip the event on the native path.
- PERF_RECORD_STAT_CONFIG: clamp nr on the swap path (safe because
each config entry is self-describing via its tag). Skip the
event on the native path.
The swap path (cross-endian, writable MAP_PRIVATE mapping) can
safely clamp by writing back to the event. The native path
(read-only MAP_SHARED mapping) must skip instead of clamping
because writing to the mmap'd event would segfault.
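The bound itself is identical at every site; only the reaction to an
oversized nr differs. A sketch of the generic form (names illustrative):
	/* upper bound on nr, given the event size */
	static inline u64 event_max_nr(u32 header_size, size_t fixed_part,
				       size_t entry_size)
	{
		/* min_size table guarantees fixed_part <= header_size */
		return (header_size - fixed_part) / entry_size;
	}
If nr exceeds this bound, the swap path clamps and the native path
skips, per the rationale above.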
Also fix stat_config swap range: change size += 1 to
size += sizeof(event->stat_config.nr) for clarity. The old +1
happened to work because mem_bswap_64 processes 8-byte chunks,
but the intent is to include the 8-byte nr field in the swap
range.
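(Worked out: with nr entries the old code passed 8 * nr + 1 bytes, and
mem_bswap_64()'s loop, which consumes 8 bytes per iteration while the
remaining count is positive, still swapped nr + 1 u64s, i.e. the nr
field plus every entry. Correct by accident, hence "for clarity".)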
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/session.c | 243 +++++++++++++++++++++++++++++++++++---
1 file changed, 224 insertions(+), 19 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index f0b716db75cef7bb..fbffa61762cae801 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -491,13 +491,28 @@ static int perf_event__throttle_swap(union perf_event *event,
static int perf_event__namespaces_swap(union perf_event *event,
bool sample_id_all)
{
- u64 i;
+ u64 i, nr, max_nr;
event->namespaces.pid = bswap_32(event->namespaces.pid);
event->namespaces.tid = bswap_32(event->namespaces.tid);
event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
- for (i = 0; i < event->namespaces.nr_namespaces; i++) {
+ nr = event->namespaces.nr_namespaces;
+ /* Cannot underflow: perf_event__min_size[] guarantees header.size >= sizeof */
+ max_nr = (event->header.size - sizeof(event->namespaces)) /
+ sizeof(event->namespaces.link_info[0]);
+ /*
+ * Safe to clamp: each namespace entry is indexed by type;
+ * missing entries just won't be resolved.
+ */
+ if (nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_NAMESPACES: nr_namespaces %" PRIu64 " exceeds payload (max %" PRIu64 "), clamping\n",
+ nr, max_nr);
+ nr = max_nr;
+ event->namespaces.nr_namespaces = nr;
+ }
+
+ for (i = 0; i < nr; i++) {
struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
ns->dev = bswap_64(ns->dev);
@@ -733,11 +748,23 @@ static int perf_event__auxtrace_error_swap(union perf_event *event,
static int perf_event__thread_map_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
- unsigned i;
+ unsigned int i;
+ u64 nr;
event->thread_map.nr = bswap_64(event->thread_map.nr);
- for (i = 0; i < event->thread_map.nr; i++)
+ /*
+ * Reject rather than clamp: unlike namespaces (indexed by type)
+ * or stat_config (self-describing tags), a truncated thread map
+ * is structurally broken — downstream would get a wrong map.
+ */
+ /* Cannot underflow: perf_event__min_size[] guarantees header.size >= sizeof */
+ nr = event->thread_map.nr;
+ if (nr > (event->header.size - sizeof(event->thread_map)) /
+ sizeof(event->thread_map.entries[0]))
+ return -1;
+
+ for (i = 0; i < nr; i++)
event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
return 0;
}
@@ -746,32 +773,80 @@ static int perf_event__cpu_map_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
struct perf_record_cpu_map_data *data = &event->cpu_map.data;
+ u32 payload = event->header.size - sizeof(event->header);
data->type = bswap_16(data->type);
+ /*
+ * Safe to clamp: a shorter CPU map just means some CPUs
+ * are absent; tools process the CPUs that are present.
+ */
switch (data->type) {
- case PERF_CPU_MAP__CPUS:
- data->cpus_data.nr = bswap_16(data->cpus_data.nr);
+ case PERF_CPU_MAP__CPUS: {
+ u16 nr, max_nr;
- for (unsigned i = 0; i < data->cpus_data.nr; i++)
+ data->cpus_data.nr = bswap_16(data->cpus_data.nr);
+ nr = data->cpus_data.nr;
+ max_nr = (payload - offsetof(struct perf_record_cpu_map_data,
+ cpus_data.cpu)) /
+ sizeof(data->cpus_data.cpu[0]);
+ if (nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_CPU_MAP: nr %u exceeds payload (max %u), clamping\n",
+ nr, max_nr);
+ nr = max_nr;
+ data->cpus_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
break;
+ }
case PERF_CPU_MAP__MASK:
data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
switch (data->mask32_data.long_size) {
- case 4:
+ case 4: {
+ u16 nr, max_nr;
+
data->mask32_data.nr = bswap_16(data->mask32_data.nr);
- for (unsigned i = 0; i < data->mask32_data.nr; i++)
+ nr = data->mask32_data.nr;
+ max_nr = (payload - offsetof(struct perf_record_cpu_map_data,
+ mask32_data.mask)) /
+ sizeof(data->mask32_data.mask[0]);
+ if (nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_CPU_MAP mask32: nr %u exceeds payload (max %u), clamping\n",
+ nr, max_nr);
+ nr = max_nr;
+ data->mask32_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
break;
- case 8:
+ }
+ case 8: {
+ u16 nr, max_nr;
+
data->mask64_data.nr = bswap_16(data->mask64_data.nr);
- for (unsigned i = 0; i < data->mask64_data.nr; i++)
+ nr = data->mask64_data.nr;
+ if (payload < offsetof(struct perf_record_cpu_map_data, mask64_data.mask)) {
+ data->mask64_data.nr = 0;
+ break;
+ }
+ max_nr = (payload - offsetof(struct perf_record_cpu_map_data,
+ mask64_data.mask)) /
+ sizeof(data->mask64_data.mask[0]);
+ if (nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_CPU_MAP mask64: nr %u exceeds payload (max %u), clamping\n",
+ nr, max_nr);
+ nr = max_nr;
+ data->mask64_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
break;
+ }
default:
- pr_err("cpu_map swap: unsupported long size\n");
+ pr_err("cpu_map swap: unsupported long size %u\n",
+ data->mask32_data.long_size);
}
break;
case PERF_CPU_MAP__RANGE_CPUS:
@@ -787,11 +862,27 @@ static int perf_event__cpu_map_swap(union perf_event *event,
static int perf_event__stat_config_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
- u64 size;
+ u64 nr, max_nr, size;
- size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
- size += 1; /* nr item itself */
+ nr = bswap_64(event->stat_config.nr);
+ /* Cannot underflow: perf_event__min_size[] guarantees header.size >= sizeof */
+ max_nr = (event->header.size - sizeof(event->stat_config)) /
+ sizeof(event->stat_config.data[0]);
+ /*
+ * Safe to clamp: each config entry is self-describing
+ * via its tag; missing entries keep their defaults.
+ */
+ if (nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_STAT_CONFIG: nr %" PRIu64 " exceeds payload (max %" PRIu64 "), clamping\n",
+ nr, max_nr);
+ nr = max_nr;
+ }
+ size = nr * sizeof(event->stat_config.data[0]);
+ /* The swap starts at &nr, so add its size to cover the full range */
+ size += sizeof(event->stat_config.nr);
mem_bswap_64(&event->stat_config.nr, size);
+ /* Persist the clamped value in native byte order */
+ event->stat_config.nr = nr;
return 0;
}
@@ -1729,8 +1820,24 @@ static int machines__deliver_event(struct machines *machines,
"COMM"))
return 0;
return tool->comm(tool, event, sample, machine);
- case PERF_RECORD_NAMESPACES:
+ case PERF_RECORD_NAMESPACES: {
+ /* Cannot underflow: perf_event__min_size[] guarantees header.size >= sizeof */
+ u64 max_nr = (event->header.size - sizeof(event->namespaces)) /
+ sizeof(event->namespaces.link_info[0]);
+
+ /*
+ * Native-endian events are mmap'd read-only, so we
+ * cannot clamp nr in place. Skip the event instead.
+ * The swap handler already clamps on the writable
+ * cross-endian path.
+ */
+ if (event->namespaces.nr_namespaces > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_NAMESPACES: nr_namespaces %" PRIu64 " exceeds payload (max %" PRIu64 "), skipping\n",
+ (u64)event->namespaces.nr_namespaces, max_nr);
+ return 0;
+ }
return tool->namespaces(tool, event, sample, machine);
+ }
case PERF_RECORD_CGROUP:
if (!perf_event__check_nul(event->cgroup.path,
(void *)event + event->header.size,
@@ -1911,15 +2018,112 @@ static s64 perf_session__process_user_event(struct perf_session *session,
perf_session__auxtrace_error_inc(session, event);
err = tool->auxtrace_error(tool, session, event);
break;
- case PERF_RECORD_THREAD_MAP:
+ case PERF_RECORD_THREAD_MAP: {
+ u64 max_nr;
+
+ if (event->header.size < sizeof(event->thread_map)) {
+ pr_err("PERF_RECORD_THREAD_MAP: header.size (%u) too small\n",
+ event->header.size);
+ err = -EINVAL;
+ break;
+ }
+
+ max_nr = (event->header.size - sizeof(event->thread_map)) /
+ sizeof(event->thread_map.entries[0]);
+ if (event->thread_map.nr > max_nr) {
+ pr_err("PERF_RECORD_THREAD_MAP: nr %" PRIu64 " exceeds max %" PRIu64 "\n",
+ (u64)event->thread_map.nr, max_nr);
+ err = -EINVAL;
+ break;
+ }
+
err = tool->thread_map(tool, session, event);
break;
- case PERF_RECORD_CPU_MAP:
+ }
+ case PERF_RECORD_CPU_MAP: {
+ struct perf_record_cpu_map_data *data = &event->cpu_map.data;
+ u32 payload = event->header.size - sizeof(event->header);
+
+ /*
+ * Native-endian events are mmap'd read-only, so we
+ * cannot clamp nr fields in place. Skip the event
+ * if any variant overflows.
+ */
+ switch (data->type) {
+ case PERF_CPU_MAP__CPUS: {
+ u16 max_nr = (payload - offsetof(struct perf_record_cpu_map_data,
+ cpus_data.cpu)) /
+ sizeof(data->cpus_data.cpu[0]);
+
+ if (data->cpus_data.nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_CPU_MAP: nr %u exceeds payload (max %u), skipping\n",
+ data->cpus_data.nr, max_nr);
+ err = 0;
+ goto out;
+ }
+ break;
+ }
+ case PERF_CPU_MAP__MASK:
+ if (data->mask32_data.long_size == 4) {
+ u16 max_nr = (payload - offsetof(struct perf_record_cpu_map_data,
+ mask32_data.mask)) /
+ sizeof(data->mask32_data.mask[0]);
+
+ if (data->mask32_data.nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_CPU_MAP mask32: nr %u exceeds payload (max %u), skipping\n",
+ data->mask32_data.nr, max_nr);
+ err = 0;
+ goto out;
+ }
+ } else if (data->mask64_data.long_size == 8) {
+ u16 max_nr;
+
+ if (payload < offsetof(struct perf_record_cpu_map_data, mask64_data.mask)) {
+ err = 0;
+ goto out;
+ }
+ max_nr = (payload - offsetof(struct perf_record_cpu_map_data,
+ mask64_data.mask)) /
+ sizeof(data->mask64_data.mask[0]);
+ if (data->mask64_data.nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_CPU_MAP mask64: nr %u exceeds payload (max %u), skipping\n",
+ data->mask64_data.nr, max_nr);
+ err = 0;
+ goto out;
+ }
+ } else {
+ pr_warning("WARNING: PERF_RECORD_CPU_MAP: unsupported long_size %u, skipping\n",
+ data->mask32_data.long_size);
+ err = 0;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
err = tool->cpu_map(tool, session, event);
break;
- case PERF_RECORD_STAT_CONFIG:
+ }
+ case PERF_RECORD_STAT_CONFIG: {
+ /* Cannot underflow: perf_event__min_size[] guarantees header.size >= sizeof */
+ u64 max_nr = (event->header.size - sizeof(event->stat_config)) /
+ sizeof(event->stat_config.data[0]);
+
+ /*
+ * Native-endian events are mmap'd read-only, so we
+ * cannot clamp nr in place. Skip the event instead.
+ */
+ if (event->stat_config.nr > max_nr) {
+ pr_warning("WARNING: PERF_RECORD_STAT_CONFIG: nr %" PRIu64 " exceeds payload (max %" PRIu64 "), skipping\n",
+ (u64)event->stat_config.nr, max_nr);
+ err = 0;
+ goto out;
+ }
+
err = tool->stat_config(tool, session, event);
break;
+ }
case PERF_RECORD_STAT:
err = tool->stat(tool, session, event);
break;
@@ -1962,6 +2166,7 @@ static s64 perf_session__process_user_event(struct perf_session *session,
err = -EINVAL;
break;
}
+out:
perf_sample__exit(&sample);
return err;
}
--
2.54.0
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 11/28] perf header: Byte-swap build ID event pid and bounds check section entries
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (9 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 10/28] perf session: Validate nr fields against event size on both swap and common paths Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 12/28] perf cpumap: Reject RANGE_CPUS with start_cpu > end_cpu Arnaldo Carvalho de Melo
` (16 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
perf_header__read_build_ids() swaps the event header fields for cross-endian
perf.data files but not bev.pid. This causes perf_session__findnew_machine()
to look up the wrong machine for guest VM build IDs, misattributing them.
Swap bev.pid alongside the header fields.
Also add a build_id_swap callback for stream-mode build ID events.
Harden perf_header__read_build_ids() against crafted perf.data files (the resulting loop invariant is sketched after this list):
- Add overflow check on offset + size to prevent wrap past ULLONG_MAX.
- Reject bev.header.size == 0 which would loop forever.
- Reject bev.header.size > remaining section to prevent reading past
the section boundary.
- Guard memcmp(filename, "nel.kallsyms]", 13) with len >= 13 to avoid
reading uninitialized stack memory on short filenames.
- Force NUL-termination of filename before passing it to functions
like machine__findnew_dso() that use strlen/strcmp.
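The loop invariant those checks establish, distilled (a sketch; readn()
error handling and the per-record payload parsing are elided):
	static int walk_build_id_section(int fd, u64 offset, u64 size)
	{
		u64 limit;
		if (size > ULLONG_MAX - offset)	/* offset + size must not wrap */
			return -1;
		limit = offset + size;
		while (offset < limit) {
			struct perf_event_header h;
			if (readn(fd, &h, sizeof(h)) != sizeof(h))
				return -1;
			if (h.size == 0 ||		/* would never advance */
			    h.size > limit - offset)	/* would cross the section */
				return -1;
			/* ... read and validate h.size - sizeof(h) payload ... */
			offset += h.size;	/* strictly increases, <= limit */
		}
		return 0;
	}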
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c | 50 +++++++++++++++++++++++++++++++++++----
tools/perf/util/session.c | 16 ++++++++++++-
2 files changed, 61 insertions(+), 5 deletions(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b263f83601842736..f2198ab0defd5804 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
+#include <limits.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
@@ -2578,7 +2579,13 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
} old_bev;
struct perf_record_header_build_id bev;
char filename[PATH_MAX];
- u64 limit = offset + size;
+ u64 limit;
+
+ /* Prevent offset + size from wrapping past ULLONG_MAX */
+ if (size > ULLONG_MAX - offset)
+ return -1;
+
+ limit = offset + size;
while (offset < limit) {
ssize_t len;
@@ -2589,6 +2596,10 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
if (header->needs_swap)
perf_event_header__bswap(&old_bev.header);
+ /* size == 0 loops forever; size > remaining reads past section */
+ if (old_bev.header.size == 0 || old_bev.header.size > limit - offset)
+ return -1;
+
len = old_bev.header.size - sizeof(old_bev);
if (len < 0 || len >= PATH_MAX) {
pr_warning("invalid build_id filename length %zd\n", len);
@@ -2597,6 +2608,13 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
if (readn(input, filename, len) != len)
return -1;
+ /*
+ * The file data may lack a null terminator, which could
+ * indicate a corrupt or crafted perf.data file. Ensure
+ * filename is always a valid C string before passing it
+ * to functions like machine__findnew_dso().
+ */
+ filename[len] = '\0';
bev.header = old_bev.header;
@@ -2624,17 +2642,32 @@ static int perf_header__read_build_ids(struct perf_header *header,
struct perf_session *session = container_of(header, struct perf_session, header);
struct perf_record_header_build_id bev;
char filename[PATH_MAX];
- u64 limit = offset + size, orig_offset = offset;
+ u64 limit, orig_offset = offset;
int err = -1;
+ /* Prevent offset + size from wrapping past ULLONG_MAX */
+ if (size > ULLONG_MAX - offset)
+ return -1;
+
+ limit = offset + size;
+
while (offset < limit) {
ssize_t len;
if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
goto out;
- if (header->needs_swap)
+ if (header->needs_swap) {
perf_event_header__bswap(&bev.header);
+ bev.pid = bswap_32(bev.pid);
+ }
+
+ /*
+ * size == 0 would loop forever (offset never advances);
+ * size > remaining would read past the section boundary.
+ */
+ if (bev.header.size == 0 || bev.header.size > limit - offset)
+ goto out;
len = bev.header.size - sizeof(bev);
if (len < 0 || len >= PATH_MAX) {
@@ -2644,6 +2677,13 @@ static int perf_header__read_build_ids(struct perf_header *header,
if (readn(input, filename, len) != len)
goto out;
+ /*
+ * The file data may lack a null terminator, which could
+ * indicate a corrupt or crafted perf.data file. Ensure
+ * filename is always a valid C string before passing it
+ * to functions like machine__findnew_dso().
+ */
+ filename[len] = '\0';
/*
* The a1645ce1 changeset:
*
@@ -2657,7 +2697,9 @@ static int perf_header__read_build_ids(struct perf_header *header,
* '[kernel.kallsyms]' string for the kernel build-id has the
* first 4 characters chopped off (where the pid_t sits).
*/
- if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
+ /* Guard short filenames against memcmp reading past the buffer */
+ if (len >= (ssize_t)sizeof("nel.kallsyms]") - 1 &&
+ memcmp(filename, "nel.kallsyms]", sizeof("nel.kallsyms]") - 1) == 0) {
if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
return -1;
return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index fbffa61762cae801..c23899c42ef7af34 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -676,6 +676,14 @@ static int perf_event__hdr_attr_swap(union perf_event *event,
return 0;
}
+static int perf_event__build_id_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ /* Only pid needs swapping — build_id[] is a raw byte array */
+ event->build_id.pid = bswap_32(event->build_id.pid);
+ return 0;
+}
+
static int perf_event__event_update_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
@@ -1006,7 +1014,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
- [PERF_RECORD_HEADER_BUILD_ID] = NULL,
+ [PERF_RECORD_HEADER_BUILD_ID] = perf_event__build_id_swap,
[PERF_RECORD_HEADER_FEATURE] = perf_event__header_feature_swap,
[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
@@ -1993,6 +2001,12 @@ static s64 perf_session__process_user_event(struct perf_session *session,
err = tool->tracing_data(tool, session, event);
break;
case PERF_RECORD_HEADER_BUILD_ID:
+ if (!perf_event__check_nul(event->build_id.filename,
+ (void *)event + event->header.size,
+ "HEADER_BUILD_ID")) {
+ err = 0;
+ break;
+ }
err = tool->build_id(tool, session, event);
break;
case PERF_RECORD_FINISHED_ROUND:
--
2.54.0
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 12/28] perf cpumap: Reject RANGE_CPUS with start_cpu > end_cpu
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (10 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 11/28] perf header: Byte-swap build ID event pid and bounds check section entries Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 13/28] perf auxtrace: Harden auxtrace_error event handling Arnaldo Carvalho de Melo
` (15 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
cpu_map__from_range() computes nr_cpus as end_cpu - start_cpu + 1.
When a crafted perf.data has start_cpu > end_cpu, this wraps to a
huge value, causing perf_cpu_map__empty_new() to attempt a massive
allocation.
Return NULL when the range is inverted.
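A worked example of the wrap (assuming the u16 start/end fields of
struct perf_record_range_cpu_map_data):
	start_cpu = 5, end_cpu = 2
	end_cpu - start_cpu + 1 == -2 after integer promotion
and the allocation's size_t arithmetic turns -2 into a byte count just
short of SIZE_MAX.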
Also clamp any_cpu to boolean (0 or 1) since it is added to the
allocation count — a crafted value > 1 would inflate the map size.
Harden cpu_map__from_mask() to reject unsupported long_size values
(anything other than 4 or 8), preventing misinterpretation of the
mask data layout.
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/cpumap.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 11922e1ded844a03..c32db7b307d7d959 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -93,9 +93,18 @@ static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_m
static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
DECLARE_BITMAP(local_copy, 64);
- int weight = 0, mask_nr = data->mask32_data.nr;
+ int weight = 0, mask_nr;
+ /* Cache validated long_size — data is mmap'd and could change */
+ u16 long_size;
struct perf_cpu_map *map;
+ /* long_size must be 4 or 8; other values overflow cpus_per_i below */
+ if (data->mask32_data.long_size != 4 && data->mask32_data.long_size != 8)
+ return NULL;
+
+ long_size = data->mask32_data.long_size;
+ mask_nr = data->mask32_data.nr;
+
for (int i = 0; i < mask_nr; i++) {
perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
weight += bitmap_weight(local_copy, 64);
@@ -106,11 +115,14 @@ static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_
return NULL;
for (int i = 0, j = 0; i < mask_nr; i++) {
- int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
+ int cpus_per_i = (i * long_size * BITS_PER_BYTE);
int cpu;
perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
for_each_set_bit(cpu, local_copy, 64) {
+ /* Guard against more set bits than the first pass counted */
+ if (j >= weight)
+ break;
if (cpu + cpus_per_i < INT16_MAX) {
RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
} else {
@@ -129,8 +141,12 @@ static struct perf_cpu_map *cpu_map__from_range(const struct perf_record_cpu_map
struct perf_cpu_map *map;
unsigned int i = 0;
+ if (data->range_cpu_data.end_cpu < data->range_cpu_data.start_cpu)
+ return NULL;
+
+ /* any_cpu is boolean (0 or 1), not a count — clamp to avoid inflated nr */
map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
- data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu);
+ data->range_cpu_data.start_cpu + 1 + !!data->range_cpu_data.any_cpu);
if (!map)
return NULL;
--
2.54.0
* [PATCH 13/28] perf auxtrace: Harden auxtrace_error event handling
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (11 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 12/28] perf cpumap: Reject RANGE_CPUS with start_cpu > end_cpu Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 14/28] perf session: Add byte-swap and bounds check for PERF_RECORD_BPF_METADATA events Arnaldo Carvalho de Melo
` (14 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
Fix four issues in PERF_RECORD_AUXTRACE_ERROR handling:
1. auxtrace_error_name() takes a signed int parameter, but e->type
is __u32. A crafted value like 0xFFFFFFFF converts to -1, passes
the bounds check, and causes a negative array index. Fix by
changing the parameter to unsigned int (sketched after this list).
2. The msg field is printed via %s without a length bound. The
min_size table only guarantees fields up to msg (offset 48), so
a truncated event has zero msg bytes within the event boundary.
Compute the available msg length from header.size, cap at
sizeof(e->msg), and use %.*s.
3. fmt >= 2 adds machine_pid and vcpu fields after msg[64]. A crafted
or truncated event may claim fmt >= 2 with a size that doesn't
include these fields. Add a size check in the swap handler to downgrade
fmt before the conditional field access, and a matching size
guard in the fprintf path for native-endian events (which are
mmap'd read-only and can't be modified in place).
4. python_process_auxtrace_error() had the same issues: msg was
passed to tuple_set_string() unbounded, and machine_pid/vcpu
were accessed unconditionally without checking fmt or event
size. Apply the same bounds checks.
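A condensed sketch of issue 1, with a hypothetical lookup in place of
the real table (the array name and bound are illustrative):
    static const char * const names[1] = { "instruction trace" };
    static const char *lookup(int type)     /* signed, as before the fix */
    {
        if (type < 1)                       /* (int)0xFFFFFFFF == -1 passes */
            return names[type];             /* names[-1]: OOB read */
        return "unknown";
    }
With the parameter changed to unsigned int, 0xFFFFFFFF stays large
and fails the bound instead of going negative.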
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/auxtrace.c | 24 +++++++++++++---
.../scripting-engines/trace-event-python.c | 28 +++++++++++++++++--
tools/perf/util/session.c | 18 ++++++++++--
3 files changed, 61 insertions(+), 9 deletions(-)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index a224687ffbc1b5be..d9770e1d2f959fc4 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1759,7 +1759,7 @@ static const char * const auxtrace_error_type_name[] = {
[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};
-static const char *auxtrace_error_name(int type)
+static const char *auxtrace_error_name(unsigned int type)
{
const char *error_type_name = NULL;
@@ -1775,6 +1775,7 @@ size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
struct perf_record_auxtrace_error *e = &event->auxtrace_error;
unsigned long long nsecs = e->time;
const char *msg = e->msg;
+ int msg_max;
int ret;
ret = fprintf(fp, " %s error type %u",
@@ -1792,11 +1793,26 @@ size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
if (!e->fmt)
msg = (const char *)&e->time;
- if (e->fmt >= 2 && e->machine_pid)
+ /* Bound msg to the bytes actually within the event, capped at the array size */
+ msg_max = (int)((void *)event + event->header.size - (void *)msg);
+ if (msg_max < 0)
+ msg_max = 0;
+ if (msg_max > (int)sizeof(e->msg))
+ msg_max = sizeof(e->msg);
+
+ /*
+ * Unlike the swap path which downgrades fmt in place,
+ * native-endian events are mmap'd read-only — check size
+ * instead to avoid accessing machine_pid/vcpu OOB.
+ */
+ if (e->fmt >= 2 &&
+ event->header.size >= offsetof(typeof(event->auxtrace_error), vcpu) +
+ sizeof(event->auxtrace_error.vcpu) &&
+ e->machine_pid)
ret += fprintf(fp, " machine_pid %d vcpu %d", e->machine_pid, e->vcpu);
- ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
- e->cpu, e->pid, e->tid, e->ip, e->code, msg);
+ ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %.*s\n",
+ e->cpu, e->pid, e->tid, e->ip, e->code, msg_max, msg);
return ret;
}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 5a30caaec73ef06b..5b8f629fd54cbe49 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1614,6 +1614,9 @@ static void python_process_auxtrace_error(struct perf_session *session __maybe_u
const char *handler_name = "auxtrace_error";
unsigned long long tm = e->time;
const char *msg = e->msg;
+ s32 machine_pid = 0, vcpu = 0;
+ char msg_buf[MAX_AUXTRACE_ERROR_MSG + 1];
+ int msg_max;
PyObject *handler, *t;
handler = get_handler(handler_name);
@@ -1625,6 +1628,25 @@ static void python_process_auxtrace_error(struct perf_session *session __maybe_u
msg = (const char *)&e->time;
}
+ /* Bound msg to the bytes within the event, ensure NUL-termination */
+ msg_max = (int)((void *)event + event->header.size - (void *)msg);
+ if (msg_max <= 0) {
+ msg_buf[0] = '\0';
+ } else {
+ if (msg_max > (int)sizeof(msg_buf) - 1)
+ msg_max = sizeof(msg_buf) - 1;
+ memcpy(msg_buf, msg, msg_max);
+ msg_buf[msg_max] = '\0';
+ }
+
+ /* Only access fmt >= 2 fields if the event is large enough */
+ if (e->fmt >= 2 &&
+ event->header.size >= offsetof(typeof(event->auxtrace_error), vcpu) +
+ sizeof(event->auxtrace_error.vcpu)) {
+ machine_pid = e->machine_pid;
+ vcpu = e->vcpu;
+ }
+
t = tuple_new(11);
tuple_set_u32(t, 0, e->type);
@@ -1634,10 +1656,10 @@ static void python_process_auxtrace_error(struct perf_session *session __maybe_u
tuple_set_s32(t, 4, e->tid);
tuple_set_u64(t, 5, e->ip);
tuple_set_u64(t, 6, tm);
- tuple_set_string(t, 7, msg);
+ tuple_set_string(t, 7, msg_buf);
tuple_set_u32(t, 8, cpumode);
- tuple_set_s32(t, 9, e->machine_pid);
- tuple_set_s32(t, 10, e->vcpu);
+ tuple_set_s32(t, 9, machine_pid);
+ tuple_set_s32(t, 10, vcpu);
call_object(handler, t, handler_name);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index c23899c42ef7af34..a2dba77c6a2b9d2f 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -747,8 +747,22 @@ static int perf_event__auxtrace_error_swap(union perf_event *event,
if (event->auxtrace_error.fmt)
event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
if (event->auxtrace_error.fmt >= 2) {
- event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
- event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
+ /*
+ * fmt >= 2 adds machine_pid and vcpu after msg[64].
+ * A crafted event may claim fmt >= 2 with a size that
+ * doesn't include these fields — downgrade to
+ * avoid swapping out of bounds.
+ */
+ if (event->header.size < offsetof(typeof(event->auxtrace_error), vcpu) +
+ sizeof(event->auxtrace_error.vcpu)) {
+ pr_warning("WARNING: PERF_RECORD_AUXTRACE_ERROR: fmt %u but event too small for machine_pid/vcpu (%u bytes), downgrading fmt\n",
+ event->auxtrace_error.fmt,
+ event->header.size);
+ event->auxtrace_error.fmt = 1;
+ } else {
+ event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
+ event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
+ }
}
return 0;
}
--
2.54.0
* [PATCH 14/28] perf session: Add byte-swap and bounds check for PERF_RECORD_BPF_METADATA events
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (12 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 13/28] perf auxtrace: Harden auxtrace_error event handling Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 15/28] perf header: Validate null-termination in PERF_RECORD_EVENT_UPDATE string fields Arnaldo Carvalho de Melo
` (13 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Blake Jones, Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
PERF_RECORD_BPF_METADATA has no entry in perf_event__swap_ops[],
so its nr_entries field is never byte-swapped when reading a
cross-endian perf.data file. Downstream processing in
perf_event__fprintf_bpf_metadata() loops over nr_entries, so a
foreign-endian value causes out-of-bounds reads.
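To see the failure concretely (assuming perf's u64 typedef): a
nr_entries of 3 written big-endian and read little-endian without a
swap handler becomes
    u64 on_disk = 3;                       /* bytes 00 .. 00 03 on disk */
    u64 misread = 0x0300000000000000ULL;   /* the same bytes, read LE */
and the fprintf loop then tries to walk that many entries[] elements.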
Add a swap handler that byte-swaps nr_entries after validating
that header.size is large enough. The entries[] array contains
only char arrays (key/value strings), so no per-entry swap is
needed — but ensure NUL-termination on the writable cross-endian
path.
Validate header.size, nr_entries, and string NUL-termination in
the common event delivery path so that native-endian files with
malicious values are also rejected.
Fixes: ab38e84ba9a8 ("perf record: collect BPF metadata from existing BPF programs")
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Blake Jones <blakejones@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/session.c | 83 ++++++++++++++++++++++++++++++++++++++-
1 file changed, 82 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index a2dba77c6a2b9d2f..876e20c4ba8a7808 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -942,6 +942,45 @@ static int perf_event__time_conv_swap(union perf_event *event,
return 0;
}
+static int perf_event__bpf_metadata_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ u64 i, nr, max_nr;
+
+ /* Fixed header must fit before accessing nr_entries or prog_name */
+ if (event->header.size < sizeof(event->bpf_metadata))
+ return -1;
+
+ event->bpf_metadata.nr_entries = bswap_64(event->bpf_metadata.nr_entries);
+
+ /*
+ * Ensure NUL-termination on the cross-endian path where the
+ * mapping is writable (MAP_PRIVATE + PROT_WRITE). Fixing
+ * the string in place is preferred over rejecting because it
+ * preserves the event for downstream processing — only the
+ * last byte is lost.
+ *
+ * The native-endian path (MAP_SHARED + PROT_READ) cannot
+ * write, so it validates and skips unterminated events in
+ * perf_session__process_user_event() instead. The two
+ * strategies produce different outcomes for the same
+ * malformed input (fix vs skip), which is inherent in the
+ * writable-vs-read-only mapping model.
+ */
+ event->bpf_metadata.prog_name[BPF_PROG_NAME_LEN - 1] = '\0';
+
+ nr = event->bpf_metadata.nr_entries;
+ max_nr = (event->header.size - sizeof(event->bpf_metadata)) /
+ sizeof(event->bpf_metadata.entries[0]);
+ if (nr > max_nr)
+ nr = max_nr;
+
+ for (i = 0; i < nr; i++) {
+ event->bpf_metadata.entries[i].key[BPF_METADATA_KEY_LEN - 1] = '\0';
+ event->bpf_metadata.entries[i].value[BPF_METADATA_VALUE_LEN - 1] = '\0';
+ }
+ return 0;
+}
static int
perf_event__schedstat_cpu_swap(union perf_event *event __maybe_unused,
bool sample_id_all __maybe_unused)
@@ -1041,6 +1080,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
[PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
+ [PERF_RECORD_BPF_METADATA] = perf_event__bpf_metadata_swap,
[PERF_RECORD_SCHEDSTAT_CPU] = perf_event__schedstat_cpu_swap,
[PERF_RECORD_SCHEDSTAT_DOMAIN] = perf_event__schedstat_domain_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
@@ -2181,9 +2221,50 @@ static s64 perf_session__process_user_event(struct perf_session *session,
case PERF_RECORD_FINISHED_INIT:
err = tool->finished_init(tool, session, event);
break;
- case PERF_RECORD_BPF_METADATA:
+ case PERF_RECORD_BPF_METADATA: {
+ u64 max_entries;
+
+ if (event->header.size < sizeof(event->bpf_metadata)) {
+ pr_warning("WARNING: PERF_RECORD_BPF_METADATA: header.size (%u) too small, skipping\n",
+ event->header.size);
+ err = 0;
+ break;
+ }
+
+ /*
+ * Native-endian files are mmap'd read-only — validate
+ * NUL-termination instead of writing.
+ */
+ if (strnlen(event->bpf_metadata.prog_name,
+ BPF_PROG_NAME_LEN) == BPF_PROG_NAME_LEN) {
+ pr_warning("WARNING: PERF_RECORD_BPF_METADATA: prog_name not null-terminated, skipping\n");
+ err = 0;
+ break;
+ }
+
+ max_entries = (event->header.size - sizeof(event->bpf_metadata)) /
+ sizeof(event->bpf_metadata.entries[0]);
+ if (event->bpf_metadata.nr_entries > max_entries) {
+ pr_warning("WARNING: PERF_RECORD_BPF_METADATA: nr_entries %" PRIu64 " exceeds max %" PRIu64 ", skipping\n",
+ (u64)event->bpf_metadata.nr_entries, max_entries);
+ err = 0;
+ break;
+ }
+
+ for (u64 i = 0; i < event->bpf_metadata.nr_entries; i++) {
+ if (strnlen(event->bpf_metadata.entries[i].key,
+ BPF_METADATA_KEY_LEN) == BPF_METADATA_KEY_LEN ||
+ strnlen(event->bpf_metadata.entries[i].value,
+ BPF_METADATA_VALUE_LEN) == BPF_METADATA_VALUE_LEN) {
+ pr_warning("WARNING: PERF_RECORD_BPF_METADATA: entry %" PRIu64 " key/value not null-terminated, skipping\n", i);
+ err = 0;
+ goto out;
+ }
+ }
+
err = tool->bpf_metadata(tool, session, event);
break;
+ }
case PERF_RECORD_SCHEDSTAT_CPU:
err = tool->schedstat_cpu(tool, session, event);
break;
--
2.54.0
* [PATCH 15/28] perf header: Validate null-termination in PERF_RECORD_EVENT_UPDATE string fields
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (13 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 14/28] perf session: Add byte-swap and bounds check for PERF_RECORD_BPF_METADATA events Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 16/28] perf tools: Bounds check perf_event_attr fields against attr.size before printing Arnaldo Carvalho de Melo
` (12 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
strdup(ev->unit) and strdup(ev->name) read until '\0' with no
guarantee the string is null-terminated within event->header.size.
The dump_trace fprintf path has the same problem with %s.
Validate before either path runs — same class of bug fixed for
MMAP/MMAP2/COMM/CGROUP by perf_event__check_nul().
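The validation pattern, condensed from the diff below (ev and str_off
as used there): strnlen() returning max_len means no NUL byte exists
inside the payload, so neither strdup() nor %s may touch the string.
    size_t max_len = event->header.size > str_off ?
                     event->header.size - str_off : 0;
    if (max_len == 0 || strnlen(ev->unit, max_len) == max_len)
        return 0;   /* unterminated: skip the event */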
Also harden the event_update swap handler to:
- Validate SCALE event size before swapping the double at
offset 24, which exceeds the 24-byte min_size.
- Validate CPUS event size before accessing the cpu_map
type/nr/long_size fields, which also start at the min_size
boundary.
- Swap CPUS variant fields (type, nr, long_size) so the
processing path sees native byte order.
Add validation in perf_event__process_event_update() for all
event update variants (UNIT, NAME, SCALE, CPUS) before
dump_trace or processing.
Fix a missing break before the default case in the CPUS
switch path.
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c | 126 +++++++++++++++++++++++++++++++++++---
tools/perf/util/session.c | 99 +++++++++++++++++++++++++++++-
2 files changed, 216 insertions(+), 9 deletions(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f2198ab0defd5804..d253063b581f21e9 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -5117,24 +5117,65 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
switch (ev->type) {
case PERF_EVENT_UPDATE__SCALE:
+ if (event->header.size < offsetof(struct perf_record_event_update, scale) +
+ sizeof(ev->scale)) {
+ ret += fprintf(fp, "... scale: (truncated)\n");
+ break;
+ }
ret += fprintf(fp, "... scale: %f\n", ev->scale.scale);
break;
case PERF_EVENT_UPDATE__UNIT:
- ret += fprintf(fp, "... unit: %s\n", ev->unit);
- break;
- case PERF_EVENT_UPDATE__NAME:
- ret += fprintf(fp, "... name: %s\n", ev->name);
+ case PERF_EVENT_UPDATE__NAME: {
+ size_t str_off = offsetof(struct perf_record_event_update, unit);
+ size_t max_len = event->header.size > str_off ?
+ event->header.size - str_off : 0;
+
+ if (max_len == 0 || strnlen(ev->unit, max_len) == max_len) {
+ ret += fprintf(fp, "... %s: (unterminated)\n",
+ ev->type == PERF_EVENT_UPDATE__UNIT ? "unit" : "name");
+ break;
+ }
+ ret += fprintf(fp, "... %s: %s\n",
+ ev->type == PERF_EVENT_UPDATE__UNIT ? "unit" : "name",
+ ev->unit);
break;
- case PERF_EVENT_UPDATE__CPUS:
+ }
+ case PERF_EVENT_UPDATE__CPUS: {
+ size_t cpus_off = offsetof(struct perf_record_event_update, cpus);
+ u32 cpus_payload;
+
+ if (event->header.size < cpus_off + sizeof(__u16) +
+ sizeof(struct perf_record_range_cpu_map)) {
+ ret += fprintf(fp, "... cpus: (truncated)\n");
+ break;
+ }
+
+ /*
+ * Validate nr against payload — this function may be
+ * called from the stub handler (dump_trace path) which
+ * bypasses perf_event__process_event_update() validation.
+ */
+ cpus_payload = event->header.size - cpus_off;
+ if (ev->cpus.cpus.type == PERF_CPU_MAP__CPUS &&
+ ev->cpus.cpus.cpus_data.nr >
+ (cpus_payload - offsetof(struct perf_record_cpu_map_data, cpus_data.cpu)) /
+ sizeof(ev->cpus.cpus.cpus_data.cpu[0])) {
+ ret += fprintf(fp, "... cpus: nr %u exceeds payload\n",
+ ev->cpus.cpus.cpus_data.nr);
+ break;
+ }
+
ret += fprintf(fp, "... ");
map = cpu_map__new_data(&ev->cpus.cpus);
if (map) {
ret += cpu_map__fprintf(map, fp);
perf_cpu_map__put(map);
- } else
+ } else {
ret += fprintf(fp, "failed to get cpus\n");
+ }
break;
+ }
default:
ret += fprintf(fp, "... unknown type\n");
break;
@@ -5267,6 +5308,75 @@ int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused
struct evsel *evsel;
struct perf_cpu_map *map;
+ /*
+ * Validate payload before dump_trace or processing — both
+ * paths access variant-specific fields without further checks.
+ */
+ if (ev->type == PERF_EVENT_UPDATE__UNIT ||
+ ev->type == PERF_EVENT_UPDATE__NAME) {
+ size_t str_off = offsetof(struct perf_record_event_update, unit);
+ size_t max_len = event->header.size > str_off ? event->header.size - str_off : 0;
+
+ if (max_len == 0 || strnlen(ev->unit, max_len) == max_len) {
+ pr_warning("WARNING: PERF_RECORD_EVENT_UPDATE: %s not null-terminated, skipping\n",
+ ev->type == PERF_EVENT_UPDATE__UNIT ? "unit" : "name");
+ return 0;
+ }
+ } else if (ev->type == PERF_EVENT_UPDATE__SCALE) {
+ if (event->header.size < offsetof(struct perf_record_event_update, scale) +
+ sizeof(ev->scale)) {
+ pr_warning("WARNING: PERF_RECORD_EVENT_UPDATE: SCALE payload too small, skipping\n");
+ return 0;
+ }
+ } else if (ev->type == PERF_EVENT_UPDATE__CPUS) {
+ size_t cpus_off = offsetof(struct perf_record_event_update, cpus);
+ size_t min_cpus = sizeof(__u16) +
+ sizeof(struct perf_record_range_cpu_map);
+ u32 cpus_payload;
+
+ if (event->header.size < cpus_off + min_cpus) {
+ pr_warning("WARNING: PERF_RECORD_EVENT_UPDATE: CPUS payload too small, skipping\n");
+ return 0;
+ }
+
+ /*
+ * Validate per-variant nr against the remaining
+ * payload on the native path — the swap path clamps
+ * nr in perf_event__event_update_swap(), but native
+ * events are read-only and cannot be clamped in place.
+ * cpu_map__new_data() trusts nr for allocation and
+ * iteration, so unchecked values cause OOB reads.
+ */
+ cpus_payload = event->header.size - cpus_off;
+ switch (ev->cpus.cpus.type) {
+ case PERF_CPU_MAP__CPUS:
+ if (ev->cpus.cpus.cpus_data.nr >
+ (cpus_payload - offsetof(struct perf_record_cpu_map_data, cpus_data.cpu)) /
+ sizeof(ev->cpus.cpus.cpus_data.cpu[0])) {
+ pr_warning("WARNING: EVENT_UPDATE CPUS: nr %u exceeds payload, skipping\n",
+ ev->cpus.cpus.cpus_data.nr);
+ return 0;
+ }
+ break;
+ case PERF_CPU_MAP__MASK:
+ if (ev->cpus.cpus.mask32_data.long_size == 4) {
+ if (ev->cpus.cpus.mask32_data.nr >
+ (cpus_payload - offsetof(struct perf_record_cpu_map_data, mask32_data.mask)) /
+ sizeof(ev->cpus.cpus.mask32_data.mask[0]))
+ return 0;
+ } else if (ev->cpus.cpus.mask64_data.long_size == 8) {
+ if (cpus_payload < offsetof(struct perf_record_cpu_map_data, mask64_data.mask) ||
+ ev->cpus.cpus.mask64_data.nr >
+ (cpus_payload - offsetof(struct perf_record_cpu_map_data, mask64_data.mask)) /
+ sizeof(ev->cpus.cpus.mask64_data.mask[0]))
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
if (dump_trace)
perf_event__fprintf_event_update(event, stdout);
@@ -5296,8 +5406,10 @@ int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused
if (map) {
perf_cpu_map__put(evsel->core.pmu_cpus);
evsel->core.pmu_cpus = map;
- } else
+ } else {
pr_err("failed to get event_update cpus\n");
+ }
+ break;
default:
break;
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 876e20c4ba8a7808..85591ccdc2e8ada3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -687,8 +687,103 @@ static int perf_event__build_id_swap(union perf_event *event,
static int perf_event__event_update_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
- event->event_update.type = bswap_64(event->event_update.type);
- event->event_update.id = bswap_64(event->event_update.id);
+ struct perf_record_event_update *ev = &event->event_update;
+
+ ev->type = bswap_64(ev->type);
+ ev->id = bswap_64(ev->id);
+
+ /*
+ * Swap variant-specific fields so the processing path
+ * sees native byte order.
+ */
+ if (ev->type == PERF_EVENT_UPDATE__SCALE) {
+ if (event->header.size < offsetof(struct perf_record_event_update, scale) +
+ sizeof(ev->scale))
+ return -1;
+ mem_bswap_64(&ev->scale.scale, sizeof(ev->scale.scale));
+ } else if (ev->type == PERF_EVENT_UPDATE__CPUS) {
+ u32 cpus_payload;
+ struct perf_record_cpu_map_data *data = &ev->cpus.cpus;
+
+ /* CPUS fields start at the same offset as scale (union) */
+ if (event->header.size < offsetof(struct perf_record_event_update, cpus) +
+ sizeof(__u16) + sizeof(struct perf_record_range_cpu_map))
+ return -1;
+ cpus_payload = event->header.size - offsetof(struct perf_record_event_update, cpus);
+ data->type = bswap_16(data->type);
+ /*
+ * Full swap including array elements — same logic as
+ * perf_event__cpu_map_swap() but scoped to the
+ * embedded cpu_map_data within EVENT_UPDATE.
+ */
+ switch (data->type) {
+ case PERF_CPU_MAP__CPUS: {
+ u16 nr, max_nr;
+
+ data->cpus_data.nr = bswap_16(data->cpus_data.nr);
+ nr = data->cpus_data.nr;
+ max_nr = (cpus_payload - offsetof(struct perf_record_cpu_map_data,
+ cpus_data.cpu)) /
+ sizeof(data->cpus_data.cpu[0]);
+ if (nr > max_nr) {
+ nr = max_nr;
+ data->cpus_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
+ data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
+ break;
+ }
+ case PERF_CPU_MAP__MASK:
+ data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
+ switch (data->mask32_data.long_size) {
+ case 4: {
+ u16 nr, max_nr;
+
+ data->mask32_data.nr = bswap_16(data->mask32_data.nr);
+ nr = data->mask32_data.nr;
+ max_nr = (cpus_payload - offsetof(struct perf_record_cpu_map_data,
+ mask32_data.mask)) /
+ sizeof(data->mask32_data.mask[0]);
+ if (nr > max_nr) {
+ nr = max_nr;
+ data->mask32_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
+ data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
+ break;
+ }
+ case 8: {
+ u16 nr, max_nr;
+
+ data->mask64_data.nr = bswap_16(data->mask64_data.nr);
+ nr = data->mask64_data.nr;
+ if (cpus_payload < offsetof(struct perf_record_cpu_map_data, mask64_data.mask)) {
+ data->mask64_data.nr = 0;
+ break;
+ }
+ max_nr = (cpus_payload - offsetof(struct perf_record_cpu_map_data,
+ mask64_data.mask)) /
+ sizeof(data->mask64_data.mask[0]);
+ if (nr > max_nr) {
+ nr = max_nr;
+ data->mask64_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
+ data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ case PERF_CPU_MAP__RANGE_CPUS:
+ data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
+ data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
+ break;
+ default:
+ break;
+ }
+ }
return 0;
}
--
2.54.0
* [PATCH 16/28] perf tools: Bounds check perf_event_attr fields against attr.size before printing
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (14 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 15/28] perf header: Validate null-termination in PERF_RECORD_EVENT_UPDATE string fields Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 17/28] perf header: Propagate feature section processing errors Arnaldo Carvalho de Melo
` (11 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
perf_event_attr__fprintf() accessed all struct fields unconditionally,
but attrs from older perf.data files or BPF-captured syscall payloads
may have a smaller size than the current struct. Fields beyond the
recorded size contain uninitialized or zero-filled data.
Add size-guarded macros (PRINT_ATTRn, PRINT_ATTRn_bf) that compare
each field's offset against attr->size before accessing it.
Guard the bitfield block (disabled, inherit, ... defer_output) with
attr_size >= 48. These bitfields share a single __u64 at offset 40,
which is within PERF_ATTR_SIZE_VER0 for validated perf.data attrs,
but BPF-captured attrs from perf trace can have a smaller size when
the tracee passes a minimal struct to sys_perf_event_open.
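Per-field checks are not possible for that block because C forbids
offsetof() on bitfield members, hence the separate PRINT_ATTRn_bf
variant plus one collective guard on the first non-bitfield member
after the shared word (rough shape):
    /* offsetof(struct perf_event_attr, disabled) would not compile */
    if (attr_size >= offsetof(struct perf_event_attr, wakeup_events)) {
        /* the whole bitfield word at offset 40 is covered */
    }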
Also fix the BPF trace path: when perf trace intercepts
sys_perf_event_open via BPF, the program copies PERF_ATTR_SIZE_VER0
bytes when the tracee passes size=0, but leaves the size field as 0.
Set attr->size to PERF_ATTR_SIZE_VER0 in the augmented syscall
handler so the bounds checks match the actual copied size.
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/trace/beauty/perf_event_open.c | 19 ++-
tools/perf/util/perf_event_attr_fprintf.c | 140 ++++++++++++++--------
2 files changed, 109 insertions(+), 50 deletions(-)
diff --git a/tools/perf/trace/beauty/perf_event_open.c b/tools/perf/trace/beauty/perf_event_open.c
index 9f1ed989c7751ec5..fa4578e8389664e9 100644
--- a/tools/perf/trace/beauty/perf_event_open.c
+++ b/tools/perf/trace/beauty/perf_event_open.c
@@ -76,7 +76,24 @@ static size_t perf_event_attr___scnprintf(struct perf_event_attr *attr, char *bf
static size_t syscall_arg__scnprintf_augmented_perf_event_attr(struct syscall_arg *arg, char *bf, size_t size)
{
- return perf_event_attr___scnprintf((void *)arg->augmented.args->value, bf, size, arg->trace->show_zeros);
+ struct perf_event_attr *attr = (void *)arg->augmented.args->value;
+ struct perf_event_attr local_attr;
+
+ /*
+ * The BPF program copies PERF_ATTR_SIZE_VER0 bytes when the
+ * tracee passes size=0, but leaves the size field as 0.
+ * Copy to a local so we can fix up size without writing to
+ * the potentially read-only augmented args buffer.
+ */
+ if (!attr->size) {
+ memcpy(&local_attr, attr, PERF_ATTR_SIZE_VER0);
+ memset((void *)&local_attr + PERF_ATTR_SIZE_VER0, 0,
+ sizeof(local_attr) - PERF_ATTR_SIZE_VER0);
+ local_attr.size = PERF_ATTR_SIZE_VER0;
+ attr = &local_attr;
+ }
+
+ return perf_event_attr___scnprintf(attr, bf, size, arg->trace->show_zeros);
}
static size_t syscall_arg__scnprintf_perf_event_attr(char *bf, size_t size, struct syscall_arg *arg)
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 741c3d657a8b6ae7..e7ee87685d635dd7 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -275,24 +275,55 @@ static void __p_config_id(struct perf_pmu *pmu, char *buf, size_t size, u32 type
#define p_type_id(val) __p_type_id(buf, BUF_SIZE, pmu, val)
#define p_config_id(val) __p_config_id(pmu, buf, BUF_SIZE, attr->type, val)
-#define PRINT_ATTRn(_n, _f, _p, _a) \
-do { \
- if (_a || attr->_f) { \
- _p(attr->_f); \
- ret += attr__fprintf(fp, _n, buf, priv);\
- } \
+#define PRINT_ATTRn(_n, _f, _p, _a) \
+do { \
+ if (attr_size >= offsetof(struct perf_event_attr, _f) + \
+ sizeof(attr->_f) && \
+ (_a || attr->_f)) { \
+ _p(attr->_f); \
+ ret += attr__fprintf(fp, _n, buf, priv); \
+ } \
+} while (0)
+
+/* bitfield members share an offset; most are within PERF_ATTR_SIZE_VER0 */
+#define PRINT_ATTRn_bf(_n, _f, _p, _a) \
+do { \
+ if (_a || attr->_f) { \
+ _p(attr->_f); \
+ ret += attr__fprintf(fp, _n, buf, priv); \
+ } \
} while (0)
#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p, false)
+#define PRINT_ATTRf_bf(_f, _p) PRINT_ATTRn_bf(#_f, _f, _p, false)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv)
{
struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+ /*
+ * size == 0 means the caller didn't set it; a full struct
+ * is always in memory here. Attrs from perf.data already
+ * had size validated (>= PERF_ATTR_SIZE_VER0), so they
+ * never arrive with size == 0.
+ */
+ u32 attr_size = attr->size ?: sizeof(*attr);
char buf[BUF_SIZE];
int ret = 0;
- if (!pmu && (attr->type == PERF_TYPE_HARDWARE || attr->type == PERF_TYPE_HW_CACHE)) {
+ /*
+ * Cap to what we understand: all callers store the attr in a
+ * buffer of sizeof(*attr) bytes (perf.data read path copies
+ * min(attr.size, sizeof), BPF augmented path copies into a
+ * fixed-size value[] array). A spoofed attr->size larger
+ * than sizeof would cause PRINT_ATTRn to read past the
+ * actual buffer.
+ */
+ if (attr_size > sizeof(*attr))
+ attr_size = sizeof(*attr);
+
+ if (!pmu && attr_size >= offsetof(struct perf_event_attr, config) + sizeof(attr->config) &&
+ (attr->type == PERF_TYPE_HARDWARE || attr->type == PERF_TYPE_HW_CACHE)) {
u32 extended_type = attr->config >> PERF_PMU_TYPE_SHIFT;
if (extended_type)
@@ -306,45 +337,53 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(sample_type, p_sample_type);
PRINT_ATTRf(read_format, p_read_format);
- PRINT_ATTRf(disabled, p_unsigned);
- PRINT_ATTRf(inherit, p_unsigned);
- PRINT_ATTRf(pinned, p_unsigned);
- PRINT_ATTRf(exclusive, p_unsigned);
- PRINT_ATTRf(exclude_user, p_unsigned);
- PRINT_ATTRf(exclude_kernel, p_unsigned);
- PRINT_ATTRf(exclude_hv, p_unsigned);
- PRINT_ATTRf(exclude_idle, p_unsigned);
- PRINT_ATTRf(mmap, p_unsigned);
- PRINT_ATTRf(comm, p_unsigned);
- PRINT_ATTRf(freq, p_unsigned);
- PRINT_ATTRf(inherit_stat, p_unsigned);
- PRINT_ATTRf(enable_on_exec, p_unsigned);
- PRINT_ATTRf(task, p_unsigned);
- PRINT_ATTRf(watermark, p_unsigned);
- PRINT_ATTRf(precise_ip, p_unsigned);
- PRINT_ATTRf(mmap_data, p_unsigned);
- PRINT_ATTRf(sample_id_all, p_unsigned);
- PRINT_ATTRf(exclude_host, p_unsigned);
- PRINT_ATTRf(exclude_guest, p_unsigned);
- PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
- PRINT_ATTRf(exclude_callchain_user, p_unsigned);
- PRINT_ATTRf(mmap2, p_unsigned);
- PRINT_ATTRf(comm_exec, p_unsigned);
- PRINT_ATTRf(use_clockid, p_unsigned);
- PRINT_ATTRf(context_switch, p_unsigned);
- PRINT_ATTRf(write_backward, p_unsigned);
- PRINT_ATTRf(namespaces, p_unsigned);
- PRINT_ATTRf(ksymbol, p_unsigned);
- PRINT_ATTRf(bpf_event, p_unsigned);
- PRINT_ATTRf(aux_output, p_unsigned);
- PRINT_ATTRf(cgroup, p_unsigned);
- PRINT_ATTRf(text_poke, p_unsigned);
- PRINT_ATTRf(build_id, p_unsigned);
- PRINT_ATTRf(inherit_thread, p_unsigned);
- PRINT_ATTRf(remove_on_exec, p_unsigned);
- PRINT_ATTRf(sigtrap, p_unsigned);
- PRINT_ATTRf(defer_callchain, p_unsigned);
- PRINT_ATTRf(defer_output, p_unsigned);
+ /*
+ * All bitfields share a single __u64 right after read_format.
+ * BPF-captured attrs from perf trace may have a small size
+ * when the tracee passes a minimal struct, so skip the
+ * entire block when it's not covered.
+ */
+ if (attr_size >= offsetof(struct perf_event_attr, wakeup_events)) {
+ PRINT_ATTRf_bf(disabled, p_unsigned);
+ PRINT_ATTRf_bf(inherit, p_unsigned);
+ PRINT_ATTRf_bf(pinned, p_unsigned);
+ PRINT_ATTRf_bf(exclusive, p_unsigned);
+ PRINT_ATTRf_bf(exclude_user, p_unsigned);
+ PRINT_ATTRf_bf(exclude_kernel, p_unsigned);
+ PRINT_ATTRf_bf(exclude_hv, p_unsigned);
+ PRINT_ATTRf_bf(exclude_idle, p_unsigned);
+ PRINT_ATTRf_bf(mmap, p_unsigned);
+ PRINT_ATTRf_bf(comm, p_unsigned);
+ PRINT_ATTRf_bf(freq, p_unsigned);
+ PRINT_ATTRf_bf(inherit_stat, p_unsigned);
+ PRINT_ATTRf_bf(enable_on_exec, p_unsigned);
+ PRINT_ATTRf_bf(task, p_unsigned);
+ PRINT_ATTRf_bf(watermark, p_unsigned);
+ PRINT_ATTRf_bf(precise_ip, p_unsigned);
+ PRINT_ATTRf_bf(mmap_data, p_unsigned);
+ PRINT_ATTRf_bf(sample_id_all, p_unsigned);
+ PRINT_ATTRf_bf(exclude_host, p_unsigned);
+ PRINT_ATTRf_bf(exclude_guest, p_unsigned);
+ PRINT_ATTRf_bf(exclude_callchain_kernel, p_unsigned);
+ PRINT_ATTRf_bf(exclude_callchain_user, p_unsigned);
+ PRINT_ATTRf_bf(mmap2, p_unsigned);
+ PRINT_ATTRf_bf(comm_exec, p_unsigned);
+ PRINT_ATTRf_bf(use_clockid, p_unsigned);
+ PRINT_ATTRf_bf(context_switch, p_unsigned);
+ PRINT_ATTRf_bf(write_backward, p_unsigned);
+ PRINT_ATTRf_bf(namespaces, p_unsigned);
+ PRINT_ATTRf_bf(ksymbol, p_unsigned);
+ PRINT_ATTRf_bf(bpf_event, p_unsigned);
+ PRINT_ATTRf_bf(aux_output, p_unsigned);
+ PRINT_ATTRf_bf(cgroup, p_unsigned);
+ PRINT_ATTRf_bf(text_poke, p_unsigned);
+ PRINT_ATTRf_bf(build_id, p_unsigned);
+ PRINT_ATTRf_bf(inherit_thread, p_unsigned);
+ PRINT_ATTRf_bf(remove_on_exec, p_unsigned);
+ PRINT_ATTRf_bf(sigtrap, p_unsigned);
+ PRINT_ATTRf_bf(defer_callchain, p_unsigned);
+ PRINT_ATTRf_bf(defer_output, p_unsigned);
+ }
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned, false);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -359,9 +398,12 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(sample_max_stack, p_unsigned);
PRINT_ATTRf(aux_sample_size, p_unsigned);
PRINT_ATTRf(sig_data, p_unsigned);
- PRINT_ATTRf(aux_start_paused, p_unsigned);
- PRINT_ATTRf(aux_pause, p_unsigned);
- PRINT_ATTRf(aux_resume, p_unsigned);
+ /* aux_{start_paused,pause,resume} are at byte 116, past VER0 */
+ if (attr_size >= offsetof(struct perf_event_attr, sig_data)) {
+ PRINT_ATTRf_bf(aux_start_paused, p_unsigned);
+ PRINT_ATTRf_bf(aux_pause, p_unsigned);
+ PRINT_ATTRf_bf(aux_resume, p_unsigned);
+ }
return ret;
}
--
2.54.0
* [PATCH 17/28] perf header: Propagate feature section processing errors
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (15 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 16/28] perf tools: Bounds check perf_event_attr fields against attr.size before printing Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 18/28] perf header: Validate f_attr.ids section before use in perf_session__read_header() Arnaldo Carvalho de Melo
` (10 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
perf_session__read_header() discards the return value from
perf_header__process_sections(), so any error from a feature
section processor (process_nrcpus, process_compressed, etc.)
is silently ignored and the session opens as if nothing went
wrong.
This defeats the validation added by subsequent commits in this
series: a crafted perf.data that fails a feature section check
would still be processed with partially-initialized state.
Check the return value and fail the session if any feature
section processor returns an error.
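The shape of the fix, condensed (labels as in the function):
    err = perf_header__process_sections(header, fd, NULL,
                                        perf_file_section__process);
    if (err < 0)
        goto out_delete_evlist;  /* was: return value silently dropped */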
For truncated files (data.size == 0, i.e. recording was
interrupted before the header was finalized), skip feature
section processing entirely and clear the feature bitmap so
tools use their "feature not present" fallbacks instead of
accessing uninitialized env fields.
Change the feature processor stubs for optional libraries
(libtraceevent, libbpf) from returning -1 to returning 0,
so that perf.data files containing these features can still be
opened on builds without the optional library — the feature is
simply skipped rather than causing a fatal error.
Also fix evlist__prepare_tracepoint_events() failure to return
-EINVAL instead of -ENOMEM, since the failure is a data
validation issue, not an allocation failure.
Fixes: 1c0b04d12ae9 ("perf tools: Add perf_session__read_header function")
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Ian Rogers <irogers@google.com>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c | 52 ++++++++++++++++++++++++++++++----------
1 file changed, 39 insertions(+), 13 deletions(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d253063b581f21e9..5cbeda0335f1140c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2748,8 +2748,9 @@ static int process_tracing_data(struct feat_fd *ff __maybe_unused, void *data __
return ret < 0 ? -1 : 0;
#else
- pr_err("ERROR: Trying to read tracing data without libtraceevent support.\n");
- return -1;
+ /* Not an error — the feature is simply unsupported in this build */
+ pr_debug("Tracing data present but libtraceevent not available, skipping.\n");
+ return 0;
#endif
}
@@ -3643,8 +3644,9 @@ static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data _
up_write(&env->bpf_progs.lock);
return err;
#else
- pr_err("ERROR: Trying to read bpf_prog_info without libbpf support.\n");
- return -1;
+ /* Not an error — the feature is simply unsupported in this build */
+ pr_debug("BPF prog info present but libbpf not available, skipping.\n");
+ return 0;
#endif // HAVE_LIBBPF_SUPPORT
}
@@ -3712,8 +3714,9 @@ static int process_bpf_btf(struct feat_fd *ff __maybe_unused, void *data __mayb
free(node);
return err;
#else
- pr_err("ERROR: Trying to read btf data without libbpf support.\n");
- return -1;
+ /* Not an error — the feature is simply unsupported in this build */
+ pr_debug("BTF data present but libbpf not available, skipping.\n");
+ return 0;
#endif // HAVE_LIBBPF_SUPPORT
}
@@ -4900,7 +4903,7 @@ int perf_session__read_header(struct perf_session *session)
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
- int nr_attrs, nr_ids, i, j, err;
+ int nr_attrs, nr_ids, i, j, err = -ENOMEM;
int fd = perf_data__fd(data);
session->evlist = evlist__new();
@@ -4920,6 +4923,8 @@ int perf_session__read_header(struct perf_session *session)
return err;
}
+ err = -ENOMEM;
+
if (perf_file_header__read(&f_header, header, fd) < 0)
return -EINVAL;
@@ -4997,15 +5002,36 @@ int perf_session__read_header(struct perf_session *session)
lseek(fd, tmp, SEEK_SET);
}
+ /*
+ * Skip feature section processing for truncated files
+ * (data.size == 0 means recording was interrupted). The
+ * section table is unreliable in that case, and the event
+ * data can still be processed without the feature headers.
+ * Clear the bitmap so has_feat() returns false and tools
+ * use their "feature not present" fallbacks instead of
+ * accessing uninitialized env fields.
+ */
+ if (f_header.data.size == 0) {
+ bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
+ } else {
#ifdef HAVE_LIBTRACEEVENT
- perf_header__process_sections(header, fd, &session->tevent,
- perf_file_section__process);
+ err = perf_header__process_sections(header, fd, &session->tevent,
+ perf_file_section__process);
+ if (err < 0)
+ goto out_delete_evlist;
- if (evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent))
- goto out_delete_evlist;
+ if (evlist__prepare_tracepoint_events(session->evlist,
+ session->tevent.pevent)) {
+ err = -EINVAL;
+ goto out_delete_evlist;
+ }
#else
- perf_header__process_sections(header, fd, NULL, perf_file_section__process);
+ err = perf_header__process_sections(header, fd, NULL,
+ perf_file_section__process);
+ if (err < 0)
+ goto out_delete_evlist;
#endif
+ }
return 0;
out_errno:
@@ -5014,7 +5040,7 @@ int perf_session__read_header(struct perf_session *session)
out_delete_evlist:
evlist__delete(session->evlist);
session->evlist = NULL;
- return -ENOMEM;
+ return err;
}
int perf_event__process_feature(const struct perf_tool *tool __maybe_unused,
--
2.54.0
* [PATCH 18/28] perf header: Validate f_attr.ids section before use in perf_session__read_header()
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (16 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 17/28] perf header: Propagate feature section processing errors Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 19/28] perf header: Validate feature section size and add read path bounds checking Arnaldo Carvalho de Melo
` (9 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
perf_session__read_header() reads f_attr.ids.size from the perf.data
file and divides it by sizeof(u64) to compute nr_ids, which is
declared as int. No validation is performed on the value before it
is used to allocate arrays and drive a read loop.
On 32-bit architectures, a crafted f_attr.ids.size of 0x100000000
(4 GB) produces nr_ids = 0x20000000, but the allocation size
1 * 0x20000000 * 8 overflows size_t to 0, so zalloc(0) returns a
valid pointer. The subsequent loop writes 0x20000000 IDs into that
zero-length buffer, corrupting the heap.
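Worked through, assuming perf's u64 typedef and a 32-bit size_t:
    u64 ids_size = 0x100000000ULL;           /* crafted 4 GB */
    int nr_ids   = ids_size / sizeof(u64);   /* 0x20000000 */
    size_t bytes = 1 * (size_t)nr_ids * sizeof(u64);
                                             /* 0x100000000 truncates to 0 */
zalloc(0) hands back a tiny valid allocation, and the ID read loop
then writes 0x20000000 u64s past it.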
On 64-bit, the u64-to-int truncation silently drops high bits,
processing fewer IDs than the file claims. While not exploitable,
this is a data integrity issue.
Add validation before using f_attr.ids:
- Cap nr_attrs (attrs.size / attr_size) to MAX_NR_ATTRS (1 << 16)
with overflow-safe u64 comparison before assigning to int
- Reject ids.size not aligned to sizeof(u64)
- Cap ids.size / sizeof(u64) to MAX_IDS_PER_ATTR (1 << 24) to
prevent int truncation and size_t overflow on 32-bit
- Reject ids sections that extend past the end of the file,
guarded by S_ISREG() so non-regular files (block devices,
pipes) are not falsely rejected
Also fix perf_header__getbuffer64() to set errno = EIO when
readn() returns 0 (EOF). Without this, the out_errno path in
perf_session__read_header() returns -errno which is 0 (success)
on truncated files, causing downstream NULL dereferences.
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c | 78 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 77 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 5cbeda0335f1140c..f4008878bd7eda04 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -64,6 +64,25 @@
#include <event-parse.h>
#endif
+/*
+ * nr_ids * sizeof(struct perf_sample_id) must not overflow
+ * size_t on 32-bit; the struct is ~104 bytes (32-bit) or
+ * ~184 bytes (64-bit), so 1<<24 (16M) keeps the product
+ * under 2 GB on 32-bit.
+ *
+ * This is a per-attribute cap only — the total across all
+ * attributes is not capped because legitimate high-core-count
+ * workloads (e.g. 5000 tracepoints × 4096 CPUs) can exceed
+ * a single-attribute limit.
+ */
+#define MAX_IDS_PER_ATTR (1 << 24)
+/*
+ * Cap nr_attrs to prevent resource exhaustion from crafted
+ * files. 65536 is well beyond any real workload (perf stat
+ * typically uses < 100 events) but prevents u64-to-int
+ * truncation on the attr count.
+ */
+#define MAX_NR_ATTRS (1 << 16)
#define MAX_BPF_DATA_LEN (256 * 1024 * 1024)
#define MAX_BPF_PROGS 131072
#define MAX_CACHE_ENTRIES 32768
@@ -4468,8 +4487,13 @@ int perf_session__inject_header(struct perf_session *session,
static int perf_header__getbuffer64(struct perf_header *header,
int fd, void *buf, size_t size)
{
- if (readn(fd, buf, size) <= 0)
+ ssize_t n = readn(fd, buf, size);
+
+ if (n <= 0) {
+ if (n == 0)
+ errno = EIO;
return -1;
+ }
if (header->needs_swap)
mem_bswap_64(buf, size);
@@ -4803,6 +4827,8 @@ static int read_attr(int fd, struct perf_header *ph,
if (ret <= 0) {
pr_debug("cannot read %d bytes of header attr\n",
PERF_ATTR_SIZE_VER0);
+ if (ret == 0)
+ errno = EIO;
return -1;
}
@@ -4903,6 +4929,7 @@ int perf_session__read_header(struct perf_session *session)
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
+ struct stat input_stat;
int nr_attrs, nr_ids, i, j, err = -ENOMEM;
int fd = perf_data__fd(data);
@@ -4952,6 +4979,15 @@ int perf_session__read_header(struct perf_session *session)
return -EINVAL;
}
+ if (fstat(fd, &input_stat) < 0)
+ return -errno;
+
+ /* Check before assigning to int to avoid u64-to-int truncation */
+ if (f_header.attrs.size / f_header.attr_size > MAX_NR_ATTRS) {
+ pr_err("Too many attributes: %" PRIu64 " (max %d)\n",
+ f_header.attrs.size / f_header.attr_size, MAX_NR_ATTRS);
+ return -EINVAL;
+ }
nr_attrs = f_header.attrs.size / f_header.attr_size;
lseek(fd, f_header.attrs.offset, SEEK_SET);
@@ -4968,6 +5004,45 @@ int perf_session__read_header(struct perf_session *session)
perf_event__attr_swap(&f_attr.attr);
}
+ /*
+ * Validate ids section: must be aligned to u64, and
+ * the count must fit in an int to avoid truncation in
+ * nr_ids and size_t overflow in perf_evsel__alloc_id()
+ * on 32-bit architectures.
+ */
+ if (f_attr.ids.size % sizeof(u64)) {
+ pr_err("Invalid ids section size %" PRIu64 " for attr %d, not aligned to u64\n",
+ f_attr.ids.size, i);
+ err = -EINVAL;
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Cap the ID count to avoid int truncation of nr_ids
+ * on 64-bit and size_t overflow in the allocation
+ * paths (nr_ids * sizeof(u64), nr_ids *
+ * sizeof(struct perf_sample_id)) on 32-bit.
+ */
+ if (f_attr.ids.size / sizeof(u64) > MAX_IDS_PER_ATTR) {
+ pr_err("Invalid ids section size %" PRIu64 " for attr %d, too many IDs\n",
+ f_attr.ids.size, i);
+ err = -EINVAL;
+ goto out_delete_evlist;
+ }
+
+ /*
+ * FIXME: see perf_header__process_sections() — block
+ * devices bypass this check because st_size is 0.
+ */
+ if (S_ISREG(input_stat.st_mode) &&
+ (f_attr.ids.offset > (u64)input_stat.st_size ||
+ f_attr.ids.size > (u64)input_stat.st_size - f_attr.ids.offset)) {
+ pr_err("Invalid ids section for attr %d: offset=%" PRIu64 " size=%" PRIu64 " exceeds file size %" PRIu64 "\n",
+ i, f_attr.ids.offset, f_attr.ids.size, (u64)input_stat.st_size);
+ err = -EINVAL;
+ goto out_delete_evlist;
+ }
+
tmp = lseek(fd, 0, SEEK_CUR);
evsel = evsel__new(&f_attr.attr);
@@ -4982,6 +5057,7 @@ int perf_session__read_header(struct perf_session *session)
evlist__add(session->evlist, evsel);
nr_ids = f_attr.ids.size / sizeof(u64);
+
/*
* We don't have the cpu and thread maps on the header, so
* for allocating the perf_sample_id table we fake 1 cpu and
--
2.54.0
* [PATCH 19/28] perf header: Validate feature section size and add read path bounds checking
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (17 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 18/28] perf header: Validate f_attr.ids section before use in perf_session__read_header() Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 20/28] perf header: Sanity check HEADER_EVENT_DESC attr.size before swap Arnaldo Carvalho de Melo
` (8 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
David Carrillo-Cisneros, Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
Harden feature section parsing against crafted perf.data files:
1. perf_header__process_sections() reads the feature section table
and passes each section's offset and size directly to the
processing callbacks without validating them against the actual
file size. A crafted section size would make all downstream
bounds checks against ff->size ineffective since they compare
against the untrusted, inflated bound. Add an fstat() check
with S_ISREG() guard and verify that each section's offset +
size does not extend past EOF.
2. __do_read_buf() validates reads against ff->size (section size),
but __do_read_fd() had no such check, so a malformed perf.data
with an understated section size could cause reads past the end
of the current section into the next section's data. Add the
bounds check in __do_read(), the common caller of both helpers,
so it is enforced uniformly for both the fd and buf paths.
Track the section-relative offset in __do_read_fd() so the
check works for the fd path. Reject negative sizes which on
32-bit can occur when a u32 >= 0x80000000 is passed as ssize_t
(worked through after this list).
3. do_read_string() relied on file data being null-padded. Add
explicit null-termination (buf[len-1] = '\0') after reading
and validate length (>= 1, fits within section) before
allocating, so callers like process_cpu_topology() never
receive an unterminated string.
4. Initialize feat_fd.offset to 0 (section-relative) instead of
section->offset (file-absolute) so the bounds tracking is
consistent with __do_read()'s section-relative comparison.
Adjust process_build_id() to use lseek() for its file-absolute
offset needs since it cannot rely on ff->offset for that.
5. Propagate ff->size to perf_file_section__fprintf_info() so its
reads are also bounded.
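The 32-bit hazard from item 2, worked through: a u32 length of
0x80000000 or more turns negative when passed as ssize_t, so the old
"size > remaining" comparison waved it through and memcpy() then took
it as a huge size_t.
    u32 len = 0x80000000u;
    ssize_t size = (ssize_t)len;   /* -2147483648 on 32-bit */
    /* old check: size > (ssize_t)ff->size - ff->offset is false, */
    /* but memcpy(addr, ff->buf + ff->offset, size) copies ~2 GB  */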
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c | 62 ++++++++++++++++++++++++++++++++---------
1 file changed, 53 insertions(+), 9 deletions(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f4008878bd7eda04..a8655a784eaa5ba9 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -233,23 +233,32 @@ static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
if (ret != size)
return ret < 0 ? (int)ret : -1;
+ ff->offset += size;
return 0;
}
static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
- if (size > (ssize_t)ff->size - ff->offset)
- return -1;
-
memcpy(addr, ff->buf + ff->offset, size);
ff->offset += size;
return 0;
-
}
static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
+ /*
+ * Reject negative sizes, which on 32-bit can occur when a
+ * u32 >= 0x80000000 is passed as ssize_t. The cast to
+ * ssize_t is safe because perf_header__process_sections()
+ * validates that each section fits within the file size
+ * before any feature callback reaches here, and only
+ * feature sections (metadata like build IDs, topology, etc.)
+ * use this path — these cannot legitimately approach 2GB.
+ */
+ if (size < 0 || size > (ssize_t)ff->size - ff->offset)
+ return -1;
+
if (!ff->buf)
return __do_read_fd(ff, addr, size);
return __do_read_buf(ff, addr, size);
@@ -289,16 +298,22 @@ static char *do_read_string(struct feat_fd *ff)
if (do_read_u32(ff, &len))
return NULL;
+ /* At least the null terminator. */
+ if (len < 1 || len > ff->size - ff->offset)
+ return NULL;
+
buf = malloc(len);
if (!buf)
return NULL;
if (!__do_read(ff, buf, len)) {
/*
- * strings are padded by zeroes
- * thus the actual strlen of buf
- * may be less than len
+ * do_write_string() writes len including the null
+ * terminator, padded to NAME_ALIGN. Ensure the
+ * string is always null-terminated even if the file
+ * data has been tampered with.
*/
+ buf[len - 1] = '\0';
return buf;
}
@@ -2775,7 +2790,12 @@ static int process_tracing_data(struct feat_fd *ff __maybe_unused, void *data __
static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
- if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
+ off_t offset = lseek(ff->fd, 0, SEEK_CUR);
+
+ if (offset == (off_t)-1)
+ return -1;
+
+ if (perf_header__read_build_ids(ff->ph, ff->fd, offset, ff->size))
pr_debug("Failed to read buildids, continuing...\n");
return 0;
}
@@ -4152,6 +4172,7 @@ static int perf_file_section__fprintf_info(struct perf_file_section *section,
ff = (struct feat_fd) {
.fd = fd,
.ph = ph,
+ .size = section->size,
};
if (!feat_ops[feat].full_only || hd->full)
@@ -4512,6 +4533,7 @@ int perf_header__process_sections(struct perf_header *header, int fd,
int sec_size;
int feat;
int err;
+ struct stat st;
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
@@ -4529,7 +4551,29 @@ int perf_header__process_sections(struct perf_header *header, int fd,
if (err < 0)
goto out_free;
+ if (fstat(fd, &st) < 0) {
+ pr_err("Failed to stat the perf data file\n");
+ err = -1;
+ goto out_free;
+ }
+
for_each_set_bit(feat, header->adds_features, header->last_feat) {
+ /*
+ * FIXME: block devices have st_size == 0, so we skip
+ * bounds checking entirely. Historically perf never
+ * prevented using a block device as input, but it
+ * probably should — there's no valid use case for it
+ * and it bypasses all file-size validation.
+ */
+ if (S_ISREG(st.st_mode) &&
+ (sec->offset > (u64)st.st_size ||
+ sec->size > (u64)st.st_size - sec->offset)) {
+ pr_err("Feature %s (%d) section extends past EOF (offset=%" PRIu64 ", size=%" PRIu64 ", file=%" PRIu64 ")\n",
+ header_feat__name(feat), feat,
+ sec->offset, sec->size, (u64)st.st_size);
+ err = -1;
+ goto out_free;
+ }
err = process(sec++, header, feat, fd, data);
if (err < 0)
goto out_free;
@@ -4756,7 +4800,7 @@ static int perf_file_section__process(struct perf_file_section *section,
.fd = fd,
.ph = ph,
.size = section->size,
- .offset = section->offset,
+ .offset = 0,
};
if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
--
2.54.0
* [PATCH 20/28] perf header: Sanity check HEADER_EVENT_DESC attr.size before swap
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (18 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 19/28] perf header: Validate feature section size and add read path bounds checking Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 21/28] perf header: Validate bitmap size before allocating in do_read_bitmap() Arnaldo Carvalho de Melo
` (7 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot, Wang Nan,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
read_event_desc() reads nre (event count), sz (attr size), and nr
(IDs per event) from the file and uses them to control allocations
and loops without validating them against the section size.
A crafted perf.data could trigger large allocations or many loop
iterations before __do_read() eventually rejects the reads.
Add bounds checks in read_event_desc() (the footprint arithmetic is sketched after this list):
- Reject sz smaller than PERF_ATTR_SIZE_VER0.
- Require at least one event (nre > 0).
- Check that nre events fit in the remaining section, using the
minimum per-event footprint of sz + sizeof(u32).
- Reject attr->size > sz before calling perf_event__attr_swap()
to prevent heap out-of-bounds access.
- Check that nr IDs fit in the remaining section before allocating.
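As a minimal sketch of that footprint check (the helper and its names are illustrative, not the actual perf code), the key is to divide the remaining section bytes rather than multiply the attacker-controlled counts:

  #include <stdbool.h>
  #include <stdint.h>

  /*
   * Can `count` records, each `rec_size` bytes plus a trailing u32
   * id count, fit in `remaining` bytes? Dividing instead of
   * computing count * (rec_size + 4) cannot overflow.
   */
  static bool records_fit(uint32_t count, uint64_t rec_size, uint64_t remaining)
  {
          if (count == 0 || rec_size == 0)
                  return false;
          return count <= remaining / (rec_size + sizeof(uint32_t));
  }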
Fixes: b30b61729246 ("perf tools: Fix a problem when opening old perf.data with different byte order")
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Wang Nan <wangnan0@huawei.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Ian Rogers <irogers@google.com>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c | 50 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index a8655a784eaa5ba9..0bbe90865e9c1ceb 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2170,9 +2170,25 @@ static struct evsel *read_event_desc(struct feat_fd *ff)
if (do_read_u32(ff, &nre))
goto error;
+ /* Size of each of the nre attributes. */
if (do_read_u32(ff, &sz))
goto error;
+ /*
+ * Require at least one event with an attr no smaller than the
+ * first published struct, and reject sz values where
+ * sz + sizeof(u32) would overflow size_t (possible on 32-bit)
+ * or nre == UINT32_MAX where nre + 1 wraps to 0 in the calloc.
+ *
+ * The minimum section footprint per event is sz bytes for the
+ * attr plus a u32 for the id count; check that nre events fit.
+ */
+ if (!nre || sz < PERF_ATTR_SIZE_VER0 ||
+ sz > ff->size || (size_t)sz > SIZE_MAX - sizeof(u32) ||
+ nre == UINT32_MAX ||
+ nre > (ff->size - ff->offset) / (sz + sizeof(u32)))
+ goto error;
+
/* buffer to hold on file attr struct */
buf = malloc(sz);
if (!buf)
@@ -2188,6 +2204,9 @@ static struct evsel *read_event_desc(struct feat_fd *ff)
msz = sz;
for (i = 0, evsel = events; i < nre; evsel++, i++) {
+ struct perf_event_attr *attr = buf;
+ u32 attr_size;
+
evsel->core.idx = i;
/*
@@ -2197,6 +2216,32 @@ static struct evsel *read_event_desc(struct feat_fd *ff)
if (__do_read(ff, buf, sz))
goto error;
+ /* Reject before attr_swap to prevent OOB via bswap_safe() */
+ attr_size = ff->ph->needs_swap ? bswap_32(attr->size) : attr->size;
+ /* ABI0: size == 0 means the producer didn't set it */
+ if (!attr_size) {
+ attr_size = PERF_ATTR_SIZE_VER0;
+ /*
+ * Write back so free_event_desc() doesn't
+ * treat this event as the end-of-array sentinel
+ * (it iterates while attr.size != 0).
+ *
+ * Only for native — the swap path must NOT
+ * write native-endian VER0 here because
+ * perf_event__attr_swap() would re-swap it
+ * to 0x40000000, defeating bswap_safe() bounds.
+ * perf_event__attr_swap() has its own ABI0
+ * fallback that sets VER0 after swapping.
+ */
+ if (!ff->ph->needs_swap)
+ attr->size = attr_size;
+ }
+ if (attr_size < PERF_ATTR_SIZE_VER0 || attr_size > sz) {
+ pr_err("Event %d attr.size (%u) invalid (min: %d, max: %u)\n",
+ i, attr_size, PERF_ATTR_SIZE_VER0, sz);
+ goto error;
+ }
+
if (ff->ph->needs_swap)
perf_event__attr_swap(buf);
@@ -2218,6 +2263,10 @@ static struct evsel *read_event_desc(struct feat_fd *ff)
if (!nr)
continue;
+ /* Prevent oversized allocation from crafted nr */
+ if (nr > (ff->size - ff->offset) / sizeof(*id))
+ goto error;
+
id = calloc(nr, sizeof(*id));
if (!id)
goto error;
@@ -4995,7 +5044,6 @@ int perf_session__read_header(struct perf_session *session)
}
err = -ENOMEM;
-
if (perf_file_header__read(&f_header, header, fd) < 0)
return -EINVAL;
--
2.54.0
* [PATCH 21/28] perf header: Validate bitmap size before allocating in do_read_bitmap()
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (19 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 20/28] perf header: Sanity check HEADER_EVENT_DESC attr.size before swap Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 22/28] perf session: Add byte-swap for PERF_RECORD_COMPRESSED2 events Arnaldo Carvalho de Melo
` (6 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
do_read_bitmap() reads a u64 bit count from the file and passes it
to bitmap_zalloc() without checking it against the remaining section
size. A crafted perf.data could trigger a large allocation that would
only fail later when the per-element reads exceed section bounds.
Additionally, bitmap_zalloc() takes an int parameter, so a crafted
size with bits set above bit 31 (e.g. 0x100000040) would pass the
section bounds check but truncate when passed to bitmap_zalloc(),
allocating a much smaller buffer than the subsequent read loop
expects.
Reject size values that exceed INT_MAX, and check that the data
needed (BITS_TO_U64(size) u64 values) fits in the remaining section
before allocating. Switch from bitmap_zalloc() to calloc() of u64
units so the allocation size matches the u64 read/write granularity
and avoids unsigned long vs u64 mismatch on 32-bit architectures.
Fix do_write_bitmap() to cast via u64* rather than reading
unsigned long values from the bitmap directly, preventing
out-of-bounds reads on 32-bit systems where sizeof(unsigned long)
is 4 but the bitmap is stored in u64 units.
Fix the process_mem_topology() minimum section size: the check
allowed only 2 * sizeof(u64) per node, but do_read_bitmap() reads
an additional u64 for the bitmap size, so the per-node minimum is
3 * sizeof(u64).
Fix memory leak in process_mem_topology() error paths: replace
free(nodes) with memory_node__delete_nodes() to free per-node
bitmaps allocated by do_read_bitmap().
Currently used by process_mem_topology() for HEADER_MEM_TOPOLOGY.
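For illustration, the shape of the do_read_bitmap() validation with hypothetical names (EX_BITS_TO_U64 mirrors the tools BITS_TO_U64 macro):

  #include <limits.h>
  #include <stdbool.h>
  #include <stdint.h>

  #define EX_BITS_TO_U64(nbits) (((nbits) + 63) / 64)

  /*
   * Reject bit counts that would truncate in int-based bitmap APIs,
   * or that need more u64 words than the section has left.
   */
  static bool bitmap_read_ok(uint64_t nbits, uint64_t remaining_bytes)
  {
          if (nbits > INT_MAX)
                  return false;
          return EX_BITS_TO_U64(nbits) <= remaining_bytes / sizeof(uint64_t);
  }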
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Closes: https://lore.kernel.org/linux-perf-users/20260414224622.2AE69C19425@smtp.kernel.org/
Fixes: a881fc56038a ("perf header: Sanity check HEADER_MEM_TOPOLOGY")
Closes: https://lore.kernel.org/linux-perf-users/20260410223242.DD76FC19421@smtp.kernel.org/
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Ian Rogers <irogers@google.com>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c | 31 ++++++++++++++++++++++++++-----
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 0bbe90865e9c1ceb..bda8705e87648800 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -178,15 +178,25 @@ int do_write(struct feat_fd *ff, const void *buf, size_t size)
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
- u64 *p = (u64 *) set;
+ size_t byte_size = BITS_TO_LONGS(size) * sizeof(unsigned long);
int i, ret;
ret = do_write(ff, &size, sizeof(size));
if (ret < 0)
return ret;
+ /*
+ * The on-disk format uses u64 elements, but the in-memory bitmap
+ * uses unsigned long, which is only 4 bytes on 32-bit architectures.
+ * Copy with bounded size so the last element doesn't read past the
+ * bitmap allocation when BITS_TO_LONGS(size) is odd.
+ */
for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
- ret = do_write(ff, p + i, sizeof(*p));
+ u64 val = 0;
+ size_t off = i * sizeof(val);
+
+ memcpy(&val, (char *)set + off, min(sizeof(val), byte_size - off));
+ ret = do_write(ff, &val, sizeof(val));
if (ret < 0)
return ret;
}
@@ -332,7 +342,18 @@ static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
if (ret)
return ret;
- set = bitmap_zalloc(size);
+ /* Bitmap APIs use int for nbits; reject u64 values that truncate. */
+ if (size > INT_MAX ||
+ BITS_TO_U64(size) > (ff->size - ff->offset) / sizeof(u64))
+ return -1;
+
+ /*
+ * bitmap_zalloc() allocates in unsigned long units, which are only
+ * 4 bytes on 32-bit architectures. The read loop below casts the
+ * buffer to u64 * and writes 8-byte elements, so allocate in u64
+ * units to ensure the buffer is large enough.
+ */
+ set = calloc(BITS_TO_U64(size), sizeof(u64));
if (!set)
return -ENOMEM;
@@ -3488,7 +3509,7 @@ static int process_mem_topology(struct feat_fd *ff,
return -1;
}
- if (ff->size < 3 * sizeof(u64) + nr * 2 * sizeof(u64)) {
+ if (ff->size < 3 * sizeof(u64) + nr * 3 * sizeof(u64)) {
pr_err("Invalid HEADER_MEM_TOPOLOGY: section too small (%zu) for %llu nodes\n",
ff->size, (unsigned long long)nr);
return -1;
@@ -3523,7 +3544,7 @@ static int process_mem_topology(struct feat_fd *ff,
out:
if (ret)
- free(nodes);
+ memory_node__delete_nodes(nodes, nr);
return ret;
}
--
2.54.0
* [PATCH 22/28] perf session: Add byte-swap for PERF_RECORD_COMPRESSED2 events
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (20 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 21/28] perf header: Validate bitmap size before allocating in do_read_bitmap() Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 23/28] perf tools: Harden compressed event processing Arnaldo Carvalho de Melo
` (5 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Chun-Tse Shao, Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
PERF_RECORD_COMPRESSED2 has a data_size field (__u64) at offset 8
that is not covered by the header byte-swap in prefetch_event().
When reading a cross-endian perf.data file, data_size was used
without swapping, causing either garbage decompression sizes or
silent data corruption.
PERF_RECORD_COMPRESSED (the original format) has no fields beyond
the header, so it doesn't need a swap op.
Add perf_event__compressed2_swap() and register it in
perf_event__swap_ops[].
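A rough picture of the record layout, as an illustrative struct (the real definition is struct perf_record_compressed2 in libperf):

  #include <byteswap.h>
  #include <stdint.h>

  struct compressed2_sketch {
          uint32_t type;          /* header: swapped generically      */
          uint16_t misc;
          uint16_t size;
          uint64_t data_size;     /* offset 8: needs a dedicated swap */
          /* unsigned char data[]: raw zstd bytes, never swapped      */
  };

  static void compressed2_swap_sketch(struct compressed2_sketch *ev)
  {
          ev->data_size = bswap_64(ev->data_size);
  }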
Fixes: 208c0e168344 ("perf record: Add 8-byte aligned event type PERF_RECORD_COMPRESSED2")
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Chun-Tse Shao <ctshao@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/session.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 85591ccdc2e8ada3..80cb03d150cecc0b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1037,6 +1037,14 @@ static int perf_event__time_conv_swap(union perf_event *event,
return 0;
}
+static int perf_event__compressed2_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ /* Only data_size needs swapping — compressed payload is a raw byte stream */
+ event->pack2.data_size = bswap_64(event->pack2.data_size);
+ return 0;
+}
+
static int perf_event__bpf_metadata_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
@@ -1175,6 +1183,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
[PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
+ [PERF_RECORD_COMPRESSED2] = perf_event__compressed2_swap,
[PERF_RECORD_BPF_METADATA] = perf_event__bpf_metadata_swap,
[PERF_RECORD_SCHEDSTAT_CPU] = perf_event__schedstat_cpu_swap,
[PERF_RECORD_SCHEDSTAT_DOMAIN] = perf_event__schedstat_domain_swap,
--
2.54.0
* [PATCH 23/28] perf tools: Harden compressed event processing
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (21 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 22/28] perf session: Add byte-swap for PERF_RECORD_COMPRESSED2 events Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 24/28] perf session: Check for decompression buffer size overflow Arnaldo Carvalho de Melo
` (4 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
Add several hardening checks to the compressed event decompression
pipeline (checks 3 and 4 are sketched after this list):
1. Guard against decomp_last_rem underflow: check that
decomp_last->head does not exceed decomp_last->size before
subtracting. A u64 underflow here would produce a huge
decomp_len, causing an oversized mmap allocation.
2. Validate comp_mmap_len from the HEADER_COMPRESSED feature
section: reject values that are not 4K-aligned, smaller than
4096, or larger than ~2 GB (prevents size_t overflow when
adding decomp_last_rem on 32-bit, while allowing legitimate
large mmap buffers from perf record -m).
3. Validate COMPRESSED event header size: reject events where
header.size is too small to contain the fixed struct fields,
preventing underflow in the payload size calculation.
4. Validate COMPRESSED2 event data_size: check that data_size
does not exceed the available payload (header.size minus the
fixed struct fields) for the newer compressed format.
5. Reject compressed events when the HEADER_COMPRESSED feature
is missing from the file header, which means no decompression
context was initialized.
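Checks 3 and 4, sketched with hypothetical names (the real code lives in perf_session__process_compressed_event()):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  /*
   * header_size must cover the fixed fields, and data_size must fit
   * in the remaining payload. data_size may legitimately be smaller
   * than the payload because header.size includes alignment padding.
   */
  static bool compressed2_payload_ok(uint16_t header_size, uint64_t data_size,
                                     size_t fixed_size)
  {
          if (header_size < fixed_size)
                  return false;
          return data_size <= (uint64_t)header_size - fixed_size;
  }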
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c | 17 +++++++++++++++++
tools/perf/util/tool.c | 38 +++++++++++++++++++++++++++++++++++++-
2 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bda8705e87648800..994e54167ea3196b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3849,6 +3849,23 @@ static int process_compressed(struct feat_fd *ff,
if (do_read_u32(ff, &(env->comp_mmap_len)))
return -1;
+ /*
+ * FIXME: perf.data should record the recording system's page
+ * size — it affects mmap buffer alignment, sample addresses,
+ * and data_page_size/code_page_size interpretation. Without
+ * it we assume 4K (the smallest Linux page size) as a safe
+ * minimum alignment for comp_mmap_len validation.
+ *
+ * Cap at 2 GB to keep decomp_len + decomp_last_rem +
+ * sizeof(struct decomp) within size_t range on 32-bit.
+ */
+ if (env->comp_mmap_len < 4096 || env->comp_mmap_len % 4096 ||
+ env->comp_mmap_len > (2U * 1024 * 1024 * 1024 - 4096)) {
+ pr_err("Invalid HEADER_COMPRESSED: comp_mmap_len (%u) must be a 4K-aligned value in [4096, %u]\n",
+ env->comp_mmap_len, 2U * 1024 * 1024 * 1024 - 4096);
+ return -1;
+ }
+
return 0;
}
diff --git a/tools/perf/util/tool.c b/tools/perf/util/tool.c
index ff2150517b75587a..96fa6d6c55cdca1e 100644
--- a/tools/perf/util/tool.c
+++ b/tools/perf/util/tool.c
@@ -24,7 +24,15 @@ static int perf_session__process_compressed_event(const struct perf_tool *tool _
size_t mmap_len, decomp_len = perf_session__env(session)->comp_mmap_len;
struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;
+ if (!decomp_len) {
+ pr_err("Compressed events found but HEADER_COMPRESSED not set\n");
+ return -1;
+ }
+
if (decomp_last) {
+ /* Prevent u64 underflow in decomp_last_rem */
+ if (decomp_last->head > decomp_last->size)
+ return -1;
decomp_last_rem = decomp_last->size - decomp_last->head;
decomp_len += decomp_last_rem;
}
@@ -47,14 +55,37 @@ static int perf_session__process_compressed_event(const struct perf_tool *tool _
decomp->size = decomp_last_rem;
}
+ /*
+ * Events are read directly from the mmap'd file; fields could
+ * theoretically change via a FUSE-backed file, but that applies
+ * to the entire event processing pipeline, not just here.
+ */
if (event->header.type == PERF_RECORD_COMPRESSED) {
+ if (event->header.size < sizeof(struct perf_record_compressed))
+ goto err_decomp;
src = (void *)event + sizeof(struct perf_record_compressed);
src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
} else if (event->header.type == PERF_RECORD_COMPRESSED2) {
+ /*
+ * prefetch_event() only guarantees that the 8-byte
+ * event header fits; validate that header.size covers
+ * the data_size field before accessing it, otherwise a
+ * crafted event reads data_size from adjacent memory.
+ */
+ if (event->header.size < sizeof(struct perf_record_compressed2))
+ goto err_decomp;
src = (void *)event + sizeof(struct perf_record_compressed2);
src_size = event->pack2.data_size;
+ /*
+ * data_size is independent of header.size (which
+ * includes padding); verify it doesn't exceed the
+ * actual payload to prevent out-of-bounds reads in
+ * zstd_decompress_stream().
+ */
+ if (src_size > event->header.size - sizeof(struct perf_record_compressed2))
+ goto err_decomp;
} else {
- return -1;
+ goto err_decomp;
}
decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
@@ -77,6 +108,11 @@ static int perf_session__process_compressed_event(const struct perf_tool *tool _
pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
return 0;
+
+err_decomp:
+ munmap(decomp, mmap_len);
+ pr_err("Couldn't decompress data\n");
+ return -1;
}
#endif
--
2.54.0
* [PATCH 24/28] perf session: Check for decompression buffer size overflow
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (22 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 23/28] perf tools: Harden compressed event processing Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 25/28] perf session: Bound nr_cpus_avail and validate sample CPU Arnaldo Carvalho de Melo
` (3 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
On 32-bit systems, sizeof(struct decomp) + decomp_len can wrap
size_t when comp_mmap_len is large. The preceding patch caps
comp_mmap_len at ~2 GB, which prevents decomp_len from exceeding
SIZE_MAX after adding decomp_last_rem, but the subsequent addition
of sizeof(struct decomp) could still theoretically overflow when
decomp_len ends up near SIZE_MAX, resulting in a tiny mmap
allocation while zstd receives the original large decomp_len as
the destination size.
Add an explicit overflow check before computing mmap_len as
defense-in-depth.
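The pattern, reduced to a sketch (helper name invented):

  #include <stdbool.h>
  #include <stdint.h>

  /*
   * "a + b would exceed SIZE_MAX", rearranged so the test itself
   * cannot wrap: valid for any b <= SIZE_MAX.
   */
  static bool size_add_ok(size_t a, size_t b)
  {
          return a <= SIZE_MAX - b;
  }

mmap_len = sizeof(struct decomp) + decomp_len is then only computed
once size_add_ok(decomp_len, sizeof(struct decomp)) holds.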
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/tool.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/tools/perf/util/tool.c b/tools/perf/util/tool.c
index 96fa6d6c55cdca1e..bb13d526ce4c3382 100644
--- a/tools/perf/util/tool.c
+++ b/tools/perf/util/tool.c
@@ -34,9 +34,22 @@ static int perf_session__process_compressed_event(const struct perf_tool *tool _
if (decomp_last->head > decomp_last->size)
return -1;
decomp_last_rem = decomp_last->size - decomp_last->head;
+ /*
+ * Check before adding: on 32-bit, size_t += u64
+ * silently truncates, bypassing the overflow check
+ * below and producing an undersized buffer.
+ */
+ if (decomp_last_rem > SIZE_MAX - decomp_len - sizeof(struct decomp)) {
+ pr_err("Decompression buffer size overflow\n");
+ return -1;
+ }
decomp_len += decomp_last_rem;
}
+ if (decomp_len > SIZE_MAX - sizeof(struct decomp)) {
+ pr_err("Decompression buffer size overflow\n");
+ return -1;
+ }
mmap_len = sizeof(struct decomp) + decomp_len;
decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
--
2.54.0
* [PATCH 25/28] perf session: Bound nr_cpus_avail and validate sample CPU
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (23 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 24/28] perf session: Check for decompression buffer size overflow Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 26/28] perf timechart: Bounds check cpu_id and fix topology_map allocation Arnaldo Carvalho de Melo
` (2 subsequent siblings)
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
Several downstream consumers (timechart, kwork, sched) use fixed-size
arrays indexed by CPU. A crafted perf.data can supply arbitrary CPU
values that index past these arrays, causing out-of-bounds access.
Clamp nr_cpus_avail to MAX_NR_CPUS when reading HEADER_NRCPUS, and
fall back to MAX_NR_CPUS when the header is missing (truncated files,
pipe mode, pre-2017 perf). Then validate sample.cpu against
nr_cpus_avail in perf_session__deliver_event() before any tool
callback runs.
Only validate when PERF_SAMPLE_CPU is set in sample_type — when
absent, evsel__parse_sample() leaves sample.cpu as (u32)-1, a
sentinel that downstream tools (script, inject) check to identify
events without CPU info. Clamping it to 0 would break those checks.
Also refactor the sample parsing in perf_session__deliver_event()
to call evsel__parse_sample() directly (via evlist__event2evsel()
for evsel lookup), with explicit guest VM SID resolution for
machine_pid and vcpu fields.
Fix an off-by-one in end_sample_processing(): change the loop bound
from cpu <= numcpus to cpu < numcpus to prevent accessing one
element past the array.
For pipe-mode streams where HEADER_NRCPUS may arrive late or not at
all, the MAX_NR_CPUS fallback ensures the bounds check is still
effective against the fixed-size downstream arrays.
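The sentinel-aware bound, as an illustrative helper (not the exact
in-tree code):

  #include <stdbool.h>
  #include <stdint.h>

  /*
   * (u32)-1 means "no CPU info" and must pass through untouched;
   * any other value must stay below the per-file CPU bound.
   */
  static bool sample_cpu_needs_clamp(uint32_t cpu, uint32_t nr_cpus_avail)
  {
          if (cpu == (uint32_t)-1)
                  return false;
          return cpu >= nr_cpus_avail;
  }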
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/builtin-timechart.c | 2 +-
tools/perf/util/header.c | 43 +++++++++++++++++++++++++
tools/perf/util/session.c | 75 +++++++++++++++++++++++++++++++++-
3 files changed, 118 insertions(+), 2 deletions(-)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 28f33e39895d362d..40297f2dcd0353cc 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -700,7 +700,7 @@ static void end_sample_processing(struct timechart *tchart)
u64 cpu;
struct power_event *pwr;
- for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
+ for (cpu = 0; cpu < tchart->numcpus; cpu++) {
/* C state */
#if 0
pwr = zalloc(sizeof(*pwr));
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 994e54167ea3196b..30b65c58784b596f 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -48,6 +48,7 @@
#include <api/io_dir.h>
#include "asm/bug.h"
#include "tool.h"
+#include "../perf.h"
#include "time-utils.h"
#include "units.h"
#include "util/util.h" // perf_exe()
@@ -2884,12 +2885,36 @@ static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
if (ret)
return ret;
+ /* Validate raw values before clamping */
if (nr_cpus_online > nr_cpus_avail) {
pr_err("Invalid HEADER_NRCPUS: nr_cpus_online (%u) > nr_cpus_avail (%u)\n",
nr_cpus_online, nr_cpus_avail);
return -1;
}
+ /*
+ * FIXME: Several downstream consumers use fixed-size arrays
+ * indexed by CPU (timechart MAX_CPUS, kwork/sched/annotate
+ * DECLARE_BITMAP(MAX_NR_CPUS)). Until these are converted
+ * to dynamic allocation, clamp nr_cpus_avail so per-event
+ * CPU bounds checks reject samples above the array limit.
+ * Data from CPUs beyond MAX_NR_CPUS will be lost.
+ *
+ * Pipe-mode streams from pre-2017 perf or third-party tools
+ * that lack HEADER_NRCPUS will hit the MAX_NR_CPUS fallback
+ * in perf_session__deliver_event() instead.
+ */
+ if (nr_cpus_avail > MAX_NR_CPUS) {
+ pr_warning("WARNING: perf.data recorded on a %u-CPU machine but perf is compiled with MAX_NR_CPUS=%d.\n"
+ " Samples from CPUs >= %d will be clamped to CPU 0. Consider rebuilding\n"
+ " perf with a larger MAX_NR_CPUS, or help convert fixed-size CPU arrays to\n"
+ " dynamic allocation.\n",
+ nr_cpus_avail, MAX_NR_CPUS, MAX_NR_CPUS);
+ nr_cpus_avail = MAX_NR_CPUS;
+ if (nr_cpus_online > nr_cpus_avail)
+ nr_cpus_online = nr_cpus_avail;
+ }
+
env->nr_cpus_avail = (int)nr_cpus_avail;
env->nr_cpus_online = (int)nr_cpus_online;
return 0;
@@ -5239,6 +5264,24 @@ int perf_session__read_header(struct perf_session *session)
#endif
}
+ /*
+ * Without nr_cpus_avail the sample CPU bounds check in
+ * perf_session__deliver_event() is bypassed, allowing crafted
+ * CPU IDs to reach downstream consumers that index fixed-size
+ * arrays (timechart, kwork, sched — all sized MAX_NR_CPUS).
+ *
+ * This can happen with truncated files (interrupted recording
+ * loses all feature sections), very old files that predate
+ * HEADER_NRCPUS, or crafted files that omit it. Fall back to
+ * MAX_NR_CPUS so the bounds check is still effective — any
+ * CPU ID below that limit is safe for all downstream arrays.
+ */
+ if (header->env.nr_cpus_avail == 0) {
+ header->env.nr_cpus_avail = MAX_NR_CPUS;
+ pr_warning("WARNING: perf.data is missing HEADER_NRCPUS, using MAX_NR_CPUS (%d) as CPU bound\n",
+ MAX_NR_CPUS);
+ }
+
return 0;
out_errno:
return -errno;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 80cb03d150cecc0b..dd84b3cd017a5073 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2085,14 +2085,87 @@ static int perf_session__deliver_event(struct perf_session *session,
const char *file_path)
{
struct perf_sample sample;
+ struct evsel *evsel;
int ret;
perf_sample__init(&sample, /*all=*/false);
- ret = evlist__parse_sample(session->evlist, event, &sample);
+ evsel = evlist__event2evsel(session->evlist, event);
+ if (!evsel) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = evsel__parse_sample(evsel, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
goto out;
}
+ /*
+ * evsel__parse_sample() doesn't populate machine_pid/vcpu,
+ * which are needed by machines__find_for_cpumode() to
+ * attribute samples to guest VMs. The SID table maps
+ * sample IDs to the guest that owns the event.
+ */
+ if (perf_guest && sample.id) {
+ struct perf_sample_id *sid = evlist__id2sid(session->evlist, sample.id);
+
+ if (sid) {
+ sample.machine_pid = sid->machine_pid;
+ sample.vcpu = sid->vcpu.cpu;
+ }
+ }
+
+ /*
+ * Validate sample.cpu before any callback can use it as an
+ * array index (kwork cpus_runtime, timechart cpus_cstate_*,
+ * sched cpu_last_switched).
+ *
+ * When PERF_SAMPLE_CPU is absent, evsel__parse_sample() leaves
+ * sample.cpu as (u32)-1 — a sentinel that downstream tools
+ * (script, inject) check to identify events without CPU info.
+ * Only check when sample.cpu was actually populated from event
+ * data: PERF_RECORD_SAMPLE always has it when PERF_SAMPLE_CPU
+ * is set; non-sample events only have it when sample_id_all is
+ * enabled. Otherwise sample.cpu is the (u32)-1 sentinel from
+ * evsel__parse_sample() and must not be validated or clamped.
+ */
+ if ((evsel->core.attr.sample_type & PERF_SAMPLE_CPU) &&
+ (event->header.type == PERF_RECORD_SAMPLE ||
+ evsel->core.attr.sample_id_all)) {
+ int nr_cpus_avail = perf_session__env(session)->nr_cpus_avail;
+
+ /*
+ * For perf.data files the MAX_NR_CPUS fallback in
+ * perf_session__read_header() guarantees this is set.
+ * For pipe mode, HEADER_NRCPUS may arrive late or not
+ * at all (pre-2017 perf, third-party tools). Fall
+ * back to MAX_NR_CPUS so the bounds check still works
+ * against fixed-size downstream arrays.
+ */
+ if (nr_cpus_avail <= 0) {
+ nr_cpus_avail = MAX_NR_CPUS;
+ perf_session__env(session)->nr_cpus_avail = nr_cpus_avail;
+ pr_warning_once("WARNING: HEADER_NRCPUS not set, using MAX_NR_CPUS (%d) as CPU bound\n",
+ MAX_NR_CPUS);
+ }
+ if (sample.cpu >= (u32)nr_cpus_avail &&
+ sample.cpu != (u32)-1) {
+ /*
+ * Warn rather than abort: synthesized events
+ * (MMAP, COMM) lack sample_id_all data, so
+ * parse_id_sample reads garbage from the event
+ * payload. Clamping to 0 protects downstream
+ * array indexing while keeping the session alive.
+ *
+ * Preserve (u32)-1: perf script and perf inject
+ * use it as a sentinel for "CPU not applicable."
+ * Downstream array users (timechart, kwork) have
+ * their own per-callback bounds checks.
+ */
+ pr_warning_once("WARNING: sample CPU %u >= nr_cpus_avail %u, clamping to 0\n",
+ sample.cpu, nr_cpus_avail);
+ sample.cpu = 0;
+ }
+ }
ret = auxtrace__process_event(session, event, &sample, tool);
if (ret < 0)
--
2.54.0
* [PATCH 26/28] perf timechart: Bounds check cpu_id and fix topology_map allocation
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (24 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 25/28] perf session: Bound nr_cpus_avail and validate sample CPU Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 27/28] perf kwork: Bounds check work->cpu before indexing cpus_runtime[] Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 28/28] perf test: Add truncated perf.data robustness test Arnaldo Carvalho de Melo
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
The cpu_idle, cpu_frequency, power_start, and power_frequency
tracepoint handlers extract cpu_id from the event payload via
evsel__intval() and use it directly as an array index into
cpus_cstate_start_times[] and cpus_pstate_start_times[], which
are allocated with MAX_CPUS (4096) entries.
Unlike sample->cpu which is validated in perf_session__deliver_event(),
cpu_id comes from the tracepoint data and is never bounds checked.
A crafted perf.data with a malicious cpu_id in a tracepoint event
causes out-of-bounds array accesses.
Validate cpu_id against tchart->numcpus (nr_cpus_avail from the
file header) and reject the event with an error if it is out of
range, as this indicates a corrupted or crafted file.
The power_end handler uses sample->cpu (not a tracepoint cpu_id
field). Add a bounds check there too since a crafted file could
omit PERF_SAMPLE_CPU, leaving sample->cpu as the (u32)-1 sentinel
which would cause out-of-bounds access in c_state_end().
Also validate sample->cpu in sched_switch and sched_wakeup
handlers, which store it in cpu_sample structs later used as
array indices into topology_map[] during SVG generation.
Fix svg_build_topology_map() to allocate topology_map using
nr_cpus_avail instead of nr_cpus_online. When offline CPUs exist,
nr_cpus_online < nr_cpus_avail, and a valid cpu_id that passes
the numcpus check could still exceed the topology_map allocation,
causing a heap out-of-bounds read in cpu2y(). Reject negative CPU
values in str_to_bitmap() to prevent perf_cpu_map__new("") on an
empty topology string from passing -1 to __set_bit(), which would
write at offset ULONG_MAX/BITS_PER_LONG.
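To see why a -1 bit index lands that far away, note that the index
is converted to an unsigned word offset (standalone demo, not perf
code):

  #include <limits.h>
  #include <stdio.h>

  int main(void)
  {
          long idx = -1;                          /* bogus CPU number */
          unsigned long bit = (unsigned long)idx; /* ULONG_MAX        */

          /* Prints ULONG_MAX / BITS_PER_LONG, i.e. the word offset. */
          printf("word offset: %lu\n",
                 bit / (sizeof(unsigned long) * CHAR_BIT));
          return 0;
  }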
Fix the pre-existing backtrace memory leak: change the
tracepoint_handler typedef to pass const char **backtrace
(pointer-to-pointer). Handlers that consume the string
(sched_switch, sched_wakeup) set *backtrace = NULL to claim
ownership. The caller always calls free() after the handler
returns — if ownership was taken the pointer is NULL and
free(NULL) is a no-op. Skip cat_backtrace() entirely when
tchart->with_backtrace is not set.
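The ownership handoff, reduced to a sketch (function names
invented):

  #include <stdlib.h>
  #include <string.h>

  static const char *saved;

  /* A consuming handler keeps the string and NULLs the caller's
   * pointer; non-consuming handlers leave it untouched. */
  static void consuming_handler(const char **bt)
  {
          saved = *bt;
          *bt = NULL;
  }

  static void dispatch(void)
  {
          const char *bt = strdup("backtrace");

          consuming_handler(&bt);
          free((void *)bt);       /* NULL if ownership was taken: no-op */
  }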
Cap tchart->numcpus at MAX_CPUS in the HEADER_NRCPUS callback
so the bounds check cannot exceed the array allocation size.
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/builtin-timechart.c | 115 ++++++++++++++++++++++++++++-----
tools/perf/util/svghelper.c | 6 +-
2 files changed, 104 insertions(+), 17 deletions(-)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 40297f2dcd0353cc..bccc48cfb99a1d57 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -71,6 +71,7 @@ struct timechart {
bool io_only,
skip_eagain;
u64 io_events;
+ u32 nr_invalid_cpu;
u64 min_time,
merge_dist;
};
@@ -569,7 +570,7 @@ static const char *cat_backtrace(union perf_event *event,
typedef int (*tracepoint_handler)(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
- const char *backtrace);
+ const char **backtrace);
static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
@@ -588,22 +589,46 @@ static int process_sample_event(const struct perf_tool *tool,
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
- return f(tchart, evsel, sample,
- cat_backtrace(event, sample, machine));
+ const char *bt = NULL;
+ int ret;
+
+ if (tchart->with_backtrace)
+ bt = cat_backtrace(event, sample, machine);
+ ret = f(tchart, evsel, sample, &bt);
+ /*
+ * Handlers that consume backtrace (sched_switch,
+ * sched_wakeup) store the pointer and set *bt = NULL
+ * to claim ownership. For all other handlers bt is
+ * still ours to free. free(NULL) is safe.
+ */
+ free((void *)bt);
+ return ret;
}
return 0;
}
static int
-process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
+process_sample_cpu_idle(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
- const char *backtrace __maybe_unused)
+ const char **backtrace __maybe_unused)
{
u32 state = evsel__intval(evsel, sample, "state");
u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
+ /*
+ * cpu_id from tracepoint data indexes cpus_cstate_start_times[]
+ * and cpus_pstate_start_times[], both allocated as MAX_CPUS
+ * entries. Reject out-of-range values to prevent OOB writes;
+ * numcpus (from nr_cpus_avail) is the tighter, valid bound.
+ */
+ if (cpu_id >= tchart->numcpus) {
+ pr_err("cpu_idle event cpu_id %u >= nr_cpus_avail %u\n",
+ cpu_id, tchart->numcpus);
+ return -EINVAL;
+ }
+
if (state == (u32)PWR_EVENT_EXIT)
c_state_end(tchart, cpu_id, sample->time);
else
@@ -615,11 +640,18 @@ static int
process_sample_cpu_frequency(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
- const char *backtrace __maybe_unused)
+ const char **backtrace __maybe_unused)
{
u32 state = evsel__intval(evsel, sample, "state");
u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
+ /* Same bounds check as process_sample_cpu_idle — see comment there */
+ if (cpu_id >= tchart->numcpus) {
+ pr_err("cpu_frequency event cpu_id %u >= nr_cpus_avail %u\n",
+ cpu_id, tchart->numcpus);
+ return -EINVAL;
+ }
+
p_state_change(tchart, cpu_id, sample->time, state);
return 0;
}
@@ -628,13 +660,20 @@ static int
process_sample_sched_wakeup(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
- const char *backtrace)
+ const char **backtrace)
{
u8 flags = evsel__intval(evsel, sample, "common_flags");
int waker = evsel__intval(evsel, sample, "common_pid");
int wakee = evsel__intval(evsel, sample, "pid");
- sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
+ /* sample->cpu used as index into topology_map[] during SVG generation */
+ if (sample->cpu >= tchart->numcpus) {
+ tchart->nr_invalid_cpu++;
+ return 0;
+ }
+
+ sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, *backtrace);
+ *backtrace = NULL;
return 0;
}
@@ -642,27 +681,41 @@ static int
process_sample_sched_switch(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
- const char *backtrace)
+ const char **backtrace)
{
int prev_pid = evsel__intval(evsel, sample, "prev_pid");
int next_pid = evsel__intval(evsel, sample, "next_pid");
u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+ /* sample->cpu used as index into topology_map[] during SVG generation */
+ if (sample->cpu >= tchart->numcpus) {
+ tchart->nr_invalid_cpu++;
+ return 0;
+ }
+
sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
- prev_state, backtrace);
+ prev_state, *backtrace);
+ *backtrace = NULL;
return 0;
}
#ifdef SUPPORT_OLD_POWER_EVENTS
static int
-process_sample_power_start(struct timechart *tchart __maybe_unused,
+process_sample_power_start(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
- const char *backtrace __maybe_unused)
+ const char **backtrace __maybe_unused)
{
u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
u64 value = evsel__intval(evsel, sample, "value");
+ /* Same bounds check as process_sample_cpu_idle — see comment there */
+ if (cpu_id >= tchart->numcpus) {
+ pr_err("power_start event cpu_id %" PRIu64 " >= nr_cpus_avail %u\n",
+ cpu_id, tchart->numcpus);
+ return -EINVAL;
+ }
+
c_state_start(cpu_id, sample->time, value);
return 0;
}
@@ -671,8 +724,16 @@ static int
process_sample_power_end(struct timechart *tchart,
struct evsel *evsel __maybe_unused,
struct perf_sample *sample,
- const char *backtrace __maybe_unused)
+ const char **backtrace __maybe_unused)
{
+ /*
+ * sample->cpu is validated centrally when PERF_SAMPLE_CPU is
+ * set, but a crafted file could omit it from sample_type.
+ */
+ if (sample->cpu >= tchart->numcpus) {
+ tchart->nr_invalid_cpu++;
+ return 0;
+ }
c_state_end(tchart, sample->cpu, sample->time);
return 0;
}
@@ -681,11 +742,18 @@ static int
process_sample_power_frequency(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample,
- const char *backtrace __maybe_unused)
+ const char **backtrace __maybe_unused)
{
u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
u64 value = evsel__intval(evsel, sample, "value");
+ /* Same bounds check as process_sample_cpu_idle — see comment there */
+ if (cpu_id >= tchart->numcpus) {
+ pr_err("power_frequency event cpu_id %" PRIu64 " >= nr_cpus_avail %u\n",
+ cpu_id, tchart->numcpus);
+ return -EINVAL;
+ }
+
p_state_change(tchart, cpu_id, sample->time, value);
return 0;
}
@@ -1519,7 +1587,8 @@ static int process_header(struct perf_file_section *section __maybe_unused,
switch (feat) {
case HEADER_NRCPUS:
- tchart->numcpus = ph->env.nr_cpus_avail;
+ /* Cap at MAX_CPUS — the allocation size of cpus_cstate/pstate arrays */
+ tchart->numcpus = min((int)ph->env.nr_cpus_avail, MAX_CPUS);
break;
case HEADER_CPU_TOPOLOGY:
@@ -1625,6 +1694,16 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
tchart,
process_header);
+ /*
+ * Truncated files (interrupted recording) lose all feature
+ * sections so the HEADER_NRCPUS callback never fires, and
+ * pipe mode doesn't use perf_header__process_sections at all.
+ * Fall back to MAX_CPUS — the actual allocation size of the
+ * cpus_cstate/pstate arrays.
+ */
+ if (!tchart->numcpus)
+ tchart->numcpus = MAX_CPUS;
+
if (!perf_session__has_traces(session, "timechart record"))
goto out_delete;
@@ -1646,6 +1725,12 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
pr_info("Written %2.1f seconds of trace to %s.\n",
(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
+
+ if (tchart->nr_invalid_cpu) {
+ pr_warning("WARNING: %u events had invalid CPU values and were skipped.\n"
+ " Scheduling and power state data may be incomplete.\n",
+ tchart->nr_invalid_cpu);
+ }
out_delete:
perf_session__delete(session);
return ret;
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index e360e7736c7ba65b..a3c7cfecc072f3e3 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -736,7 +736,8 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
return -1;
perf_cpu_map__for_each_cpu(cpu, idx, map) {
- if (cpu.cpu >= nr_cpus) {
+ /* perf_cpu_map__new("") yields cpu=-1; reject to prevent __set_bit OOB */
+ if (cpu.cpu < 0 || cpu.cpu >= nr_cpus) {
ret = -1;
break;
}
@@ -756,7 +757,8 @@ int svg_build_topology_map(struct perf_env *env)
char *sib_core, *sib_thr;
int ret = -1;
- nr_cpus = min(env->nr_cpus_online, MAX_NR_CPUS);
+ /* Use nr_cpus_avail: offline CPUs still need slots in the topology map */
+ nr_cpus = min(env->nr_cpus_avail, MAX_NR_CPUS);
t.sib_core_nr = env->nr_sibling_cores;
t.sib_thr_nr = env->nr_sibling_threads;
--
2.54.0
* [PATCH 27/28] perf kwork: Bounds check work->cpu before indexing cpus_runtime[]
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (25 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 26/28] perf timechart: Bounds check cpu_id and fix topology_map allocation Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 28/28] perf test: Add truncated perf.data robustness test Arnaldo Carvalho de Melo
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo, sashiko-bot,
Yang Jihong, Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
work->cpu comes from sample->cpu which is (u32)-1 when
PERF_SAMPLE_CPU is absent. Stored as int, this becomes -1
which passes the signed BUG_ON(work->cpu >= MAX_NR_CPUS) but
causes an out-of-bounds access on cpus_runtime[-1].
Replace the BUG_ON with an unsigned bounds check that skips
entries with invalid CPU values, and guard the idle and irq
runtime accumulators the same way.
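The whole bug fits in a few lines (the MAX_NR_CPUS value is assumed
here for illustration):

  #include <assert.h>

  #define EX_MAX_NR_CPUS 4096

  int main(void)
  {
          int cpu = -1;   /* (u32)-1 from sample->cpu, stored as int */

          /* Signed compare: -1 >= 4096 is false, BUG_ON never fires... */
          assert(!(cpu >= EX_MAX_NR_CPUS));
          /* ...but the unsigned compare catches negative and too-large. */
          assert((unsigned int)cpu >= EX_MAX_NR_CPUS);
          return 0;
  }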
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Yang Jihong <yangjihong@bytedance.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/builtin-kwork.c | 49 +++++++++++++++++++++++++++++++++-----
tools/perf/util/kwork.h | 1 +
2 files changed, 44 insertions(+), 6 deletions(-)
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index 9d3a4c779a41e383..6e124a0f277c8294 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -424,7 +424,9 @@ static bool profile_event_match(struct perf_kwork *kwork,
u64 time = sample->time;
struct perf_time_interval *ptime = &kwork->ptime;
- if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
+ /* Guard test_bit: cpu == -1 (absent PERF_SAMPLE_CPU) would index past the bitmap */
+ if ((kwork->cpu_list != NULL) &&
+ ((unsigned int)cpu >= MAX_NR_CPUS || !test_bit(cpu, kwork->cpu_bitmap)))
return false;
if (((ptime->start != 0) && (ptime->start > time)) ||
@@ -2008,7 +2010,18 @@ static void top_calc_total_runtime(struct perf_kwork *kwork)
next = rb_first_cached(&class->work_root);
while (next) {
work = rb_entry(next, struct kwork_work, node);
- BUG_ON(work->cpu >= MAX_NR_CPUS);
+ /*
+ * work->cpu comes from sample->cpu which is -1 when
+ * PERF_SAMPLE_CPU is absent. As int that's -1, but as
+ * unsigned it exceeds MAX_NR_CPUS — skip to avoid OOB
+ * on cpus_runtime[]. Skips are counted and reported in
+ * perf_kwork__top_report().
+ */
+ if ((unsigned int)work->cpu >= MAX_NR_CPUS) {
+ stat->nr_skipped_cpu++;
+ next = rb_next(next);
+ continue;
+ }
stat->cpus_runtime[work->cpu].total += work->total_runtime;
stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;
next = rb_next(next);
@@ -2020,7 +2033,8 @@ static void top_calc_idle_time(struct perf_kwork *kwork,
{
struct kwork_top_stat *stat = &kwork->top_stat;
- if (work->id == 0) {
+ /* See comment in top_calc_total_runtime() */
+ if (work->id == 0 && (unsigned int)work->cpu < MAX_NR_CPUS) {
stat->cpus_runtime[work->cpu].idle += work->total_runtime;
stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;
}
@@ -2032,6 +2046,12 @@ static void top_calc_irq_runtime(struct perf_kwork *kwork,
{
struct kwork_top_stat *stat = &kwork->top_stat;
+ /* See comment in top_calc_total_runtime() */
+ if ((unsigned int)work->cpu >= MAX_NR_CPUS) {
+ stat->nr_skipped_cpu++;
+ return;
+ }
+
if (type == KWORK_CLASS_IRQ) {
stat->cpus_runtime[work->cpu].irq += work->total_runtime;
stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime;
@@ -2084,12 +2104,21 @@ static void top_calc_cpu_usage(struct perf_kwork *kwork)
if (work->total_runtime == 0)
goto next;
+ /* See comment in top_calc_total_runtime() */
+ if ((unsigned int)work->cpu >= MAX_NR_CPUS) {
+ stat->nr_skipped_cpu++;
+ goto next;
+ }
+
__set_bit(work->cpu, stat->all_cpus_bitmap);
top_subtract_irq_runtime(kwork, work);
- work->cpu_usage = work->total_runtime * 10000 /
- stat->cpus_runtime[work->cpu].total;
+ /* Guard against division by zero if no runtime was accumulated */
+ if (stat->cpus_runtime[work->cpu].total) {
+ work->cpu_usage = work->total_runtime * 10000 /
+ stat->cpus_runtime[work->cpu].total;
+ }
top_calc_idle_time(kwork, work);
next:
@@ -2102,7 +2131,8 @@ static void top_calc_load_runtime(struct perf_kwork *kwork,
{
struct kwork_top_stat *stat = &kwork->top_stat;
- if (work->id != 0) {
+ /* See comment in top_calc_total_runtime() */
+ if (work->id != 0 && (unsigned int)work->cpu < MAX_NR_CPUS) {
stat->cpus_runtime[work->cpu].load += work->total_runtime;
stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
}
@@ -2170,6 +2200,13 @@ static void perf_kwork__top_report(struct perf_kwork *kwork)
next = rb_next(next);
}
+ if (kwork->top_stat.nr_skipped_cpu) {
+ printf(" Warning: %u work entries with invalid CPU were excluded from totals.\n"
+ " Task runtimes may appear inflated (IRQ time not subtracted).\n"
+ " Consider re-recording with PERF_SAMPLE_CPU enabled.\n",
+ kwork->top_stat.nr_skipped_cpu);
+ }
+
printf("\n");
}
diff --git a/tools/perf/util/kwork.h b/tools/perf/util/kwork.h
index db00269b73f24c66..10290cd779402f9d 100644
--- a/tools/perf/util/kwork.h
+++ b/tools/perf/util/kwork.h
@@ -194,6 +194,7 @@ struct __top_cpus_runtime {
struct kwork_top_stat {
DECLARE_BITMAP(all_cpus_bitmap, MAX_NR_CPUS);
struct __top_cpus_runtime *cpus_runtime;
+ unsigned int nr_skipped_cpu;
};
struct perf_kwork {
--
2.54.0
* [PATCH 28/28] perf test: Add truncated perf.data robustness test
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
` (26 preceding siblings ...)
2026-05-10 3:34 ` [PATCH 27/28] perf kwork: Bounds check work->cpu before indexing cpus_runtime[] Arnaldo Carvalho de Melo
@ 2026-05-10 3:34 ` Arnaldo Carvalho de Melo
27 siblings, 0 replies; 30+ messages in thread
From: Arnaldo Carvalho de Melo @ 2026-05-10 3:34 UTC (permalink / raw)
To: Namhyung Kim
Cc: Ingo Molnar, Thomas Gleixner, James Clark, Jiri Olsa, Ian Rogers,
Adrian Hunter, Kan Liang, Clark Williams, linux-kernel,
linux-perf-users, Arnaldo Carvalho de Melo,
Claude Opus 4.6 (1M context)
From: Arnaldo Carvalho de Melo <acme@redhat.com>
Add a shell test that verifies perf report handles truncated
perf.data files gracefully — exiting with an error code rather
than crashing with SIGSEGV or SIGABRT.
The test records a simple workload, then truncates the resulting
perf.data at four offsets that exercise different parsing stages:
8 bytes — file header magic only
64 bytes — partial file header (attr section incomplete)
256 bytes — into the first events (partial event headers)
75% size — mid-stream truncation (partial event data)
For each truncation, perf report is run and the exit code is
checked. Signal-based exits (128+signal) in the range 134-159
indicate a crash and fail the test. Non-zero exits from normal
error handling are expected and acceptable.
This exercises the bounds checking, minimum-size validation,
and error propagation added by the preceding patches in this
series.
Cc: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/tests/shell/data_validation.sh | 59 +++++++++++++++++++++++
1 file changed, 59 insertions(+)
create mode 100755 tools/perf/tests/shell/data_validation.sh
diff --git a/tools/perf/tests/shell/data_validation.sh b/tools/perf/tests/shell/data_validation.sh
new file mode 100755
index 0000000000000000..649f71b6cdb93202
--- /dev/null
+++ b/tools/perf/tests/shell/data_validation.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Test that perf report handles truncated perf.data gracefully
+# (no crash, no segfault — clean error exit).
+#
+# Exercises the bounds checking and minimum-size validation added
+# by the perf-data-validation hardening series.
+
+err=0
+
+cleanup() {
+ rm -f "${perfdata}" "${truncated}"
+ trap - EXIT TERM INT
+}
+trap cleanup EXIT TERM INT
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+truncated=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+# Record a simple workload
+if ! perf record -o "${perfdata}" -- perf test -w noploop 2>/dev/null; then
+ echo "Skip: perf record failed"
+ cleanup
+ exit 2
+fi
+
+file_size=$(stat -c %s "${perfdata}")
+if [ "${file_size}" -lt 512 ]; then
+ echo "Skip: perf.data too small (${file_size} bytes)"
+ cleanup
+ exit 2
+fi
+
+# Test truncation at various offsets that exercise different
+# parsing stages:
+# 8 — file header magic only, no attrs or data
+# 64 — partial file header (attr section incomplete)
+# 256 — into the first events (partial event headers)
+# 75% — mid-stream truncation (partial event data)
+for cut_at in 8 64 256 $((file_size * 3 / 4)); do
+ if [ "${cut_at}" -ge "${file_size}" ]; then
+ continue
+ fi
+ head -c "${cut_at}" "${perfdata}" > "${truncated}"
+
+ # perf report should exit with an error, not crash.
+ # Suppress stdout/stderr — we only care about the exit code.
+ perf report -i "${truncated}" --stdio > /dev/null 2>&1
+ exit_code=$?
+
+ # 139 = SIGSEGV, 134 = SIGABRT, 136 = SIGFPE
+ if [ ${exit_code} -ge 134 ] && [ ${exit_code} -le 159 ]; then
+ echo "FAIL: perf report crashed (signal $((exit_code - 128))) on ${cut_at}-byte truncated file"
+ err=1
+ fi
+done
+
+cleanup
+exit ${err}
--
2.54.0