From: Arnaldo Carvalho de Melo <acme@kernel.org>
To: Namhyung Kim <namhyung@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>,
Thomas Gleixner <tglx@linutronix.de>,
James Clark <james.clark@linaro.org>,
Jiri Olsa <jolsa@kernel.org>, Ian Rogers <irogers@google.com>,
Adrian Hunter <adrian.hunter@intel.com>,
Kan Liang <kan.liang@linux.intel.com>,
Clark Williams <williams@redhat.com>,
linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
Arnaldo Carvalho de Melo <acme@redhat.com>,
sashiko-bot@kernel.org,
"Claude Opus 4.6 (1M context)" <noreply@anthropic.com>
Subject: [PATCH 15/28] perf header: Validate null-termination in PERF_RECORD_EVENT_UPDATE string fields
Date: Sun, 10 May 2026 00:34:06 -0300 [thread overview]
Message-ID: <20260510033424.255812-16-acme@kernel.org> (raw)
In-Reply-To: <20260510033424.255812-1-acme@kernel.org>
From: Arnaldo Carvalho de Melo <acme@redhat.com>
strdup(ev->unit) and strdup(ev->name) read until '\0' with no
guarantee the string is null-terminated within event->header.size.
The dump_trace fprintf path has the same problem with %s.
Validate before either path runs — same class of bug fixed for
MMAP/MMAP2/COMM/CGROUP by perf_event__check_nul().
Also harden the event_update swap handler to:
- Validate SCALE event size before swapping the double at
offset 24, which exceeds the 24-byte min_size.
- Validate CPUS event size before accessing the cpu_map
type/nr/long_size fields, which also start at the min_size
boundary.
- Swap CPUS variant fields (type, nr, long_size) so the
processing path sees native byte order.
Add validation in perf_event__process_event_update() for all
event update variants (UNIT, NAME, SCALE, CPUS) before
dump_trace or processing.
Fix a missing break before the default case in the CPUS
switch path.
Reported-by: sashiko-bot@kernel.org # Running on a local machine
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Assisted-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/header.c  | 126 +++++++++++++++++++++++++++++++++++---
tools/perf/util/session.c | 99 +++++++++++++++++++++++++++++-
2 files changed, 216 insertions(+), 9 deletions(-)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f2198ab0defd5804..d253063b581f21e9 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -5117,24 +5117,65 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
switch (ev->type) {
case PERF_EVENT_UPDATE__SCALE:
+ if (event->header.size < offsetof(struct perf_record_event_update, scale) +
+ sizeof(ev->scale)) {
+ ret += fprintf(fp, "... scale: (truncated)\n");
+ break;
+ }
ret += fprintf(fp, "... scale: %f\n", ev->scale.scale);
break;
case PERF_EVENT_UPDATE__UNIT:
- ret += fprintf(fp, "... unit: %s\n", ev->unit);
- break;
- case PERF_EVENT_UPDATE__NAME:
- ret += fprintf(fp, "... name: %s\n", ev->name);
+ case PERF_EVENT_UPDATE__NAME: {
+ size_t str_off = offsetof(struct perf_record_event_update, unit);
+ size_t max_len = event->header.size > str_off ?
+ event->header.size - str_off : 0;
+
+ if (max_len == 0 || strnlen(ev->unit, max_len) == max_len) {
+ ret += fprintf(fp, "... %s: (unterminated)\n",
+ ev->type == PERF_EVENT_UPDATE__UNIT ? "unit" : "name");
+ break;
+ }
+ ret += fprintf(fp, "... %s: %s\n",
+ ev->type == PERF_EVENT_UPDATE__UNIT ? "unit" : "name",
+ ev->unit);
break;
- case PERF_EVENT_UPDATE__CPUS:
+ }
+ case PERF_EVENT_UPDATE__CPUS: {
+ size_t cpus_off = offsetof(struct perf_record_event_update, cpus);
+ u32 cpus_payload;
+
+ if (event->header.size < cpus_off + sizeof(__u16) +
+ sizeof(struct perf_record_range_cpu_map)) {
+ ret += fprintf(fp, "... cpus: (truncated)\n");
+ break;
+ }
+
+ /*
+ * Validate nr against payload — this function may be
+ * called from the stub handler (dump_trace path) which
+ * bypasses perf_event__process_event_update() validation.
+ */
+ cpus_payload = event->header.size - cpus_off;
+ if (ev->cpus.cpus.type == PERF_CPU_MAP__CPUS &&
+ ev->cpus.cpus.cpus_data.nr >
+ (cpus_payload - offsetof(struct perf_record_cpu_map_data, cpus_data.cpu)) /
+ sizeof(ev->cpus.cpus.cpus_data.cpu[0])) {
+ ret += fprintf(fp, "... cpus: nr %u exceeds payload\n",
+ ev->cpus.cpus.cpus_data.nr);
+ break;
+ }
+
ret += fprintf(fp, "... ");
map = cpu_map__new_data(&ev->cpus.cpus);
if (map) {
ret += cpu_map__fprintf(map, fp);
perf_cpu_map__put(map);
- } else
+ } else {
ret += fprintf(fp, "failed to get cpus\n");
+ }
break;
+ }
default:
ret += fprintf(fp, "... unknown type\n");
break;
@@ -5267,6 +5308,75 @@ int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused
struct evsel *evsel;
struct perf_cpu_map *map;
+ /*
+ * Validate payload before dump_trace or processing — both
+ * paths access variant-specific fields without further checks.
+ */
+ if (ev->type == PERF_EVENT_UPDATE__UNIT ||
+ ev->type == PERF_EVENT_UPDATE__NAME) {
+ size_t str_off = offsetof(struct perf_record_event_update, unit);
+ size_t max_len = event->header.size - str_off;
+
+ if (max_len == 0 || strnlen(ev->unit, max_len) == max_len) {
+ pr_warning("WARNING: PERF_RECORD_EVENT_UPDATE: %s not null-terminated, skipping\n",
+ ev->type == PERF_EVENT_UPDATE__UNIT ? "unit" : "name");
+ return 0;
+ }
+ } else if (ev->type == PERF_EVENT_UPDATE__SCALE) {
+ if (event->header.size < offsetof(struct perf_record_event_update, scale) +
+ sizeof(ev->scale)) {
+ pr_warning("WARNING: PERF_RECORD_EVENT_UPDATE: SCALE payload too small, skipping\n");
+ return 0;
+ }
+ } else if (ev->type == PERF_EVENT_UPDATE__CPUS) {
+ size_t cpus_off = offsetof(struct perf_record_event_update, cpus);
+ size_t min_cpus = sizeof(__u16) +
+ sizeof(struct perf_record_range_cpu_map);
+ u32 cpus_payload;
+
+ if (event->header.size < cpus_off + min_cpus) {
+ pr_warning("WARNING: PERF_RECORD_EVENT_UPDATE: CPUS payload too small, skipping\n");
+ return 0;
+ }
+
+ /*
+ * Validate per-variant nr against the remaining
+ * payload on the native path — the swap path clamps
+ * nr in perf_event__event_update_swap(), but native
+ * events are read-only and cannot be clamped in place.
+ * cpu_map__new_data() trusts nr for allocation and
+ * iteration, so unchecked values cause OOB reads.
+ */
+ cpus_payload = event->header.size - cpus_off;
+ switch (ev->cpus.cpus.type) {
+ case PERF_CPU_MAP__CPUS:
+ if (ev->cpus.cpus.cpus_data.nr >
+ (cpus_payload - offsetof(struct perf_record_cpu_map_data, cpus_data.cpu)) /
+ sizeof(ev->cpus.cpus.cpus_data.cpu[0])) {
+ pr_warning("WARNING: EVENT_UPDATE CPUS: nr %u exceeds payload, skipping\n",
+ ev->cpus.cpus.cpus_data.nr);
+ return 0;
+ }
+ break;
+ case PERF_CPU_MAP__MASK:
+ if (ev->cpus.cpus.mask32_data.long_size == 4) {
+ if (ev->cpus.cpus.mask32_data.nr >
+ (cpus_payload - offsetof(struct perf_record_cpu_map_data, mask32_data.mask)) /
+ sizeof(ev->cpus.cpus.mask32_data.mask[0]))
+ return 0;
+ } else if (ev->cpus.cpus.mask64_data.long_size == 8) {
+ if (cpus_payload < offsetof(struct perf_record_cpu_map_data, mask64_data.mask) ||
+ ev->cpus.cpus.mask64_data.nr >
+ (cpus_payload - offsetof(struct perf_record_cpu_map_data, mask64_data.mask)) /
+ sizeof(ev->cpus.cpus.mask64_data.mask[0]))
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
if (dump_trace)
perf_event__fprintf_event_update(event, stdout);
@@ -5296,8 +5406,10 @@ int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused
if (map) {
perf_cpu_map__put(evsel->core.pmu_cpus);
evsel->core.pmu_cpus = map;
- } else
+ } else {
pr_err("failed to get event_update cpus\n");
+ }
+ break;
default:
break;
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 876e20c4ba8a7808..85591ccdc2e8ada3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -687,8 +687,103 @@ static int perf_event__build_id_swap(union perf_event *event,
static int perf_event__event_update_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
- event->event_update.type = bswap_64(event->event_update.type);
- event->event_update.id = bswap_64(event->event_update.id);
+ struct perf_record_event_update *ev = &event->event_update;
+
+ ev->type = bswap_64(ev->type);
+ ev->id = bswap_64(ev->id);
+
+ /*
+ * Swap variant-specific fields so the processing path
+ * sees native byte order.
+ */
+ if (ev->type == PERF_EVENT_UPDATE__SCALE) {
+ if (event->header.size < offsetof(struct perf_record_event_update, scale) +
+ sizeof(ev->scale))
+ return -1;
+ mem_bswap_64(&ev->scale.scale, sizeof(ev->scale.scale));
+ } else if (ev->type == PERF_EVENT_UPDATE__CPUS) {
+ u32 cpus_payload;
+ struct perf_record_cpu_map_data *data = &ev->cpus.cpus;
+
+ /* CPUS fields start at the same offset as scale (union) */
+ if (event->header.size < offsetof(struct perf_record_event_update, cpus) +
+ sizeof(__u16) + sizeof(struct perf_record_range_cpu_map))
+ return -1;
+ cpus_payload = event->header.size - offsetof(struct perf_record_event_update, cpus);
+ data->type = bswap_16(data->type);
+ /*
+ * Full swap including array elements — same logic as
+ * perf_event__cpu_map_swap() but scoped to the
+ * embedded cpu_map_data within EVENT_UPDATE.
+ */
+ switch (data->type) {
+ case PERF_CPU_MAP__CPUS: {
+ u16 nr, max_nr;
+
+ data->cpus_data.nr = bswap_16(data->cpus_data.nr);
+ nr = data->cpus_data.nr;
+ max_nr = (cpus_payload - offsetof(struct perf_record_cpu_map_data,
+ cpus_data.cpu)) /
+ sizeof(data->cpus_data.cpu[0]);
+ if (nr > max_nr) {
+ nr = max_nr;
+ data->cpus_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
+ data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
+ break;
+ }
+ case PERF_CPU_MAP__MASK:
+ data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
+ switch (data->mask32_data.long_size) {
+ case 4: {
+ u16 nr, max_nr;
+
+ data->mask32_data.nr = bswap_16(data->mask32_data.nr);
+ nr = data->mask32_data.nr;
+ max_nr = (cpus_payload - offsetof(struct perf_record_cpu_map_data,
+ mask32_data.mask)) /
+ sizeof(data->mask32_data.mask[0]);
+ if (nr > max_nr) {
+ nr = max_nr;
+ data->mask32_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
+ data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
+ break;
+ }
+ case 8: {
+ u16 nr, max_nr;
+
+ data->mask64_data.nr = bswap_16(data->mask64_data.nr);
+ nr = data->mask64_data.nr;
+ if (cpus_payload < offsetof(struct perf_record_cpu_map_data, mask64_data.mask)) {
+ data->mask64_data.nr = 0;
+ break;
+ }
+ max_nr = (cpus_payload - offsetof(struct perf_record_cpu_map_data,
+ mask64_data.mask)) /
+ sizeof(data->mask64_data.mask[0]);
+ if (nr > max_nr) {
+ nr = max_nr;
+ data->mask64_data.nr = nr;
+ }
+ for (unsigned int i = 0; i < nr; i++)
+ data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ case PERF_CPU_MAP__RANGE_CPUS:
+ data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
+ data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
+ break;
+ default:
+ break;
+ }
+ }
return 0;
}
--
2.54.0
next prev parent reply other threads:[~2026-05-10 3:35 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-10 3:33 [PATCH 00/28] perf: Harden perf.data parsing against crafted/corrupted files Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 01/28] perf session: Add minimum event size validation table Arnaldo Carvalho de Melo
2026-05-11 19:01 ` Ian Rogers
2026-05-10 3:33 ` [PATCH 02/28] perf tools: Fix event_contains() macro to verify full field extent Arnaldo Carvalho de Melo
2026-05-11 23:46 ` sashiko-bot
2026-05-10 3:33 ` [PATCH 03/28] perf zstd: Fix compression error path in zstd_compress_stream_to_records() Arnaldo Carvalho de Melo
2026-05-12 0:13 ` sashiko-bot
2026-05-10 3:33 ` [PATCH 04/28] perf zstd: Fix multi-iteration decompression and error handling Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 05/28] perf session: Fix PERF_RECORD_READ swap and dump for variable-length events Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 06/28] perf session: Align auxtrace_info priv size before byte-swapping Arnaldo Carvalho de Melo
2026-05-10 3:33 ` [PATCH 07/28] perf session: Add validated swap infrastructure with null-termination checks Arnaldo Carvalho de Melo
2026-05-12 4:08 ` sashiko-bot
2026-05-10 3:33 ` [PATCH 08/28] perf session: Use bounded copy for PERF_RECORD_TIME_CONV Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 09/28] perf session: Validate HEADER_ATTR alignment and attr.size before swapping Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 10/28] perf session: Validate nr fields against event size on both swap and common paths Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 11/28] perf header: Byte-swap build ID event pid and bounds check section entries Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 12/28] perf cpumap: Reject RANGE_CPUS with start_cpu > end_cpu Arnaldo Carvalho de Melo
2026-05-12 21:37 ` sashiko-bot
2026-05-10 3:34 ` [PATCH 13/28] perf auxtrace: Harden auxtrace_error event handling Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 14/28] perf session: Add byte-swap and bounds check for PERF_RECORD_BPF_METADATA events Arnaldo Carvalho de Melo
2026-05-12 22:58 ` sashiko-bot
2026-05-10 3:34 ` Arnaldo Carvalho de Melo [this message]
2026-05-12 23:45 ` [PATCH 15/28] perf header: Validate null-termination in PERF_RECORD_EVENT_UPDATE string fields sashiko-bot
2026-05-10 3:34 ` [PATCH 16/28] perf tools: Bounds check perf_event_attr fields against attr.size before printing Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 17/28] perf header: Propagate feature section processing errors Arnaldo Carvalho de Melo
2026-05-13 3:21 ` sashiko-bot
2026-05-10 3:34 ` [PATCH 18/28] perf header: Validate f_attr.ids section before use in perf_session__read_header() Arnaldo Carvalho de Melo
2026-05-13 4:36 ` sashiko-bot
2026-05-10 3:34 ` [PATCH 19/28] perf header: Validate feature section size and add read path bounds checking Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 20/28] perf header: Sanity check HEADER_EVENT_DESC attr.size before swap Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 21/28] perf header: Validate bitmap size before allocating in do_read_bitmap() Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 22/28] perf session: Add byte-swap for PERF_RECORD_COMPRESSED2 events Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 23/28] perf tools: Harden compressed event processing Arnaldo Carvalho de Melo
2026-05-13 21:56 ` sashiko-bot
2026-05-10 3:34 ` [PATCH 24/28] perf session: Check for decompression buffer size overflow Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 25/28] perf session: Bound nr_cpus_avail and validate sample CPU Arnaldo Carvalho de Melo
2026-05-10 3:34 ` [PATCH 26/28] perf timechart: Bounds check cpu_id and fix topology_map allocation Arnaldo Carvalho de Melo
2026-05-12 18:32 ` Ian Rogers
2026-05-12 19:48 ` Arnaldo Carvalho de Melo
2026-05-13 23:43 ` sashiko-bot
2026-05-10 3:34 ` [PATCH 27/28] perf kwork: Bounds check work->cpu before indexing cpus_runtime[] Arnaldo Carvalho de Melo
2026-05-14 0:06 ` sashiko-bot
2026-05-10 3:34 ` [PATCH 28/28] perf test: Add truncated perf.data robustness test Arnaldo Carvalho de Melo
2026-05-14 0:18 ` sashiko-bot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260510033424.255812-16-acme@kernel.org \
--to=acme@kernel.org \
--cc=acme@redhat.com \
--cc=adrian.hunter@intel.com \
--cc=irogers@google.com \
--cc=james.clark@linaro.org \
--cc=jolsa@kernel.org \
--cc=kan.liang@linux.intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-perf-users@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=namhyung@kernel.org \
--cc=noreply@anthropic.com \
--cc=sashiko-bot@kernel.org \
--cc=tglx@linutronix.de \
--cc=williams@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.