From: Dapeng Mi <dapeng1.mi@linux.intel.com>
To: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Namhyung Kim <namhyung@kernel.org>,
Thomas Gleixner <tglx@linutronix.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
Ian Rogers <irogers@google.com>,
Adrian Hunter <adrian.hunter@intel.com>,
Jiri Olsa <jolsa@kernel.org>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Kan Liang <kan.liang@linux.intel.com>,
Andi Kleen <ak@linux.intel.com>,
Eranian Stephane <eranian@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>,
broonie@kernel.org, Ravi Bangoria <ravi.bangoria@amd.com>,
linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
Dapeng Mi <dapeng1.mi@intel.com>,
Dapeng Mi <dapeng1.mi@linux.intel.com>
Subject: [Patch v4 08/17] perf/x86: Add YMM into sample_simd_vec_regs
Date: Thu, 25 Sep 2025 14:12:04 +0800
Message-ID: <20250925061213.178796-9-dapeng1.mi@linux.intel.com>
In-Reply-To: <20250925061213.178796-1-dapeng1.mi@linux.intel.com>
From: Kan Liang <kan.liang@linux.intel.com>
Each of YMM0-15 is composed of an XMM half and a YMMH half, which live in
two separate XSAVE components, so retrieving a complete YMM value requires
reading both. Internally, the XMM and YMMH halves are kept in different
structures that follow the XSAVE format: qwords 0-1 of each YMM register
come from the XMM space and qwords 2-3 from the YMMH state. The output,
however, dumps each YMM register as a whole.

A sample_simd_vec_reg_qwords value of 4 (PERF_X86_YMM_QWORDS) implies that
YMM registers are requested.
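
As a rough usage sketch (not part of this patch): a tool requesting full
YMM values asks for 4 qwords per vector register. The
sample_simd_regs_enabled and sample_simd_vec_reg_qwords fields below are
the ones referenced by this patch; the remaining setup is an assumption
for illustration only.

  /*
   * Hypothetical sketch: sample YMM registers on PMI.
   * sample_simd_regs_enabled and sample_simd_vec_reg_qwords are the
   * attr fields used in this patch; the sample_type setup is an
   * assumption based on the rest of the series.
   */
  struct perf_event_attr attr = { 0 };

  attr.sample_type |= PERF_SAMPLE_REGS_INTR;
  attr.sample_simd_regs_enabled = 1;
  attr.sample_simd_vec_reg_qwords = PERF_X86_YMM_QWORDS; /* 4 => YMM */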
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Co-developed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/core.c | 9 +++++++++
arch/x86/events/perf_event.h | 9 +++++++++
arch/x86/include/asm/perf_event.h | 4 ++++
arch/x86/include/uapi/asm/perf_regs.h | 8 ++++++--
arch/x86/kernel/perf_regs.c | 14 +++++++++++++-
5 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7b1b3eb80aa7..8543b96eeb58 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -423,6 +423,9 @@ static void x86_pmu_get_ext_regs(struct x86_perf_regs *perf_regs, u64 mask)
if (valid_mask & XFEATURE_MASK_SSE)
perf_regs->xmm_space = xsave->i387.xmm_space;
+
+ if (valid_mask & XFEATURE_MASK_YMM)
+ perf_regs->ymmh = get_xsave_addr(xsave, XFEATURE_YMM);
}
static void release_ext_regs_buffers(void)
@@ -725,6 +728,9 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event_needs_xmm(event) &&
!(x86_pmu.ext_regs_mask & XFEATURE_MASK_SSE))
return -EINVAL;
+ if (event_needs_ymm(event) &&
+ !(x86_pmu.ext_regs_mask & XFEATURE_MASK_YMM))
+ return -EINVAL;
}
}
@@ -1875,6 +1881,9 @@ static void x86_pmu_setup_extended_regs_data(struct perf_event *event,
perf_regs->xmm_regs = NULL;
if (event_needs_xmm(event))
mask |= XFEATURE_MASK_SSE;
+ perf_regs->ymmh_regs = NULL;
+ if (event_needs_ymm(event))
+ mask |= XFEATURE_MASK_YMM;
mask &= ~ignore_mask;
if (mask)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 6f22ed718a75..3196191791a7 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -145,6 +145,15 @@ static inline bool event_needs_xmm(struct perf_event *event)
return false;
}
+static inline bool event_needs_ymm(struct perf_event *event)
+{
+ if (event->attr.sample_simd_regs_enabled &&
+ event->attr.sample_simd_vec_reg_qwords >= PERF_X86_YMM_QWORDS)
+ return true;
+
+ return false;
+}
+
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index fd4fe31e510b..fd5338a89ba3 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -601,6 +601,10 @@ struct x86_perf_regs {
u64 *xmm_regs;
u32 *xmm_space; /* for xsaves */
};
+ union {
+ u64 *ymmh_regs;
+ struct ymmh_struct *ymmh;
+ };
};
extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h
index c3862e5fdd6d..4fd598785f6d 100644
--- a/arch/x86/include/uapi/asm/perf_regs.h
+++ b/arch/x86/include/uapi/asm/perf_regs.h
@@ -57,19 +57,23 @@ enum perf_event_x86_regs {
enum {
PERF_REG_X86_XMM,
+ PERF_REG_X86_YMM,
PERF_REG_X86_MAX_SIMD_REGS,
};
enum {
PERF_X86_SIMD_XMM_REGS = 16,
- PERF_X86_SIMD_VEC_REGS_MAX = PERF_X86_SIMD_XMM_REGS,
+ PERF_X86_SIMD_YMM_REGS = 16,
+ PERF_X86_SIMD_VEC_REGS_MAX = PERF_X86_SIMD_YMM_REGS,
};
#define PERF_X86_SIMD_VEC_MASK GENMASK_ULL(PERF_X86_SIMD_VEC_REGS_MAX - 1, 0)
enum {
PERF_X86_XMM_QWORDS = 2,
- PERF_X86_SIMD_QWORDS_MAX = PERF_X86_XMM_QWORDS,
+ PERF_X86_YMMH_QWORDS = 2,
+ PERF_X86_YMM_QWORDS = 4,
+ PERF_X86_SIMD_QWORDS_MAX = PERF_X86_YMM_QWORDS,
};
#endif /* _ASM_X86_PERF_REGS_H */
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index 6fd691cb7e64..1fcf8fa76607 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -68,6 +68,11 @@ void perf_simd_reg_check(struct pt_regs *regs, u64 ignore,
!perf_regs->xmm_regs)
*nr_vectors = 0;
+ if (!(ignore & XFEATURE_MASK_YMM) &&
+ *vec_qwords >= PERF_X86_YMM_QWORDS &&
+ !perf_regs->ymmh_regs)
+ *vec_qwords = PERF_X86_XMM_QWORDS;
+
*nr_pred = 0;
}
@@ -95,6 +100,7 @@ u64 perf_simd_reg_value(struct pt_regs *regs, int idx,
u16 qwords_idx, bool pred)
{
struct x86_perf_regs *perf_regs = container_of(regs, struct x86_perf_regs, regs);
+ int index;
if (pred)
return 0;
@@ -107,6 +113,11 @@ u64 perf_simd_reg_value(struct pt_regs *regs, int idx,
if (!perf_regs->xmm_regs)
return 0;
return perf_regs->xmm_regs[idx * PERF_X86_XMM_QWORDS + qwords_idx];
+ } else if (qwords_idx < PERF_X86_YMM_QWORDS) {
+ if (!perf_regs->ymmh_regs)
+ return 0;
+ index = idx * PERF_X86_YMMH_QWORDS + qwords_idx - PERF_X86_XMM_QWORDS;
+ return perf_regs->ymmh_regs[index];
}
return 0;
@@ -123,7 +134,8 @@ int perf_simd_reg_validate(u16 vec_qwords, u64 vec_mask,
if (vec_mask)
return -EINVAL;
} else {
- if (vec_qwords != PERF_X86_XMM_QWORDS)
+ if (vec_qwords != PERF_X86_XMM_QWORDS &&
+ vec_qwords != PERF_X86_YMM_QWORDS)
return -EINVAL;
if (vec_mask & ~PERF_X86_SIMD_VEC_MASK)
return -EINVAL;
--
2.34.1