From: "Mi, Dapeng" <dapeng1.mi@linux.intel.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Namhyung Kim <namhyung@kernel.org>,
	Ian Rogers <irogers@google.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Andi Kleen <ak@linux.intel.com>,
	Eranian Stephane <eranian@google.com>,
	linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
	Dapeng Mi <dapeng1.mi@intel.com>
Subject: Re: [Patch v8 05/12] perf/x86/intel: Initialize architectural PEBS
Date: Wed, 22 Oct 2025 13:27:31 +0800	[thread overview]
Message-ID: <ba31c391-e703-4ff6-9742-4518d36bffa6@linux.intel.com> (raw)
In-Reply-To: <20251021154349.GR3245006@noisy.programming.kicks-ass.net>
On 10/21/2025 11:43 PM, Peter Zijlstra wrote:
> On Wed, Oct 15, 2025 at 02:44:15PM +0800, Dapeng Mi wrote:
>> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
>> index c88bcd5d2bc4..bfb123ff7c9a 100644
>> --- a/arch/x86/events/intel/core.c
>> +++ b/arch/x86/events/intel/core.c
>> @@ -5273,34 +5273,58 @@ static inline bool intel_pmu_broken_perf_cap(void)
>>  
>>  static void update_pmu_cap(struct pmu *pmu)
>>  {
>> -	unsigned int cntr, fixed_cntr, ecx, edx;
>> -	union cpuid35_eax eax;
>> -	union cpuid35_ebx ebx;
>> +	unsigned int eax, ebx, ecx, edx;
>> +	union cpuid35_eax eax_0;
>> +	union cpuid35_ebx ebx_0;
>> +	u64 cntrs_mask = 0;
>> +	u64 pebs_mask = 0;
>> +	u64 pdists_mask = 0;
>>  
>> -	cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
>> +	cpuid(ARCH_PERFMON_EXT_LEAF, &eax_0.full, &ebx_0.full, &ecx, &edx);
>>  
>> -	if (ebx.split.umask2)
>> +	if (ebx_0.split.umask2)
>>  		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
>> -	if (ebx.split.eq)
>> +	if (ebx_0.split.eq)
>>  		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
>>  
>> -	if (eax.split.cntr_subleaf) {
>> +	if (eax_0.split.cntr_subleaf) {
>>  		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
>> -			    &cntr, &fixed_cntr, &ecx, &edx);
>> -		hybrid(pmu, cntr_mask64) = cntr;
>> -		hybrid(pmu, fixed_cntr_mask64) = fixed_cntr;
>> +			    &eax, &ebx, &ecx, &edx);
>> +		hybrid(pmu, cntr_mask64) = eax;
>> +		hybrid(pmu, fixed_cntr_mask64) = ebx;
>> +		cntrs_mask = (u64)ebx << INTEL_PMC_IDX_FIXED | eax;
>>  	}
>>  
>> -	if (eax.split.acr_subleaf) {
>> +	if (eax_0.split.acr_subleaf) {
>>  		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
>> -			    &cntr, &fixed_cntr, &ecx, &edx);
>> +			    &eax, &ebx, &ecx, &edx);
>>  		/* The mask of the counters which can be reloaded */
>> -		hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED);
>> +		hybrid(pmu, acr_cntr_mask64) = eax | ((u64)ebx << INTEL_PMC_IDX_FIXED);
>>  
>>  		/* The mask of the counters which can cause a reload of reloadable counters */
>>  		hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
>>  	}
>>  
>> +	/* Bits[5:4] should be set simultaneously if arch-PEBS is supported */
>> +	if (eax_0.split.pebs_caps_subleaf && eax_0.split.pebs_cnts_subleaf) {
>> +		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF,
>> +			    &eax, &ebx, &ecx, &edx);
>> +		hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32;
>> +
>> +		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
>> +			    &eax, &ebx, &ecx, &edx);
>> +		pebs_mask = ((u64)ecx << INTEL_PMC_IDX_FIXED) | eax;
>> +		pdists_mask = ((u64)edx << INTEL_PMC_IDX_FIXED) | ebx;
>> +		hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
>> +		hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
>> +
>> +		if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask))
>> +			x86_pmu.arch_pebs = 0;
>> +	} else {
>> +		WARN_ON(x86_pmu.arch_pebs == 1);
>> +		x86_pmu.arch_pebs = 0;
>> +	}
>> +
>>  	if (!intel_pmu_broken_perf_cap()) {
>>  		/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
>>  		rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities);
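
As a side note, the sub-leaf probing above is easy to sanity-check from
user space. A minimal sketch, assuming ARCH_PERFMON_EXT_LEAF is 0x23 and
the counter-bitmap sub-leaf is 0x1 as in the kernel headers, and using
the GCC/Clang <cpuid.h> helper:

#include <stdio.h>
#include <cpuid.h>

#define ARCH_PERFMON_EXT_LEAF		0x23	/* assumed, as in the kernel headers */
#define ARCH_PERFMON_NUM_COUNTER_LEAF	0x1	/* assumed counter-bitmap sub-leaf */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/*
	 * Sub-leaf 0: feature bits. __get_cpuid_count() returns 0 if the
	 * leaf is out of range for this CPU.
	 */
	if (!__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, 0,
			       &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x23 not supported");
		return 1;
	}

	/*
	 * Sub-leaf 1: EAX = GP counter bitmap, EBX = fixed counter bitmap.
	 * The kernel code above additionally checks the cntr_subleaf valid
	 * bit in sub-leaf 0 before reading this.
	 */
	__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
			  &eax, &ebx, &ecx, &edx);
	printf("GP counters:    %#x\n", eax);
	printf("fixed counters: %#x\n", ebx);
	return 0;
}

This only mirrors the enumeration for inspection; the hybrid() updates in
the patch remain the authoritative consumers of these bitmaps.
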
> I've stuck this on top.
>
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -5271,6 +5271,8 @@ static inline bool intel_pmu_broken_perf
>  	return false;
>  }
>  
> +#define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED))
> +
>  static void update_pmu_cap(struct pmu *pmu)
>  {
>  	unsigned int eax, ebx, ecx, edx;
> @@ -5292,17 +5294,16 @@ static void update_pmu_cap(struct pmu *p
>  			    &eax, &ebx, &ecx, &edx);
>  		hybrid(pmu, cntr_mask64) = eax;
>  		hybrid(pmu, fixed_cntr_mask64) = ebx;
> -		cntrs_mask = (u64)ebx << INTEL_PMC_IDX_FIXED | eax;
> +		cntrs_mask = counter_mask(eax, ebx);
>  	}
>  
>  	if (eax_0.split.acr_subleaf) {
>  		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
>  			    &eax, &ebx, &ecx, &edx);
>  		/* The mask of the counters which can be reloaded */
> -		hybrid(pmu, acr_cntr_mask64) = eax | ((u64)ebx << INTEL_PMC_IDX_FIXED);
> -
> +		hybrid(pmu, acr_cntr_mask64) = counter_mask(eax, ebx);
>  		/* The mask of the counters which can cause a reload of reloadable counters */
> -		hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
> +		hybrid(pmu, acr_cause_mask64) = counter_mask(ecx, edx);
>  	}
>  
>  	/* Bits[5:4] should be set simultaneously if arch-PEBS is supported */
> @@ -5313,8 +5314,8 @@ static void update_pmu_cap(struct pmu *p
>  
>  		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
>  			    &eax, &ebx, &ecx, &edx);
> -		pebs_mask = ((u64)ecx << INTEL_PMC_IDX_FIXED) | eax;
> -		pdists_mask = ((u64)edx << INTEL_PMC_IDX_FIXED) | ebx;
> +		pebs_mask   = counter_mask(eax, ecx);
> +		pdists_mask = counter_mask(ebx, edx);
>  		hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
>  		hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
Nice suggestion. Will do. Thanks.
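
Just to double-check the macro semantics on my side, a quick stand-alone
sketch (assuming INTEL_PMC_IDX_FIXED is 32, as in
arch/x86/include/asm/perf_event.h; the example bitmaps are made up):

#include <stdio.h>
#include <stdint.h>

#define INTEL_PMC_IDX_FIXED 32	/* fixed counters start at bit 32 */
#define counter_mask(_gp, _fixed) \
	((_gp) | ((uint64_t)(_fixed) << INTEL_PMC_IDX_FIXED))

int main(void)
{
	/* e.g. 8 GP counters (bits 0-7), 3 fixed counters (bits 32-34) */
	uint64_t mask = counter_mask(0xff, 0x7);

	printf("%#llx\n", (unsigned long long)mask);	/* prints 0x7000000ff */
	return 0;
}

which is exactly the open-coded shift-and-or pattern the macro replaces.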