linux-perf-users.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] perf/x86/intel: Only check GP counters for PEBS constraints validation
@ 2025-11-12  0:45 Dapeng Mi
  2025-11-19  8:56 ` Mi, Dapeng
  0 siblings, 1 reply; 2+ messages in thread
From: Dapeng Mi @ 2025-11-12  0:45 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
	Andi Kleen, Eranian Stephane
  Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
	Falcon Thomas, Xudong Hao, Dapeng Mi

It's good enough to only check GP counters for PEBS constraints
validation since constraints overlap can only happen on GP counters.

Besides, opportunistically refine the code style and use pr_warn() to
replace pr_info(), as the message itself is a warning message.

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
 arch/x86/events/intel/core.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index aad89c9d9514..81e6c8bcabde 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5506,7 +5506,7 @@ static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
 			}
 
 			if (check_fail) {
-				pr_info("The two events 0x%llx and 0x%llx may not be "
+				pr_warn("The two events 0x%llx and 0x%llx may not be "
 					"fully scheduled under some circumstances as "
 					"%s.\n",
 					c1->code, c2->code, dyn_constr_type_name[type]);
@@ -5519,6 +5519,7 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu,
 				       struct event_constraint *constr,
 				       u64 cntr_mask)
 {
+	u64 gp_mask = GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
 	enum dyn_constr_type i;
 	u64 mask;
 
@@ -5533,20 +5534,25 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu,
 				mask = x86_pmu.lbr_counters;
 			break;
 		case DYN_CONSTR_ACR_CNTR:
-			mask = hybrid(pmu, acr_cntr_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
+			mask = hybrid(pmu, acr_cntr_mask64) & gp_mask;
 			break;
 		case DYN_CONSTR_ACR_CAUSE:
-			if (hybrid(pmu, acr_cntr_mask64) == hybrid(pmu, acr_cause_mask64))
+			if (hybrid(pmu, acr_cntr_mask64) ==
+					hybrid(pmu, acr_cause_mask64))
 				continue;
-			mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
+			mask = hybrid(pmu, acr_cause_mask64) & gp_mask;
 			break;
 		case DYN_CONSTR_PEBS:
-			if (x86_pmu.arch_pebs)
-				mask = hybrid(pmu, arch_pebs_cap).counters;
+			if (x86_pmu.arch_pebs) {
+				mask = hybrid(pmu, arch_pebs_cap).counters &
+				       gp_mask;
+			}
 			break;
 		case DYN_CONSTR_PDIST:
-			if (x86_pmu.arch_pebs)
-				mask = hybrid(pmu, arch_pebs_cap).pdists;
+			if (x86_pmu.arch_pebs) {
+				mask = hybrid(pmu, arch_pebs_cap).pdists &
+				       gp_mask;
+			}
 			break;
 		default:
 			pr_warn("Unsupported dynamic constraint type %d\n", i);

base-commit: 2093d8cf80fa5552d1025a78a8f3a10bf3b6466e
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] perf/x86/intel: Only check GP counters for PEBS constraints validation
  2025-11-12  0:45 [PATCH] perf/x86/intel: Only check GP counters for PEBS constraints validation Dapeng Mi
@ 2025-11-19  8:56 ` Mi, Dapeng
  0 siblings, 0 replies; 2+ messages in thread
From: Mi, Dapeng @ 2025-11-19  8:56 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
	Andi Kleen, Eranian Stephane
  Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
	Falcon Thomas, Xudong Hao

Hi Peter,

Could you please review this patch? The PEBS constraints overlap validation
should be limited to GP counters only; otherwise it may cause some false
alarms on some platforms.

Thanks,

Dapeng Mi

On 11/12/2025 8:45 AM, Dapeng Mi wrote:
> It's good enough to only check GP counters for PEBS constraints
> validation since constraints overlap can only happen on GP counters.
>
> Besides opportunistically refine the code style and use pr_warn() to
> replace pr_info() as the message itself is a warning message.
>
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> ---
>  arch/x86/events/intel/core.c | 22 ++++++++++++++--------
>  1 file changed, 14 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index aad89c9d9514..81e6c8bcabde 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -5506,7 +5506,7 @@ static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
>  			}
>  
>  			if (check_fail) {
> -				pr_info("The two events 0x%llx and 0x%llx may not be "
> +				pr_warn("The two events 0x%llx and 0x%llx may not be "
>  					"fully scheduled under some circumstances as "
>  					"%s.\n",
>  					c1->code, c2->code, dyn_constr_type_name[type]);
> @@ -5519,6 +5519,7 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu,
>  				       struct event_constraint *constr,
>  				       u64 cntr_mask)
>  {
> +	u64 gp_mask = GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
>  	enum dyn_constr_type i;
>  	u64 mask;
>  
> @@ -5533,20 +5534,25 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu,
>  				mask = x86_pmu.lbr_counters;
>  			break;
>  		case DYN_CONSTR_ACR_CNTR:
> -			mask = hybrid(pmu, acr_cntr_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
> +			mask = hybrid(pmu, acr_cntr_mask64) & gp_mask;
>  			break;
>  		case DYN_CONSTR_ACR_CAUSE:
> -			if (hybrid(pmu, acr_cntr_mask64) == hybrid(pmu, acr_cause_mask64))
> +			if (hybrid(pmu, acr_cntr_mask64) ==
> +					hybrid(pmu, acr_cause_mask64))
>  				continue;
> -			mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
> +			mask = hybrid(pmu, acr_cause_mask64) & gp_mask;
>  			break;
>  		case DYN_CONSTR_PEBS:
> -			if (x86_pmu.arch_pebs)
> -				mask = hybrid(pmu, arch_pebs_cap).counters;
> +			if (x86_pmu.arch_pebs) {
> +				mask = hybrid(pmu, arch_pebs_cap).counters &
> +				       gp_mask;
> +			}
>  			break;
>  		case DYN_CONSTR_PDIST:
> -			if (x86_pmu.arch_pebs)
> -				mask = hybrid(pmu, arch_pebs_cap).pdists;
> +			if (x86_pmu.arch_pebs) {
> +				mask = hybrid(pmu, arch_pebs_cap).pdists &
> +				       gp_mask;
> +			}
>  			break;
>  		default:
>  			pr_warn("Unsupported dynamic constraint type %d\n", i);
>
> base-commit: 2093d8cf80fa5552d1025a78a8f3a10bf3b6466e

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2025-11-19  8:56 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-11-12  0:45 [PATCH] perf/x86/intel: Only check GP counters for PEBS constraints validation Dapeng Mi
2025-11-19  8:56 ` Mi, Dapeng

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).