public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [Patch v2 1/2] perf/x86: Introduce is_x86_pmu() helper
@ 2026-03-16  5:08 Dapeng Mi
  2026-03-16  5:08 ` [Patch v2 2/2] perf/x86: Update cap_user_rdpmc based on rdpmc user disable state Dapeng Mi
  2026-04-30 16:22 ` [Patch v2 1/2] perf/x86: Introduce is_x86_pmu() helper Chen, Zide
  0 siblings, 2 replies; 4+ messages in thread
From: Dapeng Mi @ 2026-03-16  5:08 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
	Andi Kleen, Eranian Stephane
  Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
	Falcon Thomas, Xudong Hao, Dapeng Mi

From: Ian Rogers <irogers@google.com>

To facilitate the detection of x86 PMU structures in upcoming patches,
the is_x86_pmu() helper is introduced. Additionally, the is_x86_event()
helper has been refactored to utilize is_x86_pmu().

No functional changes intended.

Signed-off-by: Ian Rogers <irogers@google.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---

v2: new patch.

 arch/x86/events/core.c       | 16 ----------------
 arch/x86/events/perf_event.h | 18 +++++++++++++++++-
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 810ab21ffd99..66b1a873c395 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -774,22 +774,6 @@ void x86_pmu_enable_all(int added)
 	}
 }
 
-int is_x86_event(struct perf_event *event)
-{
-	/*
-	 * For a non-hybrid platforms, the type of X86 pmu is
-	 * always PERF_TYPE_RAW.
-	 * For a hybrid platform, the PERF_PMU_CAP_EXTENDED_HW_TYPE
-	 * is a unique capability for the X86 PMU.
-	 * Use them to detect a X86 event.
-	 */
-	if (event->pmu->type == PERF_TYPE_RAW ||
-	    event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)
-		return true;
-
-	return false;
-}
-
 struct pmu *x86_get_pmu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index fad87d3c8b2c..025f67726ecc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -115,7 +115,23 @@ static inline bool is_topdown_event(struct perf_event *event)
 	return is_metric_event(event) || is_slots_event(event);
 }
 
-int is_x86_event(struct perf_event *event);
+static inline bool is_x86_pmu(struct pmu *pmu)
+{
+	/*
+	 * For a non-hybrid platforms, the type of X86 pmu is
+	 * always PERF_TYPE_RAW.
+	 * For a hybrid platform, the PERF_PMU_CAP_EXTENDED_HW_TYPE
+	 * is a unique capability for the X86 PMU.
+	 * Use them to detect a X86 event.
+	 */
+	return pmu->type == PERF_TYPE_RAW ||
+	       pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE;
+}
+
+static inline bool is_x86_event(struct perf_event *event)
+{
+	return is_x86_pmu(event->pmu);
+}
 
 static inline bool check_leader_group(struct perf_event *leader, int flags)
 {

base-commit: becb26c89be3a6448dcd92522894427544d5b091
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [Patch v2 2/2] perf/x86: Update cap_user_rdpmc based on rdpmc user disable state
  2026-03-16  5:08 [Patch v2 1/2] perf/x86: Introduce is_x86_pmu() helper Dapeng Mi
@ 2026-03-16  5:08 ` Dapeng Mi
  2026-04-30 16:22   ` Chen, Zide
  2026-04-30 16:22 ` [Patch v2 1/2] perf/x86: Introduce is_x86_pmu() helper Chen, Zide
  1 sibling, 1 reply; 4+ messages in thread
From: Dapeng Mi @ 2026-03-16  5:08 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
	Andi Kleen, Eranian Stephane
  Cc: linux-kernel, linux-perf-users, Dapeng Mi, Zide Chen,
	Falcon Thomas, Xudong Hao, Dapeng Mi

After introducing the RDPMC user disable feature, user-space RDPMC may
return 0 instead of the actual event count. This creates an inconsistency
with cap_user_rdpmc, where cap_user_rdpmc is set, but user-space RDPMC
only returns 0.

To accurately represent the user-space RDPMC capability, update
cap_user_rdpmc based on the RDPMC user disable state. If RDPMC user
disable is enabled, cap_user_rdpmc is set to false, allowing user-space
programs to fall back to the read() syscall to obtain the real event
count.

Since arch_perf_update_userpage() could be called for software events,
enhance x86_pmu_has_rdpmc_user_disable() to only check the x86 PMUs.

Fixes: 59af95e028d4 ("perf/x86/intel: Add support for rdpmc user disable feature")
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
 
v2: Add is_x86_pmu() check before checking if rdpmc user disable feature
is supported.

v1: https://lore.kernel.org/all/20260311075201.2951073-2-dapeng1.mi@linux.intel.com/

 arch/x86/events/core.c       | 3 +++
 arch/x86/events/perf_event.h | 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 66b1a873c395..34eda8813716 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2794,6 +2794,9 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time_zero = 0;
 	userpg->cap_user_rdpmc =
 		!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
+	if (x86_pmu_has_rdpmc_user_disable(event->pmu) &&
+	    event->hw.config & ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE)
+		userpg->cap_user_rdpmc = 0;
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
 	if (!using_native_sched_clock() || !sched_clock_stable())
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 025f67726ecc..307361b106d2 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1351,8 +1351,9 @@ static inline u64 x86_pmu_get_event_config(struct perf_event *event)
 
 static inline bool x86_pmu_has_rdpmc_user_disable(struct pmu *pmu)
 {
-	return !!(hybrid(pmu, config_mask) &
-		 ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE);
+	return is_x86_pmu(pmu) &&
+	       (hybrid(pmu, config_mask) &
+		ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE);
 }
 
 extern struct event_constraint emptyconstraint;
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [Patch v2 1/2] perf/x86: Introduce is_x86_pmu() helper
  2026-03-16  5:08 [Patch v2 1/2] perf/x86: Introduce is_x86_pmu() helper Dapeng Mi
  2026-03-16  5:08 ` [Patch v2 2/2] perf/x86: Update cap_user_rdpmc based on rdpmc user disable state Dapeng Mi
@ 2026-04-30 16:22 ` Chen, Zide
  1 sibling, 0 replies; 4+ messages in thread
From: Chen, Zide @ 2026-04-30 16:22 UTC (permalink / raw)
  To: Dapeng Mi, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
	Andi Kleen, Eranian Stephane
  Cc: linux-kernel, linux-perf-users, Dapeng Mi, Falcon Thomas,
	Xudong Hao



On 3/15/2026 10:08 PM, Dapeng Mi wrote:
> From: Ian Rogers <irogers@google.com>
> 
> To facilitate the detection of x86 PMU structures in upcoming patches,
> the is_x86_pmu() helper is introduced. Additionally, the is_x86_event()
> helper has been refactored to utilize is_x86_pmu().
> 
> No functional changes intended.
> 
> Signed-off-by: Ian Rogers <irogers@google.com>
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> ---

Reviewed-by: Zide Chen <zide.chen@intel.com>



> v2: new patch.
> 
>  arch/x86/events/core.c       | 16 ----------------
>  arch/x86/events/perf_event.h | 18 +++++++++++++++++-
>  2 files changed, 17 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 810ab21ffd99..66b1a873c395 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -774,22 +774,6 @@ void x86_pmu_enable_all(int added)
>  	}
>  }
>  
> -int is_x86_event(struct perf_event *event)
> -{
> -	/*
> -	 * For a non-hybrid platforms, the type of X86 pmu is
> -	 * always PERF_TYPE_RAW.
> -	 * For a hybrid platform, the PERF_PMU_CAP_EXTENDED_HW_TYPE
> -	 * is a unique capability for the X86 PMU.
> -	 * Use them to detect a X86 event.
> -	 */
> -	if (event->pmu->type == PERF_TYPE_RAW ||
> -	    event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)
> -		return true;
> -
> -	return false;
> -}
> -
>  struct pmu *x86_get_pmu(unsigned int cpu)
>  {
>  	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
> index fad87d3c8b2c..025f67726ecc 100644
> --- a/arch/x86/events/perf_event.h
> +++ b/arch/x86/events/perf_event.h
> @@ -115,7 +115,23 @@ static inline bool is_topdown_event(struct perf_event *event)
>  	return is_metric_event(event) || is_slots_event(event);
>  }
>  
> -int is_x86_event(struct perf_event *event);
> +static inline bool is_x86_pmu(struct pmu *pmu)
> +{
> +	/*
> +	 * For a non-hybrid platforms, the type of X86 pmu is
> +	 * always PERF_TYPE_RAW.
> +	 * For a hybrid platform, the PERF_PMU_CAP_EXTENDED_HW_TYPE
> +	 * is a unique capability for the X86 PMU.
> +	 * Use them to detect a X86 event.
> +	 */
> +	return pmu->type == PERF_TYPE_RAW ||
> +	       pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE;
> +}
> +
> +static inline bool is_x86_event(struct perf_event *event)
> +{
> +	return is_x86_pmu(event->pmu);
> +}
>  
>  static inline bool check_leader_group(struct perf_event *leader, int flags)
>  {
> 
> base-commit: becb26c89be3a6448dcd92522894427544d5b091


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [Patch v2 2/2] perf/x86: Update cap_user_rdpmc based on rdpmc user disable state
  2026-03-16  5:08 ` [Patch v2 2/2] perf/x86: Update cap_user_rdpmc based on rdpmc user disable state Dapeng Mi
@ 2026-04-30 16:22   ` Chen, Zide
  0 siblings, 0 replies; 4+ messages in thread
From: Chen, Zide @ 2026-04-30 16:22 UTC (permalink / raw)
  To: Dapeng Mi, Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo,
	Namhyung Kim, Ian Rogers, Adrian Hunter, Alexander Shishkin,
	Andi Kleen, Eranian Stephane
  Cc: linux-kernel, linux-perf-users, Dapeng Mi, Falcon Thomas,
	Xudong Hao



On 3/15/2026 10:08 PM, Dapeng Mi wrote:
> After introducing the RDPMC user disable feature, user-space RDPMC may
> return 0 instead of the actual event count. This creates an inconsistency
> with cap_user_rdpmc, where cap_user_rdpmc is set, but user-space RDPMC
> only returns 0.
> 
> To accurately represent the user-space RDPMC capability, update
> cap_user_rdpmc based on the RDPMC user disable state. If RDPMC user
> disable is enabled, cap_user_rdpmc is set to false, allowing user-space
> programs to fall back to the read() syscall to obtain the real event
> count.
> 
> Since arch_perf_update_userpage() could be called for software events,
> enhance x86_pmu_has_rdpmc_user_disable() to only check the x86 PMUs.
> 
> Fixes: 59af95e028d4 ("perf/x86/intel: Add support for rdpmc user disable feature")
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> ---


Reviewed-by: Zide Chen <zide.chen@intel.com>


>  
> v2: Add is_x86_pmu() check before checking if rdpmc user disable feature
> is supported.
> 
> v1: https://lore.kernel.org/all/20260311075201.2951073-2-dapeng1.mi@linux.intel.com/
> 
>  arch/x86/events/core.c       | 3 +++
>  arch/x86/events/perf_event.h | 5 +++--
>  2 files changed, 6 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 66b1a873c395..34eda8813716 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -2794,6 +2794,9 @@ void arch_perf_update_userpage(struct perf_event *event,
>  	userpg->cap_user_time_zero = 0;
>  	userpg->cap_user_rdpmc =
>  		!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
> +	if (x86_pmu_has_rdpmc_user_disable(event->pmu) &&
> +	    event->hw.config & ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE)
> +		userpg->cap_user_rdpmc = 0;
>  	userpg->pmc_width = x86_pmu.cntval_bits;
>  
>  	if (!using_native_sched_clock() || !sched_clock_stable())
> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
> index 025f67726ecc..307361b106d2 100644
> --- a/arch/x86/events/perf_event.h
> +++ b/arch/x86/events/perf_event.h
> @@ -1351,8 +1351,9 @@ static inline u64 x86_pmu_get_event_config(struct perf_event *event)
>  
>  static inline bool x86_pmu_has_rdpmc_user_disable(struct pmu *pmu)
>  {
> -	return !!(hybrid(pmu, config_mask) &
> -		 ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE);
> +	return is_x86_pmu(pmu) &&
> +	       (hybrid(pmu, config_mask) &
> +		ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE);
>  }
>  
>  extern struct event_constraint emptyconstraint;


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-04-30 16:22 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-16  5:08 [Patch v2 1/2] perf/x86: Introduce is_x86_pmu() helper Dapeng Mi
2026-03-16  5:08 ` [Patch v2 2/2] perf/x86: Update cap_user_rdpmc based on rdpmc user disable state Dapeng Mi
2026-04-30 16:22   ` Chen, Zide
2026-04-30 16:22 ` [Patch v2 1/2] perf/x86: Introduce is_x86_pmu() helper Chen, Zide

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox