Linux Perf Users
 help / color / mirror / Atom feed
From: Zide Chen <zide.chen@intel.com>
To: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Namhyung Kim <namhyung@kernel.org>,
	Ian Rogers <irogers@google.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Andi Kleen <ak@linux.intel.com>,
	Eranian Stephane <eranian@google.com>
Cc: linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
	Dapeng Mi <dapeng1.mi@linux.intel.com>,
	Zide Chen <zide.chen@intel.com>
Subject: [PATCH 5/7] perf/x86/intel/uncore: Introduce PMU flags and broken state
Date: Tue, 12 May 2026 16:30:46 -0700	[thread overview]
Message-ID: <20260512233048.9577-6-zide.chen@intel.com> (raw)
In-Reply-To: <20260512233048.9577-1-zide.chen@intel.com>

Replace the boolean 'registered' field in intel_uncore_pmu with an
unsigned long 'flags' field, and add a PMU_BROKEN flag to track box
setup failures.

When any box fails to initialize, the PMU is marked broken.  Broken
PMUs reject new event assignments and skip future box setup attempts.
If the PMU was already registered, it remains so to avoid disrupting
in-flight events on other boxes.

To prevent retry loops, die_refcnt and cpu_refcnt are not decremented
on failure, and broken PMUs are skipped in the CPU hotplug and box
allocation paths.

Signed-off-by: Zide Chen <zide.chen@intel.com>
---
 arch/x86/events/intel/uncore.c     | 36 +++++++++++++++++++++++-------
 arch/x86/events/intel/uncore.h     | 12 +++++++++-
 arch/x86/events/intel/uncore_snb.c |  2 +-
 3 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 00ed4e5047ac..922ba299533e 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -757,7 +757,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
 
 	pmu = uncore_event_to_pmu(event);
 	/* no device found for this pmu */
-	if (!pmu->registered)
+	if (!uncore_pmu_available(pmu))
 		return -ENOENT;
 
 	/* Sampling not supported yet */
@@ -953,16 +953,16 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
 
 	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
 	if (!ret)
-		pmu->registered = true;
+		uncore_pmu_set_registered(pmu);
 	return ret;
 }
 
 static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 {
-	if (!pmu->registered)
+	if (!uncore_pmu_registered(pmu))
 		return;
 	perf_pmu_unregister(&pmu->pmu);
-	pmu->registered = false;
+	WRITE_ONCE(pmu->flags, 0);
 }
 
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
@@ -1155,7 +1155,13 @@ static int uncore_box_setup(struct intel_uncore_pmu *pmu,
 
 	/* die_refcnt tracks online dies, not only functioning boxes. */
 	dies = atomic_inc_return(&pmu->die_refcnt);
-	uncore_box_init(box);
+
+	if (uncore_pmu_broken(pmu))
+		return -ENODEV;
+
+	ret = uncore_box_init(box);
+	if (ret)
+		goto err;
 
 	/* First active box registers the pmu. */
 	if (dies > 1)
@@ -1167,6 +1173,19 @@ static int uncore_box_setup(struct intel_uncore_pmu *pmu,
 
 	return 0;
 err:
+	/*
 +	 * On failure of any box, mark the per-package PMU as broken regardless
+	 * of whether it was registered or not.
+	 *
+	 * Don't decrement die_refcnt to prevent any future CPU online
 +	 * event or PCI probe from retrying the failed PMU registration.
+	 *
 +	 * Don't decrement cpu_refcnt to prevent other in-die CPUs from
+	 * trying to set up the PMU box again.
+	 *
+	 * Don't kfree box; MSR and MMIO boxes are freed at module exit only.
+	 */
+	uncore_pmu_set_broken(pmu);
 	uncore_box_exit(box);
 	return ret;
 }
@@ -1502,7 +1521,8 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 
 		if (old_cpu < 0) {
 			WARN_ON_ONCE(box->cpu != -1);
-			if (uncore_die_has_box(type, die, pmu->pmu_idx)) {
+			if (uncore_die_has_box(type, die, pmu->pmu_idx) &&
+			    !uncore_pmu_broken(pmu)) {
 				box->cpu = new_cpu;
 				cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
 			}
@@ -1512,7 +1532,7 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 		WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu);
 		box->cpu = -1;
 		cpumask_clear_cpu(old_cpu, &pmu->cpu_mask);
-		if (new_cpu < 0)
+		if (new_cpu < 0 || uncore_pmu_broken(pmu))
 			continue;
 
 		if (!uncore_die_has_box(type, die, pmu->pmu_idx))
@@ -1592,7 +1612,7 @@ static int allocate_boxes(struct intel_uncore_type **types,
 		type = *types;
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[die])
+			if (pmu->boxes[die] || uncore_pmu_broken(pmu))
 				continue;
 			box = uncore_alloc_box(type, cpu_to_node(cpu));
 			if (!box)
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 5ee05545116a..4d3a99bf1455 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -146,13 +146,23 @@ struct intel_uncore_pmu {
 	struct pmu			pmu;
 	char				name[UNCORE_PMU_NAME_LEN];
 	int				pmu_idx;
-	bool				registered;
+	unsigned long			flags;
 	atomic_t			die_refcnt;
 	cpumask_t			cpu_mask;
 	struct intel_uncore_type	*type;
 	struct intel_uncore_box		**boxes;
 };
 
+#define PMU_REGISTERED_BIT	0
+#define PMU_BROKEN_BIT		1
+
+#define uncore_pmu_registered(pmu)	test_bit(PMU_REGISTERED_BIT, &(pmu)->flags)
+#define uncore_pmu_broken(pmu)		test_bit(PMU_BROKEN_BIT, &(pmu)->flags)
+#define uncore_pmu_available(pmu)	(uncore_pmu_registered(pmu) &&	\
+					 !uncore_pmu_broken(pmu))
+#define uncore_pmu_set_registered(pmu)	set_bit(PMU_REGISTERED_BIT, &(pmu)->flags)
+#define uncore_pmu_set_broken(pmu)	set_bit(PMU_BROKEN_BIT, &(pmu)->flags)
+
 struct intel_uncore_extra_reg {
 	raw_spinlock_t lock;
 	u64 config, config1, config2;
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index c5347920541c..055131c508ff 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -940,7 +940,7 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
 
 	pmu = uncore_event_to_pmu(event);
 	/* no device found for this pmu */
-	if (!pmu->registered)
+	if (!uncore_pmu_available(pmu))
 		return -ENOENT;
 
 	/* Sampling not supported yet */
-- 
2.54.0


  parent reply	other threads:[~2026-05-12 23:39 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-12 23:30 [PATCH 0/7] perf/x86/intel/uncore: PMU setup robustness fixes Zide Chen
2026-05-12 23:30 ` [PATCH 1/7] perf/x86/intel/uncore: Rename refcount fields and other cleanups Zide Chen
2026-05-13  0:26   ` Ian Rogers
2026-05-12 23:30 ` [PATCH 2/7] perf/x86/intel/uncore: Let init_box() callback report failures Zide Chen
2026-05-13  0:23   ` Ian Rogers
2026-05-12 23:30 ` [PATCH 3/7] perf/x86/intel/uncore: Keep PCI PMUs working when MMIO/MSR setup fails Zide Chen
2026-05-13  0:30   ` Ian Rogers
2026-05-12 23:30 ` [PATCH 4/7] perf/x86/intel/uncore: Factor out box setup code Zide Chen
2026-05-13  0:27   ` Ian Rogers
2026-05-12 23:30 ` Zide Chen [this message]
2026-05-13  0:28   ` [PATCH 5/7] perf/x86/intel/uncore: Introduce PMU flags and broken state Ian Rogers
2026-05-12 23:30 ` [PATCH 6/7] perf/x86/intel/uncore: Fix uncore_box ref/unref ordering on CPU hotplug Zide Chen
2026-05-13  0:32   ` Ian Rogers
2026-05-13  8:59   ` Mi, Dapeng
2026-05-12 23:30 ` [PATCH 7/7] perf/x86/intel/uncore: Implement lazy setup for MSR/MMIO PMU Zide Chen
2026-05-13  0:34   ` Ian Rogers
2026-05-13  9:03   ` Mi, Dapeng

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260512233048.9577-6-zide.chen@intel.com \
    --to=zide.chen@intel.com \
    --cc=acme@kernel.org \
    --cc=adrian.hunter@intel.com \
    --cc=ak@linux.intel.com \
    --cc=alexander.shishkin@linux.intel.com \
    --cc=dapeng1.mi@linux.intel.com \
    --cc=eranian@google.com \
    --cc=irogers@google.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-perf-users@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=namhyung@kernel.org \
    --cc=peterz@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox