public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Peter Zijlstra <peterz@infradead.org>
To: mingo@kernel.org, ravi.bangoria@amd.com, lucas.demarchi@intel.com
Cc: linux-kernel@vger.kernel.org, peterz@infradead.org,
	willy@infradead.org, acme@kernel.org, namhyung@kernel.org,
	mark.rutland@arm.com, alexander.shishkin@linux.intel.com,
	jolsa@kernel.org, irogers@google.com, adrian.hunter@intel.com,
	kan.liang@linux.intel.com
Subject: [PATCH v2 16/24] perf: Detach perf_cpu_pmu_context and pmu lifetimes
Date: Wed, 05 Feb 2025 11:21:36 +0100	[thread overview]
Message-ID: <20250205102450.016081954@infradead.org> (raw)
In-Reply-To: <20250205102120.531585416@infradead.org>

In preparation for being able to unregister a pmu with existing events,
it becomes important to detach struct perf_cpu_pmu_context lifetimes
from that of struct pmu.

Notably perf_cpu_pmu_context embeds a perf_event_pmu_context that can
stay referenced until the last event goes.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/perf_event.h |    4 +--
 kernel/events/core.c       |   56 +++++++++++++++++++++++++++++++++++++--------
 2 files changed, 49 insertions(+), 11 deletions(-)

--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -336,7 +336,7 @@ struct pmu {
 	 */
 	unsigned int			scope;
 
-	struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
+	struct perf_cpu_pmu_context __percpu **cpu_pmu_context;
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
@@ -901,7 +901,7 @@ struct perf_event_pmu_context {
 	struct list_head		pinned_active;
 	struct list_head		flexible_active;
 
-	/* Used to avoid freeing per-cpu perf_event_pmu_context */
+	/* Used to identify the per-cpu perf_event_pmu_context */
 	unsigned int			embedded : 1;
 
 	unsigned int			nr_events;
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1178,7 +1178,7 @@ static int perf_mux_hrtimer_restart_ipi(
 
 static __always_inline struct perf_cpu_pmu_context *this_cpc(struct pmu *pmu)
 {
-	return this_cpu_ptr(pmu->cpu_pmu_context);
+	return *this_cpu_ptr(pmu->cpu_pmu_context);
 }
 
 void perf_pmu_disable(struct pmu *pmu)
@@ -4971,11 +4971,14 @@ find_get_pmu_context(struct pmu *pmu, st
 		 */
 		struct perf_cpu_pmu_context *cpc;
 
-		cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
+		cpc = *per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
 		epc = &cpc->epc;
 		raw_spin_lock_irq(&ctx->lock);
 		if (!epc->ctx) {
-			atomic_set(&epc->refcount, 1);
+			/*
+			 * One extra reference for the pmu; see perf_pmu_free().
+			 */
+			atomic_set(&epc->refcount, 2);
 			epc->embedded = 1;
 			list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
 			epc->ctx = ctx;
@@ -5044,6 +5047,15 @@ static void get_pmu_ctx(struct perf_even
 	WARN_ON_ONCE(!atomic_inc_not_zero(&epc->refcount));
 }
 
+static void free_cpc_rcu(struct rcu_head *head)
+{
+	struct perf_cpu_pmu_context *cpc =
+		container_of(head, typeof(*cpc), epc.rcu_head);
+
+	kfree(cpc->epc.task_ctx_data);
+	kfree(cpc);
+}
+
 static void free_epc_rcu(struct rcu_head *head)
 {
 	struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
@@ -5078,8 +5090,10 @@ static void put_pmu_ctx(struct perf_even
 
 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
-	if (epc->embedded)
+	if (epc->embedded) {
+		call_rcu(&epc->rcu_head, free_cpc_rcu);
 		return;
+	}
 
 	call_rcu(&epc->rcu_head, free_epc_rcu);
 }
@@ -11595,7 +11609,7 @@ perf_event_mux_interval_ms_store(struct
 	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct perf_cpu_pmu_context *cpc;
-		cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
+		cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
 		cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
 
 		cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
@@ -11767,7 +11781,25 @@ static void perf_pmu_free(struct pmu *pm
 		device_del(pmu->dev);
 		put_device(pmu->dev);
 	}
-	free_percpu(pmu->cpu_pmu_context);
+
+	if (pmu->cpu_pmu_context) {
+		int cpu;
+
+		for_each_possible_cpu(cpu) {
+			struct perf_cpu_pmu_context *cpc;
+
+			cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
+			if (!cpc)
+				continue;
+			if (cpc->epc.embedded) {
+				/* refcount managed */
+				put_pmu_ctx(&cpc->epc);
+				continue;
+			}
+			kfree(cpc);
+		}
+		free_percpu(pmu->cpu_pmu_context);
+	}
 }
 
 DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
@@ -11806,14 +11838,20 @@ int perf_pmu_register(struct pmu *_pmu,
 			return ret;
 	}
 
-	pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
+	pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context *);
 	if (!pmu->cpu_pmu_context)
 		return -ENOMEM;
 
 	for_each_possible_cpu(cpu) {
-		struct perf_cpu_pmu_context *cpc;
+		struct perf_cpu_pmu_context *cpc =
+			kmalloc_node(sizeof(struct perf_cpu_pmu_context),
+				     GFP_KERNEL | __GFP_ZERO,
+				     cpu_to_node(cpu));
+
+		if (!cpc)
+			return -ENOMEM;
 
-		cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
+		*per_cpu_ptr(pmu->cpu_pmu_context, cpu) = cpc;
 		__perf_init_event_pmu_context(&cpc->epc, pmu);
 		__perf_mux_hrtimer_init(cpc, cpu);
 	}



  parent reply	other threads:[~2025-02-05 10:25 UTC|newest]

Thread overview: 47+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-02-05 10:21 [PATCH v2 00/24] perf: Make perf_pmu_unregister() usable Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 01/24] lockdep: Fix might_fault() Peter Zijlstra
2025-02-06 18:19   ` David Hildenbrand
2025-02-05 10:21 ` [PATCH v2 02/24] perf: Ensure bpf_perf_link path is properly serialized Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 03/24] perf: Simplify child event tear-down Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 04/24] perf: Simplify perf_event_free_task() wait Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 05/24] perf: Simplify perf_event_release_kernel() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 06/24] perf: Fix pmus_lock vs pmus_srcu ordering Peter Zijlstra
2025-02-27 16:59   ` Lucas De Marchi
2025-02-05 10:21 ` [PATCH v2 07/24] perf: Fix perf_pmu_register() vs perf_init_event() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 08/24] perf: Cleanup perf_try_init_event() Peter Zijlstra
2025-03-05 11:29   ` [tip: perf/core] perf/core: Clean up perf_try_init_event() tip-bot2 for Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 09/24] perf: Simplify perf_event_alloc() error path Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 10/24] perf: Simplify perf_pmu_register() " Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 11/24] perf: Simplify perf_pmu_register() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 12/24] perf: Simplify perf_init_event() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 13/24] perf: Simplify perf_event_alloc() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 14/24] perf: Merge pmu_disable_count into cpu_pmu_context Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 15/24] perf: Add this_cpc() helper Peter Zijlstra
2025-02-05 10:21 ` Peter Zijlstra [this message]
2025-02-05 10:21 ` [PATCH v2 17/24] perf: Introduce perf_free_addr_filters() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 18/24] perf: Robustify perf_event_free_bpf_prog() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 19/24] perf: Simplify perf_mmap() control flow Peter Zijlstra
2025-03-03  5:39   ` Ravi Bangoria
2025-03-03 11:19     ` Ingo Molnar
2025-03-03 13:36       ` Ravi Bangoria
2025-03-04  8:44         ` Ingo Molnar
2025-02-05 10:21 ` [PATCH v2 20/24] perf: Fix perf_mmap() failure path Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 21/24] perf: Further simplify perf_mmap() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 22/24] perf: Remove retry loop from perf_mmap() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 23/24] perf: Lift event->mmap_mutex in perf_mmap() Peter Zijlstra
2025-02-05 10:21 ` [PATCH v2 24/24] perf: Make perf_pmu_unregister() useable Peter Zijlstra
2025-02-10  6:39   ` Ravi Bangoria
2025-02-11 15:46     ` Peter Zijlstra
2025-02-10  6:42   ` Ravi Bangoria
2025-02-12 12:49     ` Peter Zijlstra
2025-02-13  7:52       ` Ravi Bangoria
2025-02-13 13:08         ` Peter Zijlstra
2025-02-14  3:57           ` Ravi Bangoria
2025-02-14 20:24         ` Peter Zijlstra
2025-02-17  8:24           ` Ravi Bangoria
2025-02-17 16:31             ` Ravi Bangoria
2025-02-19 13:23               ` Ravi Bangoria
2025-02-19 14:30                 ` Ravi Bangoria
2025-02-10  6:59   ` Ravi Bangoria
2025-02-13 13:07     ` Peter Zijlstra
2025-03-03  6:01 ` [PATCH v2 00/24] perf: Make perf_pmu_unregister() usable Ravi Bangoria

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250205102450.016081954@infradead.org \
    --to=peterz@infradead.org \
    --cc=acme@kernel.org \
    --cc=adrian.hunter@intel.com \
    --cc=alexander.shishkin@linux.intel.com \
    --cc=irogers@google.com \
    --cc=jolsa@kernel.org \
    --cc=kan.liang@linux.intel.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=lucas.demarchi@intel.com \
    --cc=mark.rutland@arm.com \
    --cc=mingo@kernel.org \
    --cc=namhyung@kernel.org \
    --cc=ravi.bangoria@amd.com \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox