From: Sean Christopherson <seanjc@google.com>
To: Marc Zyngier <maz@kernel.org>, Oliver Upton <oupton@kernel.org>,
Tianrui Zhao <zhaotianrui@loongson.cn>,
Bibo Mao <maobibo@loongson.cn>,
Huacai Chen <chenhuacai@kernel.org>,
Anup Patel <anup@brainfault.org>, Paul Walmsley <pjw@kernel.org>,
Palmer Dabbelt <palmer@dabbelt.com>,
Albert Ou <aou@eecs.berkeley.edu>, Xin Li <xin@zytor.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Namhyung Kim <namhyung@kernel.org>,
Sean Christopherson <seanjc@google.com>,
Paolo Bonzini <pbonzini@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,
kvm@vger.kernel.org, loongarch@lists.linux.dev,
kvm-riscv@lists.infradead.org, linux-riscv@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
Mingwei Zhang <mizhang@google.com>,
Xudong Hao <xudong.hao@intel.com>,
Sandipan Das <sandipan.das@amd.com>,
Dapeng Mi <dapeng1.mi@linux.intel.com>,
Xiong Zhang <xiong.y.zhang@linux.intel.com>,
Manali Shukla <manali.shukla@amd.com>,
Jim Mattson <jmattson@google.com>
Subject: [PATCH v6 01/44] perf: Skip pmu_ctx based on event_type
Date: Fri, 5 Dec 2025 16:16:37 -0800
Message-ID: <20251206001720.468579-2-seanjc@google.com>
In-Reply-To: <20251206001720.468579-1-seanjc@google.com>
From: Kan Liang <kan.liang@linux.intel.com>
To optimize cgroup context switches, the perf_event_pmu_context iteration
skips PMUs that have no cgroup events. A bool cgroup was introduced to
indicate that case. It works, but it is hard to extend to other cases,
e.g. skipping non-mediated PMUs; adding another bool variable for every
new case doesn't scale.
Pass the event_type instead of a dedicated bool. Check both the
event_type and the related pmu_ctx fields to decide whether to skip a
PMU.

Event flags, e.g. EVENT_CGROUP, must be cleared from ctx->is_active.
Add EVENT_FLAGS to denote such event flags.
No functional change.
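
As a standalone sketch (hypothetical types and names, not the kernel's
own), the pattern reads roughly like this: a single skip predicate keyed
off an event_type bitmask extends by adding a flag plus one check,
rather than threading yet another bool through every caller.

#include <stdbool.h>
#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE	= 0x01,
	EVENT_PINNED	= 0x02,
	EVENT_CGROUP	= 0x20,
	/* flag bits that are never set in ctx->is_active */
	EVENT_FLAGS	= EVENT_CGROUP,
	EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
};

/* Hypothetical stand-in for struct perf_event_pmu_context. */
struct pmu_ctx {
	const char *name;
	int nr_cgroups;		/* cgroup events scheduled on this PMU */
};

/* One central predicate: new skip conditions add a check here only. */
static bool skip_pmu_ctx(const struct pmu_ctx *epc, enum event_type_t type)
{
	if ((type & EVENT_CGROUP) && !epc->nr_cgroups)
		return true;
	return false;
}

int main(void)
{
	struct pmu_ctx ctxs[] = { { "cpu", 2 }, { "uncore", 0 } };
	size_t i;

	/* A cgroup switch only touches PMUs that have cgroup events... */
	for (i = 0; i < sizeof(ctxs) / sizeof(ctxs[0]); i++) {
		if (skip_pmu_ctx(&ctxs[i], EVENT_CGROUP))
			continue;
		printf("cgroup switch visits %s\n", ctxs[i].name);
	}

	/* ...while passing 0 visits every PMU, like the old cgroup=false. */
	for (i = 0; i < sizeof(ctxs) / sizeof(ctxs[0]); i++) {
		if (skip_pmu_ctx(&ctxs[i], 0))
			continue;
		printf("plain iteration visits %s\n", ctxs[i].name);
	}
	return 0;
}

Keeping EVENT_FLAGS as the mask of such flag-only bits lets the
scheduling paths strip them from ctx->is_active in one place.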
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
kernel/events/core.c | 74 ++++++++++++++++++++++++--------------------
1 file changed, 40 insertions(+), 34 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2c35acc2722b..4cc95dd15620 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -164,7 +164,7 @@ enum event_type_t {
/* see ctx_resched() for details */
EVENT_CPU = 0x10,
EVENT_CGROUP = 0x20,
-
+ EVENT_FLAGS = EVENT_CGROUP,
/* compound helpers */
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
EVENT_TIME_FROZEN = EVENT_TIME | EVENT_FROZEN,
@@ -778,27 +778,37 @@ do { \
___p; \
})
-#define for_each_epc(_epc, _ctx, _pmu, _cgroup) \
+static bool perf_skip_pmu_ctx(struct perf_event_pmu_context *pmu_ctx,
+ enum event_type_t event_type)
+{
+ if ((event_type & EVENT_CGROUP) && !pmu_ctx->nr_cgroups)
+ return true;
+ return false;
+}
+
+#define for_each_epc(_epc, _ctx, _pmu, _event_type) \
list_for_each_entry(_epc, &((_ctx)->pmu_ctx_list), pmu_ctx_entry) \
- if (_cgroup && !_epc->nr_cgroups) \
+ if (perf_skip_pmu_ctx(_epc, _event_type)) \
continue; \
else if (_pmu && _epc->pmu != _pmu) \
continue; \
else
-static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
+static void perf_ctx_disable(struct perf_event_context *ctx,
+ enum event_type_t event_type)
{
struct perf_event_pmu_context *pmu_ctx;
- for_each_epc(pmu_ctx, ctx, NULL, cgroup)
+ for_each_epc(pmu_ctx, ctx, NULL, event_type)
perf_pmu_disable(pmu_ctx->pmu);
}
-static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
+static void perf_ctx_enable(struct perf_event_context *ctx,
+ enum event_type_t event_type)
{
struct perf_event_pmu_context *pmu_ctx;
- for_each_epc(pmu_ctx, ctx, NULL, cgroup)
+ for_each_epc(pmu_ctx, ctx, NULL, event_type)
perf_pmu_enable(pmu_ctx->pmu);
}
@@ -963,8 +973,7 @@ static void perf_cgroup_switch(struct task_struct *task)
return;
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
-
- perf_ctx_disable(&cpuctx->ctx, true);
+ perf_ctx_disable(&cpuctx->ctx, EVENT_CGROUP);
ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
/*
@@ -980,7 +989,7 @@ static void perf_cgroup_switch(struct task_struct *task)
*/
ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
- perf_ctx_enable(&cpuctx->ctx, true);
+ perf_ctx_enable(&cpuctx->ctx, EVENT_CGROUP);
}
static int perf_cgroup_ensure_storage(struct perf_event *event,
@@ -2904,11 +2913,11 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
event_type &= EVENT_ALL;
- for_each_epc(epc, &cpuctx->ctx, pmu, false)
+ for_each_epc(epc, &cpuctx->ctx, pmu, 0)
perf_pmu_disable(epc->pmu);
if (task_ctx) {
- for_each_epc(epc, task_ctx, pmu, false)
+ for_each_epc(epc, task_ctx, pmu, 0)
perf_pmu_disable(epc->pmu);
task_ctx_sched_out(task_ctx, pmu, event_type);
@@ -2928,11 +2937,11 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
perf_event_sched_in(cpuctx, task_ctx, pmu);
- for_each_epc(epc, &cpuctx->ctx, pmu, false)
+ for_each_epc(epc, &cpuctx->ctx, pmu, 0)
perf_pmu_enable(epc->pmu);
if (task_ctx) {
- for_each_epc(epc, task_ctx, pmu, false)
+ for_each_epc(epc, task_ctx, pmu, 0)
perf_pmu_enable(epc->pmu);
}
}
@@ -3481,11 +3490,10 @@ static void
ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ enum event_type_t active_type = event_type & ~EVENT_FLAGS;
struct perf_event_pmu_context *pmu_ctx;
int is_active = ctx->is_active;
- bool cgroup = event_type & EVENT_CGROUP;
- event_type &= ~EVENT_CGROUP;
lockdep_assert_held(&ctx->lock);
@@ -3516,7 +3524,7 @@ ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t
* see __load_acquire() in perf_event_time_now()
*/
barrier();
- ctx->is_active &= ~event_type;
+ ctx->is_active &= ~active_type;
if (!(ctx->is_active & EVENT_ALL)) {
/*
@@ -3537,7 +3545,7 @@ ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t
is_active ^= ctx->is_active; /* changed bits */
- for_each_epc(pmu_ctx, ctx, pmu, cgroup)
+ for_each_epc(pmu_ctx, ctx, pmu, event_type)
__pmu_ctx_sched_out(pmu_ctx, is_active);
}
@@ -3693,7 +3701,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
- perf_ctx_disable(ctx, false);
+ perf_ctx_disable(ctx, 0);
/* PMIs are disabled; ctx->nr_no_switch_fast is stable. */
if (local_read(&ctx->nr_no_switch_fast) ||
@@ -3717,7 +3725,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
perf_ctx_sched_task_cb(ctx, task, false);
- perf_ctx_enable(ctx, false);
+ perf_ctx_enable(ctx, 0);
/*
* RCU_INIT_POINTER here is safe because we've not
@@ -3741,13 +3749,13 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
if (do_switch) {
raw_spin_lock(&ctx->lock);
- perf_ctx_disable(ctx, false);
+ perf_ctx_disable(ctx, 0);
inside_switch:
perf_ctx_sched_task_cb(ctx, task, false);
task_ctx_sched_out(ctx, NULL, EVENT_ALL);
- perf_ctx_enable(ctx, false);
+ perf_ctx_enable(ctx, 0);
raw_spin_unlock(&ctx->lock);
}
}
@@ -4056,11 +4064,9 @@ static void
ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ enum event_type_t active_type = event_type & ~EVENT_FLAGS;
struct perf_event_pmu_context *pmu_ctx;
int is_active = ctx->is_active;
- bool cgroup = event_type & EVENT_CGROUP;
-
- event_type &= ~EVENT_CGROUP;
lockdep_assert_held(&ctx->lock);
@@ -4078,7 +4084,7 @@ ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t
barrier();
}
- ctx->is_active |= (event_type | EVENT_TIME);
+ ctx->is_active |= active_type | EVENT_TIME;
if (ctx->task) {
if (!(is_active & EVENT_ALL))
cpuctx->task_ctx = ctx;
@@ -4093,13 +4099,13 @@ ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t
* in order to give them the best chance of going on.
*/
if (is_active & EVENT_PINNED) {
- for_each_epc(pmu_ctx, ctx, pmu, cgroup)
+ for_each_epc(pmu_ctx, ctx, pmu, event_type)
__pmu_ctx_sched_in(pmu_ctx, EVENT_PINNED);
}
/* Then walk through the lower prio flexible groups */
if (is_active & EVENT_FLEXIBLE) {
- for_each_epc(pmu_ctx, ctx, pmu, cgroup)
+ for_each_epc(pmu_ctx, ctx, pmu, event_type)
__pmu_ctx_sched_in(pmu_ctx, EVENT_FLEXIBLE);
}
}
@@ -4116,11 +4122,11 @@ static void perf_event_context_sched_in(struct task_struct *task)
if (cpuctx->task_ctx == ctx) {
perf_ctx_lock(cpuctx, ctx);
- perf_ctx_disable(ctx, false);
+ perf_ctx_disable(ctx, 0);
perf_ctx_sched_task_cb(ctx, task, true);
- perf_ctx_enable(ctx, false);
+ perf_ctx_enable(ctx, 0);
perf_ctx_unlock(cpuctx, ctx);
goto rcu_unlock;
}
@@ -4133,7 +4139,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
if (!ctx->nr_events)
goto unlock;
- perf_ctx_disable(ctx, false);
+ perf_ctx_disable(ctx, 0);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
@@ -4143,7 +4149,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
* events, no need to flip the cpuctx's events around.
*/
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
- perf_ctx_disable(&cpuctx->ctx, false);
+ perf_ctx_disable(&cpuctx->ctx, 0);
ctx_sched_out(&cpuctx->ctx, NULL, EVENT_FLEXIBLE);
}
@@ -4152,9 +4158,9 @@ static void perf_event_context_sched_in(struct task_struct *task)
perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true);
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
- perf_ctx_enable(&cpuctx->ctx, false);
+ perf_ctx_enable(&cpuctx->ctx, 0);
- perf_ctx_enable(ctx, false);
+ perf_ctx_enable(ctx, 0);
unlock:
perf_ctx_unlock(cpuctx, ctx);
--
2.52.0.223.gf5cc29aaa4-goog
Thread overview: 60+ messages
2025-12-06 0:16 [PATCH v6 00/44] KVM: x86: Add support for mediated vPMUs Sean Christopherson
2025-12-06 0:16 ` Sean Christopherson [this message]
2025-12-06 0:16 ` [PATCH v6 02/44] perf: Add generic exclude_guest support Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 03/44] perf: Move security_perf_event_free() call to __free_event() Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 04/44] perf: Add APIs to create/release mediated guest vPMUs Sean Christopherson
2025-12-08 11:51 ` Peter Zijlstra
2025-12-08 18:07 ` Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 05/44] perf: Clean up perf ctx time Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 06/44] perf: Add a EVENT_GUEST flag Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 07/44] perf: Add APIs to load/put guest mediated PMU context Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 08/44] perf/x86/core: Register a new vector for handling mediated guest PMIs Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 09/44] perf/x86/core: Add APIs to switch to/from mediated PMI vector (for KVM) Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 10/44] perf/x86/core: Do not set bit width for unavailable counters Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 11/44] perf/x86/core: Plumb mediated PMU capability from x86_pmu to x86_pmu_cap Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 12/44] perf/x86/intel: Support PERF_PMU_CAP_MEDIATED_VPMU Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 13/44] perf/x86/amd: Support PERF_PMU_CAP_MEDIATED_VPMU for AMD host Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 14/44] KVM: Add a simplified wrapper for registering perf callbacks Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 15/44] KVM: x86/pmu: Snapshot host (i.e. perf's) reported PMU capabilities Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 16/44] KVM: x86/pmu: Start stubbing in mediated PMU support Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 17/44] KVM: x86/pmu: Implement Intel mediated PMU requirements and constraints Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 18/44] KVM: x86/pmu: Implement AMD mediated PMU requirements Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 19/44] KVM: x86/pmu: Register PMI handler for mediated vPMU Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 20/44] KVM: x86/pmu: Disable RDPMC interception for compatible " Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 21/44] KVM: x86/pmu: Load/save GLOBAL_CTRL via entry/exit fields for mediated PMU Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 22/44] KVM: x86/pmu: Disable interception of select PMU MSRs for mediated vPMUs Sean Christopherson
2025-12-06 0:16 ` [PATCH v6 23/44] KVM: x86/pmu: Bypass perf checks when emulating mediated PMU counter accesses Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 24/44] KVM: x86/pmu: Introduce eventsel_hw to prepare for pmu event filtering Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 25/44] KVM: x86/pmu: Reprogram mediated PMU event selectors on event filter updates Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 26/44] KVM: x86/pmu: Always stuff GuestOnly=1,HostOnly=0 for mediated PMCs on AMD Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 27/44] KVM: x86/pmu: Load/put mediated PMU context when entering/exiting guest Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 28/44] KVM: x86/pmu: Disallow emulation in the fastpath if mediated PMCs are active Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 29/44] KVM: x86/pmu: Handle emulated instruction for mediated vPMU Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 30/44] KVM: nVMX: Add macros to simplify nested MSR interception setting Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 31/44] KVM: nVMX: Disable PMU MSR interception as appropriate while running L2 Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 32/44] KVM: nSVM: " Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 33/44] KVM: x86/pmu: Expose enable_mediated_pmu parameter to user space Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 34/44] KVM: x86/pmu: Elide WRMSRs when loading guest PMCs if values already match Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 35/44] KVM: VMX: Drop intermediate "guest" field from msr_autostore Sean Christopherson
2025-12-08 9:14 ` Mi, Dapeng
2025-12-06 0:17 ` [PATCH v6 36/44] KVM: nVMX: Don't update msr_autostore count when saving TSC for vmcs12 Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 37/44] KVM: VMX: Dedup code for removing MSR from VMCS's auto-load list Sean Christopherson
2025-12-08 9:29 ` Mi, Dapeng
2025-12-09 17:37 ` Sean Christopherson
2025-12-10 1:08 ` Mi, Dapeng
2025-12-06 0:17 ` [PATCH v6 38/44] KVM: VMX: Drop unused @entry_only param from add_atomic_switch_msr() Sean Christopherson
2025-12-08 9:32 ` Mi, Dapeng
2025-12-06 0:17 ` [PATCH v6 39/44] KVM: VMX: Bug the VM if either MSR auto-load list is full Sean Christopherson
2025-12-08 9:32 ` Mi, Dapeng
2025-12-08 9:34 ` Mi, Dapeng
2025-12-06 0:17 ` [PATCH v6 40/44] KVM: VMX: Set MSR index auto-load entry if and only if entry is "new" Sean Christopherson
2025-12-08 9:35 ` Mi, Dapeng
2025-12-06 0:17 ` [PATCH v6 41/44] KVM: VMX: Compartmentalize adding MSRs to host vs. guest auto-load list Sean Christopherson
2025-12-08 9:36 ` Mi, Dapeng
2025-12-06 0:17 ` [PATCH v6 42/44] KVM: VMX: Dedup code for adding MSR to VMCS's auto list Sean Christopherson
2025-12-08 9:37 ` Mi, Dapeng
2025-12-06 0:17 ` [PATCH v6 43/44] KVM: VMX: Initialize vmcs01.VM_EXIT_MSR_STORE_ADDR with list address Sean Christopherson
2025-12-06 0:17 ` [PATCH v6 44/44] KVM: VMX: Add mediated PMU support for CPUs without "save perf global ctrl" Sean Christopherson
2025-12-08 9:39 ` Mi, Dapeng
2025-12-09 6:31 ` Mi, Dapeng
2025-12-08 15:37 ` [PATCH v6 00/44] KVM: x86: Add support for mediated vPMUs Peter Zijlstra