From: Colton Lewis <coltonlewis@google.com>
To: kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
Jonathan Corbet <corbet@lwn.net>,
Russell King <linux@armlinux.org.uk>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
Oliver Upton <oliver.upton@linux.dev>,
Mingwei Zhang <mizhang@google.com>,
Joey Gouly <joey.gouly@arm.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
Zenghui Yu <yuzenghui@huawei.com>,
Mark Rutland <mark.rutland@arm.com>,
Shuah Khan <shuah@kernel.org>,
Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,
linux-perf-users@vger.kernel.org,
linux-kselftest@vger.kernel.org,
Colton Lewis <coltonlewis@google.com>
Subject: [PATCH v5 19/24] KVM: arm64: Implement lazy PMU context swaps
Date: Tue, 9 Dec 2025 20:51:16 +0000 [thread overview]
Message-ID: <20251209205121.1871534-20-coltonlewis@google.com> (raw)
In-Reply-To: <20251209205121.1871534-1-coltonlewis@google.com>
Since many guests will never touch the PMU, they need not pay the cost
of context swapping those registers.
Use an enum to implement a simple state machine for PMU register
access. The PMU either accesses registers virtually or physically.
Virtual access implies all PMU registers are trapped coarsely by
MDCR_EL2.TPM and therefore do not need to be context swapped. Physical
access implies some registers are untrapped through FGT and do need to
be context swapped. All vCPUs do virtual access by default and
transition to physical if the PMU is partitioned and the guest
actually tries a PMU access.
Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
arch/arm64/include/asm/kvm_host.h | 1 +
arch/arm64/include/asm/kvm_pmu.h | 4 ++++
arch/arm64/include/asm/kvm_types.h | 7 ++++++-
arch/arm64/kvm/debug.c | 2 +-
arch/arm64/kvm/hyp/include/hyp/switch.h | 2 ++
arch/arm64/kvm/pmu-direct.c | 21 +++++++++++++++++++++
arch/arm64/kvm/pmu.c | 7 +++++++
arch/arm64/kvm/sys_regs.c | 4 ++++
8 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c7e52aaf469dc..f92027d8fdfd0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1373,6 +1373,7 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}
+void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu);
void kvm_init_host_debug_data(void);
void kvm_debug_init_vhe(void);
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_pmu.h b/arch/arm64/include/asm/kvm_pmu.h
index 25a5eb8c623da..43aa334dce517 100644
--- a/arch/arm64/include/asm/kvm_pmu.h
+++ b/arch/arm64/include/asm/kvm_pmu.h
@@ -38,6 +38,7 @@ struct kvm_pmu {
int irq_num;
bool created;
bool irq_level;
+ enum vcpu_pmu_register_access access;
};
struct arm_pmu_entry {
@@ -106,6 +107,8 @@ u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu);
void kvm_pmu_load(struct kvm_vcpu *vcpu);
void kvm_pmu_put(struct kvm_vcpu *vcpu);
+void kvm_pmu_set_physical_access(struct kvm_vcpu *vcpu);
+
#if !defined(__KVM_NVHE_HYPERVISOR__)
bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu);
bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu);
@@ -188,6 +191,7 @@ static inline u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu)
}
static inline void kvm_pmu_load(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_put(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_set_physical_access(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx, u64 val) {}
static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/include/asm/kvm_types.h b/arch/arm64/include/asm/kvm_types.h
index 9a126b9e2d7c9..9f67165359f5c 100644
--- a/arch/arm64/include/asm/kvm_types.h
+++ b/arch/arm64/include/asm/kvm_types.h
@@ -4,5 +4,10 @@
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
-#endif /* _ASM_ARM64_KVM_TYPES_H */
+enum vcpu_pmu_register_access {
+ VCPU_PMU_ACCESS_UNSET,
+ VCPU_PMU_ACCESS_VIRTUAL,
+ VCPU_PMU_ACCESS_PHYSICAL,
+};
+#endif /* _ASM_ARM64_KVM_TYPES_H */
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 0ab89c91e19cb..c2cf6b308ec60 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -34,7 +34,7 @@ static int cpu_has_spe(u64 dfr0)
* - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
* - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
*/
-static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
+void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
int hpmn = kvm_pmu_hpmn(vcpu);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index bde79ec1a1836..ea288a712bb5d 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -963,6 +963,8 @@ static bool kvm_hyp_handle_pmu_regs(struct kvm_vcpu *vcpu)
if (ret)
__kvm_skip_instr(vcpu);
+ kvm_pmu_set_physical_access(vcpu);
+
return ret;
}
diff --git a/arch/arm64/kvm/pmu-direct.c b/arch/arm64/kvm/pmu-direct.c
index 8d0d6d1a0d851..c5767e2ebc651 100644
--- a/arch/arm64/kvm/pmu-direct.c
+++ b/arch/arm64/kvm/pmu-direct.c
@@ -73,6 +73,7 @@ bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu)
u8 hpmn = vcpu->kvm->arch.nr_pmu_counters;
return kvm_vcpu_pmu_is_partitioned(vcpu) &&
+ vcpu->arch.pmu.access == VCPU_PMU_ACCESS_PHYSICAL &&
cpus_have_final_cap(ARM64_HAS_FGT) &&
(hpmn != 0 || cpus_have_final_cap(ARM64_HAS_HPMN0));
}
@@ -92,6 +93,26 @@ u64 kvm_pmu_fgt2_bits(void)
| HDFGRTR2_EL2_nPMICNTR_EL0;
}
+/**
+ * kvm_pmu_set_physical_access() - Transition a vCPU to physical PMU access
+ * @vcpu: Pointer to vcpu struct
+ *
+ * Reconfigure the guest for physical access of PMU hardware if
+ * allowed. This means reconfiguring mdcr_el2 and loading the vCPU
+ * state onto hardware. This is a no-op unless the PMU is
+ * partitioned and the vCPU is still doing virtual (fully trapped)
+ * access, i.e. this is the guest's first PMU register access.
+ */
+void kvm_pmu_set_physical_access(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vcpu_pmu_is_partitioned(vcpu) &&
+     vcpu->arch.pmu.access == VCPU_PMU_ACCESS_VIRTUAL) {
+ vcpu->arch.pmu.access = VCPU_PMU_ACCESS_PHYSICAL;
+ kvm_arm_setup_mdcr_el2(vcpu);
+ kvm_pmu_load(vcpu);
+ }
+}
+
/**
* kvm_pmu_host_counter_mask() - Compute bitmask of host-reserved counters
* @pmu: Pointer to arm_pmu struct
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 48b39f096fa12..c9862e55a4049 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -471,6 +471,12 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return 0;
}
+static void kvm_pmu_register_init(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.pmu.access == VCPU_PMU_ACCESS_UNSET)
+ vcpu->arch.pmu.access = VCPU_PMU_ACCESS_VIRTUAL;
+}
+
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
if (irqchip_in_kernel(vcpu->kvm)) {
@@ -496,6 +502,7 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
init_irq_work(&vcpu->arch.pmu.overflow_work,
kvm_pmu_perf_overflow_notify_vcpu);
+ kvm_pmu_register_init(vcpu);
vcpu->arch.pmu.created = true;
return 0;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f2ae761625a66..d73218706b834 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1197,6 +1197,8 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p->regval = __vcpu_sys_reg(vcpu, reg);
}
+ kvm_pmu_set_physical_access(vcpu);
+
return true;
}
@@ -1302,6 +1304,8 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
}
+ kvm_pmu_set_physical_access(vcpu);
+
return true;
}
--
2.52.0.239.gd5f0c6e74e-goog
next prev parent reply other threads:[~2025-12-09 20:52 UTC|newest]
Thread overview: 64+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-12-09 20:50 [PATCH v5 00/24] ARM64 PMU Partitioning Colton Lewis
2025-12-09 20:50 ` [PATCH v5 01/24] arm64: cpufeature: Add cpucap for HPMN0 Colton Lewis
2025-12-10 10:54 ` Suzuki K Poulose
2025-12-12 19:22 ` Colton Lewis
2025-12-09 20:50 ` [PATCH v5 02/24] KVM: arm64: Move arm_{psci,hypercalls}.h to an internal KVM path Colton Lewis
2025-12-09 20:51 ` [PATCH v5 03/24] KVM: arm64: Include KVM headers to get forward declarations Colton Lewis
2025-12-09 20:51 ` [PATCH v5 04/24] KVM: arm64: Move ARM specific headers in include/kvm to arch directory Colton Lewis
2025-12-09 20:51 ` [PATCH v5 05/24] KVM: arm64: Reorganize PMU includes Colton Lewis
2025-12-09 20:51 ` [PATCH v5 06/24] KVM: arm64: Reorganize PMU functions Colton Lewis
2025-12-09 20:51 ` [PATCH v5 07/24] perf: arm_pmuv3: Introduce method to partition the PMU Colton Lewis
2025-12-09 20:51 ` [PATCH v5 08/24] perf: arm_pmuv3: Generalize counter bitmasks Colton Lewis
2025-12-09 20:51 ` [PATCH v5 09/24] perf: arm_pmuv3: Keep out of guest counter partition Colton Lewis
2025-12-10 20:21 ` kernel test robot
2025-12-12 20:29 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 10/24] KVM: arm64: Set up FGT for Partitioned PMU Colton Lewis
2025-12-09 21:08 ` Oliver Upton
2025-12-12 20:51 ` Colton Lewis
2025-12-17 0:13 ` Oliver Upton
2025-12-09 20:51 ` [PATCH v5 11/24] KVM: arm64: Writethrough trapped PMEVTYPER register Colton Lewis
2025-12-10 18:31 ` kernel test robot
2025-12-12 20:27 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 12/24] KVM: arm64: Use physical PMSELR for PMXEVTYPER if partitioned Colton Lewis
2025-12-09 21:14 ` Oliver Upton
2025-12-12 20:54 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 13/24] KVM: arm64: Writethrough trapped PMOVS register Colton Lewis
2025-12-09 21:19 ` Oliver Upton
2025-12-12 21:06 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 14/24] KVM: arm64: Write fast path PMU register handlers Colton Lewis
2025-12-17 0:38 ` Oliver Upton
2025-12-17 23:03 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 15/24] KVM: arm64: Setup MDCR_EL2 to handle a partitioned PMU Colton Lewis
2025-12-09 21:33 ` Oliver Upton
2025-12-12 21:22 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 16/24] KVM: arm64: Account for partitioning in PMCR_EL0 access Colton Lewis
2025-12-09 21:37 ` Oliver Upton
2025-12-12 21:31 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 17/24] KVM: arm64: Context swap Partitioned PMU guest registers Colton Lewis
2025-12-09 21:55 ` Oliver Upton
2025-12-12 21:57 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 18/24] KVM: arm64: Enforce PMU event filter at vcpu_load() Colton Lewis
2025-12-09 22:00 ` Oliver Upton
2025-12-12 21:59 ` Colton Lewis
2025-12-17 0:57 ` Oliver Upton
2025-12-17 23:05 ` Colton Lewis
2025-12-09 20:51 ` Colton Lewis [this message]
2025-12-09 22:06 ` [PATCH v5 19/24] KVM: arm64: Implement lazy PMU context swaps Oliver Upton
2025-12-12 22:25 ` Colton Lewis
2025-12-15 18:06 ` Oliver Upton
2025-12-09 20:51 ` [PATCH v5 20/24] perf: arm_pmuv3: Handle IRQs for Partitioned PMU guest counters Colton Lewis
2025-12-09 22:45 ` Oliver Upton
2025-12-12 22:36 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 21/24] KVM: arm64: Inject recorded guest interrupts Colton Lewis
2025-12-09 22:52 ` Oliver Upton
2025-12-12 22:55 ` Colton Lewis
2025-12-15 17:50 ` Oliver Upton
2025-12-09 20:51 ` [PATCH v5 22/24] KVM: arm64: Add KVM_CAP to partition the PMU Colton Lewis
2025-12-09 22:58 ` Oliver Upton
2025-12-12 22:59 ` Colton Lewis
2025-12-09 20:51 ` [PATCH v5 23/24] KVM: selftests: Add find_bit to KVM library Colton Lewis
2025-12-09 20:51 ` [PATCH v5 24/24] KVM: arm64: selftests: Add test case for partitioned PMU Colton Lewis
2025-12-09 23:00 ` [PATCH v5 00/24] ARM64 PMU Partitioning Oliver Upton
2026-01-15 13:02 ` Will Deacon
2026-01-15 18:09 ` Colton Lewis
2026-04-14 19:55 ` Colton Lewis
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251209205121.1871534-20-coltonlewis@google.com \
--to=coltonlewis@google.com \
--cc=catalin.marinas@arm.com \
--cc=corbet@lwn.net \
--cc=gankulkarni@os.amperecomputing.com \
--cc=joey.gouly@arm.com \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.linux.dev \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=linux-perf-users@vger.kernel.org \
--cc=linux@armlinux.org.uk \
--cc=mark.rutland@arm.com \
--cc=maz@kernel.org \
--cc=mizhang@google.com \
--cc=oliver.upton@linux.dev \
--cc=pbonzini@redhat.com \
--cc=shuah@kernel.org \
--cc=suzuki.poulose@arm.com \
--cc=will@kernel.org \
--cc=yuzenghui@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox