From: Colton Lewis <coltonlewis@google.com>
To: kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
Jonathan Corbet <corbet@lwn.net>,
Russell King <linux@armlinux.org.uk>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
Oliver Upton <oliver.upton@linux.dev>,
Joey Gouly <joey.gouly@arm.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
Zenghui Yu <yuzenghui@huawei.com>,
Mark Rutland <mark.rutland@arm.com>,
Shuah Khan <shuah@kernel.org>,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,
linux-perf-users@vger.kernel.org,
linux-kselftest@vger.kernel.org,
Colton Lewis <coltonlewis@google.com>
Subject: [PATCH v2 18/23] KVM: arm64: Context swap Partitioned PMU guest registers
Date: Fri, 20 Jun 2025 22:13:19 +0000
Message-ID: <20250620221326.1261128-20-coltonlewis@google.com>
In-Reply-To: <20250620221326.1261128-1-coltonlewis@google.com>
Save and restore the newly untrapped registers that the guest will
access directly when the PMU is partitioned:
* PMEVCNTRn_EL0
* PMCCNTR_EL0
* PMICNTR_EL0
* PMUSERENR_EL0
* PMSELR_EL0
* PMCR_EL0
* PMCNTEN_EL0
* PMINTEN_EL1
If the PMU is not partitioned or MDCR_EL2.TPM is set, all PMU
registers are trapped, so return immediately.
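
For illustration (the counter layout here is assumed, not taken from
this patch): with HPMN = 4 on an 8-counter PMU, and assuming
kvm_pmu_guest_counter_mask() covers only the guest-owned event
counters 0-3 (mask = 0xf), a guest shadow PMCNTENSET_EL0 value of
0b0101 would make vcpu_load perform:

	write_pmcntenset(0b0101 & 0xf);  /* enable counters 0 and 2 */
	write_pmcntenclr(~0b0101 & 0xf); /* disable counters 1 and 3 */

The set/clear pair is needed because a single write can only set or
only clear enable bits; writing both halves through the mask flips
only guest counters and leaves the host-owned counters 4-7 untouched.
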
Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
arch/arm64/include/asm/kvm_host.h | 2 +
arch/arm64/include/asm/kvm_pmu.h | 2 +
arch/arm64/kvm/arm.c | 2 +
arch/arm64/kvm/pmu-part.c | 101 ++++++++++++++++++++++++++++++
4 files changed, 107 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2df76689381a..374771557d2c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -453,9 +453,11 @@ enum vcpu_sysreg {
PMEVCNTR0_EL0, /* Event Counter Register (0-30) */
PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
PMCCNTR_EL0, /* Cycle Counter Register */
+ PMICNTR_EL0, /* Instruction Counter Register */
PMEVTYPER0_EL0, /* Event Type Register (0-30) */
PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
PMCCFILTR_EL0, /* Cycle Count Filter Register */
+ PMICFILTR_EL0, /* Instruction Count Filter Register */
PMCNTENSET_EL0, /* Count Enable Set Register */
PMINTENSET_EL1, /* Interrupt Enable Set Register */
PMOVSSET_EL0, /* Overflow Flag Status Set Register */
diff --git a/arch/arm64/include/asm/kvm_pmu.h b/arch/arm64/include/asm/kvm_pmu.h
index 1b68f1a706d1..208893485027 100644
--- a/arch/arm64/include/asm/kvm_pmu.h
+++ b/arch/arm64/include/asm/kvm_pmu.h
@@ -96,6 +96,8 @@ void kvm_pmu_host_counters_disable(void);
u8 kvm_pmu_guest_num_counters(struct kvm_vcpu *vcpu);
u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu);
+void kvm_pmu_load(struct kvm_vcpu *vcpu);
+void kvm_pmu_put(struct kvm_vcpu *vcpu);
#if !defined(__KVM_NVHE_HYPERVISOR__)
bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e452aba1a3b2..7c007ee44ecb 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -616,6 +616,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_vcpu_load_vhe(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
kvm_vcpu_pmu_restore_guest(vcpu);
+ kvm_pmu_load(vcpu);
if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
@@ -658,6 +659,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_timer_vcpu_put(vcpu);
kvm_vgic_put(vcpu);
kvm_vcpu_pmu_restore_host(vcpu);
+ kvm_pmu_put(vcpu);
if (vcpu_has_nv(vcpu))
kvm_vcpu_put_hw_mmu(vcpu);
kvm_arm_vmid_clear_active();
diff --git a/arch/arm64/kvm/pmu-part.c b/arch/arm64/kvm/pmu-part.c
index 289f396bd887..19bd6e0da222 100644
--- a/arch/arm64/kvm/pmu-part.c
+++ b/arch/arm64/kvm/pmu-part.c
@@ -8,6 +8,7 @@
#include <linux/perf/arm_pmu.h>
#include <linux/perf/arm_pmuv3.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_pmu.h>
#include <asm/arm_pmuv3.h>
@@ -175,3 +176,103 @@ u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu)
return hpmn;
}
+
+/**
+ * kvm_pmu_load() - Load untrapped PMU registers
+ * @vcpu: Pointer to struct kvm_vcpu
+ *
+ * Load all untrapped PMU registers from the VCPU into the PCPU. In
+ * bitmask registers, apply only the bits belonging to guest-reserved
+ * counters and leave host-reserved counters alone.
+ */
+void kvm_pmu_load(struct kvm_vcpu *vcpu)
+{
+ struct arm_pmu *pmu = vcpu->kvm->arch.arm_pmu;
+ u64 mask = kvm_pmu_guest_counter_mask(pmu);
+ u8 i;
+ u64 val;
+
+ /*
+ * If the PMU is not partitioned or MDCR_EL2.TPM is set, every
+ * PMU access is trapped, so don't bother with the swap.
+ */
+ if (!kvm_pmu_is_partitioned(pmu) || (vcpu->arch.mdcr_el2 & MDCR_EL2_TPM))
+ return;
+
+ for (i = 0; i < pmu->hpmn_max; i++) {
+ val = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i);
+ write_pmevcntrn(i, val);
+ }
+
+ val = __vcpu_sys_reg(vcpu, PMCCNTR_EL0);
+ write_pmccntr(val);
+
+ val = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+ write_pmuserenr(val);
+
+ val = __vcpu_sys_reg(vcpu, PMSELR_EL0);
+ write_pmselr(val);
+
+ val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+ write_pmcr(val);
+
+ /*
+ * Loading these registers is tricky because:
+ * 1. Only the bits for guest counters (indicated by mask) may be written
+ * 2. Setting and clearing are done through separate registers
+ */
+ val = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+ write_pmcntenset(val & mask);
+ write_pmcntenclr(~val & mask);
+
+ val = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+ write_pmintenset(val & mask);
+ write_pmintenclr(~val & mask);
+}
+
+/**
+ * kvm_pmu_put() - Put untrapped PMU registers
+ * @vcpu: Pointer to struct kvm_vcpu
+ *
+ * Put all untrapped PMU registers from the PCPU into the VCPU. In
+ * bitmask registers, save only the bits belonging to guest-reserved
+ * counters and leave host-reserved counters alone.
+ */
+void kvm_pmu_put(struct kvm_vcpu *vcpu)
+{
+ struct arm_pmu *pmu = vcpu->kvm->arch.arm_pmu;
+ u64 mask = kvm_pmu_guest_counter_mask(pmu);
+ u8 i;
+ u64 val;
+
+ /*
+ * If the PMU is not partitioned or MDCR_EL2.TPM is set, every
+ * PMU access is trapped, so don't bother with the swap.
+ */
+ if (!kvm_pmu_is_partitioned(pmu) || (vcpu->arch.mdcr_el2 & MDCR_EL2_TPM))
+ return;
+
+ for (i = 0; i < pmu->hpmn_max; i++) {
+ val = read_pmevcntrn(i);
+ __vcpu_assign_sys_reg(vcpu, PMEVCNTR0_EL0 + i, val);
+ }
+
+ val = read_pmccntr();
+ __vcpu_assign_sys_reg(vcpu, PMCCNTR_EL0, val);
+
+ val = read_pmuserenr();
+ __vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0, val);
+
+ val = read_pmselr();
+ __vcpu_assign_sys_reg(vcpu, PMSELR_EL0, val);
+
+ val = read_pmcr();
+ __vcpu_assign_sys_reg(vcpu, PMCR_EL0, val);
+
+ /* Mask these to save only the guest-relevant bits. */
+ val = read_pmcntenset();
+ __vcpu_assign_sys_reg(vcpu, PMCNTENSET_EL0, val & mask);
+
+ val = read_pmintenset();
+ __vcpu_assign_sys_reg(vcpu, PMINTENSET_EL1, val & mask);
+}
--
2.50.0.714.g196bf9f422-goog