From: Colton Lewis <coltonlewis@google.com>
To: kvm@vger.kernel.org
Cc: Alexandru Elisei <alexandru.elisei@arm.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	 Jonathan Corbet <corbet@lwn.net>,
	Russell King <linux@armlinux.org.uk>,
	 Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
	 Oliver Upton <oliver.upton@linux.dev>,
	Mingwei Zhang <mizhang@google.com>,
	 Joey Gouly <joey.gouly@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	 Zenghui Yu <yuzenghui@huawei.com>,
	Mark Rutland <mark.rutland@arm.com>,
	 Shuah Khan <shuah@kernel.org>,
	Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>,
	 James Clark <james.clark@linaro.org>,
	linux-doc@vger.kernel.org,  linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,  kvmarm@lists.linux.dev,
	linux-perf-users@vger.kernel.org,
	 linux-kselftest@vger.kernel.org,
	Colton Lewis <coltonlewis@google.com>
Subject: [PATCH v7 08/20] KVM: arm64: Add Partitioned PMU register trap handlers
Date: Mon,  4 May 2026 21:18:01 +0000
Message-ID: <20260504211813.1804997-9-coltonlewis@google.com>
In-Reply-To: <20260504211813.1804997-1-coltonlewis@google.com>

The PMU may be partitioned on hardware that lacks FEAT_FGT, in which
case the registers FEAT_FGT would normally leave untrapped remain
trapped. Add handlers for those trapped register accesses that do the
right thing when the PMU is partitioned.

For registers that should not be written through to hardware because
they require special handling (PMEVTYPER and PMOVS), write to the
virtual register instead. A later patch will ensure these are applied
correctly at vcpu_load() time.

Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
 arch/arm64/kvm/sys_regs.c | 236 +++++++++++++++++++++++++++++++-------
 1 file changed, 197 insertions(+), 39 deletions(-)
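
A note for reviewers on the indirect counter access used below: the
event counter handlers reach PMEVCNTR<n> through the
PMSELR_EL0/PMXEVCNTR_EL0 pair rather than per-counter registers.
Because PMSELR_EL0 is guest-visible state in the partitioned case, the
handlers save and restore the guest's selector around the access. A
minimal sketch of the pattern (illustration only, restating
pmu_read_evcntr() from the diff; not part of the patch):

static u64 pmu_read_evcntr_sketch(u64 idx)
{
	/* Preserve the guest's selector; the guest may read it back. */
	u64 pmselr = read_sysreg(pmselr_el0);
	u64 val;

	write_sysreg(idx, pmselr_el0);     /* select counter idx */
	val = read_sysreg(pmxevcntr_el0);  /* read PMEVCNTR<idx> indirectly */
	write_sysreg(pmselr, pmselr_el0);  /* restore guest selector */

	return val;
}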

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0a8e8ee69cd00..cc3d1804ab200 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -985,9 +985,25 @@ static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
 
+static void pmu_write_pmuserenr(struct kvm_vcpu *vcpu, u64 val)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu))
+		write_sysreg(val, pmuserenr_el0);
+	else
+		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0, val);
+}
+
+static u64 pmu_read_pmuserenr(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu))
+		return read_sysreg(pmuserenr_el0);
+	else
+		return __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+}
+
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
-	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+	u64 reg = pmu_read_pmuserenr(vcpu);
 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 
 	if (!enabled)
@@ -1016,6 +1032,29 @@ static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
 }
 
+static void pmu_write_pmcr(struct kvm_vcpu *vcpu, u64 val)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu)) {
+		write_sysreg(val, pmcr_el0);
+		return;
+	}
+
+	kvm_pmu_handle_pmcr(vcpu, val);
+}
+
+static u64 pmu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu)) {
+		return u64_replace_bits(
+			read_sysreg(pmcr_el0),
+			vcpu->kvm->arch.nr_pmu_counters,
+			ARMV8_PMU_PMCR_N);
+	}
+
+	return kvm_vcpu_read_pmcr(vcpu);
+
+}
+
 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
@@ -1026,18 +1065,17 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 	if (p->is_write) {
 		/*
-		 * Only update writeable bits of PMCR (continuing into
-		 * kvm_pmu_handle_pmcr() as well)
+		 * Only update writeable bits of PMCR
 		 */
-		val = kvm_vcpu_read_pmcr(vcpu);
+		val = pmu_read_pmcr(vcpu);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
 		if (!kvm_supports_32bit_el0())
 			val |= ARMV8_PMU_PMCR_LC;
-		kvm_pmu_handle_pmcr(vcpu, val);
+		pmu_write_pmcr(vcpu, val);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
-		val = kvm_vcpu_read_pmcr(vcpu)
+		val = pmu_read_pmcr(vcpu)
 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
 		p->regval = val;
 	}
@@ -1045,6 +1083,24 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
+static void pmu_write_pmselr(struct kvm_vcpu *vcpu, u64 val)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu)) {
+		write_sysreg(val, pmselr_el0);
+		return;
+	}
+
+	__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, val);
+}
+
+static u64 pmu_read_pmselr(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu))
+		return read_sysreg(pmselr_el0);
+
+	return __vcpu_sys_reg(vcpu, PMSELR_EL0);
+}
+
 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
@@ -1052,10 +1108,10 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		return false;
 
 	if (p->is_write)
-		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
+		pmu_write_pmselr(vcpu, p->regval);
 	else
 		/* return PMSELR.SEL field */
-		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
+		p->regval = pmu_read_pmselr(vcpu)
 			    & PMSELR_EL0_SEL_MASK;
 
 	return true;
@@ -1128,6 +1184,44 @@ static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	return 0;
 }
 
+static void pmu_write_evcntr(struct kvm_vcpu *vcpu, u64 val, u64 idx)
+{
+	u64 pmselr;
+
+	if (!kvm_vcpu_pmu_is_partitioned(vcpu)) {
+		kvm_pmu_set_counter_value(vcpu, idx, val);
+		return;
+	}
+
+	if (idx == ARMV8_PMU_CYCLE_IDX) {
+		write_sysreg(val, pmccntr_el0);
+		return;
+	}
+
+	pmselr = read_sysreg(pmselr_el0);
+	write_sysreg(idx, pmselr_el0);
+	write_sysreg(val, pmxevcntr_el0);
+	write_sysreg(pmselr, pmselr_el0);
+}
+
+static u64 pmu_read_evcntr(struct kvm_vcpu *vcpu, u64 idx)
+{
+	u64 pmselr;
+	u64 val;
+
+	if (!kvm_vcpu_pmu_is_partitioned(vcpu))
+		return kvm_pmu_get_counter_value(vcpu, idx);
+
+	if (idx == ARMV8_PMU_CYCLE_IDX)
+		return read_sysreg(pmccntr_el0);
+
+	pmselr = read_sysreg(pmselr_el0);
+	write_sysreg(idx, pmselr_el0);
+	val = read_sysreg(pmxevcntr_el0);
+	write_sysreg(pmselr, pmselr_el0);
+	return val;
+}
+
 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 			      struct sys_reg_params *p,
 			      const struct sys_reg_desc *r)
@@ -1141,7 +1235,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 				return false;
 
 			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
-					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
+					    pmu_read_pmselr(vcpu));
 		} else if (r->Op2 == 0) {
 			/* PMCCNTR_EL0 */
 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
@@ -1173,14 +1267,34 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 		if (pmu_access_el0_disabled(vcpu))
 			return false;
 
-		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
+		pmu_write_evcntr(vcpu, p->regval, idx);
 	} else {
-		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
+		p->regval = pmu_read_evcntr(vcpu, idx);
 	}
 
 	return true;
 }
 
+
+static void pmu_write_evtyper(struct kvm_vcpu *vcpu, u64 val, u64 idx)
+{
+	u64 mask;
+
+	if (kvm_vcpu_pmu_is_partitioned(vcpu)) {
+		mask = kvm_pmu_evtyper_mask(vcpu->kvm);
+		__vcpu_assign_sys_reg(vcpu, PMEVTYPER0_EL0 + idx, val & mask);
+		return;
+	}
+
+	kvm_pmu_set_counter_event_type(vcpu, val, idx);
+	kvm_vcpu_pmu_restore_guest(vcpu);
+}
+
+static u64 pmu_read_evtyper(struct kvm_vcpu *vcpu, u64 idx)
+{
+	return __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + idx);
+}
+
 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			       const struct sys_reg_desc *r)
 {
@@ -1191,7 +1305,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
 		/* PMXEVTYPER_EL0 */
-		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
+		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, pmu_read_pmselr(vcpu));
 		reg = PMEVTYPER0_EL0 + idx;
 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
@@ -1207,12 +1321,10 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (!pmu_counter_idx_valid(vcpu, idx))
 		return false;
 
-	if (p->is_write) {
-		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
-		kvm_vcpu_pmu_restore_guest(vcpu);
-	} else {
-		p->regval = __vcpu_sys_reg(vcpu, reg);
-	}
+	if (p->is_write)
+		pmu_write_evtyper(vcpu, p->regval, idx);
+	else
+		p->regval = pmu_read_evtyper(vcpu, idx);
 
 	return true;
 }
@@ -1235,6 +1347,35 @@ static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *v
 	return 0;
 }
 
+static void pmu_write_pmcnten(struct kvm_vcpu *vcpu, u64 val, bool set)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu)) {
+		if (set)
+			write_sysreg(val, pmcntenset_el0);
+		else
+			write_sysreg(val, pmcntenclr_el0);
+
+		return;
+	}
+
+	if (set)
+		/* accessing PMCNTENSET_EL0 */
+		__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
+	else
+		/* accessing PMCNTENCLR_EL0 */
+		__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
+
+	kvm_pmu_reprogram_counter_mask(vcpu, val);
+}
+
+static u64 pmu_read_pmcnten(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu))
+		return read_sysreg(pmcntenset_el0);
+
+	return __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+}
+
 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
@@ -1246,40 +1387,58 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	mask = kvm_pmu_accessible_counter_mask(vcpu);
 	if (p->is_write) {
 		val = p->regval & mask;
-		if (r->Op2 & 0x1)
-			/* accessing PMCNTENSET_EL0 */
-			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
-		else
-			/* accessing PMCNTENCLR_EL0 */
-			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
-
-		kvm_pmu_reprogram_counter_mask(vcpu, val);
+		pmu_write_pmcnten(vcpu, val, r->Op2 & 0x1);
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+		p->regval = pmu_read_pmcnten(vcpu);
 	}
 
 	return true;
 }
 
+static void pmu_write_pminten(struct kvm_vcpu *vcpu, u64 val, bool set)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu)) {
+		if (set)
+			write_sysreg(val, pmintenset_el1);
+		else
+			write_sysreg(val, pmintenclr_el1);
+
+		return;
+	}
+
+	if (set)
+		/* accessing PMINTENSET_EL1 */
+		__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
+	else
+		/* accessing PMINTENCLR_EL1 */
+		__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
+
+	kvm_pmu_reprogram_counter_mask(vcpu, val);
+}
+
+static u64 pmu_read_pminten(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_pmu_is_partitioned(vcpu))
+		return read_sysreg(pmintenset_el1);
+
+	return __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+}
+
 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
-	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
+	u64 val, mask;
 
 	if (check_pmu_access_disabled(vcpu, 0))
 		return false;
 
+	mask = kvm_pmu_accessible_counter_mask(vcpu);
 	if (p->is_write) {
-		u64 val = p->regval & mask;
+		val = p->regval & mask;
 
-		if (r->Op2 & 0x1)
-			/* accessing PMINTENSET_EL1 */
-			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
-		else
-			/* accessing PMINTENCLR_EL1 */
-			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
+		pmu_write_pminten(vcpu, val, r->Op2 & 0x1);
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+		p->regval = pmu_read_pminten(vcpu);
 	}
 
 	return true;
@@ -1330,10 +1489,9 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		if (!vcpu_mode_priv(vcpu))
 			return undef_access(vcpu, p, r);
 
-		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
-				      (p->regval & ARMV8_PMU_USERENR_MASK));
+		pmu_write_pmuserenr(vcpu, p->regval & ARMV8_PMU_USERENR_MASK);
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
+		p->regval = pmu_read_pmuserenr(vcpu)
 			    & ARMV8_PMU_USERENR_MASK;
 	}
 
-- 
2.54.0.545.g6539524ca2-goog


