From: Yosry Ahmed <yosry@kernel.org>
To: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>,
Jim Mattson <jmattson@google.com>,
kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
Yosry Ahmed <yosry@kernel.org>
Subject: [PATCH v4 4/6] KVM: x86/pmu: Re-evaluate Host-Only/Guest-Only on nested SVM transitions
Date: Thu, 26 Mar 2026 03:11:48 +0000 [thread overview]
Message-ID: <20260326031150.3774017-5-yosry@kernel.org> (raw)
In-Reply-To: <20260326031150.3774017-1-yosry@kernel.org>
Reprogram all counters on nested transitions for the mediated PMU, to
re-evaluate Host-Only and Guest-Only bits and enable/disable the PMU
counters accordingly. For example, if Host-Only is set and Guest-Only is
cleared, a counter should be disabled when entering guest mode and
enabled when exiting guest mode.
Having one of Host-Only and Guest-Only set is only effective when
EFER.SVME is set, so also trigger counter reprogramming when EFER.SVME
is toggled.
Track counters with one of Host-Only and Guest-Only set as counters
requiring reprogramming on nested transitions in a bitmap. Use the
bitmap to only request KVM_REQ_PMU if some counters need reprogramming,
and only reprogram the counters that actually need it.
Track such counters even if EFER.SVME is cleared, such that if/when
EFER.SVME is set, KVM can reprogram those counters and enable/disable
them appropriately. Otherwise, toggling EFER.SVME would need to
reprogram all counters and use a different code path than
kvm_pmu_handle_nested_transition().
Signed-off-by: Yosry Ahmed <yosry@kernel.org>
---
arch/x86/include/asm/kvm_host.h | 6 ++++++
arch/x86/kvm/pmu.c | 1 +
arch/x86/kvm/pmu.h | 13 +++++++++++++
arch/x86/kvm/svm/pmu.c | 13 ++++++++++++-
arch/x86/kvm/svm/svm.c | 1 +
arch/x86/kvm/x86.h | 5 +++++
6 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d3bdc98281339..b2f8710838372 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -594,6 +594,12 @@ struct kvm_pmu {
DECLARE_BITMAP(pmc_counting_instructions, X86_PMC_IDX_MAX);
DECLARE_BITMAP(pmc_counting_branches, X86_PMC_IDX_MAX);
+ /*
+ * Whether or not PMU counters need to be reprogrammed on transitions
+ * between L1 and L2 (or when nesting enablement is toggled).
+ */
+ DECLARE_BITMAP(pmc_needs_nested_reprogram, X86_PMC_IDX_MAX);
+
u64 ds_area;
u64 pebs_enable;
u64 pebs_enable_rsvd;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index e35d598f809a2..a7b38c104d067 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -932,6 +932,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
pmu->need_cleanup = false;
bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
+ bitmap_zero(pmu->pmc_needs_nested_reprogram, X86_PMC_IDX_MAX);
kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
pmc_stop_counter(pmc);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index bdbe0456049d0..fb73806d3bfa0 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -248,6 +248,19 @@ static inline bool kvm_pmu_is_fastpath_emulation_allowed(struct kvm_vcpu *vcpu)
X86_PMC_IDX_MAX);
}
+static inline void kvm_pmu_handle_nested_transition(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
+ if (bitmap_empty(pmu->pmc_needs_nested_reprogram, X86_PMC_IDX_MAX))
+ return;
+
+ BUILD_BUG_ON(sizeof(pmu->pmc_needs_nested_reprogram) != sizeof(atomic64_t));
+ atomic64_or(*(s64 *)pmu->pmc_needs_nested_reprogram,
+ &vcpu_to_pmu(vcpu)->__reprogram_pmi);
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+}
+
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 60931dfd624b2..cc1eabb0ad15f 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -262,17 +262,28 @@ static void amd_mediated_pmu_put(struct kvm_vcpu *vcpu)
static void amd_mediated_pmu_handle_host_guest_bits(struct kvm_pmc *pmc)
{
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
struct kvm_vcpu *vcpu = pmc->vcpu;
u64 host_guest_bits;
+ __clear_bit(pmc->idx, pmu->pmc_needs_nested_reprogram);
+
if (!(pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE))
return;
- /* Count all events if both bits are cleared or both bits are set */
+ /*
+ * If both bits are cleared or both bits are set, count all events.
+ * Otherwise, the counter enablement should be re-evaluated on every
+ * nested transition. Track which counters need to be re-evaluated even
+ * if EFER.SVME == 0, such that the counters are correctly reprogrammed
+ * on nested transitions after EFER.SVME is set.
+ */
host_guest_bits = pmc->eventsel & AMD64_EVENTSEL_HOST_GUEST_MASK;
if (hweight64(host_guest_bits) != 1)
return;
+ __set_bit(pmc->idx, pmu->pmc_needs_nested_reprogram);
+
/* Host-Only and Guest-Only are ignored if EFER.SVME == 0 */
if (!(vcpu->arch.efer & EFER_SVME))
return;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d2ca226871c2f..1ac00d2cba0ab 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -261,6 +261,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
set_exception_intercept(svm, GP_VECTOR);
}
+ kvm_pmu_handle_nested_transition(vcpu);
kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f1c29ac306917..966e4138308f6 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -9,6 +9,7 @@
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"
+#include "pmu.h"
#define KVM_MAX_MCE_BANKS 32
@@ -152,6 +153,8 @@ static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
vcpu->arch.hflags |= HF_GUEST_MASK;
vcpu->stat.guest_mode = 1;
+
+ kvm_pmu_handle_nested_transition(vcpu);
}
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
@@ -164,6 +167,8 @@ static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
}
vcpu->stat.guest_mode = 0;
+
+ kvm_pmu_handle_nested_transition(vcpu);
}
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
--
2.53.0.1018.g2bb0e51243-goog
next prev parent reply other threads:[~2026-03-26 3:12 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-26 3:11 [PATCH v4 0/6] KVM: x86/pmu: Add support for AMD Host-Only/Guest-Only bits Yosry Ahmed
2026-03-26 3:11 ` [PATCH v4 1/6] KVM: x86: Move enable_pmu/enable_mediated_pmu to pmu.h and pmu.c Yosry Ahmed
2026-03-26 3:11 ` [PATCH v4 2/6] KVM: x86: Move guest_mode helpers to x86.h Yosry Ahmed
2026-03-26 22:48 ` kernel test robot
2026-03-26 23:18 ` Yosry Ahmed
2026-03-27 3:15 ` kernel test robot
2026-03-26 3:11 ` [PATCH v4 3/6] KVM: x86/pmu: Disable counters based on Host-Only/Guest-Only bits in SVM Yosry Ahmed
2026-04-07 1:30 ` Sean Christopherson
2026-03-26 3:11 ` Yosry Ahmed [this message]
2026-04-07 1:35 ` [PATCH v4 4/6] KVM: x86/pmu: Re-evaluate Host-Only/Guest-Only on nested SVM transitions Sean Christopherson
2026-03-26 3:11 ` [PATCH v4 5/6] KVM: x86/pmu: Allow Host-Only/Guest-Only bits with nSVM and mediated PMU Yosry Ahmed
2026-03-26 3:11 ` [PATCH v4 6/6] KVM: selftests: Add svm_pmu_host_guest_test for Host-Only/Guest-Only bits Yosry Ahmed
2026-04-07 1:39 ` Sean Christopherson
2026-04-07 3:23 ` Jim Mattson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260326031150.3774017-5-yosry@kernel.org \
--to=yosry@kernel.org \
--cc=jmattson@google.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox