From: Marc Zyngier <maz@kernel.org>
To: kvmarm@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
kvm@vger.kernel.org
Cc: Joey Gouly <joey.gouly@arm.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
Oliver Upton <oliver.upton@linux.dev>,
Zenghui Yu <yuzenghui@huawei.com>,
Christoffer Dall <christoffer.dall@arm.com>,
Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
Subject: [PATCH 19/33] KVM: arm64: Eagerly save VMCR on exit
Date: Mon, 3 Nov 2025 16:55:03 +0000
Message-ID: <20251103165517.2960148-20-maz@kernel.org>
In-Reply-To: <20251103165517.2960148-1-maz@kernel.org>
We currently save/restore the VMCR register in a pretty lazy way
(on load/put, consistently with what we do with the APRs).

However, we are going to need the group-enable bits that are backed
by VMCR on each entry, so that we can avoid injecting interrupts for
disabled groups.

Move the synchronisation from put to sync; this results in some minor
churn in the nVHE hypercalls, which get simplified in the process.
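As an aside (not part of this patch): with vgic_vmcr refreshed on every
exit, entry code could consult the group-enable bits without an extra
sysreg read. A minimal sketch, assuming the ICH_VMCR_ENG{0,1}_MASK
definitions from <linux/irqchip/arm-gic-v3.h>; the helper name is
purely illustrative:

	/* Hypothetical helper: group-enable state as cached in vgic_vmcr */
	static inline bool vgic_v3_group_enabled(struct vgic_v3_cpu_if *cpu_if,
						 bool group1)
	{
		u32 mask = group1 ? ICH_VMCR_ENG1_MASK : ICH_VMCR_ENG0_MASK;

		/* Relies on vgic_vmcr being saved eagerly on each exit */
		return cpu_if->vgic_vmcr & mask;
	}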
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/include/asm/kvm_asm.h | 2 +-
arch/arm64/include/asm/kvm_hyp.h | 2 +-
arch/arm64/kvm/arm.c | 3 +--
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 7 ++++---
arch/arm64/kvm/hyp/vgic-v3-sr.c | 15 +++------------
arch/arm64/kvm/vgic/vgic-v2.c | 2 +-
arch/arm64/kvm/vgic/vgic-v3-nested.c | 2 +-
arch/arm64/kvm/vgic/vgic-v3.c | 2 +-
8 files changed, 13 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 9da54d4ee49ee..f8adbd535b4ae 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -79,7 +79,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
- __KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
__KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index e6be1f5d0967f..dbf16a9f67728 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -82,7 +82,7 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 870953b4a8a74..733195ef183e1 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -659,8 +659,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
if (is_protected_kvm_enabled()) {
- kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
- &vcpu->arch.vgic_cpu.vgic_v3);
+ kvm_call_hyp(__vgic_v3_save_aprs, &vcpu->arch.vgic_cpu.vgic_v3);
kvm_call_hyp_nvhe(__pkvm_vcpu_put);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 29430c031095a..a7c689152f686 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -157,6 +157,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
+ host_cpu_if->vgic_vmcr = hyp_cpu_if->vgic_vmcr;
for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
}
@@ -464,11 +465,11 @@ static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
__vgic_v3_init_lrs();
}
-static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
- __vgic_v3_save_vmcr_aprs(kern_hyp_va(cpu_if));
+ __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}
static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
@@ -616,7 +617,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
- HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
+ HANDLE_FUNC(__vgic_v3_save_aprs),
HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
HANDLE_FUNC(__pkvm_reserve_vm),
HANDLE_FUNC(__pkvm_unreserve_vm),
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index d001b26a21f16..1e5c1cf4b9144 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -235,6 +235,8 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
}
}
+ cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+
if (cpu_if->vgic_hcr & ICH_HCR_EL2_LRENPIE) {
u64 val = read_gicreg(ICH_HCR_EL2);
cpu_if->vgic_hcr &= ~ICH_HCR_EL2_EOIcount;
@@ -332,10 +334,6 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
u64 val;
- if (!cpu_if->vgic_sre) {
- cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
- }
-
/* Only restore SRE if the host implements the GICv2 interface */
if (static_branch_unlikely(&vgic_v3_has_v2_compat)) {
val = read_gicreg(ICC_SRE_EL2);
@@ -357,7 +355,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
write_gicreg(0, ICH_HCR_EL2);
}
-static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
u64 val;
u32 nr_pre_bits;
@@ -518,13 +516,6 @@ static void __vgic_v3_write_vmcr(u32 vmcr)
write_gicreg(vmcr, ICH_VMCR_EL2);
}
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
-{
- __vgic_v3_save_aprs(cpu_if);
- if (cpu_if->vgic_sre)
- cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
-}
-
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
{
__vgic_v3_compat_mode_enable();
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 5a2165a8d22c0..07e93acafd04d 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -451,6 +451,7 @@ void vgic_v2_save_state(struct kvm_vcpu *vcpu)
if (!base)
return;
+ cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
if (used_lrs)
save_lrs(vcpu, base);
@@ -495,6 +496,5 @@ void vgic_v2_put(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index 387557e20a272..17bceef83269e 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -341,7 +341,7 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
u64 val;
int i;
- __vgic_v3_save_vmcr_aprs(s_cpu_if);
+ __vgic_v3_save_aprs(s_cpu_if);
__vgic_v3_deactivate_traps(s_cpu_if);
__vgic_v3_save_state(s_cpu_if);
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index ad5c0dbae2dfe..15225cf353f91 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -815,7 +815,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
}
if (likely(!is_protected_kvm_enabled()))
- kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+ kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
WARN_ON(vgic_v4_put(vcpu));
if (has_vhe())
--
2.47.3