From: Zenghui Yu <zenghui.yu@linux.dev>
To: kvmarm@lists.linux.dev, linux-arm-kernel@lists.infradead.org
Cc: maz@kernel.org, oupton@kernel.org, joey.gouly@arm.com,
suzuki.poulose@arm.com,
"Zenghui Yu (Huawei)" <zenghui.yu@linux.dev>
Subject: [PATCH] KVM: arm64: Remove @arch from __load_stage2()
Date: Wed, 18 Mar 2026 22:43:05 +0800 [thread overview]
Message-ID: <20260318144305.56831-1-zenghui.yu@linux.dev> (raw)
From: "Zenghui Yu (Huawei)" <zenghui.yu@linux.dev>
Since commit fe49fd940e22 ("KVM: arm64: Move VTCR_EL2 into struct s2_mmu"),
the VTCR value is obtained directly from the per-kvm_s2_mmu structure, so
the @arch parameter is no longer needed and can be removed from
__load_stage2().
Signed-off-by: Zenghui Yu (Huawei) <zenghui.yu@linux.dev>
---
arch/arm64/include/asm/kvm_mmu.h | 3 +--
arch/arm64/kvm/at.c | 2 +-
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 2 +-
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 2 +-
arch/arm64/kvm/hyp/nvhe/switch.c | 2 +-
arch/arm64/kvm/hyp/nvhe/tlb.c | 4 ++--
arch/arm64/kvm/hyp/vhe/switch.c | 2 +-
arch/arm64/kvm/hyp/vhe/tlb.c | 4 ++--
8 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index d968aca0461a..c1e535e3d931 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -318,8 +318,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
* Must be called from hyp code running at EL2 with an updated VTTBR
* and interrupts disabled.
*/
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
- struct kvm_arch *arch)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu)
{
write_sysreg(mmu->vtcr, vtcr_el2);
write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index a024d9a770dc..3b61da0a24d8 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -1379,7 +1379,7 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
}
}
write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1), SYS_SCTLR);
- __load_stage2(mmu, mmu->arch);
+ __load_stage2(mmu);
skip_mmu_switch:
/* Temporarily switch back to guest context */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 5f9d56754e39..803961cdd39e 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -63,7 +63,7 @@ int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
static __always_inline void __load_host_stage2(void)
{
if (static_branch_likely(&kvm_protected_mode_initialized))
- __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
+ __load_stage2(&host_mmu.arch.mmu);
else
write_sysreg(0, vttbr_el2);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d815265bd374..87a169838481 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -336,7 +336,7 @@ int __pkvm_prot_finalize(void)
kvm_flush_dcache_to_poc(params, sizeof(*params));
write_sysreg_hcr(params->hcr_el2);
- __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
+ __load_stage2(&host_mmu.arch.mmu);
/*
* Make sure to have an ISB before the TLB maintenance below but only
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 779089e42681..3938997e7963 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -299,7 +299,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__sysreg_restore_state_nvhe(guest_ctxt);
mmu = kern_hyp_va(vcpu->arch.hw_mmu);
- __load_stage2(mmu, kern_hyp_va(mmu->arch));
+ __load_stage2(mmu);
__activate_traps(vcpu);
__hyp_vgic_restore_state(vcpu);
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 3dc1ce0d27fe..01226a5168d2 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -110,7 +110,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
if (vcpu)
__load_host_stage2();
else
- __load_stage2(mmu, kern_hyp_va(mmu->arch));
+ __load_stage2(mmu);
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}
@@ -128,7 +128,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
return;
if (vcpu)
- __load_stage2(mmu, kern_hyp_va(mmu->arch));
+ __load_stage2(mmu);
else
__load_host_stage2();
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 9db3f11a4754..bc8090d915bf 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -219,7 +219,7 @@ void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
__vcpu_load_switch_sysregs(vcpu);
__vcpu_load_activate_traps(vcpu);
- __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
+ __load_stage2(vcpu->arch.hw_mmu);
}
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 35855dadfb1b..539e44d09f17 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -60,7 +60,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
* place before clearing TGE. __load_stage2() already
* has an ISB in order to deal with this.
*/
- __load_stage2(mmu, mmu->arch);
+ __load_stage2(mmu);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg_hcr(val);
@@ -78,7 +78,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
/* ... and the stage-2 MMU context that we switched away from */
if (cxt->mmu)
- __load_stage2(cxt->mmu, cxt->mmu->arch);
+ __load_stage2(cxt->mmu);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/* Restore the registers to what they were */
--
2.53.0
reply other threads:[~2026-03-18 14:43 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260318144305.56831-1-zenghui.yu@linux.dev \
--to=zenghui.yu@linux.dev \
--cc=joey.gouly@arm.com \
--cc=kvmarm@lists.linux.dev \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=maz@kernel.org \
--cc=oupton@kernel.org \
--cc=suzuki.poulose@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox