Linux-ARM-Kernel Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] KVM: arm64: Remove @arch from __load_stage2()
@ 2026-03-18 14:43 Zenghui Yu
  2026-05-15  5:48 ` Anshuman Khandual
  0 siblings, 1 reply; 2+ messages in thread
From: Zenghui Yu @ 2026-03-18 14:43 UTC (permalink / raw)
  To: kvmarm, linux-arm-kernel
  Cc: maz, oupton, joey.gouly, suzuki.poulose, Zenghui Yu (Huawei)

From: "Zenghui Yu (Huawei)" <zenghui.yu@linux.dev>

Since commit fe49fd940e22 ("KVM: arm64: Move VTCR_EL2 into struct s2_mmu"),
@arch is no longer required to obtain the per-kvm_s2_mmu vtcr and can be
removed from __load_stage2().

Signed-off-by: Zenghui Yu (Huawei) <zenghui.yu@linux.dev>
---
 arch/arm64/include/asm/kvm_mmu.h              | 3 +--
 arch/arm64/kvm/at.c                           | 2 +-
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 2 +-
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 2 +-
 arch/arm64/kvm/hyp/nvhe/switch.c              | 2 +-
 arch/arm64/kvm/hyp/nvhe/tlb.c                 | 4 ++--
 arch/arm64/kvm/hyp/vhe/switch.c               | 2 +-
 arch/arm64/kvm/hyp/vhe/tlb.c                  | 4 ++--
 8 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index d968aca0461a..c1e535e3d931 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -318,8 +318,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
-					  struct kvm_arch *arch)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu)
 {
 	write_sysreg(mmu->vtcr, vtcr_el2);
 	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index a024d9a770dc..3b61da0a24d8 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -1379,7 +1379,7 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 		}
 	}
 	write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1),	SYS_SCTLR);
-	__load_stage2(mmu, mmu->arch);
+	__load_stage2(mmu);
 
 skip_mmu_switch:
 	/* Temporarily switch back to guest context */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 5f9d56754e39..803961cdd39e 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -63,7 +63,7 @@ int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
+		__load_stage2(&host_mmu.arch.mmu);
 	else
 		write_sysreg(0, vttbr_el2);
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d815265bd374..87a169838481 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -336,7 +336,7 @@ int __pkvm_prot_finalize(void)
 	kvm_flush_dcache_to_poc(params, sizeof(*params));
 
 	write_sysreg_hcr(params->hcr_el2);
-	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
+	__load_stage2(&host_mmu.arch.mmu);
 
 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 779089e42681..3938997e7963 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -299,7 +299,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg_restore_state_nvhe(guest_ctxt);
 
 	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
-	__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	__load_stage2(mmu);
 	__activate_traps(vcpu);
 
 	__hyp_vgic_restore_state(vcpu);
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 3dc1ce0d27fe..01226a5168d2 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -110,7 +110,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
 	if (vcpu)
 		__load_host_stage2();
 	else
-		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+		__load_stage2(mmu);
 
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
@@ -128,7 +128,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
 		return;
 
 	if (vcpu)
-		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+		__load_stage2(mmu);
 	else
 		__load_host_stage2();
 
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 9db3f11a4754..bc8090d915bf 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -219,7 +219,7 @@ void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
 
 	__vcpu_load_switch_sysregs(vcpu);
 	__vcpu_load_activate_traps(vcpu);
-	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
+	__load_stage2(vcpu->arch.hw_mmu);
 }
 
 void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 35855dadfb1b..539e44d09f17 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -60,7 +60,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
 	 * place before clearing TGE. __load_stage2() already
 	 * has an ISB in order to deal with this.
 	 */
-	__load_stage2(mmu, mmu->arch);
+	__load_stage2(mmu);
 	val = read_sysreg(hcr_el2);
 	val &= ~HCR_TGE;
 	write_sysreg_hcr(val);
@@ -78,7 +78,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
 
 	/* ... and the stage-2 MMU context that we switched away from */
 	if (cxt->mmu)
-		__load_stage2(cxt->mmu, cxt->mmu->arch);
+		__load_stage2(cxt->mmu);
 
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/* Restore the registers to what they were */
-- 
2.53.0



^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] KVM: arm64: Remove @arch from __load_stage2()
  2026-03-18 14:43 [PATCH] KVM: arm64: Remove @arch from __load_stage2() Zenghui Yu
@ 2026-05-15  5:48 ` Anshuman Khandual
  0 siblings, 0 replies; 2+ messages in thread
From: Anshuman Khandual @ 2026-05-15  5:48 UTC (permalink / raw)
  To: Zenghui Yu, kvmarm, linux-arm-kernel
  Cc: maz, oupton, joey.gouly, suzuki.poulose



On 18/03/26 8:13 PM, Zenghui Yu wrote:
> From: "Zenghui Yu (Huawei)" <zenghui.yu@linux.dev>
> 
> Since commit fe49fd940e22 ("KVM: arm64: Move VTCR_EL2 into struct s2_mmu"),
> @arch is no longer required to obtain the per-kvm_s2_mmu vtcr and can be
> removed from __load_stage2().
> 
> Signed-off-by: Zenghui Yu (Huawei) <zenghui.yu@linux.dev>

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>

> ---
>  arch/arm64/include/asm/kvm_mmu.h              | 3 +--
>  arch/arm64/kvm/at.c                           | 2 +-
>  arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 2 +-
>  arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 2 +-
>  arch/arm64/kvm/hyp/nvhe/switch.c              | 2 +-
>  arch/arm64/kvm/hyp/nvhe/tlb.c                 | 4 ++--
>  arch/arm64/kvm/hyp/vhe/switch.c               | 2 +-
>  arch/arm64/kvm/hyp/vhe/tlb.c                  | 4 ++--
>  8 files changed, 10 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index d968aca0461a..c1e535e3d931 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -318,8 +318,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
>   * Must be called from hyp code running at EL2 with an updated VTTBR
>   * and interrupts disabled.
>   */
> -static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
> -					  struct kvm_arch *arch)
> +static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu)
>  {
>  	write_sysreg(mmu->vtcr, vtcr_el2);
>  	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
> diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> index a024d9a770dc..3b61da0a24d8 100644
> --- a/arch/arm64/kvm/at.c
> +++ b/arch/arm64/kvm/at.c
> @@ -1379,7 +1379,7 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
>  		}
>  	}
>  	write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1),	SYS_SCTLR);
> -	__load_stage2(mmu, mmu->arch);
> +	__load_stage2(mmu);
>  
>  skip_mmu_switch:
>  	/* Temporarily switch back to guest context */
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> index 5f9d56754e39..803961cdd39e 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> @@ -63,7 +63,7 @@ int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
>  static __always_inline void __load_host_stage2(void)
>  {
>  	if (static_branch_likely(&kvm_protected_mode_initialized))
> -		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
> +		__load_stage2(&host_mmu.arch.mmu);
>  	else
>  		write_sysreg(0, vttbr_el2);
>  }
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index d815265bd374..87a169838481 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -336,7 +336,7 @@ int __pkvm_prot_finalize(void)
>  	kvm_flush_dcache_to_poc(params, sizeof(*params));
>  
>  	write_sysreg_hcr(params->hcr_el2);
> -	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
> +	__load_stage2(&host_mmu.arch.mmu);
>  
>  	/*
>  	 * Make sure to have an ISB before the TLB maintenance below but only
> diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
> index 779089e42681..3938997e7963 100644
> --- a/arch/arm64/kvm/hyp/nvhe/switch.c
> +++ b/arch/arm64/kvm/hyp/nvhe/switch.c
> @@ -299,7 +299,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
>  	__sysreg_restore_state_nvhe(guest_ctxt);
>  
>  	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
> -	__load_stage2(mmu, kern_hyp_va(mmu->arch));
> +	__load_stage2(mmu);
>  	__activate_traps(vcpu);
>  
>  	__hyp_vgic_restore_state(vcpu);
> diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
> index 3dc1ce0d27fe..01226a5168d2 100644
> --- a/arch/arm64/kvm/hyp/nvhe/tlb.c
> +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
> @@ -110,7 +110,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
>  	if (vcpu)
>  		__load_host_stage2();
>  	else
> -		__load_stage2(mmu, kern_hyp_va(mmu->arch));
> +		__load_stage2(mmu);
>  
>  	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
>  }
> @@ -128,7 +128,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
>  		return;
>  
>  	if (vcpu)
> -		__load_stage2(mmu, kern_hyp_va(mmu->arch));
> +		__load_stage2(mmu);
>  	else
>  		__load_host_stage2();
>  
> diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
> index 9db3f11a4754..bc8090d915bf 100644
> --- a/arch/arm64/kvm/hyp/vhe/switch.c
> +++ b/arch/arm64/kvm/hyp/vhe/switch.c
> @@ -219,7 +219,7 @@ void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
>  
>  	__vcpu_load_switch_sysregs(vcpu);
>  	__vcpu_load_activate_traps(vcpu);
> -	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
> +	__load_stage2(vcpu->arch.hw_mmu);
>  }
>  
>  void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
> diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
> index 35855dadfb1b..539e44d09f17 100644
> --- a/arch/arm64/kvm/hyp/vhe/tlb.c
> +++ b/arch/arm64/kvm/hyp/vhe/tlb.c
> @@ -60,7 +60,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
>  	 * place before clearing TGE. __load_stage2() already
>  	 * has an ISB in order to deal with this.
>  	 */
> -	__load_stage2(mmu, mmu->arch);
> +	__load_stage2(mmu);
>  	val = read_sysreg(hcr_el2);
>  	val &= ~HCR_TGE;
>  	write_sysreg_hcr(val);
> @@ -78,7 +78,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
>  
>  	/* ... and the stage-2 MMU context that we switched away from */
>  	if (cxt->mmu)
> -		__load_stage2(cxt->mmu, cxt->mmu->arch);
> +		__load_stage2(cxt->mmu);
>  
>  	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
>  		/* Restore the registers to what they were */



^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2026-05-15  5:49 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-18 14:43 [PATCH] KVM: arm64: Remove @arch from __load_stage2() Zenghui Yu
2026-05-15  5:48 ` Anshuman Khandual

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox