public inbox for linux-arm-kernel@lists.infradead.org
 help / color / mirror / Atom feed
* [PATCH 0/2] KVM: arm64: Handle unsupported guest translation granules
@ 2026-04-06 16:46 Wei-Lin Chang
  2026-04-06 16:46 ` [PATCH 1/2] KVM: arm64: Factor out TG0/1 decoding of VTCR and TCR Wei-Lin Chang
  2026-04-06 16:46 ` [PATCH 2/2] KVM: arm64: Fallback to a supported value for unsupported guest TGx Wei-Lin Chang
  0 siblings, 2 replies; 4+ messages in thread
From: Wei-Lin Chang @ 2026-04-06 16:46 UTC (permalink / raw)
  To: linux-arm-kernel, kvmarm, linux-kernel
  Cc: Marc Zyngier, Oliver Upton, Joey Gouly, Suzuki K Poulose,
	Zenghui Yu, Catalin Marinas, Will Deacon, Wei-Lin Chang

Hi,

This small series fixes the granule size selection for software stage-1
and stage-2 walks. Previously we treated the guest's TCR/VTCR.TGx as-is
and used the encoded granule size for the walks. However this is
incorrect if the granule sizes are not advertised in the guest's
ID_AA64MMFR0_EL1.TGRAN*. The architecture specifies that when an
unsupported size is programmed in TGx, it must be treated as an
implemented size. Fix this by choosing an available one while
prioritizing PAGE_SIZE.

The first patch is a refactor to prepare for the fix, and the fix is
implemented in the second patch. I lightly tested for regressions by
booting up nested guests of each page size.

Thanks!

Wei-Lin Chang (2):
  KVM: arm64: Factor out TG0/1 decoding of VTCR and TCR
  KVM: arm64: Fallback to a supported value for unsupported guest TGx

 arch/arm64/kvm/at.c     | 121 +++++++++++++++++++++++++--------
 arch/arm64/kvm/nested.c | 145 ++++++++++++++++++++++++++++------------
 2 files changed, 194 insertions(+), 72 deletions(-)

-- 
2.43.0



^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 1/2] KVM: arm64: Factor out TG0/1 decoding of VTCR and TCR
  2026-04-06 16:46 [PATCH 0/2] KVM: arm64: Handle unsupported guest translation granules Wei-Lin Chang
@ 2026-04-06 16:46 ` Wei-Lin Chang
  2026-04-07  7:17   ` Marc Zyngier
  2026-04-06 16:46 ` [PATCH 2/2] KVM: arm64: Fallback to a supported value for unsupported guest TGx Wei-Lin Chang
  1 sibling, 1 reply; 4+ messages in thread
From: Wei-Lin Chang @ 2026-04-06 16:46 UTC (permalink / raw)
  To: linux-arm-kernel, kvmarm, linux-kernel
  Cc: Marc Zyngier, Oliver Upton, Joey Gouly, Suzuki K Poulose,
	Zenghui Yu, Catalin Marinas, Will Deacon, Wei-Lin Chang

The current code decodes TCR.TG0/TG1 and VTCR.TG0 inline at several
places. Extract this logic into helpers so the granule size is derived
in one place. This enables us to alter the effective granule size in
the same place, which we will need in a later patch.

Signed-off-by: Wei-Lin Chang <weilin.chang@arm.com>
---
 arch/arm64/kvm/at.c     | 73 +++++++++++++++++++++++++----------------
 arch/arm64/kvm/nested.c | 70 ++++++++++++++++++++++++---------------
 2 files changed, 89 insertions(+), 54 deletions(-)

diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index c5c5644b1878..ff8ba30e917b 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -135,14 +135,54 @@ static void compute_s1poe(struct kvm_vcpu *vcpu, struct s1_walk_info *wi)
 	wi->e0poe = (wi->regime != TR_EL2) && (val & TCR2_EL1_E0POE);
 }
 
+static unsigned int tg0_to_shift(u64 tg0)
+{
+	switch (tg0) {
+	case TCR_EL1_TG0_4K:
+		return 12;
+	case TCR_EL1_TG0_16K:
+		return 14;
+	case TCR_EL1_TG0_64K:
+	default:	/* IMPDEF: treat any other value as 64k */
+		return 16;
+	}
+}
+
+static unsigned int tg1_to_shift(u64 tg1)
+{
+	switch (tg1) {
+	case TCR_EL1_TG1_4K:
+		return 12;
+	case TCR_EL1_TG1_16K:
+		return 14;
+	case TCR_EL1_TG1_64K:
+	default:	/* IMPDEF: treat any other value as 64k */
+		return 16;
+	}
+}
+
+static u64 tcr_tg_shift(struct kvm *kvm, u64 tcr, bool upper_range)
+{
+	unsigned int shift;
+
+	/* Someone was silly enough to encode TG0/TG1 differently */
+	if (upper_range)
+		shift = tg1_to_shift(FIELD_GET(TCR_EL1_TG1_MASK, tcr));
+	else
+		shift = tg0_to_shift(FIELD_GET(TCR_EL1_TG0_MASK, tcr));
+
+	return shift;
+}
+
 static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 			 struct s1_walk_result *wr, u64 va)
 {
-	u64 hcr, sctlr, tcr, tg, ps, ia_bits, ttbr;
+	u64 hcr, sctlr, tcr, ps, ia_bits, ttbr;
 	unsigned int stride, x;
-	bool va55, tbi, lva;
+	bool va55, tbi, lva, upper_range;
 
 	va55 = va & BIT(55);
+	upper_range = va55 && wi->regime != TR_EL2;
 
 	if (vcpu_has_nv(vcpu)) {
 		hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
@@ -173,35 +213,12 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 		BUG();
 	}
 
-	/* Someone was silly enough to encode TG0/TG1 differently */
-	if (va55 && wi->regime != TR_EL2) {
+	if (upper_range)
 		wi->txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);
-		tg = FIELD_GET(TCR_TG1_MASK, tcr);
-
-		switch (tg << TCR_TG1_SHIFT) {
-		case TCR_TG1_4K:
-			wi->pgshift = 12;	 break;
-		case TCR_TG1_16K:
-			wi->pgshift = 14;	 break;
-		case TCR_TG1_64K:
-		default:	    /* IMPDEF: treat any other value as 64k */
-			wi->pgshift = 16;	 break;
-		}
-	} else {
+	else
 		wi->txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
-		tg = FIELD_GET(TCR_TG0_MASK, tcr);
-
-		switch (tg << TCR_TG0_SHIFT) {
-		case TCR_TG0_4K:
-			wi->pgshift = 12;	 break;
-		case TCR_TG0_16K:
-			wi->pgshift = 14;	 break;
-		case TCR_TG0_64K:
-		default:	    /* IMPDEF: treat any other value as 64k */
-			wi->pgshift = 16;	 break;
-		}
-	}
 
+	wi->pgshift = tcr_tg_shift(vcpu->kvm, tcr, upper_range);
 	wi->pa52bit = has_52bit_pa(vcpu, wi, tcr);
 
 	ia_bits = get_ia_size(wi);
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 883b6c1008fb..2bfab3007cb3 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -378,20 +378,36 @@ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
 	return 0;
 }
 
-static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_info *wi)
+static unsigned int tg0_to_shift(u64 tg0)
+{
+	switch (tg0) {
+	case VTCR_EL2_TG0_4K:
+		return 12;
+	case VTCR_EL2_TG0_16K:
+		return 14;
+	case VTCR_EL2_TG0_64K:
+	default:	/* IMPDEF: treat any other value as 64k */
+		return 16;
+	}
+}
+
+static u64 vtcr_tg0_shift(struct kvm *kvm, u64 vtcr)
+{
+	u64 tg0 = FIELD_GET(VTCR_EL2_TG0_MASK, vtcr);
+	unsigned int shift = tg0_to_shift(tg0);
+
+	return shift;
+}
+
+static size_t vtcr_tg0_size(struct kvm *kvm, u64 vtcr)
+{
+	return BIT(vtcr_tg0_shift(kvm, vtcr));
+}
+
+static void vtcr_to_walk_info(struct kvm *kvm, u64 vtcr, struct s2_walk_info *wi)
 {
 	wi->t0sz = vtcr & TCR_EL2_T0SZ_MASK;
-
-	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
-	case VTCR_EL2_TG0_4K:
-		wi->pgshift = 12;	 break;
-	case VTCR_EL2_TG0_16K:
-		wi->pgshift = 14;	 break;
-	case VTCR_EL2_TG0_64K:
-	default:	    /* IMPDEF: treat any other value as 64k */
-		wi->pgshift = 16;	 break;
-	}
-
+	wi->pgshift = vtcr_tg0_shift(kvm, vtcr);
 	wi->sl = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
 	/* Global limit for now, should eventually be per-VM */
 	wi->max_oa_bits = min(get_kvm_ipa_limit(),
@@ -414,7 +430,7 @@ int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
 
 	wi.baddr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
 
-	vtcr_to_walk_info(vtcr, &wi);
+	vtcr_to_walk_info(vcpu->kvm, vtcr, &wi);
 
 	wi.be = vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_EE;
 
@@ -515,17 +531,19 @@ static u8 get_guest_mapping_ttl(struct kvm_s2_mmu *mmu, u64 addr)
 	u64 tmp, sz = 0, vtcr = mmu->tlb_vtcr;
 	kvm_pte_t pte;
 	u8 ttl, level;
+	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
+	size_t tg0_size = vtcr_tg0_size(kvm, vtcr);
 
-	lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock);
+	lockdep_assert_held_write(&kvm->mmu_lock);
 
-	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
-	case VTCR_EL2_TG0_4K:
+	switch (tg0_size) {
+	case SZ_4K:
 		ttl = (TLBI_TTL_TG_4K << 2);
 		break;
-	case VTCR_EL2_TG0_16K:
+	case SZ_16K:
 		ttl = (TLBI_TTL_TG_16K << 2);
 		break;
-	case VTCR_EL2_TG0_64K:
+	case SZ_64K:
 	default:	    /* IMPDEF: treat any other value as 64k */
 		ttl = (TLBI_TTL_TG_64K << 2);
 		break;
@@ -535,19 +553,19 @@ static u8 get_guest_mapping_ttl(struct kvm_s2_mmu *mmu, u64 addr)
 
 again:
 	/* Iteratively compute the block sizes for a particular granule size */
-	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
-	case VTCR_EL2_TG0_4K:
+	switch (tg0_size) {
+	case SZ_4K:
 		if	(sz < SZ_4K)	sz = SZ_4K;
 		else if (sz < SZ_2M)	sz = SZ_2M;
 		else if (sz < SZ_1G)	sz = SZ_1G;
 		else			sz = 0;
 		break;
-	case VTCR_EL2_TG0_16K:
+	case SZ_16K:
 		if	(sz < SZ_16K)	sz = SZ_16K;
 		else if (sz < SZ_32M)	sz = SZ_32M;
 		else			sz = 0;
 		break;
-	case VTCR_EL2_TG0_64K:
+	case SZ_64K:
 	default:	    /* IMPDEF: treat any other value as 64k */
 		if	(sz < SZ_64K)	sz = SZ_64K;
 		else if (sz < SZ_512M)	sz = SZ_512M;
@@ -598,14 +616,14 @@ unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val)
 
 	if (!max_size) {
 		/* Compute the maximum extent of the invalidation */
-		switch (FIELD_GET(VTCR_EL2_TG0_MASK, mmu->tlb_vtcr)) {
-		case VTCR_EL2_TG0_4K:
+		switch (vtcr_tg0_size(kvm, mmu->tlb_vtcr)) {
+		case SZ_4K:
 			max_size = SZ_1G;
 			break;
-		case VTCR_EL2_TG0_16K:
+		case SZ_16K:
 			max_size = SZ_32M;
 			break;
-		case VTCR_EL2_TG0_64K:
+		case SZ_64K:
 		default:    /* IMPDEF: treat any other value as 64k */
 			/*
 			 * No, we do not support 52bit IPA in nested yet. Once
-- 
2.43.0



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 2/2] KVM: arm64: Fallback to a supported value for unsupported guest TGx
  2026-04-06 16:46 [PATCH 0/2] KVM: arm64: Handle unsupported guest translation granules Wei-Lin Chang
  2026-04-06 16:46 ` [PATCH 1/2] KVM: arm64: Factor out TG0/1 decoding of VTCR and TCR Wei-Lin Chang
@ 2026-04-06 16:46 ` Wei-Lin Chang
  1 sibling, 0 replies; 4+ messages in thread
From: Wei-Lin Chang @ 2026-04-06 16:46 UTC (permalink / raw)
  To: linux-arm-kernel, kvmarm, linux-kernel
  Cc: Marc Zyngier, Oliver Upton, Joey Gouly, Suzuki K Poulose,
	Zenghui Yu, Catalin Marinas, Will Deacon, Wei-Lin Chang

When KVM derives the translation granule for emulated stage-1 and
stage-2 walks, it decodes TCR/VTCR.TGx and treats the granule as-is.
This is wrong when the guest programs a granule size that is not
advertised in the guest's ID_AA64MMFR0_EL1.TGRAN* fields.
Architecturally, such a value must be treated as an implemented granule
size. Choose an available one while prioritizing PAGE_SIZE.

Signed-off-by: Wei-Lin Chang <weilin.chang@arm.com>
---
 arch/arm64/kvm/at.c     | 48 ++++++++++++++++++++++++++
 arch/arm64/kvm/nested.c | 75 +++++++++++++++++++++++++++++++----------
 2 files changed, 105 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index ff8ba30e917b..6dd883798f83 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -135,6 +135,30 @@ static void compute_s1poe(struct kvm_vcpu *vcpu, struct s1_walk_info *wi)
 	wi->e0poe = (wi->regime != TR_EL2) && (val & TCR2_EL1_E0POE);
 }
 
+#define _has_tgran(__r, __sz)					\
+	({							\
+		u64 _s1, _mmfr0 = __r;				\
+								\
+		_s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,		\
+				    TGRAN##__sz, _mmfr0);	\
+								\
+		_s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI;	\
+	})
+
+static bool has_tgran(u64 mmfr0, unsigned int shift)
+{
+	switch (shift) {
+	case 12:
+		return _has_tgran(mmfr0, 4);
+	case 14:
+		return _has_tgran(mmfr0, 16);
+	case 16:
+		return _has_tgran(mmfr0, 64);
+	default:
+		BUG();
+	}
+}
+
 static unsigned int tg0_to_shift(u64 tg0)
 {
 	switch (tg0) {
@@ -161,8 +185,23 @@ static unsigned int tg1_to_shift(u64 tg1)
 	}
 }
 
+static unsigned int fallback_tgran_shift(u64 mmfr0)
+{
+	if (has_tgran(mmfr0, PAGE_SHIFT))
+		return PAGE_SHIFT;
+	else if (has_tgran(mmfr0, 12))
+		return 12;
+	else if (has_tgran(mmfr0, 14))
+		return 14;
+	else if (has_tgran(mmfr0, 16))
+		return 16;
+	else
+		return PAGE_SHIFT;
+}
+
 static u64 tcr_tg_shift(struct kvm *kvm, u64 tcr, bool upper_range)
 {
+	u64 mmfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1);
 	unsigned int shift;
 
 	/* Someone was silly enough to encode TG0/TG1 differently */
@@ -171,6 +210,15 @@ static u64 tcr_tg_shift(struct kvm *kvm, u64 tcr, bool upper_range)
 	else
 		shift = tg0_to_shift(FIELD_GET(TCR_EL1_TG0_MASK, tcr));
 
+	/*
+	 * If TGx is programmed to an unimplemented value (not advertised in
+	 * ID_AA64MMFR0_EL1), we should treat it as if an implemented value is
+	 * written, as per the architecture. Choose an available one while
+	 * prioritizing PAGE_SIZE.
+	 */
+	if (!has_tgran(mmfr0, shift))
+		return fallback_tgran_shift(mmfr0);
+
 	return shift;
 }
 
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 2bfab3007cb3..64794ba4848d 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -378,6 +378,36 @@ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
 	return 0;
 }
 
+#define _has_tgran_2(__r, __sz)						\
+	({								\
+		u64 _s1, _s2, _mmfr0 = __r;				\
+									\
+		_s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
+				    TGRAN##__sz##_2, _mmfr0);		\
+									\
+		_s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
+				    TGRAN##__sz, _mmfr0);		\
+									\
+		((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI &&		\
+		  _s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \
+		 (_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \
+		  _s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI));		\
+	})
+
+static bool has_tgran_2(u64 mmfr0, unsigned int shift)
+{
+	switch (shift) {
+	case 12:
+		return _has_tgran_2(mmfr0, 4);
+	case 14:
+		return _has_tgran_2(mmfr0, 16);
+	case 16:
+		return _has_tgran_2(mmfr0, 64);
+	default:
+		BUG();
+	}
+}
+
 static unsigned int tg0_to_shift(u64 tg0)
 {
 	switch (tg0) {
@@ -391,11 +421,35 @@ static unsigned int tg0_to_shift(u64 tg0)
 	}
 }
 
+static unsigned int fallback_tgran2_shift(u64 mmfr0)
+{
+	if (has_tgran_2(mmfr0, PAGE_SHIFT))
+		return PAGE_SHIFT;
+	else if (has_tgran_2(mmfr0, 12))
+		return 12;
+	else if (has_tgran_2(mmfr0, 14))
+		return 14;
+	else if (has_tgran_2(mmfr0, 16))
+		return 16;
+	else
+		return PAGE_SHIFT;
+}
+
 static u64 vtcr_tg0_shift(struct kvm *kvm, u64 vtcr)
 {
+	u64 mmfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1);
 	u64 tg0 = FIELD_GET(VTCR_EL2_TG0_MASK, vtcr);
 	unsigned int shift = tg0_to_shift(tg0);
 
+	/*
+	 * If TGx is programmed to an unimplemented value (not advertised in
+	 * ID_AA64MMFR0_EL1), we should treat it as if an implemented value is
+	 * written, as per the architecture. Choose an available one while
+	 * prioritizing PAGE_SIZE.
+	 */
+	if (!has_tgran_2(mmfr0, shift))
+		return fallback_tgran2_shift(mmfr0);
+
 	return shift;
 }
 
@@ -1516,21 +1570,6 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
 	}
 }
 
-#define has_tgran_2(__r, __sz)						\
-	({								\
-		u64 _s1, _s2, _mmfr0 = __r;				\
-									\
-		_s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
-				    TGRAN##__sz##_2, _mmfr0);		\
-									\
-		_s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
-				    TGRAN##__sz, _mmfr0);		\
-									\
-		((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI &&		\
-		  _s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \
-		 (_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \
-		  _s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI));		\
-	})
 /*
  * Our emulated CPU doesn't support all the possible features. For the
  * sake of simplicity (and probably mental sanity), wipe out a number
@@ -1617,15 +1656,15 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
 		 */
 		switch (PAGE_SIZE) {
 		case SZ_4K:
-			if (has_tgran_2(orig_val, 4))
+			if (_has_tgran_2(orig_val, 4))
 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
 			fallthrough;
 		case SZ_16K:
-			if (has_tgran_2(orig_val, 16))
+			if (_has_tgran_2(orig_val, 16))
 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
 			fallthrough;
 		case SZ_64K:
-			if (has_tgran_2(orig_val, 64))
+			if (_has_tgran_2(orig_val, 64))
 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
 			break;
 		}
-- 
2.43.0



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH 1/2] KVM: arm64: Factor out TG0/1 decoding of VTCR and TCR
  2026-04-06 16:46 ` [PATCH 1/2] KVM: arm64: Factor out TG0/1 decoding of VTCR and TCR Wei-Lin Chang
@ 2026-04-07  7:17   ` Marc Zyngier
  0 siblings, 0 replies; 4+ messages in thread
From: Marc Zyngier @ 2026-04-07  7:17 UTC (permalink / raw)
  To: Wei-Lin Chang
  Cc: linux-arm-kernel, kvmarm, linux-kernel, Oliver Upton, Joey Gouly,
	Suzuki K Poulose, Zenghui Yu, Catalin Marinas, Will Deacon

On Mon, 06 Apr 2026 17:46:17 +0100,
Wei-Lin Chang <weilin.chang@arm.com> wrote:
> 
> The current code decodes TCR.TG0/TG1 and VTCR.TG0 inline at several
> places. Extract this logic into helpers so the granule size is derived
> in one place. This enables us to alter the effective granule size in
> the same place, which we will need in a later patch.
> 
> Signed-off-by: Wei-Lin Chang <weilin.chang@arm.com>
> ---
>  arch/arm64/kvm/at.c     | 73 +++++++++++++++++++++++++----------------
>  arch/arm64/kvm/nested.c | 70 ++++++++++++++++++++++++---------------
>  2 files changed, 89 insertions(+), 54 deletions(-)
> 
> diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
> index c5c5644b1878..ff8ba30e917b 100644
> --- a/arch/arm64/kvm/at.c
> +++ b/arch/arm64/kvm/at.c
> @@ -135,14 +135,54 @@ static void compute_s1poe(struct kvm_vcpu *vcpu, struct s1_walk_info *wi)
>  	wi->e0poe = (wi->regime != TR_EL2) && (val & TCR2_EL1_E0POE);
>  }
>  
> +static unsigned int tg0_to_shift(u64 tg0)
> +{

It'd be better to abstract these helpers a bit more by making them
take the full TCR_ELx value, and to give them a slightly better name.

I'd suggest something like:

static unsigned int tcr_to_tg0_pgshift(u64 tcr)
{
	u64 tg0 = tcr & TCR_TG0_MASK;

which makes it clear that the result is a page shift, as required by
wi->pgshift.

> +	switch (tg0) {
> +	case TCR_EL1_TG0_4K:
> +		return 12;
> +	case TCR_EL1_TG0_16K:
> +		return 14;
> +	case TCR_EL1_TG0_64K:

Please don't mix the _EL1 definition and those without _EL1 in the
same file. For a start, that's not always EL1. Also, this makes very
hard to reason about what is shifted and what is not.

> +	default:	/* IMPDEF: treat any other value as 64k */
> +		return 16;
> +	}
> +}
> +
> +static unsigned int tg1_to_shift(u64 tg1)
> +{
> +	switch (tg1) {
> +	case TCR_EL1_TG1_4K:
> +		return 12;
> +	case TCR_EL1_TG1_16K:
> +		return 14;
> +	case TCR_EL1_TG1_64K:
> +	default:	/* IMPDEF: treat any other value as 64k */
> +		return 16;
> +	}
> +}
> +
> +static u64 tcr_tg_shift(struct kvm *kvm, u64 tcr, bool upper_range)
> +{
> +	unsigned int shift;
> +
> +	/* Someone was silly enough to encode TG0/TG1 differently */
> +	if (upper_range)
> +		shift = tg1_to_shift(FIELD_GET(TCR_EL1_TG1_MASK, tcr));
> +	else
> +		shift = tg0_to_shift(FIELD_GET(TCR_EL1_TG0_MASK, tcr));
> +
> +	return shift;
> +}
> +
>  static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
>  			 struct s1_walk_result *wr, u64 va)
>  {
> -	u64 hcr, sctlr, tcr, tg, ps, ia_bits, ttbr;
> +	u64 hcr, sctlr, tcr, ps, ia_bits, ttbr;
>  	unsigned int stride, x;
> -	bool va55, tbi, lva;
> +	bool va55, tbi, lva, upper_range;
>  
>  	va55 = va & BIT(55);
> +	upper_range = va55 && wi->regime != TR_EL2;
>  
>  	if (vcpu_has_nv(vcpu)) {
>  		hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
> @@ -173,35 +213,12 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
>  		BUG();
>  	}
>  
> -	/* Someone was silly enough to encode TG0/TG1 differently */
> -	if (va55 && wi->regime != TR_EL2) {
> +	if (upper_range)
>  		wi->txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);
> -		tg = FIELD_GET(TCR_TG1_MASK, tcr);
> -
> -		switch (tg << TCR_TG1_SHIFT) {
> -		case TCR_TG1_4K:
> -			wi->pgshift = 12;	 break;
> -		case TCR_TG1_16K:
> -			wi->pgshift = 14;	 break;
> -		case TCR_TG1_64K:
> -		default:	    /* IMPDEF: treat any other value as 64k */
> -			wi->pgshift = 16;	 break;
> -		}
> -	} else {
> +	else
>  		wi->txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
> -		tg = FIELD_GET(TCR_TG0_MASK, tcr);
> -
> -		switch (tg << TCR_TG0_SHIFT) {
> -		case TCR_TG0_4K:
> -			wi->pgshift = 12;	 break;
> -		case TCR_TG0_16K:
> -			wi->pgshift = 14;	 break;
> -		case TCR_TG0_64K:
> -		default:	    /* IMPDEF: treat any other value as 64k */
> -			wi->pgshift = 16;	 break;
> -		}
> -	}
>  
> +	wi->pgshift = tcr_tg_shift(vcpu->kvm, tcr, upper_range);
>  	wi->pa52bit = has_52bit_pa(vcpu, wi, tcr);
>
>  	ia_bits = get_ia_size(wi);
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index 883b6c1008fb..2bfab3007cb3 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -378,20 +378,36 @@ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
>  	return 0;
>  }
>  
> -static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_info *wi)
> +static unsigned int tg0_to_shift(u64 tg0)

Same comments as above.

> +{
> +	switch (tg0) {
> +	case VTCR_EL2_TG0_4K:
> +		return 12;
> +	case VTCR_EL2_TG0_16K:
> +		return 14;
> +	case VTCR_EL2_TG0_64K:
> +	default:	/* IMPDEF: treat any other value as 64k */
> +		return 16;
> +	}
> +}
> +
> +static u64 vtcr_tg0_shift(struct kvm *kvm, u64 vtcr)
> +{
> +	u64 tg0 = FIELD_GET(VTCR_EL2_TG0_MASK, vtcr);
> +	unsigned int shift = tg0_to_shift(tg0);
> +
> +	return shift;

shift is an unsigned int. Why is the return value a u64? Try and make
sure that types are consistent, even if they cast nicely in C.

> +}
> +
> +static size_t vtcr_tg0_size(struct kvm *kvm, u64 vtcr)
> +{
> +	return BIT(vtcr_tg0_shift(kvm, vtcr));
> +}
> +
> +static void vtcr_to_walk_info(struct kvm *kvm, u64 vtcr, struct s2_walk_info *wi)

This prototype reads bizarrely. vtcr is per CPU, the walk info is
evidently per CPU, and yet you pass a kvm struct.

Instead, rename this to:

static void setup_s2_walk(struct kvm_vcpu *vcpu,
			  struct s2_walk_info *wi)
{
	u64 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);

and call that directly. You can then extract vcpu->kvm as needed. It
also aligns the naming on the s1 part, which isn't a bad thing to do.

>  {
>  	wi->t0sz = vtcr & TCR_EL2_T0SZ_MASK;
> -
> -	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
> -	case VTCR_EL2_TG0_4K:
> -		wi->pgshift = 12;	 break;
> -	case VTCR_EL2_TG0_16K:
> -		wi->pgshift = 14;	 break;
> -	case VTCR_EL2_TG0_64K:
> -	default:	    /* IMPDEF: treat any other value as 64k */
> -		wi->pgshift = 16;	 break;
> -	}
> -
> +	wi->pgshift = vtcr_tg0_shift(kvm, vtcr);
>  	wi->sl = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
>  	/* Global limit for now, should eventually be per-VM */
>  	wi->max_oa_bits = min(get_kvm_ipa_limit(),
> @@ -414,7 +430,7 @@ int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
>  
>  	wi.baddr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
>  
> -	vtcr_to_walk_info(vtcr, &wi);
> +	vtcr_to_walk_info(vcpu->kvm, vtcr, &wi);
>  
>  	wi.be = vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_EE;
>  
> @@ -515,17 +531,19 @@ static u8 get_guest_mapping_ttl(struct kvm_s2_mmu *mmu, u64 addr)
>  	u64 tmp, sz = 0, vtcr = mmu->tlb_vtcr;
>  	kvm_pte_t pte;
>  	u8 ttl, level;
> +	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
> +	size_t tg0_size = vtcr_tg0_size(kvm, vtcr);
>  
> -	lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock);
> +	lockdep_assert_held_write(&kvm->mmu_lock);
>  
> -	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
> -	case VTCR_EL2_TG0_4K:
> +	switch (tg0_size) {
> +	case SZ_4K:
>  		ttl = (TLBI_TTL_TG_4K << 2);
>  		break;
> -	case VTCR_EL2_TG0_16K:
> +	case SZ_16K:
>  		ttl = (TLBI_TTL_TG_16K << 2);
>  		break;
> -	case VTCR_EL2_TG0_64K:
> +	case SZ_64K:

All these unit changes make the patch harder to read than it should
be. Consider having a separate patch to do that.

Thanks,

	M.

-- 
Jazz isn't dead. It just smells funny.


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-04-07  7:17 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-06 16:46 [PATCH 0/2] KVM: arm64: Handle unsupported guest translation granules Wei-Lin Chang
2026-04-06 16:46 ` [PATCH 1/2] KVM: arm64: Factor out TG0/1 decoding of VTCR and TCR Wei-Lin Chang
2026-04-07  7:17   ` Marc Zyngier
2026-04-06 16:46 ` [PATCH 2/2] KVM: arm64: Fallback to a supported value for unsupported guest TGx Wei-Lin Chang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox