* [PATCH v2 01/30] KVM: arm64: Extract VMA size resolution in user_mem_abort()
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
As part of an effort to refactor user_mem_abort() into smaller, more
focused helper functions, extract the logic responsible for determining
the VMA shift and page size into a new static helper,
kvm_s2_resolve_vma_size().
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 130 ++++++++++++++++++++++++-------------------
1 file changed, 73 insertions(+), 57 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 17d64a1e11e5c..f8064b2d32045 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1639,6 +1639,77 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret != -EAGAIN ? ret : 0;
}
+static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
+ unsigned long hva,
+ struct kvm_memory_slot *memslot,
+ struct kvm_s2_trans *nested,
+ bool *force_pte, phys_addr_t *ipa)
+{
+ short vma_shift;
+ long vma_pagesize;
+
+ if (*force_pte)
+ vma_shift = PAGE_SHIFT;
+ else
+ vma_shift = get_vma_page_shift(vma, hva);
+
+ switch (vma_shift) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SHIFT:
+ if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+ break;
+ fallthrough;
+#endif
+ case CONT_PMD_SHIFT:
+ vma_shift = PMD_SHIFT;
+ fallthrough;
+ case PMD_SHIFT:
+ if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
+ break;
+ fallthrough;
+ case CONT_PTE_SHIFT:
+ vma_shift = PAGE_SHIFT;
+ *force_pte = true;
+ fallthrough;
+ case PAGE_SHIFT:
+ break;
+ default:
+ WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
+ }
+
+ vma_pagesize = 1UL << vma_shift;
+
+ if (nested) {
+ unsigned long max_map_size;
+
+ max_map_size = *force_pte ? PAGE_SIZE : PUD_SIZE;
+
+ *ipa = kvm_s2_trans_output(nested);
+
+ /*
+ * If we're about to create a shadow stage 2 entry, then we
+ * can only create a block mapping if the guest stage 2 page
+ * table uses at least as big a mapping.
+ */
+ max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
+
+ /*
+ * Be careful that if the mapping size falls between
+ * two host sizes, take the smallest of the two.
+ */
+ if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
+ max_map_size = PMD_SIZE;
+ else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
+ max_map_size = PAGE_SIZE;
+
+ *force_pte = (max_map_size == PAGE_SIZE);
+ vma_pagesize = min_t(long, vma_pagesize, max_map_size);
+ vma_shift = __ffs(vma_pagesize);
+ }
+
+ return vma_shift;
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot, unsigned long hva,
@@ -1695,65 +1766,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- if (force_pte)
- vma_shift = PAGE_SHIFT;
- else
- vma_shift = get_vma_page_shift(vma, hva);
-
- switch (vma_shift) {
-#ifndef __PAGETABLE_PMD_FOLDED
- case PUD_SHIFT:
- if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
- break;
- fallthrough;
-#endif
- case CONT_PMD_SHIFT:
- vma_shift = PMD_SHIFT;
- fallthrough;
- case PMD_SHIFT:
- if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
- break;
- fallthrough;
- case CONT_PTE_SHIFT:
- vma_shift = PAGE_SHIFT;
- force_pte = true;
- fallthrough;
- case PAGE_SHIFT:
- break;
- default:
- WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
- }
-
+ vma_shift = kvm_s2_resolve_vma_size(vma, hva, memslot, nested,
+ &force_pte, &ipa);
vma_pagesize = 1UL << vma_shift;
- if (nested) {
- unsigned long max_map_size;
-
- max_map_size = force_pte ? PAGE_SIZE : PUD_SIZE;
-
- ipa = kvm_s2_trans_output(nested);
-
- /*
- * If we're about to create a shadow stage 2 entry, then we
- * can only create a block mapping if the guest stage 2 page
- * table uses at least as big a mapping.
- */
- max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
-
- /*
- * Be careful that if the mapping size falls between
- * two host sizes, take the smallest of the two.
- */
- if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
- max_map_size = PMD_SIZE;
- else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
- max_map_size = PAGE_SIZE;
-
- force_pte = (max_map_size == PAGE_SIZE);
- vma_pagesize = min_t(long, vma_pagesize, max_map_size);
- vma_shift = __ffs(vma_pagesize);
- }
-
/*
* Both the canonical IPA and fault IPA must be aligned to the
* mapping size to ensure we find the right PFN and lay down the
--
2.47.3
* [PATCH v2 02/30] KVM: arm64: Introduce struct kvm_s2_fault to user_mem_abort()
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
The user_mem_abort() function takes many arguments and defines a large
number of local variables. Passing all these variables around to helper
functions would result in functions with too many arguments.
Introduce struct kvm_s2_fault to encapsulate the stage-2 fault state.
This structure holds both the input parameters and the intermediate
state required during the fault handling process.
Update user_mem_abort() to initialize this structure and replace the
usage of local variables with fields from the new structure.
This prepares the ground for further extracting parts of
user_mem_abort() into smaller helper functions that can simply take a
pointer to the fault state structure.
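As a rough sketch, the shape the series converges on in later patches
(condensed for illustration, not this patch's literal code) is:

	struct kvm_s2_fault fault = { .vcpu = vcpu, .fault_ipa = fault_ipa, /* ... */ };
	void *memcache;
	int ret;

	/* top up the memcache if this fault may need page table pages */
	ret = prepare_mmu_memcache(vcpu, fault.topup_memcache, &memcache);
	if (ret)
		return ret;

	/* resolve and pin the backing page */
	ret = kvm_s2_fault_pin_pfn(&fault);
	if (ret != 1)
		return ret;

	/* compute stage-2 permissions, possibly injecting a fault */
	ret = kvm_s2_fault_compute_prot(&fault);
	if (ret) {
		kvm_release_page_unused(fault.page);
		return ret;
	}

	/* install the mapping under the MMU lock */
	return kvm_s2_fault_map(&fault, memcache);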
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 212 +++++++++++++++++++++++++------------------
1 file changed, 123 insertions(+), 89 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index f8064b2d32045..b366bde15a429 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1710,38 +1710,68 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
return vma_shift;
}
+struct kvm_s2_fault {
+ struct kvm_vcpu *vcpu;
+ phys_addr_t fault_ipa;
+ struct kvm_s2_trans *nested;
+ struct kvm_memory_slot *memslot;
+ unsigned long hva;
+ bool fault_is_perm;
+
+ bool write_fault;
+ bool exec_fault;
+ bool writable;
+ bool topup_memcache;
+ bool mte_allowed;
+ bool is_vma_cacheable;
+ bool s2_force_noncacheable;
+ bool vfio_allow_any_uc;
+ unsigned long mmu_seq;
+ phys_addr_t ipa;
+ short vma_shift;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+ bool logging_active;
+ bool force_pte;
+ long vma_pagesize;
+ long fault_granule;
+ enum kvm_pgtable_prot prot;
+ struct page *page;
+ vm_flags_t vm_flags;
+};
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot, unsigned long hva,
bool fault_is_perm)
{
int ret = 0;
- bool topup_memcache;
- bool write_fault, writable;
- bool exec_fault, mte_allowed, is_vma_cacheable;
- bool s2_force_noncacheable = false, vfio_allow_any_uc = false;
- unsigned long mmu_seq;
- phys_addr_t ipa = fault_ipa;
+ struct kvm_s2_fault fault_data = {
+ .vcpu = vcpu,
+ .fault_ipa = fault_ipa,
+ .nested = nested,
+ .memslot = memslot,
+ .hva = hva,
+ .fault_is_perm = fault_is_perm,
+ .ipa = fault_ipa,
+ .logging_active = memslot_is_logging(memslot),
+ .force_pte = memslot_is_logging(memslot),
+ .s2_force_noncacheable = false,
+ .vfio_allow_any_uc = false,
+ .prot = KVM_PGTABLE_PROT_R,
+ };
+ struct kvm_s2_fault *fault = &fault_data;
struct kvm *kvm = vcpu->kvm;
struct vm_area_struct *vma;
- short vma_shift;
void *memcache;
- gfn_t gfn;
- kvm_pfn_t pfn;
- bool logging_active = memslot_is_logging(memslot);
- bool force_pte = logging_active;
- long vma_pagesize, fault_granule;
- enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;
- struct page *page;
- vm_flags_t vm_flags;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
- if (fault_is_perm)
- fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
- write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
- VM_WARN_ON_ONCE(write_fault && exec_fault);
+ if (fault->fault_is_perm)
+ fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
+ fault->write_fault = kvm_is_write_fault(fault->vcpu);
+ fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
+ VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
/*
* Permission faults just need to update the existing leaf entry,
@@ -1749,8 +1779,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* only exception to this is when dirty logging is enabled at runtime
* and a write fault needs to collapse a block entry into a table.
*/
- topup_memcache = !fault_is_perm || (logging_active && write_fault);
- ret = prepare_mmu_memcache(vcpu, topup_memcache, &memcache);
+ fault->topup_memcache = !fault->fault_is_perm ||
+ (fault->logging_active && fault->write_fault);
+ ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
if (ret)
return ret;
@@ -1759,33 +1790,33 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* get block mapping for device MMIO region.
*/
mmap_read_lock(current->mm);
- vma = vma_lookup(current->mm, hva);
+ vma = vma_lookup(current->mm, fault->hva);
if (unlikely(!vma)) {
- kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
+ kvm_err("Failed to find VMA for fault->hva 0x%lx\n", fault->hva);
mmap_read_unlock(current->mm);
return -EFAULT;
}
- vma_shift = kvm_s2_resolve_vma_size(vma, hva, memslot, nested,
- &force_pte, &ipa);
- vma_pagesize = 1UL << vma_shift;
+ fault->vma_shift = kvm_s2_resolve_vma_size(vma, fault->hva, fault->memslot, fault->nested,
+ &fault->force_pte, &fault->ipa);
+ fault->vma_pagesize = 1UL << fault->vma_shift;
/*
* Both the canonical IPA and fault IPA must be aligned to the
* mapping size to ensure we find the right PFN and lay down the
* mapping in the right place.
*/
- fault_ipa = ALIGN_DOWN(fault_ipa, vma_pagesize);
- ipa = ALIGN_DOWN(ipa, vma_pagesize);
+ fault->fault_ipa = ALIGN_DOWN(fault->fault_ipa, fault->vma_pagesize);
+ fault->ipa = ALIGN_DOWN(fault->ipa, fault->vma_pagesize);
- gfn = ipa >> PAGE_SHIFT;
- mte_allowed = kvm_vma_mte_allowed(vma);
+ fault->gfn = fault->ipa >> PAGE_SHIFT;
+ fault->mte_allowed = kvm_vma_mte_allowed(vma);
- vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
+ fault->vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
- vm_flags = vma->vm_flags;
+ fault->vm_flags = vma->vm_flags;
- is_vma_cacheable = kvm_vma_is_cacheable(vma);
+ fault->is_vma_cacheable = kvm_vma_is_cacheable(vma);
/* Don't use the VMA after the unlock -- it may have vanished */
vma = NULL;
@@ -1798,24 +1829,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
* with the smp_wmb() in kvm_mmu_invalidate_end().
*/
- mmu_seq = kvm->mmu_invalidate_seq;
+ fault->mmu_seq = kvm->mmu_invalidate_seq;
mmap_read_unlock(current->mm);
- pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
- &writable, &page);
- if (pfn == KVM_PFN_ERR_HWPOISON) {
- kvm_send_hwpoison_signal(hva, vma_shift);
+ fault->pfn = __kvm_faultin_pfn(fault->memslot, fault->gfn,
+ fault->write_fault ? FOLL_WRITE : 0,
+ &fault->writable, &fault->page);
+ if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
+ kvm_send_hwpoison_signal(fault->hva, fault->vma_shift);
return 0;
}
- if (is_error_noslot_pfn(pfn))
+ if (is_error_noslot_pfn(fault->pfn))
return -EFAULT;
/*
* Check if this is non-struct page memory PFN, and cannot support
* CMOs. It could potentially be unsafe to access as cacheable.
*/
- if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) {
- if (is_vma_cacheable) {
+ if (fault->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(fault->pfn)) {
+ if (fault->is_vma_cacheable) {
/*
* Whilst the VMA owner expects cacheable mapping to this
* PFN, hardware also has to support the FWB and CACHE DIC
@@ -1833,25 +1865,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
} else {
/*
* If the page was identified as device early by looking at
- * the VMA flags, vma_pagesize is already representing the
+ * the VMA flags, fault->vma_pagesize is already representing the
* largest quantity we can map. If instead it was mapped
- * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE
+ * via __kvm_faultin_pfn(), fault->vma_pagesize is set to PAGE_SIZE
* and must not be upgraded.
*
* In both cases, we don't let transparent_hugepage_adjust()
* change things at the last minute.
*/
- s2_force_noncacheable = true;
+ fault->s2_force_noncacheable = true;
}
- } else if (logging_active && !write_fault) {
+ } else if (fault->logging_active && !fault->write_fault) {
/*
- * Only actually map the page as writable if this was a write
+ * Only actually map the page as fault->writable if this was a write
* fault.
*/
- writable = false;
+ fault->writable = false;
}
- if (exec_fault && s2_force_noncacheable)
+ if (fault->exec_fault && fault->s2_force_noncacheable)
ret = -ENOEXEC;
if (ret)
@@ -1860,21 +1892,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
/*
* Guest performs atomic/exclusive operations on memory with unsupported
* attributes (e.g. ld64b/st64b on normal memory when no FEAT_LS64WB)
- * and trigger the exception here. Since the memslot is valid, inject
+ * and trigger the exception here. Since the fault->memslot is valid, inject
* the fault back to the guest.
*/
- if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) {
- kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
+ if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(fault->vcpu))) {
+ kvm_inject_dabt_excl_atomic(fault->vcpu, kvm_vcpu_get_hfar(fault->vcpu));
ret = 1;
goto out_put_page;
}
- if (nested)
- adjust_nested_fault_perms(nested, &prot, &writable);
+ if (fault->nested)
+ adjust_nested_fault_perms(fault->nested, &fault->prot, &fault->writable);
kvm_fault_lock(kvm);
- pgt = vcpu->arch.hw_mmu->pgt;
- if (mmu_invalidate_retry(kvm, mmu_seq)) {
+ pgt = fault->vcpu->arch.hw_mmu->pgt;
+ if (mmu_invalidate_retry(kvm, fault->mmu_seq)) {
ret = -EAGAIN;
goto out_unlock;
}
@@ -1883,78 +1915,80 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* If we are not forced to use page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
- if (vma_pagesize == PAGE_SIZE && !(force_pte || s2_force_noncacheable)) {
- if (fault_is_perm && fault_granule > PAGE_SIZE)
- vma_pagesize = fault_granule;
+ if (fault->vma_pagesize == PAGE_SIZE &&
+ !(fault->force_pte || fault->s2_force_noncacheable)) {
+ if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE)
+ fault->vma_pagesize = fault->fault_granule;
else
- vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
- hva, &pfn,
- &fault_ipa);
+ fault->vma_pagesize = transparent_hugepage_adjust(kvm, fault->memslot,
+ fault->hva, &fault->pfn,
+ &fault->fault_ipa);
- if (vma_pagesize < 0) {
- ret = vma_pagesize;
+ if (fault->vma_pagesize < 0) {
+ ret = fault->vma_pagesize;
goto out_unlock;
}
}
- if (!fault_is_perm && !s2_force_noncacheable && kvm_has_mte(kvm)) {
+ if (!fault->fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
- if (mte_allowed) {
- sanitise_mte_tags(kvm, pfn, vma_pagesize);
+ if (fault->mte_allowed) {
+ sanitise_mte_tags(kvm, fault->pfn, fault->vma_pagesize);
} else {
ret = -EFAULT;
goto out_unlock;
}
}
- if (writable)
- prot |= KVM_PGTABLE_PROT_W;
+ if (fault->writable)
+ fault->prot |= KVM_PGTABLE_PROT_W;
- if (exec_fault)
- prot |= KVM_PGTABLE_PROT_X;
+ if (fault->exec_fault)
+ fault->prot |= KVM_PGTABLE_PROT_X;
- if (s2_force_noncacheable) {
- if (vfio_allow_any_uc)
- prot |= KVM_PGTABLE_PROT_NORMAL_NC;
+ if (fault->s2_force_noncacheable) {
+ if (fault->vfio_allow_any_uc)
+ fault->prot |= KVM_PGTABLE_PROT_NORMAL_NC;
else
- prot |= KVM_PGTABLE_PROT_DEVICE;
+ fault->prot |= KVM_PGTABLE_PROT_DEVICE;
} else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
- prot |= KVM_PGTABLE_PROT_X;
+ fault->prot |= KVM_PGTABLE_PROT_X;
}
- if (nested)
- adjust_nested_exec_perms(kvm, nested, &prot);
+ if (fault->nested)
+ adjust_nested_exec_perms(kvm, fault->nested, &fault->prot);
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
- * permissions only if vma_pagesize equals fault_granule. Otherwise,
+ * permissions only if fault->vma_pagesize equals fault->fault_granule. Otherwise,
* kvm_pgtable_stage2_map() should be called to change block size.
*/
- if (fault_is_perm && vma_pagesize == fault_granule) {
+ if (fault->fault_is_perm && fault->vma_pagesize == fault->fault_granule) {
/*
* Drop the SW bits in favour of those stored in the
* PTE, which will be preserved.
*/
- prot &= ~KVM_NV_GUEST_MAP_SZ;
- ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, fault_ipa, prot, flags);
+ fault->prot &= ~KVM_NV_GUEST_MAP_SZ;
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, fault->fault_ipa, fault->prot,
+ flags);
} else {
- ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, vma_pagesize,
- __pfn_to_phys(pfn), prot,
- memcache, flags);
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault->fault_ipa, fault->vma_pagesize,
+ __pfn_to_phys(fault->pfn), fault->prot,
+ memcache, flags);
}
out_unlock:
- kvm_release_faultin_page(kvm, page, !!ret, writable);
+ kvm_release_faultin_page(kvm, fault->page, !!ret, fault->writable);
kvm_fault_unlock(kvm);
/* Mark the page dirty only if the fault is handled successfully */
- if (writable && !ret)
- mark_page_dirty_in_slot(kvm, memslot, gfn);
+ if (fault->writable && !ret)
+ mark_page_dirty_in_slot(kvm, fault->memslot, fault->gfn);
return ret != -EAGAIN ? ret : 0;
out_put_page:
- kvm_release_page_unused(page);
+ kvm_release_page_unused(fault->page);
return ret;
}
--
2.47.3
* [PATCH v2 03/30] KVM: arm64: Extract PFN resolution in user_mem_abort()
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Extract the section of code responsible for pinning the physical page
frame number (PFN) backing the faulting IPA into a new helper,
kvm_s2_fault_pin_pfn().
This helper encapsulates the critical section where the mmap_read_lock
is held, the VMA is looked up, the mmu invalidate sequence is sampled,
and the PFN is ultimately resolved via __kvm_faultin_pfn(). It also
handles the early exits for hardware-poisoned pages and noslot PFNs.
By isolating this region, we can begin to organize the state variables
required for PFN resolution into the kvm_s2_fault struct, clearing out
a significant amount of local variable clutter from user_mem_abort().
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 105 ++++++++++++++++++++++++-------------------
1 file changed, 59 insertions(+), 46 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index b366bde15a429..5079a58b65b14 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1740,55 +1740,11 @@ struct kvm_s2_fault {
vm_flags_t vm_flags;
};
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_s2_trans *nested,
- struct kvm_memory_slot *memslot, unsigned long hva,
- bool fault_is_perm)
+static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
{
- int ret = 0;
- struct kvm_s2_fault fault_data = {
- .vcpu = vcpu,
- .fault_ipa = fault_ipa,
- .nested = nested,
- .memslot = memslot,
- .hva = hva,
- .fault_is_perm = fault_is_perm,
- .ipa = fault_ipa,
- .logging_active = memslot_is_logging(memslot),
- .force_pte = memslot_is_logging(memslot),
- .s2_force_noncacheable = false,
- .vfio_allow_any_uc = false,
- .prot = KVM_PGTABLE_PROT_R,
- };
- struct kvm_s2_fault *fault = &fault_data;
- struct kvm *kvm = vcpu->kvm;
struct vm_area_struct *vma;
- void *memcache;
- struct kvm_pgtable *pgt;
- enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
-
- if (fault->fault_is_perm)
- fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
- fault->write_fault = kvm_is_write_fault(fault->vcpu);
- fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
- VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
+ struct kvm *kvm = fault->vcpu->kvm;
- /*
- * Permission faults just need to update the existing leaf entry,
- * and so normally don't require allocations from the memcache. The
- * only exception to this is when dirty logging is enabled at runtime
- * and a write fault needs to collapse a block entry into a table.
- */
- fault->topup_memcache = !fault->fault_is_perm ||
- (fault->logging_active && fault->write_fault);
- ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
- if (ret)
- return ret;
-
- /*
- * Let's check if we will get back a huge page backed by hugetlbfs, or
- * get block mapping for device MMIO region.
- */
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, fault->hva);
if (unlikely(!vma)) {
@@ -1842,6 +1798,63 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (is_error_noslot_pfn(fault->pfn))
return -EFAULT;
+ return 1;
+}
+
+static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ struct kvm_s2_trans *nested,
+ struct kvm_memory_slot *memslot, unsigned long hva,
+ bool fault_is_perm)
+{
+ int ret = 0;
+ struct kvm_s2_fault fault_data = {
+ .vcpu = vcpu,
+ .fault_ipa = fault_ipa,
+ .nested = nested,
+ .memslot = memslot,
+ .hva = hva,
+ .fault_is_perm = fault_is_perm,
+ .ipa = fault_ipa,
+ .logging_active = memslot_is_logging(memslot),
+ .force_pte = memslot_is_logging(memslot),
+ .s2_force_noncacheable = false,
+ .vfio_allow_any_uc = false,
+ .prot = KVM_PGTABLE_PROT_R,
+ };
+ struct kvm_s2_fault *fault = &fault_data;
+ struct kvm *kvm = vcpu->kvm;
+ void *memcache;
+ struct kvm_pgtable *pgt;
+ enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
+
+ if (fault->fault_is_perm)
+ fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
+ fault->write_fault = kvm_is_write_fault(fault->vcpu);
+ fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
+ VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
+
+ /*
+ * Permission faults just need to update the existing leaf entry,
+ * and so normally don't require allocations from the memcache. The
+ * only exception to this is when dirty logging is enabled at runtime
+ * and a write fault needs to collapse a block entry into a table.
+ */
+ fault->topup_memcache = !fault->fault_is_perm ||
+ (fault->logging_active && fault->write_fault);
+ ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
+ if (ret)
+ return ret;
+
+ /*
+ * Let's check if we will get back a huge page backed by hugetlbfs, or
+ * get block mapping for device MMIO region.
+ */
+ ret = kvm_s2_fault_pin_pfn(fault);
+ if (ret != 1)
+ return ret;
+
+ ret = 0;
+
/*
* Check if this is non-struct page memory PFN, and cannot support
* CMOs. It could potentially be unsafe to access as cacheable.
--
2.47.3
* [PATCH v2 04/30] KVM: arm64: Isolate mmap_read_lock inside new kvm_s2_fault_get_vma_info() helper
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Extract the VMA lookup and metadata snapshotting logic from
kvm_s2_fault_pin_pfn() into a tightly-scoped sub-helper.
This refactoring structurally removes a time-of-check/time-of-use
(TOCTOU) and use-after-free hazard involving the vma pointer. In the
previous layout, the mmap_read_lock is taken, the vma is looked up, and
the lock is then dropped before the function goes on to map the PFN.
While an explicit vma = NULL safeguard was present, the vma variable
remained lexically in scope for the remainder of the function.
By isolating the locked region into kvm_s2_fault_get_vma_info(), the vma
pointer becomes a local variable strictly confined to that sub-helper.
Because the pointer's scope ends when the sub-helper returns, the
subsequent page fault logic in kvm_s2_fault_pin_pfn() cannot
accidentally access a vanished VMA, eliminating this bug class by
design.
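The pattern, in miniature (snapshot_vma() is a made-up name used purely
for illustration):

	static int snapshot_vma(struct kvm_s2_fault *fault)
	{
		struct vm_area_struct *vma;	/* visible only inside this helper */

		mmap_read_lock(current->mm);
		vma = vma_lookup(current->mm, fault->hva);
		if (unlikely(!vma)) {
			mmap_read_unlock(current->mm);
			return -EFAULT;
		}
		/* snapshot what is needed; never stash the pointer itself */
		fault->vm_flags = vma->vm_flags;
		mmap_read_unlock(current->mm);
		return 0;
	}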
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 5079a58b65b14..1f2c2200ccd8d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1740,7 +1740,7 @@ struct kvm_s2_fault {
vm_flags_t vm_flags;
};
-static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
+static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
{
struct vm_area_struct *vma;
struct kvm *kvm = fault->vcpu->kvm;
@@ -1774,9 +1774,6 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
fault->is_vma_cacheable = kvm_vma_is_cacheable(vma);
- /* Don't use the VMA after the unlock -- it may have vanished */
- vma = NULL;
-
/*
* Read mmu_invalidate_seq so that KVM can detect if the results of
* vma_lookup() or __kvm_faultin_pfn() become stale prior to
@@ -1788,6 +1785,17 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
fault->mmu_seq = kvm->mmu_invalidate_seq;
mmap_read_unlock(current->mm);
+ return 0;
+}
+
+static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
+{
+ int ret;
+
+ ret = kvm_s2_fault_get_vma_info(fault);
+ if (ret)
+ return ret;
+
fault->pfn = __kvm_faultin_pfn(fault->memslot, fault->gfn,
fault->write_fault ? FOLL_WRITE : 0,
&fault->writable, &fault->page);
--
2.47.3
* [PATCH v2 05/30] KVM: arm64: Extract stage-2 permission logic in user_mem_abort()
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Extract the logic that computes the stage-2 protections and checks for
various permission faults (e.g., execution faults on non-cacheable
memory) into a new helper function, kvm_s2_fault_compute_prot(). This
helper also handles injecting atomic/exclusive faults back into the
guest when necessary.
This refactoring step separates the permission computation from the
mapping logic, making the main fault handler flow clearer.
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 163 +++++++++++++++++++++++--------------------
1 file changed, 87 insertions(+), 76 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1f2c2200ccd8d..d1ffdce18631a 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1809,6 +1809,89 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
return 1;
}
+static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
+{
+ struct kvm *kvm = fault->vcpu->kvm;
+
+ /*
+ * Check if this is non-struct page memory PFN, and cannot support
+ * CMOs. It could potentially be unsafe to access as cacheable.
+ */
+ if (fault->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(fault->pfn)) {
+ if (fault->is_vma_cacheable) {
+ /*
+ * Whilst the VMA owner expects cacheable mapping to this
+ * PFN, hardware also has to support the FWB and CACHE DIC
+ * features.
+ *
+ * ARM64 KVM relies on kernel VA mapping to the PFN to
+ * perform cache maintenance as the CMO instructions work on
+ * virtual addresses. VM_PFNMAP region are not necessarily
+ * mapped to a KVA and hence the presence of hardware features
+ * S2FWB and CACHE DIC are mandatory to avoid the need for
+ * cache maintenance.
+ */
+ if (!kvm_supports_cacheable_pfnmap())
+ return -EFAULT;
+ } else {
+ /*
+ * If the page was identified as device early by looking at
+ * the VMA flags, vma_pagesize is already representing the
+ * largest quantity we can map. If instead it was mapped
+ * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE
+ * and must not be upgraded.
+ *
+ * In both cases, we don't let transparent_hugepage_adjust()
+ * change things at the last minute.
+ */
+ fault->s2_force_noncacheable = true;
+ }
+ } else if (fault->logging_active && !fault->write_fault) {
+ /*
+ * Only actually map the page as writable if this was a write
+ * fault.
+ */
+ fault->writable = false;
+ }
+
+ if (fault->exec_fault && fault->s2_force_noncacheable)
+ return -ENOEXEC;
+
+ /*
+ * Guest performs atomic/exclusive operations on memory with unsupported
+ * attributes (e.g. ld64b/st64b on normal memory when no FEAT_LS64WB)
+ * and trigger the exception here. Since the memslot is valid, inject
+ * the fault back to the guest.
+ */
+ if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(fault->vcpu))) {
+ kvm_inject_dabt_excl_atomic(fault->vcpu, kvm_vcpu_get_hfar(fault->vcpu));
+ return 1;
+ }
+
+ if (fault->nested)
+ adjust_nested_fault_perms(fault->nested, &fault->prot, &fault->writable);
+
+ if (fault->writable)
+ fault->prot |= KVM_PGTABLE_PROT_W;
+
+ if (fault->exec_fault)
+ fault->prot |= KVM_PGTABLE_PROT_X;
+
+ if (fault->s2_force_noncacheable) {
+ if (fault->vfio_allow_any_uc)
+ fault->prot |= KVM_PGTABLE_PROT_NORMAL_NC;
+ else
+ fault->prot |= KVM_PGTABLE_PROT_DEVICE;
+ } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
+ fault->prot |= KVM_PGTABLE_PROT_X;
+ }
+
+ if (fault->nested)
+ adjust_nested_exec_perms(kvm, fault->nested, &fault->prot);
+
+ return 0;
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot, unsigned long hva,
@@ -1863,68 +1946,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
ret = 0;
- /*
- * Check if this is non-struct page memory PFN, and cannot support
- * CMOs. It could potentially be unsafe to access as cacheable.
- */
- if (fault->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(fault->pfn)) {
- if (fault->is_vma_cacheable) {
- /*
- * Whilst the VMA owner expects cacheable mapping to this
- * PFN, hardware also has to support the FWB and CACHE DIC
- * features.
- *
- * ARM64 KVM relies on kernel VA mapping to the PFN to
- * perform cache maintenance as the CMO instructions work on
- * virtual addresses. VM_PFNMAP region are not necessarily
- * mapped to a KVA and hence the presence of hardware features
- * S2FWB and CACHE DIC are mandatory to avoid the need for
- * cache maintenance.
- */
- if (!kvm_supports_cacheable_pfnmap())
- ret = -EFAULT;
- } else {
- /*
- * If the page was identified as device early by looking at
- * the VMA flags, fault->vma_pagesize is already representing the
- * largest quantity we can map. If instead it was mapped
- * via __kvm_faultin_pfn(), fault->vma_pagesize is set to PAGE_SIZE
- * and must not be upgraded.
- *
- * In both cases, we don't let transparent_hugepage_adjust()
- * change things at the last minute.
- */
- fault->s2_force_noncacheable = true;
- }
- } else if (fault->logging_active && !fault->write_fault) {
- /*
- * Only actually map the page as fault->writable if this was a write
- * fault.
- */
- fault->writable = false;
+ ret = kvm_s2_fault_compute_prot(fault);
+ if (ret == 1) {
+ ret = 1; /* fault injected */
+ goto out_put_page;
}
-
- if (fault->exec_fault && fault->s2_force_noncacheable)
- ret = -ENOEXEC;
-
if (ret)
goto out_put_page;
- /*
- * Guest performs atomic/exclusive operations on memory with unsupported
- * attributes (e.g. ld64b/st64b on normal memory when no FEAT_LS64WB)
- * and trigger the exception here. Since the fault->memslot is valid, inject
- * the fault back to the guest.
- */
- if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(fault->vcpu))) {
- kvm_inject_dabt_excl_atomic(fault->vcpu, kvm_vcpu_get_hfar(fault->vcpu));
- ret = 1;
- goto out_put_page;
- }
-
- if (fault->nested)
- adjust_nested_fault_perms(fault->nested, &fault->prot, &fault->writable);
-
kvm_fault_lock(kvm);
pgt = fault->vcpu->arch.hw_mmu->pgt;
if (mmu_invalidate_retry(kvm, fault->mmu_seq)) {
@@ -1961,24 +1990,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
}
- if (fault->writable)
- fault->prot |= KVM_PGTABLE_PROT_W;
-
- if (fault->exec_fault)
- fault->prot |= KVM_PGTABLE_PROT_X;
-
- if (fault->s2_force_noncacheable) {
- if (fault->vfio_allow_any_uc)
- fault->prot |= KVM_PGTABLE_PROT_NORMAL_NC;
- else
- fault->prot |= KVM_PGTABLE_PROT_DEVICE;
- } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
- fault->prot |= KVM_PGTABLE_PROT_X;
- }
-
- if (fault->nested)
- adjust_nested_exec_perms(kvm, fault->nested, &fault->prot);
-
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
* permissions only if fault->vma_pagesize equals fault->fault_granule. Otherwise,
--
2.47.3
* [PATCH v2 06/30] KVM: arm64: Extract page table mapping in user_mem_abort()
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Extract the code responsible for locking the KVM MMU and mapping the PFN
into the stage-2 page tables into a new helper, kvm_s2_fault_map().
This helper manages the kvm_fault_lock, checks for MMU invalidation
retries, attempts to adjust for transparent huge pages (THP), handles
MTE sanitization if needed, and finally maps or relaxes permissions on
the stage-2 entries.
With this change, the main user_mem_abort() function is now a sequential
dispatcher that delegates to specialized helper functions.
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 128 +++++++++++++++++++++++--------------------
1 file changed, 68 insertions(+), 60 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index d1ffdce18631a..164f1160ea33d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1892,68 +1892,13 @@ static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
return 0;
}
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_s2_trans *nested,
- struct kvm_memory_slot *memslot, unsigned long hva,
- bool fault_is_perm)
+static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
{
- int ret = 0;
- struct kvm_s2_fault fault_data = {
- .vcpu = vcpu,
- .fault_ipa = fault_ipa,
- .nested = nested,
- .memslot = memslot,
- .hva = hva,
- .fault_is_perm = fault_is_perm,
- .ipa = fault_ipa,
- .logging_active = memslot_is_logging(memslot),
- .force_pte = memslot_is_logging(memslot),
- .s2_force_noncacheable = false,
- .vfio_allow_any_uc = false,
- .prot = KVM_PGTABLE_PROT_R,
- };
- struct kvm_s2_fault *fault = &fault_data;
- struct kvm *kvm = vcpu->kvm;
- void *memcache;
+ struct kvm *kvm = fault->vcpu->kvm;
struct kvm_pgtable *pgt;
+ int ret;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
- if (fault->fault_is_perm)
- fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
- fault->write_fault = kvm_is_write_fault(fault->vcpu);
- fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
- VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
-
- /*
- * Permission faults just need to update the existing leaf entry,
- * and so normally don't require allocations from the memcache. The
- * only exception to this is when dirty logging is enabled at runtime
- * and a write fault needs to collapse a block entry into a table.
- */
- fault->topup_memcache = !fault->fault_is_perm ||
- (fault->logging_active && fault->write_fault);
- ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
- if (ret)
- return ret;
-
- /*
- * Let's check if we will get back a huge page backed by hugetlbfs, or
- * get block mapping for device MMIO region.
- */
- ret = kvm_s2_fault_pin_pfn(fault);
- if (ret != 1)
- return ret;
-
- ret = 0;
-
- ret = kvm_s2_fault_compute_prot(fault);
- if (ret == 1) {
- ret = 1; /* fault injected */
- goto out_put_page;
- }
- if (ret)
- goto out_put_page;
-
kvm_fault_lock(kvm);
pgt = fault->vcpu->arch.hw_mmu->pgt;
if (mmu_invalidate_retry(kvm, fault->mmu_seq)) {
@@ -2001,8 +1946,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* PTE, which will be preserved.
*/
fault->prot &= ~KVM_NV_GUEST_MAP_SZ;
- ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, fault->fault_ipa, fault->prot,
- flags);
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, fault->fault_ipa,
+ fault->prot, flags);
} else {
ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault->fault_ipa, fault->vma_pagesize,
__pfn_to_phys(fault->pfn), fault->prot,
@@ -2018,6 +1963,69 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
mark_page_dirty_in_slot(kvm, fault->memslot, fault->gfn);
return ret != -EAGAIN ? ret : 0;
+}
+
+static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ struct kvm_s2_trans *nested,
+ struct kvm_memory_slot *memslot, unsigned long hva,
+ bool fault_is_perm)
+{
+ int ret = 0;
+ struct kvm_s2_fault fault_data = {
+ .vcpu = vcpu,
+ .fault_ipa = fault_ipa,
+ .nested = nested,
+ .memslot = memslot,
+ .hva = hva,
+ .fault_is_perm = fault_is_perm,
+ .ipa = fault_ipa,
+ .logging_active = memslot_is_logging(memslot),
+ .force_pte = memslot_is_logging(memslot),
+ .s2_force_noncacheable = false,
+ .vfio_allow_any_uc = false,
+ .prot = KVM_PGTABLE_PROT_R,
+ };
+ struct kvm_s2_fault *fault = &fault_data;
+ void *memcache;
+
+ if (fault->fault_is_perm)
+ fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
+ fault->write_fault = kvm_is_write_fault(fault->vcpu);
+ fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
+ VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
+
+ /*
+ * Permission faults just need to update the existing leaf entry,
+ * and so normally don't require allocations from the memcache. The
+ * only exception to this is when dirty logging is enabled at runtime
+ * and a write fault needs to collapse a block entry into a table.
+ */
+ fault->topup_memcache = !fault->fault_is_perm ||
+ (fault->logging_active && fault->write_fault);
+ ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
+ if (ret)
+ return ret;
+
+ /*
+ * Let's check if we will get back a huge page backed by hugetlbfs, or
+ * get block mapping for device MMIO region.
+ */
+ ret = kvm_s2_fault_pin_pfn(fault);
+ if (ret != 1)
+ return ret;
+
+ ret = 0;
+
+ ret = kvm_s2_fault_compute_prot(fault);
+ if (ret == 1) {
+ ret = 1; /* fault injected */
+ goto out_put_page;
+ }
+ if (ret)
+ goto out_put_page;
+
+ ret = kvm_s2_fault_map(fault, memcache);
+ return ret;
out_put_page:
kvm_release_page_unused(fault->page);
--
2.47.3
* [PATCH v2 07/30] KVM: arm64: Simplify nested VMA shift calculation
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
In the kvm_s2_resolve_vma_size() helper, the local variable vma_pagesize
is computed from vma_shift only so that it can be clamped to
max_map_size and then converted back to a shift via __ffs().
Because vma_pagesize and max_map_size are both powers of two, we can
simplify the logic by omitting vma_pagesize entirely and bounding the
vma_shift directly using the shift of max_map_size. This achieves the
same result while keeping the size-to-shift conversion out of the helper
logic.
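Concretely, this relies on two identities that hold for powers of two
(non-negative shifts a, b, s):

	min(1UL << a, 1UL << b) == 1UL << min(a, b)
	__ffs(1UL << s) == s

so clamping vma_shift to __ffs(max_map_size) yields exactly the same
mapping size as clamping vma_pagesize to max_map_size.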
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 164f1160ea33d..5572b127f8663 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1646,7 +1646,6 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
bool *force_pte, phys_addr_t *ipa)
{
short vma_shift;
- long vma_pagesize;
if (*force_pte)
vma_shift = PAGE_SHIFT;
@@ -1677,8 +1676,6 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
}
- vma_pagesize = 1UL << vma_shift;
-
if (nested) {
unsigned long max_map_size;
@@ -1703,8 +1700,7 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
max_map_size = PAGE_SIZE;
*force_pte = (max_map_size == PAGE_SIZE);
- vma_pagesize = min_t(long, vma_pagesize, max_map_size);
- vma_shift = __ffs(vma_pagesize);
+ vma_shift = min_t(short, vma_shift, __ffs(max_map_size));
}
return vma_shift;
--
2.47.3
* [PATCH v2 08/30] KVM: arm64: Remove redundant state variables from struct kvm_s2_fault
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Remove redundant variables vma_shift and vfio_allow_any_uc from struct
kvm_s2_fault as they are easily derived or checked when needed.
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 5572b127f8663..1b991300735be 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1721,10 +1721,8 @@ struct kvm_s2_fault {
bool mte_allowed;
bool is_vma_cacheable;
bool s2_force_noncacheable;
- bool vfio_allow_any_uc;
unsigned long mmu_seq;
phys_addr_t ipa;
- short vma_shift;
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active;
@@ -1749,9 +1747,9 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
return -EFAULT;
}
- fault->vma_shift = kvm_s2_resolve_vma_size(vma, fault->hva, fault->memslot, fault->nested,
- &fault->force_pte, &fault->ipa);
- fault->vma_pagesize = 1UL << fault->vma_shift;
+ fault->vma_pagesize = 1UL << kvm_s2_resolve_vma_size(vma, fault->hva, fault->memslot,
+ fault->nested, &fault->force_pte,
+ &fault->ipa);
/*
* Both the canonical IPA and fault IPA must be aligned to the
@@ -1764,8 +1762,6 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
fault->gfn = fault->ipa >> PAGE_SHIFT;
fault->mte_allowed = kvm_vma_mte_allowed(vma);
- fault->vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
-
fault->vm_flags = vma->vm_flags;
fault->is_vma_cacheable = kvm_vma_is_cacheable(vma);
@@ -1796,7 +1792,7 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
fault->write_fault ? FOLL_WRITE : 0,
&fault->writable, &fault->page);
if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
- kvm_send_hwpoison_signal(fault->hva, fault->vma_shift);
+ kvm_send_hwpoison_signal(fault->hva, __ffs(fault->vma_pagesize));
return 0;
}
if (is_error_noslot_pfn(fault->pfn))
@@ -1874,7 +1870,7 @@ static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
fault->prot |= KVM_PGTABLE_PROT_X;
if (fault->s2_force_noncacheable) {
- if (fault->vfio_allow_any_uc)
+ if (fault->vm_flags & VM_ALLOW_ANY_UNCACHED)
fault->prot |= KVM_PGTABLE_PROT_NORMAL_NC;
else
fault->prot |= KVM_PGTABLE_PROT_DEVICE;
@@ -1978,7 +1974,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
.logging_active = memslot_is_logging(memslot),
.force_pte = memslot_is_logging(memslot),
.s2_force_noncacheable = false,
- .vfio_allow_any_uc = false,
.prot = KVM_PGTABLE_PROT_R,
};
struct kvm_s2_fault *fault = &fault_data;
--
2.47.3
* [PATCH v2 09/30] KVM: arm64: Simplify return logic in user_mem_abort()
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
With the refactoring done, the final return block of user_mem_abort()
can be tidied up a bit more.
Clean up the trailing edge by dropping the unnecessary assignment,
collapsing the return evaluation for kvm_s2_fault_compute_prot(), and
tail calling kvm_s2_fault_map() directly.
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 17 ++++-------------
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1b991300735be..e77b0b60697f6 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2005,22 +2005,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (ret != 1)
return ret;
- ret = 0;
-
ret = kvm_s2_fault_compute_prot(fault);
- if (ret == 1) {
- ret = 1; /* fault injected */
- goto out_put_page;
+ if (ret) {
+ kvm_release_page_unused(fault->page);
+ return ret;
}
- if (ret)
- goto out_put_page;
- ret = kvm_s2_fault_map(fault, memcache);
- return ret;
-
-out_put_page:
- kvm_release_page_unused(fault->page);
- return ret;
+ return kvm_s2_fault_map(fault, memcache);
}
/* Resolve the access fault by making the page young again. */
--
2.47.3
* [PATCH v2 10/30] KVM: arm64: Initialize struct kvm_s2_fault completely at declaration
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Simplify the initialization of struct kvm_s2_fault in user_mem_abort().
Instead of partially initializing the struct via designated initializers
and then sequentially assigning the remaining fields (like write_fault
and topup_memcache) further down the function, evaluate those
dependencies upfront.
This allows the entire struct to be fully initialized at declaration. It
also eliminates the need for the intermediate fault_data variable and
its associated fault pointer, reducing boilerplate.
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 34 ++++++++++++++++------------------
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index e77b0b60697f6..2b85daaa4426b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1962,8 +1962,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
bool fault_is_perm)
{
- int ret = 0;
- struct kvm_s2_fault fault_data = {
+ bool write_fault = kvm_is_write_fault(vcpu);
+ bool logging_active = memslot_is_logging(memslot);
+ struct kvm_s2_fault fault = {
.vcpu = vcpu,
.fault_ipa = fault_ipa,
.nested = nested,
@@ -1971,19 +1972,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
.hva = hva,
.fault_is_perm = fault_is_perm,
.ipa = fault_ipa,
- .logging_active = memslot_is_logging(memslot),
- .force_pte = memslot_is_logging(memslot),
- .s2_force_noncacheable = false,
+ .logging_active = logging_active,
+ .force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
+ .fault_granule = fault_is_perm ? kvm_vcpu_trap_get_perm_fault_granule(vcpu) : 0,
+ .write_fault = write_fault,
+ .exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu),
+ .topup_memcache = !fault_is_perm || (logging_active && write_fault),
};
- struct kvm_s2_fault *fault = &fault_data;
void *memcache;
+ int ret;
- if (fault->fault_is_perm)
- fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
- fault->write_fault = kvm_is_write_fault(fault->vcpu);
- fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
- VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
+ VM_WARN_ON_ONCE(fault.write_fault && fault.exec_fault);
/*
* Permission faults just need to update the existing leaf entry,
@@ -1991,9 +1991,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* only exception to this is when dirty logging is enabled at runtime
* and a write fault needs to collapse a block entry into a table.
*/
- fault->topup_memcache = !fault->fault_is_perm ||
- (fault->logging_active && fault->write_fault);
- ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
+ ret = prepare_mmu_memcache(vcpu, fault.topup_memcache, &memcache);
if (ret)
return ret;
@@ -2001,17 +1999,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* Let's check if we will get back a huge page backed by hugetlbfs, or
* get block mapping for device MMIO region.
*/
- ret = kvm_s2_fault_pin_pfn(fault);
+ ret = kvm_s2_fault_pin_pfn(&fault);
if (ret != 1)
return ret;
- ret = kvm_s2_fault_compute_prot(fault);
+ ret = kvm_s2_fault_compute_prot(&fault);
if (ret) {
- kvm_release_page_unused(fault->page);
+ kvm_release_page_unused(fault.page);
return ret;
}
- return kvm_s2_fault_map(fault, memcache);
+ return kvm_s2_fault_map(&fault, memcache);
}
/* Resolve the access fault by making the page young again. */
--
2.47.3
* [PATCH v2 11/30] KVM: arm64: Optimize early exit checks in kvm_s2_fault_pin_pfn()
From: Marc Zyngier @ 2026-03-27 11:35 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Optimize the early exit checks in kvm_s2_fault_pin_pfn() by grouping
all error handling under the generic is_error_noslot_pfn() check,
marked unlikely(), avoiding unnecessary branches in the hot path.
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 2b85daaa4426b..0c71e3a9af8b0 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1791,12 +1791,13 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
fault->pfn = __kvm_faultin_pfn(fault->memslot, fault->gfn,
fault->write_fault ? FOLL_WRITE : 0,
&fault->writable, &fault->page);
- if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
- kvm_send_hwpoison_signal(fault->hva, __ffs(fault->vma_pagesize));
- return 0;
- }
- if (is_error_noslot_pfn(fault->pfn))
+ if (unlikely(is_error_noslot_pfn(fault->pfn))) {
+ if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
+ kvm_send_hwpoison_signal(fault->hva, __ffs(fault->vma_pagesize));
+ return 0;
+ }
return -EFAULT;
+ }
return 1;
}
--
2.47.3
* [PATCH v2 12/30] KVM: arm64: Hoist MTE validation check out of MMU lock path
From: Marc Zyngier @ 2026-03-27 11:36 UTC
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Simplify the non-cacheable attributes assignment by using a ternary
operator. Additionally, hoist the MTE validation check (mte_allowed) out
of kvm_s2_fault_map() and into kvm_s2_fault_compute_prot(). This allows
us to fail faster and avoid acquiring the KVM MMU lock unnecessarily
when the VMM introduces a disallowed VMA for an MTE-enabled guest.
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 28 ++++++++++++----------------
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0c71e3a9af8b0..ee2a548999b1b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1870,18 +1870,21 @@ static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
if (fault->exec_fault)
fault->prot |= KVM_PGTABLE_PROT_X;
- if (fault->s2_force_noncacheable) {
- if (fault->vm_flags & VM_ALLOW_ANY_UNCACHED)
- fault->prot |= KVM_PGTABLE_PROT_NORMAL_NC;
- else
- fault->prot |= KVM_PGTABLE_PROT_DEVICE;
- } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
+ if (fault->s2_force_noncacheable)
+ fault->prot |= (fault->vm_flags & VM_ALLOW_ANY_UNCACHED) ?
+ KVM_PGTABLE_PROT_NORMAL_NC : KVM_PGTABLE_PROT_DEVICE;
+ else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
fault->prot |= KVM_PGTABLE_PROT_X;
- }
if (fault->nested)
adjust_nested_exec_perms(kvm, fault->nested, &fault->prot);
+ if (!fault->fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
+ /* Check the VMM hasn't introduced a new disallowed VMA */
+ if (!fault->mte_allowed)
+ return -EFAULT;
+ }
+
return 0;
}
@@ -1918,15 +1921,8 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
}
}
- if (!fault->fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
- /* Check the VMM hasn't introduced a new disallowed VMA */
- if (fault->mte_allowed) {
- sanitise_mte_tags(kvm, fault->pfn, fault->vma_pagesize);
- } else {
- ret = -EFAULT;
- goto out_unlock;
- }
- }
+ if (!fault->fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
+ sanitise_mte_tags(kvm, fault->pfn, fault->vma_pagesize);
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 13/30] KVM: arm64: Clean up control flow in kvm_s2_fault_map()
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (11 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 12/30] KVM: arm64: Hoist MTE validation check out of MMU lock path Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 14/30] KVM: arm64: Kill fault->ipa Marc Zyngier
` (16 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
From: Fuad Tabba <tabba@google.com>
Clean up the KVM MMU lock retry loop by pre-assigning the error code.
Add braces around the THP adjustment branches for readability, and move
the transparent_hugepage_adjust() error check into the only branch that
can produce a negative size. Finally, replace the ternary return with
an explicit conditional.
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index ee2a548999b1b..c6cd6ce5254be 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1897,10 +1897,9 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
kvm_fault_lock(kvm);
pgt = fault->vcpu->arch.hw_mmu->pgt;
- if (mmu_invalidate_retry(kvm, fault->mmu_seq)) {
- ret = -EAGAIN;
+ ret = -EAGAIN;
+ if (mmu_invalidate_retry(kvm, fault->mmu_seq))
goto out_unlock;
- }
/*
* If we are not forced to use page mapping, check if we are
@@ -1908,16 +1907,17 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
*/
if (fault->vma_pagesize == PAGE_SIZE &&
!(fault->force_pte || fault->s2_force_noncacheable)) {
- if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE)
+ if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE) {
fault->vma_pagesize = fault->fault_granule;
- else
+ } else {
fault->vma_pagesize = transparent_hugepage_adjust(kvm, fault->memslot,
fault->hva, &fault->pfn,
&fault->fault_ipa);
- if (fault->vma_pagesize < 0) {
- ret = fault->vma_pagesize;
- goto out_unlock;
+ if (fault->vma_pagesize < 0) {
+ ret = fault->vma_pagesize;
+ goto out_unlock;
+ }
}
}
@@ -1951,7 +1951,9 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
if (fault->writable && !ret)
mark_page_dirty_in_slot(kvm, fault->memslot, fault->gfn);
- return ret != -EAGAIN ? ret : 0;
+ if (ret != -EAGAIN)
+ return ret;
+ return 0;
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 14/30] KVM: arm64: Kill fault->ipa
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (12 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 13/30] KVM: arm64: Clean up control flow in kvm_s2_fault_map() Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 15/30] KVM: arm64: Make fault_ipa immutable Marc Zyngier
` (15 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
fault->ipa, in a nested context, represents the output of the guest's
S2 translation for the fault->fault_ipa input, and is equal to
fault->fault_ipa otherwise.
Given that this is readily available from kvm_s2_trans_output(),
drop fault->ipa and directly compute fault->gfn instead, which
is really what we want.
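For illustration, the derivation in isolation (a stand-alone sketch
with toy types; resolve_gfn() is a made-up name, not part of the
patch):

	typedef unsigned long long phys_addr_t;
	typedef unsigned long long gfn_t;

	#define PAGE_SHIFT		12
	#define ALIGN_DOWN(x, a)	((x) & ~((phys_addr_t)(a) - 1))

	static gfn_t resolve_gfn(phys_addr_t fault_ipa, phys_addr_t s2_output,
				 int nested, unsigned long vma_pagesize)
	{
		phys_addr_t ipa = nested ? s2_output : fault_ipa;

		return ALIGN_DOWN(ipa, vma_pagesize) >> PAGE_SHIFT;
	}

A nested fault with a guest S2 output of 0x88201000 and a 2MiB mapping
yields gfn 0x88200, whatever the shadow fault_ipa was.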
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c6cd6ce5254be..67e5e867e01dc 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1643,7 +1643,7 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
unsigned long hva,
struct kvm_memory_slot *memslot,
struct kvm_s2_trans *nested,
- bool *force_pte, phys_addr_t *ipa)
+ bool *force_pte)
{
short vma_shift;
@@ -1681,8 +1681,6 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
max_map_size = *force_pte ? PAGE_SIZE : PUD_SIZE;
- *ipa = kvm_s2_trans_output(nested);
-
/*
* If we're about to create a shadow stage 2 entry, then we
* can only create a block mapping if the guest stage 2 page
@@ -1722,7 +1720,6 @@ struct kvm_s2_fault {
bool is_vma_cacheable;
bool s2_force_noncacheable;
unsigned long mmu_seq;
- phys_addr_t ipa;
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active;
@@ -1738,6 +1735,7 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
{
struct vm_area_struct *vma;
struct kvm *kvm = fault->vcpu->kvm;
+ phys_addr_t ipa;
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, fault->hva);
@@ -1748,8 +1746,7 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
}
fault->vma_pagesize = 1UL << kvm_s2_resolve_vma_size(vma, fault->hva, fault->memslot,
- fault->nested, &fault->force_pte,
- &fault->ipa);
+ fault->nested, &fault->force_pte);
/*
* Both the canonical IPA and fault IPA must be aligned to the
@@ -1757,9 +1754,9 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
* mapping in the right place.
*/
fault->fault_ipa = ALIGN_DOWN(fault->fault_ipa, fault->vma_pagesize);
- fault->ipa = ALIGN_DOWN(fault->ipa, fault->vma_pagesize);
+ ipa = fault->nested ? kvm_s2_trans_output(fault->nested) : fault->fault_ipa;
+ fault->gfn = ALIGN_DOWN(ipa, fault->vma_pagesize) >> PAGE_SHIFT;
- fault->gfn = fault->ipa >> PAGE_SHIFT;
fault->mte_allowed = kvm_vma_mte_allowed(vma);
fault->vm_flags = vma->vm_flags;
@@ -1970,7 +1967,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
.memslot = memslot,
.hva = hva,
.fault_is_perm = fault_is_perm,
- .ipa = fault_ipa,
.logging_active = logging_active,
.force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 15/30] KVM: arm64: Make fault_ipa immutable
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (13 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 14/30] KVM: arm64: Kill fault->ipa Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 16/30] KVM: arm64: Move fault context to const structure Marc Zyngier
` (14 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
Updating fault_ipa is conceptually annoying, as it changes something
that is a property of the fault itself.
Stop doing so and instead use fault->gfn as the sole piece of state
that can be used to represent the faulting IPA.
At the same time, introduce get_canonical_gfn() for the couple of cases
where we are concerned with the memslot-related IPA and not the faulting
one.
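The distinction, condensed from the hunks below (nested case assumed):

	/* shadow S2 side: what we map, derived from the faulting IPA */
	fault->gfn = ALIGN_DOWN(fault->fault_ipa, fault->vma_pagesize) >> PAGE_SHIFT;

	/* memslot side: pfn lookup and dirty tracking want the guest S2 output */
	gfn_t canonical = get_canonical_gfn(fault);

so __kvm_faultin_pfn() and mark_page_dirty_in_slot() now go through
get_canonical_gfn(), while the S2 mapping itself keeps using fault->gfn.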
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 38 ++++++++++++++++++++++++++------------
1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 67e5e867e01dc..496bf5903ed3d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1400,10 +1400,10 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
*/
static long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long hva, kvm_pfn_t *pfnp,
- phys_addr_t *ipap)
+ unsigned long hva, kvm_pfn_t *pfnp, gfn_t *gfnp)
{
kvm_pfn_t pfn = *pfnp;
+ gfn_t gfn = *gfnp;
/*
* Make sure the adjustment is done only for THP pages. Also make
@@ -1419,7 +1419,8 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (sz < PMD_SIZE)
return PAGE_SIZE;
- *ipap &= PMD_MASK;
+ gfn &= ~(PTRS_PER_PMD - 1);
+ *gfnp = gfn;
pfn &= ~(PTRS_PER_PMD - 1);
*pfnp = pfn;
@@ -1735,7 +1736,6 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
{
struct vm_area_struct *vma;
struct kvm *kvm = fault->vcpu->kvm;
- phys_addr_t ipa;
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, fault->hva);
@@ -1753,9 +1753,7 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
* mapping size to ensure we find the right PFN and lay down the
* mapping in the right place.
*/
- fault->fault_ipa = ALIGN_DOWN(fault->fault_ipa, fault->vma_pagesize);
- ipa = fault->nested ? kvm_s2_trans_output(fault->nested) : fault->fault_ipa;
- fault->gfn = ALIGN_DOWN(ipa, fault->vma_pagesize) >> PAGE_SHIFT;
+ fault->gfn = ALIGN_DOWN(fault->fault_ipa, fault->vma_pagesize) >> PAGE_SHIFT;
fault->mte_allowed = kvm_vma_mte_allowed(vma);
@@ -1777,6 +1775,17 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
return 0;
}
+static gfn_t get_canonical_gfn(struct kvm_s2_fault *fault)
+{
+ phys_addr_t ipa;
+
+ if (!fault->nested)
+ return fault->gfn;
+
+ ipa = kvm_s2_trans_output(fault->nested);
+ return ALIGN_DOWN(ipa, fault->vma_pagesize) >> PAGE_SHIFT;
+}
+
static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
{
int ret;
@@ -1785,7 +1794,7 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
if (ret)
return ret;
- fault->pfn = __kvm_faultin_pfn(fault->memslot, fault->gfn,
+ fault->pfn = __kvm_faultin_pfn(fault->memslot, get_canonical_gfn(fault),
fault->write_fault ? FOLL_WRITE : 0,
&fault->writable, &fault->page);
if (unlikely(is_error_noslot_pfn(fault->pfn))) {
@@ -1885,6 +1894,11 @@ static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
return 0;
}
+static phys_addr_t get_ipa(const struct kvm_s2_fault *fault)
+{
+ return gfn_to_gpa(fault->gfn);
+}
+
static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
{
struct kvm *kvm = fault->vcpu->kvm;
@@ -1909,7 +1923,7 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
} else {
fault->vma_pagesize = transparent_hugepage_adjust(kvm, fault->memslot,
fault->hva, &fault->pfn,
- &fault->fault_ipa);
+ &fault->gfn);
if (fault->vma_pagesize < 0) {
ret = fault->vma_pagesize;
@@ -1932,10 +1946,10 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
* PTE, which will be preserved.
*/
fault->prot &= ~KVM_NV_GUEST_MAP_SZ;
- ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, fault->fault_ipa,
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, get_ipa(fault),
fault->prot, flags);
} else {
- ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault->fault_ipa, fault->vma_pagesize,
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, get_ipa(fault), fault->vma_pagesize,
__pfn_to_phys(fault->pfn), fault->prot,
memcache, flags);
}
@@ -1946,7 +1960,7 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
/* Mark the page dirty only if the fault is handled successfully */
if (fault->writable && !ret)
- mark_page_dirty_in_slot(kvm, fault->memslot, fault->gfn);
+ mark_page_dirty_in_slot(kvm, fault->memslot, get_canonical_gfn(fault));
if (ret != -EAGAIN)
return ret;
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 16/30] KVM: arm64: Move fault context to const structure
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (14 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 15/30] KVM: arm64: Make fault_ipa immutable Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 17/30] KVM: arm64: Replace fault_is_perm with a helper Marc Zyngier
` (13 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
In order to make it clearer what gets updated or not during fault
handling, move the set of information that loosely represents the
fault context into a new kvm_s2_fault_desc structure.
This gets populated early, from kvm_handle_guest_abort(), and gets
passed along as a const pointer. user_mem_abort()'s signature is
greatly simplified in the process, and kvm_s2_fault loses a bunch of
fields.
gmem_abort() will get a similar treatment down the line.
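The new calling convention, condensed from the hunks below: the context
is built once in the abort handler and only ever read afterwards.

	const struct kvm_s2_fault_desc s2fd = {
		.vcpu		= vcpu,
		.fault_ipa	= fault_ipa,
		.nested		= nested,
		.memslot	= memslot,
		.hva		= hva,
	};

	ret = user_mem_abort(&s2fd);

Since every helper takes a const pointer, the compiler now enforces
that none of them can mutate the fault context behind our back.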
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 133 ++++++++++++++++++++++---------------------
1 file changed, 69 insertions(+), 64 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 496bf5903ed3d..09e32f08028e4 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1565,6 +1565,14 @@ static void adjust_nested_exec_perms(struct kvm *kvm,
*prot &= ~KVM_PGTABLE_PROT_PX;
}
+struct kvm_s2_fault_desc {
+ struct kvm_vcpu *vcpu;
+ phys_addr_t fault_ipa;
+ struct kvm_s2_trans *nested;
+ struct kvm_memory_slot *memslot;
+ unsigned long hva;
+};
+
static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot, bool is_perm)
@@ -1640,23 +1648,20 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret != -EAGAIN ? ret : 0;
}
-static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
- unsigned long hva,
- struct kvm_memory_slot *memslot,
- struct kvm_s2_trans *nested,
- bool *force_pte)
+static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
+ struct vm_area_struct *vma, bool *force_pte)
{
short vma_shift;
if (*force_pte)
vma_shift = PAGE_SHIFT;
else
- vma_shift = get_vma_page_shift(vma, hva);
+ vma_shift = get_vma_page_shift(vma, s2fd->hva);
switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SHIFT:
- if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+ if (fault_supports_stage2_huge_mapping(s2fd->memslot, s2fd->hva, PUD_SIZE))
break;
fallthrough;
#endif
@@ -1664,7 +1669,7 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
vma_shift = PMD_SHIFT;
fallthrough;
case PMD_SHIFT:
- if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
+ if (fault_supports_stage2_huge_mapping(s2fd->memslot, s2fd->hva, PMD_SIZE))
break;
fallthrough;
case CONT_PTE_SHIFT:
@@ -1677,7 +1682,7 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
}
- if (nested) {
+ if (s2fd->nested) {
unsigned long max_map_size;
max_map_size = *force_pte ? PAGE_SIZE : PUD_SIZE;
@@ -1687,7 +1692,7 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
* can only create a block mapping if the guest stage 2 page
* table uses at least as big a mapping.
*/
- max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
+ max_map_size = min(kvm_s2_trans_size(s2fd->nested), max_map_size);
/*
* Be careful that if the mapping size falls between
@@ -1706,11 +1711,6 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
}
struct kvm_s2_fault {
- struct kvm_vcpu *vcpu;
- phys_addr_t fault_ipa;
- struct kvm_s2_trans *nested;
- struct kvm_memory_slot *memslot;
- unsigned long hva;
bool fault_is_perm;
bool write_fault;
@@ -1732,28 +1732,28 @@ struct kvm_s2_fault {
vm_flags_t vm_flags;
};
-static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
+static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
+ struct kvm_s2_fault *fault)
{
struct vm_area_struct *vma;
- struct kvm *kvm = fault->vcpu->kvm;
+ struct kvm *kvm = s2fd->vcpu->kvm;
mmap_read_lock(current->mm);
- vma = vma_lookup(current->mm, fault->hva);
+ vma = vma_lookup(current->mm, s2fd->hva);
if (unlikely(!vma)) {
- kvm_err("Failed to find VMA for fault->hva 0x%lx\n", fault->hva);
+ kvm_err("Failed to find VMA for hva 0x%lx\n", s2fd->hva);
mmap_read_unlock(current->mm);
return -EFAULT;
}
- fault->vma_pagesize = 1UL << kvm_s2_resolve_vma_size(vma, fault->hva, fault->memslot,
- fault->nested, &fault->force_pte);
+ fault->vma_pagesize = BIT(kvm_s2_resolve_vma_size(s2fd, vma, &fault->force_pte));
/*
* Both the canonical IPA and fault IPA must be aligned to the
* mapping size to ensure we find the right PFN and lay down the
* mapping in the right place.
*/
- fault->gfn = ALIGN_DOWN(fault->fault_ipa, fault->vma_pagesize) >> PAGE_SHIFT;
+ fault->gfn = ALIGN_DOWN(s2fd->fault_ipa, fault->vma_pagesize) >> PAGE_SHIFT;
fault->mte_allowed = kvm_vma_mte_allowed(vma);
@@ -1775,31 +1775,33 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
return 0;
}
-static gfn_t get_canonical_gfn(struct kvm_s2_fault *fault)
+static gfn_t get_canonical_gfn(const struct kvm_s2_fault_desc *s2fd,
+ const struct kvm_s2_fault *fault)
{
phys_addr_t ipa;
- if (!fault->nested)
+ if (!s2fd->nested)
return fault->gfn;
- ipa = kvm_s2_trans_output(fault->nested);
+ ipa = kvm_s2_trans_output(s2fd->nested);
return ALIGN_DOWN(ipa, fault->vma_pagesize) >> PAGE_SHIFT;
}
-static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
+static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
+ struct kvm_s2_fault *fault)
{
int ret;
- ret = kvm_s2_fault_get_vma_info(fault);
+ ret = kvm_s2_fault_get_vma_info(s2fd, fault);
if (ret)
return ret;
- fault->pfn = __kvm_faultin_pfn(fault->memslot, get_canonical_gfn(fault),
+ fault->pfn = __kvm_faultin_pfn(s2fd->memslot, get_canonical_gfn(s2fd, fault),
fault->write_fault ? FOLL_WRITE : 0,
&fault->writable, &fault->page);
if (unlikely(is_error_noslot_pfn(fault->pfn))) {
if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
- kvm_send_hwpoison_signal(fault->hva, __ffs(fault->vma_pagesize));
+ kvm_send_hwpoison_signal(s2fd->hva, __ffs(fault->vma_pagesize));
return 0;
}
return -EFAULT;
@@ -1808,9 +1810,10 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
return 1;
}
-static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
+static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
+ struct kvm_s2_fault *fault)
{
- struct kvm *kvm = fault->vcpu->kvm;
+ struct kvm *kvm = s2fd->vcpu->kvm;
/*
* Check if this is non-struct page memory PFN, and cannot support
@@ -1862,13 +1865,13 @@ static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
* and trigger the exception here. Since the memslot is valid, inject
* the fault back to the guest.
*/
- if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(fault->vcpu))) {
- kvm_inject_dabt_excl_atomic(fault->vcpu, kvm_vcpu_get_hfar(fault->vcpu));
+ if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(s2fd->vcpu))) {
+ kvm_inject_dabt_excl_atomic(s2fd->vcpu, kvm_vcpu_get_hfar(s2fd->vcpu));
return 1;
}
- if (fault->nested)
- adjust_nested_fault_perms(fault->nested, &fault->prot, &fault->writable);
+ if (s2fd->nested)
+ adjust_nested_fault_perms(s2fd->nested, &fault->prot, &fault->writable);
if (fault->writable)
fault->prot |= KVM_PGTABLE_PROT_W;
@@ -1882,8 +1885,8 @@ static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
fault->prot |= KVM_PGTABLE_PROT_X;
- if (fault->nested)
- adjust_nested_exec_perms(kvm, fault->nested, &fault->prot);
+ if (s2fd->nested)
+ adjust_nested_exec_perms(kvm, s2fd->nested, &fault->prot);
if (!fault->fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
@@ -1899,15 +1902,16 @@ static phys_addr_t get_ipa(const struct kvm_s2_fault *fault)
return gfn_to_gpa(fault->gfn);
}
-static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
+static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
+ struct kvm_s2_fault *fault, void *memcache)
{
- struct kvm *kvm = fault->vcpu->kvm;
+ struct kvm *kvm = s2fd->vcpu->kvm;
struct kvm_pgtable *pgt;
int ret;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
kvm_fault_lock(kvm);
- pgt = fault->vcpu->arch.hw_mmu->pgt;
+ pgt = s2fd->vcpu->arch.hw_mmu->pgt;
ret = -EAGAIN;
if (mmu_invalidate_retry(kvm, fault->mmu_seq))
goto out_unlock;
@@ -1921,8 +1925,8 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE) {
fault->vma_pagesize = fault->fault_granule;
} else {
- fault->vma_pagesize = transparent_hugepage_adjust(kvm, fault->memslot,
- fault->hva, &fault->pfn,
+ fault->vma_pagesize = transparent_hugepage_adjust(kvm, s2fd->memslot,
+ s2fd->hva, &fault->pfn,
&fault->gfn);
if (fault->vma_pagesize < 0) {
@@ -1960,34 +1964,27 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
/* Mark the page dirty only if the fault is handled successfully */
if (fault->writable && !ret)
- mark_page_dirty_in_slot(kvm, fault->memslot, get_canonical_gfn(fault));
+ mark_page_dirty_in_slot(kvm, s2fd->memslot, get_canonical_gfn(s2fd, fault));
if (ret != -EAGAIN)
return ret;
return 0;
}
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_s2_trans *nested,
- struct kvm_memory_slot *memslot, unsigned long hva,
- bool fault_is_perm)
+static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
{
- bool write_fault = kvm_is_write_fault(vcpu);
- bool logging_active = memslot_is_logging(memslot);
+ bool perm_fault = kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
+ bool write_fault = kvm_is_write_fault(s2fd->vcpu);
+ bool logging_active = memslot_is_logging(s2fd->memslot);
struct kvm_s2_fault fault = {
- .vcpu = vcpu,
- .fault_ipa = fault_ipa,
- .nested = nested,
- .memslot = memslot,
- .hva = hva,
- .fault_is_perm = fault_is_perm,
+ .fault_is_perm = perm_fault,
.logging_active = logging_active,
.force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
- .fault_granule = fault_is_perm ? kvm_vcpu_trap_get_perm_fault_granule(vcpu) : 0,
+ .fault_granule = perm_fault ? kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0,
.write_fault = write_fault,
- .exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu),
- .topup_memcache = !fault_is_perm || (logging_active && write_fault),
+ .exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu),
+ .topup_memcache = !perm_fault || (logging_active && write_fault),
};
void *memcache;
int ret;
@@ -2000,7 +1997,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* only exception to this is when dirty logging is enabled at runtime
* and a write fault needs to collapse a block entry into a table.
*/
- ret = prepare_mmu_memcache(vcpu, fault.topup_memcache, &memcache);
+ ret = prepare_mmu_memcache(s2fd->vcpu, fault.topup_memcache, &memcache);
if (ret)
return ret;
@@ -2008,17 +2005,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* Let's check if we will get back a huge page backed by hugetlbfs, or
* get block mapping for device MMIO region.
*/
- ret = kvm_s2_fault_pin_pfn(&fault);
+ ret = kvm_s2_fault_pin_pfn(s2fd, &fault);
if (ret != 1)
return ret;
- ret = kvm_s2_fault_compute_prot(&fault);
+ ret = kvm_s2_fault_compute_prot(s2fd, &fault);
if (ret) {
kvm_release_page_unused(fault.page);
return ret;
}
- return kvm_s2_fault_map(&fault, memcache);
+ return kvm_s2_fault_map(s2fd, &fault, memcache);
}
/* Resolve the access fault by making the page young again. */
@@ -2284,12 +2281,20 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
VM_WARN_ON_ONCE(kvm_vcpu_trap_is_permission_fault(vcpu) &&
!write_fault && !kvm_vcpu_trap_is_exec_fault(vcpu));
+ const struct kvm_s2_fault_desc s2fd = {
+ .vcpu = vcpu,
+ .fault_ipa = fault_ipa,
+ .nested = nested,
+ .memslot = memslot,
+ .hva = hva,
+ };
+
if (kvm_slot_has_gmem(memslot))
ret = gmem_abort(vcpu, fault_ipa, nested, memslot,
esr_fsc_is_permission_fault(esr));
else
- ret = user_mem_abort(vcpu, fault_ipa, nested, memslot, hva,
- esr_fsc_is_permission_fault(esr));
+ ret = user_mem_abort(&s2fd);
+
if (ret == 0)
ret = 1;
out:
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 17/30] KVM: arm64: Replace fault_is_perm with a helper
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (15 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 16/30] KVM: arm64: Move fault context to const structure Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 18/30] KVM: arm64: Constrain fault_granule to kvm_s2_fault_map() Marc Zyngier
` (12 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
Carrying a boolean to indicate that a given fault is a permission fault
is slightly odd, as this is a property of the fault itself, and we'd
better avoid duplicating state.
For this purpose, introduce a kvm_s2_fault_is_perm() predicate that
takes a fault descriptor as its sole parameter. fault_is_perm is
therefore dropped from kvm_s2_fault.
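The pattern, for reference (taken from the hunk below): derive the
property from the trap state on demand rather than caching a copy of
it.

	static bool kvm_s2_fault_is_perm(const struct kvm_s2_fault_desc *s2fd)
	{
		return kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
	}

The saved ESR is immutable for the duration of the fault, so
recomputing the predicate is free of any coherency concern and removes
one more field that could go stale.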
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 09e32f08028e4..1e0d93d6d265a 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1711,8 +1711,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
}
struct kvm_s2_fault {
- bool fault_is_perm;
-
bool write_fault;
bool exec_fault;
bool writable;
@@ -1732,6 +1730,11 @@ struct kvm_s2_fault {
vm_flags_t vm_flags;
};
+static bool kvm_s2_fault_is_perm(const struct kvm_s2_fault_desc *s2fd)
+{
+ return kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
+}
+
static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
struct kvm_s2_fault *fault)
{
@@ -1888,7 +1891,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
if (s2fd->nested)
adjust_nested_exec_perms(kvm, s2fd->nested, &fault->prot);
- if (!fault->fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
+ if (!kvm_s2_fault_is_perm(s2fd) && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
if (!fault->mte_allowed)
return -EFAULT;
@@ -1905,6 +1908,7 @@ static phys_addr_t get_ipa(const struct kvm_s2_fault *fault)
static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
struct kvm_s2_fault *fault, void *memcache)
{
+ bool fault_is_perm = kvm_s2_fault_is_perm(s2fd);
struct kvm *kvm = s2fd->vcpu->kvm;
struct kvm_pgtable *pgt;
int ret;
@@ -1922,7 +1926,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
*/
if (fault->vma_pagesize == PAGE_SIZE &&
!(fault->force_pte || fault->s2_force_noncacheable)) {
- if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE) {
+ if (fault_is_perm && fault->fault_granule > PAGE_SIZE) {
fault->vma_pagesize = fault->fault_granule;
} else {
fault->vma_pagesize = transparent_hugepage_adjust(kvm, s2fd->memslot,
@@ -1936,7 +1940,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
}
}
- if (!fault->fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
+ if (!fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
sanitise_mte_tags(kvm, fault->pfn, fault->vma_pagesize);
/*
@@ -1944,7 +1948,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
* permissions only if fault->vma_pagesize equals fault->fault_granule. Otherwise,
* kvm_pgtable_stage2_map() should be called to change block size.
*/
- if (fault->fault_is_perm && fault->vma_pagesize == fault->fault_granule) {
+ if (fault_is_perm && fault->vma_pagesize == fault->fault_granule) {
/*
* Drop the SW bits in favour of those stored in the
* PTE, which will be preserved.
@@ -1977,7 +1981,6 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
bool write_fault = kvm_is_write_fault(s2fd->vcpu);
bool logging_active = memslot_is_logging(s2fd->memslot);
struct kvm_s2_fault fault = {
- .fault_is_perm = perm_fault,
.logging_active = logging_active,
.force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 18/30] KVM: arm64: Constrain fault_granule to kvm_s2_fault_map()
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (16 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 17/30] KVM: arm64: Replace fault_is_perm with a helper Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 19/30] KVM: arm64: Kill write_fault from kvm_s2_fault Marc Zyngier
` (11 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
The notion of fault_granule is specific to kvm_s2_fault_map(), and
is used nowhere else.
Make it a local variable instead, removing the field from kvm_s2_fault.
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1e0d93d6d265a..981c04a74ab7a 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1724,7 +1724,6 @@ struct kvm_s2_fault {
bool logging_active;
bool force_pte;
long vma_pagesize;
- long fault_granule;
enum kvm_pgtable_prot prot;
struct page *page;
vm_flags_t vm_flags;
@@ -1908,9 +1907,9 @@ static phys_addr_t get_ipa(const struct kvm_s2_fault *fault)
static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
struct kvm_s2_fault *fault, void *memcache)
{
- bool fault_is_perm = kvm_s2_fault_is_perm(s2fd);
struct kvm *kvm = s2fd->vcpu->kvm;
struct kvm_pgtable *pgt;
+ long perm_fault_granule;
int ret;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
@@ -1920,14 +1919,17 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
if (mmu_invalidate_retry(kvm, fault->mmu_seq))
goto out_unlock;
+ perm_fault_granule = (kvm_s2_fault_is_perm(s2fd) ?
+ kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0);
+
/*
* If we are not forced to use page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
if (fault->vma_pagesize == PAGE_SIZE &&
!(fault->force_pte || fault->s2_force_noncacheable)) {
- if (fault_is_perm && fault->fault_granule > PAGE_SIZE) {
- fault->vma_pagesize = fault->fault_granule;
+ if (perm_fault_granule > PAGE_SIZE) {
+ fault->vma_pagesize = perm_fault_granule;
} else {
fault->vma_pagesize = transparent_hugepage_adjust(kvm, s2fd->memslot,
s2fd->hva, &fault->pfn,
@@ -1940,15 +1942,15 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
}
}
- if (!fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
+ if (!perm_fault_granule && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
sanitise_mte_tags(kvm, fault->pfn, fault->vma_pagesize);
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
- * permissions only if fault->vma_pagesize equals fault->fault_granule. Otherwise,
+ * permissions only if vma_pagesize equals perm_fault_granule. Otherwise,
* kvm_pgtable_stage2_map() should be called to change block size.
*/
- if (fault_is_perm && fault->vma_pagesize == fault->fault_granule) {
+ if (fault->vma_pagesize == perm_fault_granule) {
/*
* Drop the SW bits in favour of those stored in the
* PTE, which will be preserved.
@@ -1984,7 +1986,6 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
.logging_active = logging_active,
.force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
- .fault_granule = perm_fault ? kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0,
.write_fault = write_fault,
.exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu),
.topup_memcache = !perm_fault || (logging_active && write_fault),
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 19/30] KVM: arm64: Kill write_fault from kvm_s2_fault
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (17 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 18/30] KVM: arm64: Constrain fault_granule to kvm_s2_fault_map() Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 20/30] KVM: arm64: Kill exec_fault " Marc Zyngier
` (10 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
We already have kvm_is_write_fault() as a predicate indicating
an S2 fault on a write, and we're better off just using that instead
of duplicating the state.
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 981c04a74ab7a..7dab0c3faa5bf 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1711,7 +1711,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
}
struct kvm_s2_fault {
- bool write_fault;
bool exec_fault;
bool writable;
bool topup_memcache;
@@ -1799,7 +1798,7 @@ static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
return ret;
fault->pfn = __kvm_faultin_pfn(s2fd->memslot, get_canonical_gfn(s2fd, fault),
- fault->write_fault ? FOLL_WRITE : 0,
+ kvm_is_write_fault(s2fd->vcpu) ? FOLL_WRITE : 0,
&fault->writable, &fault->page);
if (unlikely(is_error_noslot_pfn(fault->pfn))) {
if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
@@ -1850,7 +1849,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
*/
fault->s2_force_noncacheable = true;
}
- } else if (fault->logging_active && !fault->write_fault) {
+ } else if (fault->logging_active && !kvm_is_write_fault(s2fd->vcpu)) {
/*
* Only actually map the page as writable if this was a write
* fault.
@@ -1980,21 +1979,17 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
{
bool perm_fault = kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
- bool write_fault = kvm_is_write_fault(s2fd->vcpu);
bool logging_active = memslot_is_logging(s2fd->memslot);
struct kvm_s2_fault fault = {
.logging_active = logging_active,
.force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
- .write_fault = write_fault,
.exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu),
- .topup_memcache = !perm_fault || (logging_active && write_fault),
+ .topup_memcache = !perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu)),
};
void *memcache;
int ret;
- VM_WARN_ON_ONCE(fault.write_fault && fault.exec_fault);
-
/*
* Permission faults just need to update the existing leaf entry,
* and so normally don't require allocations from the memcache. The
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 20/30] KVM: arm64: Kill exec_fault from kvm_s2_fault
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (18 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 19/30] KVM: arm64: Kill write_fault from kvm_s2_fault Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 21/30] KVM: arm64: Kill topup_memcache " Marc Zyngier
` (9 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
Similarly to write_fault, exec_fault can be advantageously replaced
by the kvm_vcpu_trap_is_exec_fault() predicate where needed.
Another one bites the dust...
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7dab0c3faa5bf..e8bda71e862b2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1711,7 +1711,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
}
struct kvm_s2_fault {
- bool exec_fault;
bool writable;
bool topup_memcache;
bool mte_allowed;
@@ -1857,7 +1856,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
fault->writable = false;
}
- if (fault->exec_fault && fault->s2_force_noncacheable)
+ if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && fault->s2_force_noncacheable)
return -ENOEXEC;
/*
@@ -1877,7 +1876,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
if (fault->writable)
fault->prot |= KVM_PGTABLE_PROT_W;
- if (fault->exec_fault)
+ if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu))
fault->prot |= KVM_PGTABLE_PROT_X;
if (fault->s2_force_noncacheable)
@@ -1984,7 +1983,6 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
.logging_active = logging_active,
.force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
- .exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu),
.topup_memcache = !perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu)),
};
void *memcache;
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 21/30] KVM: arm64: Kill topup_memcache from kvm_s2_fault
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (19 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 20/30] KVM: arm64: Kill exec_fault " Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 14:49 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 22/30] KVM: arm64: Move VMA-related information to kvm_s2_fault_vma_info Marc Zyngier
` (8 subsequent siblings)
29 siblings, 1 reply; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
The topup_memcache field can be easily replaced by the equivalent
conditions, and the resulting code is not much worse.
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index e8bda71e862b2..5b05caecdbd92 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1712,7 +1712,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
struct kvm_s2_fault {
bool writable;
- bool topup_memcache;
bool mte_allowed;
bool is_vma_cacheable;
bool s2_force_noncacheable;
@@ -1983,9 +1982,8 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
.logging_active = logging_active,
.force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
- .topup_memcache = !perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu)),
};
- void *memcache;
+ void *memcache = NULL;
int ret;
/*
@@ -1994,9 +1992,11 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
* only exception to this is when dirty logging is enabled at runtime
* and a write fault needs to collapse a block entry into a table.
*/
- ret = prepare_mmu_memcache(s2fd->vcpu, fault.topup_memcache, &memcache);
- if (ret)
- return ret;
+ if (!perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu))) {
+ ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
+ if (ret)
+ return ret;
+ }
/*
* Let's check if we will get back a huge page backed by hugetlbfs, or
--
2.47.3
^ permalink raw reply related [flat|nested] 32+ messages in thread
* Re: [PATCH v2 21/30] KVM: arm64: Kill topup_memcache from kvm_s2_fault
2026-03-27 11:36 ` [PATCH v2 21/30] KVM: arm64: Kill topup_memcache " Marc Zyngier
@ 2026-03-27 14:49 ` Marc Zyngier
0 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 14:49 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
On Fri, 27 Mar 2026 11:36:09 +0000,
Marc Zyngier <maz@kernel.org> wrote:
>
> The topup_memcache field can be easily replaced by the equivalent
> conditions, and the resulting code is not much worse.
>
> Tested-by: Fuad Tabba <tabba@google.com>
> Reviewed-by: Fuad Tabba <tabba@google.com>
> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
> arch/arm64/kvm/mmu.c | 12 ++++++------
> 1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index e8bda71e862b2..5b05caecdbd92 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1712,7 +1712,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
>
> struct kvm_s2_fault {
> bool writable;
> - bool topup_memcache;
> bool mte_allowed;
> bool is_vma_cacheable;
> bool s2_force_noncacheable;
> @@ -1983,9 +1982,8 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
> .logging_active = logging_active,
> .force_pte = logging_active,
> .prot = KVM_PGTABLE_PROT_R,
> - .topup_memcache = !perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu)),
> };
> - void *memcache;
> + void *memcache = NULL;
> int ret;
>
> /*
> @@ -1994,9 +1992,11 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
> * only exception to this is when dirty logging is enabled at runtime
> * and a write fault needs to collapse a block entry into a table.
> */
> - ret = prepare_mmu_memcache(s2fd->vcpu, fault.topup_memcache, &memcache);
> - if (ret)
> - return ret;
> + if (!perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu))) {
> + ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
> + if (ret)
> + return ret;
> + }
>
> /*
> * Let's check if we will get back a huge page backed by hugetlbfs, or
Sashiko has spotted [1] an interesting corner case here, which is that the
original code always initialises memcache to its correct value, while
we now only do it in a limited number of cases.
I'm proposing to restore the original behaviour by folding the
following change into this patch, splitting the retrieval of the
memcache pointer from the top-up and avoiding the ugly pointer
indirection:
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1fe7182be45ac..03e1f389339c7 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1513,25 +1513,22 @@ static bool kvm_vma_is_cacheable(struct vm_area_struct *vma)
}
}
-static int prepare_mmu_memcache(struct kvm_vcpu *vcpu, bool topup_memcache,
- void **memcache)
+static void *get_mmu_memcache(struct kvm_vcpu *vcpu)
{
- int min_pages;
-
if (!is_protected_kvm_enabled())
- *memcache = &vcpu->arch.mmu_page_cache;
+ return &vcpu->arch.mmu_page_cache;
else
- *memcache = &vcpu->arch.pkvm_memcache;
-
- if (!topup_memcache)
- return 0;
+ return &vcpu->arch.pkvm_memcache;
+}
- min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
+static int topup_mmu_memcache(struct kvm_vcpu *vcpu, void *memcache)
+{
+ int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
if (!is_protected_kvm_enabled())
- return kvm_mmu_topup_memory_cache(*memcache, min_pages);
+ return kvm_mmu_topup_memory_cache(memcache, min_pages);
- return topup_hyp_memcache(*memcache, min_pages);
+ return topup_hyp_memcache(memcache, min_pages);
}
/*
@@ -1589,7 +1586,8 @@ static int gmem_abort(const struct kvm_s2_fault_desc *s2fd)
gfn_t gfn;
int ret;
- ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
+ memcache = get_mmu_memcache(s2fd->vcpu);
+ ret = topup_mmu_memcache(s2fd->vcpu, memcache);
if (ret)
return ret;
@@ -1993,7 +1991,7 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
bool perm_fault = kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
struct kvm_s2_fault_vma_info s2vi = {};
enum kvm_pgtable_prot prot;
- void *memcache = NULL;
+ void *memcache;
int ret;
/*
@@ -2002,9 +2000,10 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
* only exception to this is when dirty logging is enabled at runtime
* and a write fault needs to collapse a block entry into a table.
*/
+ memcache = get_mmu_memcache(s2fd->vcpu);
if (!perm_fault || (memslot_is_logging(s2fd->memslot) &&
kvm_is_write_fault(s2fd->vcpu))) {
- ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
+ ret = topup_mmu_memcache(s2fd->vcpu, memcache);
if (ret)
return ret;
}
The bot has also pointed out a couple of cases where memcache and
permission faults interact badly. I'll look into them separately, as
they predate this rework.
Thanks,
M.
[1] https://sashiko.dev/#/patchset/20260327113618.4051534-1-maz%40kernel.org?patch=12134
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [PATCH v2 22/30] KVM: arm64: Move VMA-related information to kvm_s2_fault_vma_info
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (20 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 21/30] KVM: arm64: Kill topup_memcache " Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 23/30] KVM: arm64: Kill logging_active from kvm_s2_fault Marc Zyngier
` (7 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
Mechanically extract a bunch of VMA-related fields from kvm_s2_fault
and move them to a new kvm_s2_fault_vma_info structure.
This is not much, but it already allows us to define which functions
can update this structure, and which ones are pure consumers of the
data. Those in the latter camp are updated to take a const pointer
to that structure.
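The producer/consumer split, condensed (signatures as in the hunks
below; the snapshot is filled in kvm_s2_fault_get_vma_info() and
read-only everywhere else):

	/* sole producer: fills s2vi under mmap_read_lock() */
	static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
					     struct kvm_s2_fault *fault,
					     struct kvm_s2_fault_vma_info *s2vi);

	/* pure consumers of the VMA snapshot */
	static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
					     struct kvm_s2_fault *fault,
					     const struct kvm_s2_fault_vma_info *s2vi);
	static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
				    struct kvm_s2_fault *fault,
				    const struct kvm_s2_fault_vma_info *s2vi,
				    void *memcache);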
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 117 ++++++++++++++++++++++++-------------------
1 file changed, 65 insertions(+), 52 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 5b05caecdbd92..5b2862e2bfcf3 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1648,6 +1648,15 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret != -EAGAIN ? ret : 0;
}
+struct kvm_s2_fault_vma_info {
+ unsigned long mmu_seq;
+ long vma_pagesize;
+ vm_flags_t vm_flags;
+ gfn_t gfn;
+ bool mte_allowed;
+ bool is_vma_cacheable;
+};
+
static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
struct vm_area_struct *vma, bool *force_pte)
{
@@ -1712,18 +1721,12 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
struct kvm_s2_fault {
bool writable;
- bool mte_allowed;
- bool is_vma_cacheable;
bool s2_force_noncacheable;
- unsigned long mmu_seq;
- gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active;
bool force_pte;
- long vma_pagesize;
enum kvm_pgtable_prot prot;
struct page *page;
- vm_flags_t vm_flags;
};
static bool kvm_s2_fault_is_perm(const struct kvm_s2_fault_desc *s2fd)
@@ -1732,7 +1735,8 @@ static bool kvm_s2_fault_is_perm(const struct kvm_s2_fault_desc *s2fd)
}
static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault)
+ struct kvm_s2_fault *fault,
+ struct kvm_s2_fault_vma_info *s2vi)
{
struct vm_area_struct *vma;
struct kvm *kvm = s2fd->vcpu->kvm;
@@ -1745,20 +1749,20 @@ static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
return -EFAULT;
}
- fault->vma_pagesize = BIT(kvm_s2_resolve_vma_size(s2fd, vma, &fault->force_pte));
+ s2vi->vma_pagesize = BIT(kvm_s2_resolve_vma_size(s2fd, vma, &fault->force_pte));
/*
* Both the canonical IPA and fault IPA must be aligned to the
* mapping size to ensure we find the right PFN and lay down the
* mapping in the right place.
*/
- fault->gfn = ALIGN_DOWN(s2fd->fault_ipa, fault->vma_pagesize) >> PAGE_SHIFT;
+ s2vi->gfn = ALIGN_DOWN(s2fd->fault_ipa, s2vi->vma_pagesize) >> PAGE_SHIFT;
- fault->mte_allowed = kvm_vma_mte_allowed(vma);
+ s2vi->mte_allowed = kvm_vma_mte_allowed(vma);
- fault->vm_flags = vma->vm_flags;
+ s2vi->vm_flags = vma->vm_flags;
- fault->is_vma_cacheable = kvm_vma_is_cacheable(vma);
+ s2vi->is_vma_cacheable = kvm_vma_is_cacheable(vma);
/*
* Read mmu_invalidate_seq so that KVM can detect if the results of
@@ -1768,39 +1772,40 @@ static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
* Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
* with the smp_wmb() in kvm_mmu_invalidate_end().
*/
- fault->mmu_seq = kvm->mmu_invalidate_seq;
+ s2vi->mmu_seq = kvm->mmu_invalidate_seq;
mmap_read_unlock(current->mm);
return 0;
}
static gfn_t get_canonical_gfn(const struct kvm_s2_fault_desc *s2fd,
- const struct kvm_s2_fault *fault)
+ const struct kvm_s2_fault_vma_info *s2vi)
{
phys_addr_t ipa;
if (!s2fd->nested)
- return fault->gfn;
+ return s2vi->gfn;
ipa = kvm_s2_trans_output(s2fd->nested);
- return ALIGN_DOWN(ipa, fault->vma_pagesize) >> PAGE_SHIFT;
+ return ALIGN_DOWN(ipa, s2vi->vma_pagesize) >> PAGE_SHIFT;
}
static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault)
+ struct kvm_s2_fault *fault,
+ struct kvm_s2_fault_vma_info *s2vi)
{
int ret;
- ret = kvm_s2_fault_get_vma_info(s2fd, fault);
+ ret = kvm_s2_fault_get_vma_info(s2fd, fault, s2vi);
if (ret)
return ret;
- fault->pfn = __kvm_faultin_pfn(s2fd->memslot, get_canonical_gfn(s2fd, fault),
+ fault->pfn = __kvm_faultin_pfn(s2fd->memslot, get_canonical_gfn(s2fd, s2vi),
kvm_is_write_fault(s2fd->vcpu) ? FOLL_WRITE : 0,
&fault->writable, &fault->page);
if (unlikely(is_error_noslot_pfn(fault->pfn))) {
if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
- kvm_send_hwpoison_signal(s2fd->hva, __ffs(fault->vma_pagesize));
+ kvm_send_hwpoison_signal(s2fd->hva, __ffs(s2vi->vma_pagesize));
return 0;
}
return -EFAULT;
@@ -1810,7 +1815,8 @@ static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
}
static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault)
+ struct kvm_s2_fault *fault,
+ const struct kvm_s2_fault_vma_info *s2vi)
{
struct kvm *kvm = s2fd->vcpu->kvm;
@@ -1818,8 +1824,8 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
* Check if this is non-struct page memory PFN, and cannot support
* CMOs. It could potentially be unsafe to access as cacheable.
*/
- if (fault->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(fault->pfn)) {
- if (fault->is_vma_cacheable) {
+ if (s2vi->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(fault->pfn)) {
+ if (s2vi->is_vma_cacheable) {
/*
* Whilst the VMA owner expects cacheable mapping to this
* PFN, hardware also has to support the FWB and CACHE DIC
@@ -1879,7 +1885,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
fault->prot |= KVM_PGTABLE_PROT_X;
if (fault->s2_force_noncacheable)
- fault->prot |= (fault->vm_flags & VM_ALLOW_ANY_UNCACHED) ?
+ fault->prot |= (s2vi->vm_flags & VM_ALLOW_ANY_UNCACHED) ?
KVM_PGTABLE_PROT_NORMAL_NC : KVM_PGTABLE_PROT_DEVICE;
else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
fault->prot |= KVM_PGTABLE_PROT_X;
@@ -1889,74 +1895,73 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
if (!kvm_s2_fault_is_perm(s2fd) && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
- if (!fault->mte_allowed)
+ if (!s2vi->mte_allowed)
return -EFAULT;
}
return 0;
}
-static phys_addr_t get_ipa(const struct kvm_s2_fault *fault)
-{
- return gfn_to_gpa(fault->gfn);
-}
-
static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault, void *memcache)
+ struct kvm_s2_fault *fault,
+ const struct kvm_s2_fault_vma_info *s2vi, void *memcache)
{
+ enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
struct kvm *kvm = s2fd->vcpu->kvm;
struct kvm_pgtable *pgt;
long perm_fault_granule;
+ long mapping_size;
+ gfn_t gfn;
int ret;
- enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
kvm_fault_lock(kvm);
pgt = s2fd->vcpu->arch.hw_mmu->pgt;
ret = -EAGAIN;
- if (mmu_invalidate_retry(kvm, fault->mmu_seq))
+ if (mmu_invalidate_retry(kvm, s2vi->mmu_seq))
goto out_unlock;
perm_fault_granule = (kvm_s2_fault_is_perm(s2fd) ?
kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0);
+ mapping_size = s2vi->vma_pagesize;
+ gfn = s2vi->gfn;
/*
* If we are not forced to use page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
- if (fault->vma_pagesize == PAGE_SIZE &&
+ if (mapping_size == PAGE_SIZE &&
!(fault->force_pte || fault->s2_force_noncacheable)) {
if (perm_fault_granule > PAGE_SIZE) {
- fault->vma_pagesize = perm_fault_granule;
+ mapping_size = perm_fault_granule;
} else {
- fault->vma_pagesize = transparent_hugepage_adjust(kvm, s2fd->memslot,
- s2fd->hva, &fault->pfn,
- &fault->gfn);
-
- if (fault->vma_pagesize < 0) {
- ret = fault->vma_pagesize;
+ mapping_size = transparent_hugepage_adjust(kvm, s2fd->memslot,
+ s2fd->hva, &fault->pfn,
+ &gfn);
+ if (mapping_size < 0) {
+ ret = mapping_size;
goto out_unlock;
}
}
}
if (!perm_fault_granule && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
- sanitise_mte_tags(kvm, fault->pfn, fault->vma_pagesize);
+ sanitise_mte_tags(kvm, fault->pfn, mapping_size);
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
- * permissions only if vma_pagesize equals perm_fault_granule. Otherwise,
+ * permissions only if mapping_size equals perm_fault_granule. Otherwise,
* kvm_pgtable_stage2_map() should be called to change block size.
*/
- if (fault->vma_pagesize == perm_fault_granule) {
+ if (mapping_size == perm_fault_granule) {
/*
* Drop the SW bits in favour of those stored in the
* PTE, which will be preserved.
*/
fault->prot &= ~KVM_NV_GUEST_MAP_SZ;
- ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, get_ipa(fault),
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, gfn_to_gpa(gfn),
fault->prot, flags);
} else {
- ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, get_ipa(fault), fault->vma_pagesize,
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, gfn_to_gpa(gfn), mapping_size,
__pfn_to_phys(fault->pfn), fault->prot,
memcache, flags);
}
@@ -1965,9 +1970,16 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
kvm_release_faultin_page(kvm, fault->page, !!ret, fault->writable);
kvm_fault_unlock(kvm);
- /* Mark the page dirty only if the fault is handled successfully */
- if (fault->writable && !ret)
- mark_page_dirty_in_slot(kvm, s2fd->memslot, get_canonical_gfn(s2fd, fault));
+ /*
+ * Mark the page dirty only if the fault is handled successfully,
+ * making sure we adjust the canonical IPA if the mapping size has
+ * been updated (via a THP upgrade, for example).
+ */
+ if (fault->writable && !ret) {
+ phys_addr_t ipa = gfn_to_gpa(get_canonical_gfn(s2fd, s2vi));
+ ipa &= ~(mapping_size - 1);
+ mark_page_dirty_in_slot(kvm, s2fd->memslot, gpa_to_gfn(ipa));
+ }
if (ret != -EAGAIN)
return ret;
@@ -1978,6 +1990,7 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
{
bool perm_fault = kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
bool logging_active = memslot_is_logging(s2fd->memslot);
+ struct kvm_s2_fault_vma_info s2vi = {};
struct kvm_s2_fault fault = {
.logging_active = logging_active,
.force_pte = logging_active,
@@ -2002,17 +2015,17 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
* Let's check if we will get back a huge page backed by hugetlbfs, or
* get block mapping for device MMIO region.
*/
- ret = kvm_s2_fault_pin_pfn(s2fd, &fault);
+ ret = kvm_s2_fault_pin_pfn(s2fd, &fault, &s2vi);
if (ret != 1)
return ret;
- ret = kvm_s2_fault_compute_prot(s2fd, &fault);
+ ret = kvm_s2_fault_compute_prot(s2fd, &fault, &s2vi);
if (ret) {
kvm_release_page_unused(fault.page);
return ret;
}
- return kvm_s2_fault_map(s2fd, &fault, memcache);
+ return kvm_s2_fault_map(s2fd, &fault, &s2vi, memcache);
}
/* Resolve the access fault by making the page young again. */
--
2.47.3
* [PATCH v2 23/30] KVM: arm64: Kill logging_active from kvm_s2_fault
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (21 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 22/30] KVM: arm64: Move VMA-related information to kvm_s2_fault_vma_info Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 24/30] KVM: arm64: Restrict the scope of the 'writable' attribute Marc Zyngier
` (6 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
There are only two spots where we evaluate whether logging is
active. Replace the boolean with direct calls to memslot_is_logging().
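As a schematic illustration (hypothetical stand-in types and flag, not
the kernel's actual memslot code), deriving the state on demand removes
a field that could go stale or get copied around:

struct slot { unsigned long flags; };	/* hypothetical stand-in */

#define SLOT_LOG_DIRTY	(1UL << 0)	/* illustrative flag */

/*
 * The helper derives the answer from the memslot each time it is
 * asked, so there is no cached boolean to keep in sync or to carry
 * in yet another structure.
 */
static inline int slot_is_logging(const struct slot *s)
{
	return !!(s->flags & SLOT_LOG_DIRTY);
}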
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 5b2862e2bfcf3..26313e0b40c25 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1723,7 +1723,6 @@ struct kvm_s2_fault {
bool writable;
bool s2_force_noncacheable;
kvm_pfn_t pfn;
- bool logging_active;
bool force_pte;
enum kvm_pgtable_prot prot;
struct page *page;
@@ -1853,7 +1852,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
*/
fault->s2_force_noncacheable = true;
}
- } else if (fault->logging_active && !kvm_is_write_fault(s2fd->vcpu)) {
+ } else if (memslot_is_logging(s2fd->memslot) && !kvm_is_write_fault(s2fd->vcpu)) {
/*
* Only actually map the page as writable if this was a write
* fault.
@@ -1989,11 +1988,9 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
{
bool perm_fault = kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
- bool logging_active = memslot_is_logging(s2fd->memslot);
struct kvm_s2_fault_vma_info s2vi = {};
struct kvm_s2_fault fault = {
- .logging_active = logging_active,
- .force_pte = logging_active,
+ .force_pte = memslot_is_logging(s2fd->memslot),
.prot = KVM_PGTABLE_PROT_R,
};
void *memcache = NULL;
@@ -2005,7 +2002,8 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
* only exception to this is when dirty logging is enabled at runtime
* and a write fault needs to collapse a block entry into a table.
*/
- if (!perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu))) {
+ if (!perm_fault || (memslot_is_logging(s2fd->memslot) &&
+ kvm_is_write_fault(s2fd->vcpu))) {
ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
if (ret)
return ret;
--
2.47.3
* [PATCH v2 24/30] KVM: arm64: Restrict the scope of the 'writable' attribute
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (22 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 23/30] KVM: arm64: Kill logging_active from kvm_s2_fault Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 25/30] KVM: arm64: Move kvm_s2_fault.{pfn,page} to kvm_s2_vma_info Marc Zyngier
` (5 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
The 'writable' field is ambiguous, and indicates multiple things:
- whether the underlying memslot is writable
- whether we are resolving the fault with writable attributes
Add a new field to kvm_s2_fault_vma_info (map_writable) to indicate
the former condition, and have local writable variables to track
the latter.
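To illustrate the split, a minimal standalone sketch (placeholder
PROT_* bits and simplified parameters, not the actual kernel types):

#include <stdbool.h>

#define PROT_R	(1U << 0)	/* stand-in for KVM_PGTABLE_PROT_R */
#define PROT_W	(1U << 1)	/* stand-in for KVM_PGTABLE_PROT_W */

/*
 * map_writable is a fact about the host mapping, established once when
 * the pfn is pinned. Whether a writable stage-2 entry is actually
 * installed is a per-fault decision, tracked in a local variable.
 */
static unsigned int compute_prot(bool map_writable, bool logging_active,
				 bool write_fault)
{
	unsigned int prot = PROT_R;
	bool writable = map_writable;

	/* Under dirty logging, only a write fault maps the page writable. */
	if (logging_active && !write_fault)
		writable = false;

	if (writable)
		prot |= PROT_W;

	return prot;
}

With this shape, nothing downstream can accidentally flip the
memslot-level fact.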
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 26313e0b40c25..91767a2e6e9f2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1655,6 +1655,7 @@ struct kvm_s2_fault_vma_info {
gfn_t gfn;
bool mte_allowed;
bool is_vma_cacheable;
+ bool map_writable;
};
static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
@@ -1720,7 +1721,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
}
struct kvm_s2_fault {
- bool writable;
bool s2_force_noncacheable;
kvm_pfn_t pfn;
bool force_pte;
@@ -1801,7 +1801,7 @@ static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
fault->pfn = __kvm_faultin_pfn(s2fd->memslot, get_canonical_gfn(s2fd, s2vi),
kvm_is_write_fault(s2fd->vcpu) ? FOLL_WRITE : 0,
- &fault->writable, &fault->page);
+ &s2vi->map_writable, &fault->page);
if (unlikely(is_error_noslot_pfn(fault->pfn))) {
if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(s2fd->hva, __ffs(s2vi->vma_pagesize));
@@ -1818,6 +1818,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
const struct kvm_s2_fault_vma_info *s2vi)
{
struct kvm *kvm = s2fd->vcpu->kvm;
+ bool writable = s2vi->map_writable;
/*
* Check if this is non-struct page memory PFN, and cannot support
@@ -1857,7 +1858,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
* Only actually map the page as writable if this was a write
* fault.
*/
- fault->writable = false;
+ writable = false;
}
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && fault->s2_force_noncacheable)
@@ -1875,9 +1876,9 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
}
if (s2fd->nested)
- adjust_nested_fault_perms(s2fd->nested, &fault->prot, &fault->writable);
+ adjust_nested_fault_perms(s2fd->nested, &fault->prot, &writable);
- if (fault->writable)
+ if (writable)
fault->prot |= KVM_PGTABLE_PROT_W;
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu))
@@ -1906,6 +1907,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
const struct kvm_s2_fault_vma_info *s2vi, void *memcache)
{
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
+ bool writable = fault->prot & KVM_PGTABLE_PROT_W;
struct kvm *kvm = s2fd->vcpu->kvm;
struct kvm_pgtable *pgt;
long perm_fault_granule;
@@ -1966,7 +1968,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
}
out_unlock:
- kvm_release_faultin_page(kvm, fault->page, !!ret, fault->writable);
+ kvm_release_faultin_page(kvm, fault->page, !!ret, writable);
kvm_fault_unlock(kvm);
/*
@@ -1974,7 +1976,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
* making sure we adjust the canonical IPA if the mapping size has
* been updated (via a THP upgrade, for example).
*/
- if (fault->writable && !ret) {
+ if (writable && !ret) {
phys_addr_t ipa = gfn_to_gpa(get_canonical_gfn(s2fd, s2vi));
ipa &= ~(mapping_size - 1);
mark_page_dirty_in_slot(kvm, s2fd->memslot, gpa_to_gfn(ipa));
--
2.47.3
* [PATCH v2 25/30] KVM: arm64: Move kvm_s2_fault.{pfn,page} to kvm_s2_vma_info
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (23 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 24/30] KVM: arm64: Restrict the scope of the 'writable' attribute Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 26/30] KVM: arm64: Replace force_pte with a max_map_size attribute Marc Zyngier
` (4 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
Continue restricting the visibility/mutability of some attributes
by moving kvm_s2_fault.{pfn,page} to kvm_s2_fault_vma_info.
This is a pretty mechanical change.
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 28 +++++++++++++++-------------
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 91767a2e6e9f2..39f01dd59259c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1652,6 +1652,8 @@ struct kvm_s2_fault_vma_info {
unsigned long mmu_seq;
long vma_pagesize;
vm_flags_t vm_flags;
+ struct page *page;
+ kvm_pfn_t pfn;
gfn_t gfn;
bool mte_allowed;
bool is_vma_cacheable;
@@ -1722,10 +1724,8 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
struct kvm_s2_fault {
bool s2_force_noncacheable;
- kvm_pfn_t pfn;
bool force_pte;
enum kvm_pgtable_prot prot;
- struct page *page;
};
static bool kvm_s2_fault_is_perm(const struct kvm_s2_fault_desc *s2fd)
@@ -1799,11 +1799,11 @@ static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
if (ret)
return ret;
- fault->pfn = __kvm_faultin_pfn(s2fd->memslot, get_canonical_gfn(s2fd, s2vi),
- kvm_is_write_fault(s2fd->vcpu) ? FOLL_WRITE : 0,
- &s2vi->map_writable, &fault->page);
- if (unlikely(is_error_noslot_pfn(fault->pfn))) {
- if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
+ s2vi->pfn = __kvm_faultin_pfn(s2fd->memslot, get_canonical_gfn(s2fd, s2vi),
+ kvm_is_write_fault(s2fd->vcpu) ? FOLL_WRITE : 0,
+ &s2vi->map_writable, &s2vi->page);
+ if (unlikely(is_error_noslot_pfn(s2vi->pfn))) {
+ if (s2vi->pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(s2fd->hva, __ffs(s2vi->vma_pagesize));
return 0;
}
@@ -1824,7 +1824,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
* Check if this is non-struct page memory PFN, and cannot support
* CMOs. It could potentially be unsafe to access as cacheable.
*/
- if (s2vi->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(fault->pfn)) {
+ if (s2vi->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(s2vi->pfn)) {
if (s2vi->is_vma_cacheable) {
/*
* Whilst the VMA owner expects cacheable mapping to this
@@ -1912,6 +1912,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
struct kvm_pgtable *pgt;
long perm_fault_granule;
long mapping_size;
+ kvm_pfn_t pfn;
gfn_t gfn;
int ret;
@@ -1924,6 +1925,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
perm_fault_granule = (kvm_s2_fault_is_perm(s2fd) ?
kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0);
mapping_size = s2vi->vma_pagesize;
+ pfn = s2vi->pfn;
gfn = s2vi->gfn;
/*
@@ -1936,7 +1938,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
mapping_size = perm_fault_granule;
} else {
mapping_size = transparent_hugepage_adjust(kvm, s2fd->memslot,
- s2fd->hva, &fault->pfn,
+ s2fd->hva, &pfn,
&gfn);
if (mapping_size < 0) {
ret = mapping_size;
@@ -1946,7 +1948,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
}
if (!perm_fault_granule && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
- sanitise_mte_tags(kvm, fault->pfn, mapping_size);
+ sanitise_mte_tags(kvm, pfn, mapping_size);
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
@@ -1963,12 +1965,12 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
fault->prot, flags);
} else {
ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, gfn_to_gpa(gfn), mapping_size,
- __pfn_to_phys(fault->pfn), fault->prot,
+ __pfn_to_phys(pfn), fault->prot,
memcache, flags);
}
out_unlock:
- kvm_release_faultin_page(kvm, fault->page, !!ret, writable);
+ kvm_release_faultin_page(kvm, s2vi->page, !!ret, writable);
kvm_fault_unlock(kvm);
/*
@@ -2021,7 +2023,7 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
ret = kvm_s2_fault_compute_prot(s2fd, &fault, &s2vi);
if (ret) {
- kvm_release_page_unused(fault.page);
+ kvm_release_page_unused(s2vi.page);
return ret;
}
--
2.47.3
* [PATCH v2 26/30] KVM: arm64: Replace force_pte with a max_map_size attribute
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (24 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 25/30] KVM: arm64: Move kvm_s2_fault.{pfn,page} to kvm_s2_vma_info Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 27/30] KVM: arm64: Move device mapping management into kvm_s2_fault_pin_pfn() Marc Zyngier
` (3 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
force_pte is annoyingly limited in what it expresses, and we'd
be better off with a more generic primitive. Introduce max_map_size
instead, which does the trick and can be moved into the vma_info
structure. This further reduces the scope in which it is mutable.
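A rough standalone sketch of the idea (illustrative sizes only, not the
patch itself): clamping against max_map_size subsumes force_pte, which
becomes the degenerate max_map_size == PAGE_SIZE case.

#include <stdio.h>

#define EX_PAGE_SIZE	0x1000UL	/* 4kB, illustrative */
#define EX_PMD_SIZE	0x200000UL	/* 2MB, illustrative */

static unsigned long clamp_mapping(unsigned long vma_pagesize,
				   unsigned long max_map_size)
{
	/* The old force_pte is simply max_map_size == EX_PAGE_SIZE. */
	return vma_pagesize < max_map_size ? vma_pagesize : max_map_size;
}

int main(void)
{
	/* Dirty logging: everything collapses to page mappings. */
	printf("%#lx\n", clamp_mapping(EX_PMD_SIZE, EX_PAGE_SIZE));

	/* No restriction: a THP-backed block mapping survives. */
	printf("%#lx\n", clamp_mapping(EX_PMD_SIZE, EX_PMD_SIZE));
	return 0;
}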
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 39f01dd59259c..61b979365c6ee 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1652,6 +1652,7 @@ struct kvm_s2_fault_vma_info {
unsigned long mmu_seq;
long vma_pagesize;
vm_flags_t vm_flags;
+ unsigned long max_map_size;
struct page *page;
kvm_pfn_t pfn;
gfn_t gfn;
@@ -1661,14 +1662,18 @@ struct kvm_s2_fault_vma_info {
};
static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
- struct vm_area_struct *vma, bool *force_pte)
+ struct kvm_s2_fault_vma_info *s2vi,
+ struct vm_area_struct *vma)
{
short vma_shift;
- if (*force_pte)
+ if (memslot_is_logging(s2fd->memslot)) {
+ s2vi->max_map_size = PAGE_SIZE;
vma_shift = PAGE_SHIFT;
- else
+ } else {
+ s2vi->max_map_size = PUD_SIZE;
vma_shift = get_vma_page_shift(vma, s2fd->hva);
+ }
switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
@@ -1686,7 +1691,7 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
fallthrough;
case CONT_PTE_SHIFT:
vma_shift = PAGE_SHIFT;
- *force_pte = true;
+ s2vi->max_map_size = PAGE_SIZE;
fallthrough;
case PAGE_SHIFT:
break;
@@ -1697,7 +1702,7 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
if (s2fd->nested) {
unsigned long max_map_size;
- max_map_size = *force_pte ? PAGE_SIZE : PUD_SIZE;
+ max_map_size = min(s2vi->max_map_size, PUD_SIZE);
/*
* If we're about to create a shadow stage 2 entry, then we
@@ -1715,7 +1720,7 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
max_map_size = PAGE_SIZE;
- *force_pte = (max_map_size == PAGE_SIZE);
+ s2vi->max_map_size = max_map_size;
vma_shift = min_t(short, vma_shift, __ffs(max_map_size));
}
@@ -1724,7 +1729,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
struct kvm_s2_fault {
bool s2_force_noncacheable;
- bool force_pte;
enum kvm_pgtable_prot prot;
};
@@ -1748,7 +1752,7 @@ static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
return -EFAULT;
}
- s2vi->vma_pagesize = BIT(kvm_s2_resolve_vma_size(s2fd, vma, &fault->force_pte));
+ s2vi->vma_pagesize = BIT(kvm_s2_resolve_vma_size(s2fd, s2vi, vma));
/*
* Both the canonical IPA and fault IPA must be aligned to the
@@ -1933,7 +1937,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
* backed by a THP and thus use block mapping if possible.
*/
if (mapping_size == PAGE_SIZE &&
- !(fault->force_pte || fault->s2_force_noncacheable)) {
+ !(s2vi->max_map_size == PAGE_SIZE || fault->s2_force_noncacheable)) {
if (perm_fault_granule > PAGE_SIZE) {
mapping_size = perm_fault_granule;
} else {
@@ -1994,7 +1998,6 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
bool perm_fault = kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
struct kvm_s2_fault_vma_info s2vi = {};
struct kvm_s2_fault fault = {
- .force_pte = memslot_is_logging(s2fd->memslot),
.prot = KVM_PGTABLE_PROT_R,
};
void *memcache = NULL;
--
2.47.3
* [PATCH v2 27/30] KVM: arm64: Move device mapping management into kvm_s2_fault_pin_pfn()
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (25 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 26/30] KVM: arm64: Replace force_pte with a max_map_size attribute Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 28/30] KVM: arm64: Directly expose mapping prot and kill kvm_s2_fault Marc Zyngier
` (2 subsequent siblings)
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
Attributes for device mappings are computed very late in the fault
handling process, meaning they stay mutable for that long.
Introduce both 'device' and 'map_non_cacheable' attributes to the
vma_info structure, allowing that information to be set in stone
earlier, in kvm_s2_fault_pin_pfn().
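Schematically (hypothetical names, simplified types), the structure is
mutable only while the pfn is being pinned, and read-only afterwards:

#include <stdbool.h>

struct vma_info {			/* simplified stand-in */
	bool device;
	bool map_non_cacheable;
};

/* Pinning is the last point at which the attributes may change... */
static void pin_pfn(struct vma_info *vi, bool pfnmap, bool cacheable)
{
	vi->device = pfnmap;
	vi->map_non_cacheable = pfnmap && !cacheable;
}

/* ...every later stage takes the structure as const and only reads it. */
static bool exec_allowed(const struct vma_info *vi)
{
	return !vi->map_non_cacheable;
}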
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 52 ++++++++++++++++++++++++--------------------
1 file changed, 29 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 61b979365c6ee..23245ee7b1ec2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1656,9 +1656,11 @@ struct kvm_s2_fault_vma_info {
struct page *page;
kvm_pfn_t pfn;
gfn_t gfn;
+ bool device;
bool mte_allowed;
bool is_vma_cacheable;
bool map_writable;
+ bool map_non_cacheable;
};
static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
@@ -1728,7 +1730,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
}
struct kvm_s2_fault {
- bool s2_force_noncacheable;
enum kvm_pgtable_prot prot;
};
@@ -1738,7 +1739,6 @@ static bool kvm_s2_fault_is_perm(const struct kvm_s2_fault_desc *s2fd)
}
static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault,
struct kvm_s2_fault_vma_info *s2vi)
{
struct vm_area_struct *vma;
@@ -1794,12 +1794,11 @@ static gfn_t get_canonical_gfn(const struct kvm_s2_fault_desc *s2fd,
}
static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault,
struct kvm_s2_fault_vma_info *s2vi)
{
int ret;
- ret = kvm_s2_fault_get_vma_info(s2fd, fault, s2vi);
+ ret = kvm_s2_fault_get_vma_info(s2fd, s2vi);
if (ret)
return ret;
@@ -1814,16 +1813,6 @@ static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
return -EFAULT;
}
- return 1;
-}
-
-static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault,
- const struct kvm_s2_fault_vma_info *s2vi)
-{
- struct kvm *kvm = s2fd->vcpu->kvm;
- bool writable = s2vi->map_writable;
-
/*
* Check if this is non-struct page memory PFN, and cannot support
* CMOs. It could potentially be unsafe to access as cacheable.
@@ -1842,8 +1831,10 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
* S2FWB and CACHE DIC are mandatory to avoid the need for
* cache maintenance.
*/
- if (!kvm_supports_cacheable_pfnmap())
+ if (!kvm_supports_cacheable_pfnmap()) {
+ kvm_release_faultin_page(s2fd->vcpu->kvm, s2vi->page, true, false);
return -EFAULT;
+ }
} else {
/*
* If the page was identified as device early by looking at
@@ -1855,9 +1846,24 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
* In both cases, we don't let transparent_hugepage_adjust()
* change things at the last minute.
*/
- fault->s2_force_noncacheable = true;
+ s2vi->map_non_cacheable = true;
}
- } else if (memslot_is_logging(s2fd->memslot) && !kvm_is_write_fault(s2fd->vcpu)) {
+
+ s2vi->device = true;
+ }
+
+ return 1;
+}
+
+static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
+ struct kvm_s2_fault *fault,
+ const struct kvm_s2_fault_vma_info *s2vi)
+{
+ struct kvm *kvm = s2fd->vcpu->kvm;
+ bool writable = s2vi->map_writable;
+
+ if (!s2vi->device && memslot_is_logging(s2fd->memslot) &&
+ !kvm_is_write_fault(s2fd->vcpu)) {
/*
* Only actually map the page as writable if this was a write
* fault.
@@ -1865,7 +1871,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
writable = false;
}
- if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && fault->s2_force_noncacheable)
+ if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && s2vi->map_non_cacheable)
return -ENOEXEC;
/*
@@ -1888,7 +1894,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu))
fault->prot |= KVM_PGTABLE_PROT_X;
- if (fault->s2_force_noncacheable)
+ if (s2vi->map_non_cacheable)
fault->prot |= (s2vi->vm_flags & VM_ALLOW_ANY_UNCACHED) ?
KVM_PGTABLE_PROT_NORMAL_NC : KVM_PGTABLE_PROT_DEVICE;
else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
@@ -1897,7 +1903,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
if (s2fd->nested)
adjust_nested_exec_perms(kvm, s2fd->nested, &fault->prot);
- if (!kvm_s2_fault_is_perm(s2fd) && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
+ if (!kvm_s2_fault_is_perm(s2fd) && !s2vi->map_non_cacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
if (!s2vi->mte_allowed)
return -EFAULT;
@@ -1937,7 +1943,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
* backed by a THP and thus use block mapping if possible.
*/
if (mapping_size == PAGE_SIZE &&
- !(s2vi->max_map_size == PAGE_SIZE || fault->s2_force_noncacheable)) {
+ !(s2vi->max_map_size == PAGE_SIZE || s2vi->map_non_cacheable)) {
if (perm_fault_granule > PAGE_SIZE) {
mapping_size = perm_fault_granule;
} else {
@@ -1951,7 +1957,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
}
}
- if (!perm_fault_granule && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
+ if (!perm_fault_granule && !s2vi->map_non_cacheable && kvm_has_mte(kvm))
sanitise_mte_tags(kvm, pfn, mapping_size);
/*
@@ -2020,7 +2026,7 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
* Let's check if we will get back a huge page backed by hugetlbfs, or
* get block mapping for device MMIO region.
*/
- ret = kvm_s2_fault_pin_pfn(s2fd, &fault, &s2vi);
+ ret = kvm_s2_fault_pin_pfn(s2fd, &s2vi);
if (ret != 1)
return ret;
--
2.47.3
* [PATCH v2 28/30] KVM: arm64: Directly expose mapping prot and kill kvm_s2_fault
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (26 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 27/30] KVM: arm64: Move device mapping management into kvm_s2_fault_pin_pfn() Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 29/30] KVM: arm64: Simplify integration of adjust_nested_*_perms() Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 30/30] KVM: arm64: Convert gmem_abort() to struct kvm_s2_fault_desc Marc Zyngier
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
The 'prot' field is the only one left in kvm_s2_fault. Expose it
directly to the functions needing it, and get rid of kvm_s2_fault.
It has served us well during this refactoring, but it is no
longer needed.
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 45 +++++++++++++++++++++-----------------------
1 file changed, 21 insertions(+), 24 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 23245ee7b1ec2..0fbdac77b1140 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1729,10 +1729,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
return vma_shift;
}
-struct kvm_s2_fault {
- enum kvm_pgtable_prot prot;
-};
-
static bool kvm_s2_fault_is_perm(const struct kvm_s2_fault_desc *s2fd)
{
return kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
@@ -1856,8 +1852,8 @@ static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
}
static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault,
- const struct kvm_s2_fault_vma_info *s2vi)
+ const struct kvm_s2_fault_vma_info *s2vi,
+ enum kvm_pgtable_prot *prot)
{
struct kvm *kvm = s2fd->vcpu->kvm;
bool writable = s2vi->map_writable;
@@ -1885,23 +1881,25 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
return 1;
}
+ *prot = KVM_PGTABLE_PROT_R;
+
if (s2fd->nested)
- adjust_nested_fault_perms(s2fd->nested, &fault->prot, &writable);
+ adjust_nested_fault_perms(s2fd->nested, prot, &writable);
if (writable)
- fault->prot |= KVM_PGTABLE_PROT_W;
+ *prot |= KVM_PGTABLE_PROT_W;
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu))
- fault->prot |= KVM_PGTABLE_PROT_X;
+ *prot |= KVM_PGTABLE_PROT_X;
if (s2vi->map_non_cacheable)
- fault->prot |= (s2vi->vm_flags & VM_ALLOW_ANY_UNCACHED) ?
- KVM_PGTABLE_PROT_NORMAL_NC : KVM_PGTABLE_PROT_DEVICE;
+ *prot |= (s2vi->vm_flags & VM_ALLOW_ANY_UNCACHED) ?
+ KVM_PGTABLE_PROT_NORMAL_NC : KVM_PGTABLE_PROT_DEVICE;
else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
- fault->prot |= KVM_PGTABLE_PROT_X;
+ *prot |= KVM_PGTABLE_PROT_X;
if (s2fd->nested)
- adjust_nested_exec_perms(kvm, s2fd->nested, &fault->prot);
+ adjust_nested_exec_perms(kvm, s2fd->nested, prot);
if (!kvm_s2_fault_is_perm(s2fd) && !s2vi->map_non_cacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
@@ -1913,11 +1911,12 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
}
static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault,
- const struct kvm_s2_fault_vma_info *s2vi, void *memcache)
+ const struct kvm_s2_fault_vma_info *s2vi,
+ enum kvm_pgtable_prot prot,
+ void *memcache)
{
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
- bool writable = fault->prot & KVM_PGTABLE_PROT_W;
+ bool writable = prot & KVM_PGTABLE_PROT_W;
struct kvm *kvm = s2fd->vcpu->kvm;
struct kvm_pgtable *pgt;
long perm_fault_granule;
@@ -1970,12 +1969,12 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
* Drop the SW bits in favour of those stored in the
* PTE, which will be preserved.
*/
- fault->prot &= ~KVM_NV_GUEST_MAP_SZ;
+ prot &= ~KVM_NV_GUEST_MAP_SZ;
ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, gfn_to_gpa(gfn),
- fault->prot, flags);
+ prot, flags);
} else {
ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, gfn_to_gpa(gfn), mapping_size,
- __pfn_to_phys(pfn), fault->prot,
+ __pfn_to_phys(pfn), prot,
memcache, flags);
}
@@ -2003,9 +2002,7 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
{
bool perm_fault = kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
struct kvm_s2_fault_vma_info s2vi = {};
- struct kvm_s2_fault fault = {
- .prot = KVM_PGTABLE_PROT_R,
- };
+ enum kvm_pgtable_prot prot;
void *memcache = NULL;
int ret;
@@ -2030,13 +2027,13 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
if (ret != 1)
return ret;
- ret = kvm_s2_fault_compute_prot(s2fd, &fault, &s2vi);
+ ret = kvm_s2_fault_compute_prot(s2fd, &s2vi, &prot);
if (ret) {
kvm_release_page_unused(s2vi.page);
return ret;
}
- return kvm_s2_fault_map(s2fd, &fault, &s2vi, memcache);
+ return kvm_s2_fault_map(s2fd, &s2vi, prot, memcache);
}
/* Resolve the access fault by making the page young again. */
--
2.47.3
* [PATCH v2 29/30] KVM: arm64: Simplify integration of adjust_nested_*_perms()
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (27 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 28/30] KVM: arm64: Directly expose mapping prot and kill kvm_s2_fault Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 30/30] KVM: arm64: Convert gmem_abort() to struct kvm_s2_fault_desc Marc Zyngier
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
Instead of passing pointers to adjust_nested_*_perms(), allow
them to return a new set of permissions.
With some careful moving around so that the canonical permissions
are computed before the nested ones are applied, we end up with
a bit less code, and something a bit more readable.
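A minimal sketch of the style change (placeholder types and bits, not
the kernel definitions):

typedef unsigned int prot_t;

#define PROT_R	(1U << 0)
#define PROT_W	(1U << 1)

/* Value-returning form: no out-parameters, so adjustments compose. */
static prot_t drop_write(prot_t prot, int writable)
{
	return writable ? prot : (prot & ~PROT_W);
}

static prot_t drop_read(prot_t prot, int readable)
{
	return readable ? prot : (prot & ~PROT_R);
}

/* Callers can chain the adjustments in a single readable expression. */
static prot_t adjust(prot_t prot, int r, int w)
{
	return drop_read(drop_write(prot, w), r);
}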
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 62 +++++++++++++++++++-------------------------
1 file changed, 27 insertions(+), 35 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0fbdac77b1140..f4c8f72642e02 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1544,25 +1544,27 @@ static int prepare_mmu_memcache(struct kvm_vcpu *vcpu, bool topup_memcache,
* TLB invalidation from the guest and used to limit the invalidation scope if a
* TTL hint or a range isn't provided.
*/
-static void adjust_nested_fault_perms(struct kvm_s2_trans *nested,
- enum kvm_pgtable_prot *prot,
- bool *writable)
+static enum kvm_pgtable_prot adjust_nested_fault_perms(struct kvm_s2_trans *nested,
+ enum kvm_pgtable_prot prot)
{
- *writable &= kvm_s2_trans_writable(nested);
+ if (!kvm_s2_trans_writable(nested))
+ prot &= ~KVM_PGTABLE_PROT_W;
if (!kvm_s2_trans_readable(nested))
- *prot &= ~KVM_PGTABLE_PROT_R;
+ prot &= ~KVM_PGTABLE_PROT_R;
- *prot |= kvm_encode_nested_level(nested);
+ return prot | kvm_encode_nested_level(nested);
}
-static void adjust_nested_exec_perms(struct kvm *kvm,
- struct kvm_s2_trans *nested,
- enum kvm_pgtable_prot *prot)
+static enum kvm_pgtable_prot adjust_nested_exec_perms(struct kvm *kvm,
+ struct kvm_s2_trans *nested,
+ enum kvm_pgtable_prot prot)
{
if (!kvm_s2_trans_exec_el0(kvm, nested))
- *prot &= ~KVM_PGTABLE_PROT_UX;
+ prot &= ~KVM_PGTABLE_PROT_UX;
if (!kvm_s2_trans_exec_el1(kvm, nested))
- *prot &= ~KVM_PGTABLE_PROT_PX;
+ prot &= ~KVM_PGTABLE_PROT_PX;
+
+ return prot;
}
struct kvm_s2_fault_desc {
@@ -1577,7 +1579,7 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot, bool is_perm)
{
- bool write_fault, exec_fault, writable;
+ bool write_fault, exec_fault;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt = vcpu->arch.hw_mmu->pgt;
@@ -1614,19 +1616,17 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret;
}
- writable = !(memslot->flags & KVM_MEM_READONLY);
+ if (!(memslot->flags & KVM_MEM_READONLY))
+ prot |= KVM_PGTABLE_PROT_W;
if (nested)
- adjust_nested_fault_perms(nested, &prot, &writable);
-
- if (writable)
- prot |= KVM_PGTABLE_PROT_W;
+ prot = adjust_nested_fault_perms(nested, prot);
if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
prot |= KVM_PGTABLE_PROT_X;
if (nested)
- adjust_nested_exec_perms(kvm, nested, &prot);
+ prot = adjust_nested_exec_perms(kvm, nested, prot);
kvm_fault_lock(kvm);
if (mmu_invalidate_retry(kvm, mmu_seq)) {
@@ -1639,10 +1639,10 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
memcache, flags);
out_unlock:
- kvm_release_faultin_page(kvm, page, !!ret, writable);
+ kvm_release_faultin_page(kvm, page, !!ret, prot & KVM_PGTABLE_PROT_W);
kvm_fault_unlock(kvm);
- if (writable && !ret)
+ if ((prot & KVM_PGTABLE_PROT_W) && !ret)
mark_page_dirty_in_slot(kvm, memslot, gfn);
return ret != -EAGAIN ? ret : 0;
@@ -1856,16 +1856,6 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
enum kvm_pgtable_prot *prot)
{
struct kvm *kvm = s2fd->vcpu->kvm;
- bool writable = s2vi->map_writable;
-
- if (!s2vi->device && memslot_is_logging(s2fd->memslot) &&
- !kvm_is_write_fault(s2fd->vcpu)) {
- /*
- * Only actually map the page as writable if this was a write
- * fault.
- */
- writable = false;
- }
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && s2vi->map_non_cacheable)
return -ENOEXEC;
@@ -1883,12 +1873,14 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
*prot = KVM_PGTABLE_PROT_R;
- if (s2fd->nested)
- adjust_nested_fault_perms(s2fd->nested, prot, &writable);
-
- if (writable)
+ if (s2vi->map_writable && (s2vi->device ||
+ !memslot_is_logging(s2fd->memslot) ||
+ kvm_is_write_fault(s2fd->vcpu)))
*prot |= KVM_PGTABLE_PROT_W;
+ if (s2fd->nested)
+ *prot = adjust_nested_fault_perms(s2fd->nested, *prot);
+
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu))
*prot |= KVM_PGTABLE_PROT_X;
@@ -1899,7 +1891,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
*prot |= KVM_PGTABLE_PROT_X;
if (s2fd->nested)
- adjust_nested_exec_perms(kvm, s2fd->nested, prot);
+ *prot = adjust_nested_exec_perms(kvm, s2fd->nested, *prot);
if (!kvm_s2_fault_is_perm(s2fd) && !s2vi->map_non_cacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
--
2.47.3
* [PATCH v2 30/30] KVM: arm64: Convert gmem_abort() to struct kvm_s2_fault_desc
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
` (28 preceding siblings ...)
2026-03-27 11:36 ` [PATCH v2 29/30] KVM: arm64: Simplify integration of adjust_nested_*_perms() Marc Zyngier
@ 2026-03-27 11:36 ` Marc Zyngier
29 siblings, 0 replies; 32+ messages in thread
From: Marc Zyngier @ 2026-03-27 11:36 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: Joey Gouly, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Fuad Tabba, Will Deacon, Quentin Perret
Having fully converted user_mem_abort() to kvm_s2_fault_desc and
co, convert gmem_abort() to it as well. The change is obviously
much simpler.
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 41 +++++++++++++++++++----------------------
1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index f4c8f72642e02..1fe7182be45ac 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1575,33 +1575,31 @@ struct kvm_s2_fault_desc {
unsigned long hva;
};
-static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_s2_trans *nested,
- struct kvm_memory_slot *memslot, bool is_perm)
+static int gmem_abort(const struct kvm_s2_fault_desc *s2fd)
{
bool write_fault, exec_fault;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
- struct kvm_pgtable *pgt = vcpu->arch.hw_mmu->pgt;
+ struct kvm_pgtable *pgt = s2fd->vcpu->arch.hw_mmu->pgt;
unsigned long mmu_seq;
struct page *page;
- struct kvm *kvm = vcpu->kvm;
+ struct kvm *kvm = s2fd->vcpu->kvm;
void *memcache;
kvm_pfn_t pfn;
gfn_t gfn;
int ret;
- ret = prepare_mmu_memcache(vcpu, true, &memcache);
+ ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
if (ret)
return ret;
- if (nested)
- gfn = kvm_s2_trans_output(nested) >> PAGE_SHIFT;
+ if (s2fd->nested)
+ gfn = kvm_s2_trans_output(s2fd->nested) >> PAGE_SHIFT;
else
- gfn = fault_ipa >> PAGE_SHIFT;
+ gfn = s2fd->fault_ipa >> PAGE_SHIFT;
- write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ write_fault = kvm_is_write_fault(s2fd->vcpu);
+ exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu);
VM_WARN_ON_ONCE(write_fault && exec_fault);
@@ -1609,24 +1607,24 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
/* Pairs with the smp_wmb() in kvm_mmu_invalidate_end(). */
smp_rmb();
- ret = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, &page, NULL);
+ ret = kvm_gmem_get_pfn(kvm, s2fd->memslot, gfn, &pfn, &page, NULL);
if (ret) {
- kvm_prepare_memory_fault_exit(vcpu, fault_ipa, PAGE_SIZE,
+ kvm_prepare_memory_fault_exit(s2fd->vcpu, s2fd->fault_ipa, PAGE_SIZE,
write_fault, exec_fault, false);
return ret;
}
- if (!(memslot->flags & KVM_MEM_READONLY))
+ if (!(s2fd->memslot->flags & KVM_MEM_READONLY))
prot |= KVM_PGTABLE_PROT_W;
- if (nested)
- prot = adjust_nested_fault_perms(nested, prot);
+ if (s2fd->nested)
+ prot = adjust_nested_fault_perms(s2fd->nested, prot);
if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
prot |= KVM_PGTABLE_PROT_X;
- if (nested)
- prot = adjust_nested_exec_perms(kvm, nested, prot);
+ if (s2fd->nested)
+ prot = adjust_nested_exec_perms(kvm, s2fd->nested, prot);
kvm_fault_lock(kvm);
if (mmu_invalidate_retry(kvm, mmu_seq)) {
@@ -1634,7 +1632,7 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
goto out_unlock;
}
- ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, PAGE_SIZE,
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, s2fd->fault_ipa, PAGE_SIZE,
__pfn_to_phys(pfn), prot,
memcache, flags);
@@ -1643,7 +1641,7 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_fault_unlock(kvm);
if ((prot & KVM_PGTABLE_PROT_W) && !ret)
- mark_page_dirty_in_slot(kvm, memslot, gfn);
+ mark_page_dirty_in_slot(kvm, s2fd->memslot, gfn);
return ret != -EAGAIN ? ret : 0;
}
@@ -2300,8 +2298,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
};
if (kvm_slot_has_gmem(memslot))
- ret = gmem_abort(vcpu, fault_ipa, nested, memslot,
- esr_fsc_is_permission_fault(esr));
+ ret = gmem_abort(&s2fd);
else
ret = user_mem_abort(&s2fd);
--
2.47.3