From: Marc Zyngier <maz@kernel.org>
To: kvmarm@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
kvm@vger.kernel.org
Cc: Joey Gouly <joey.gouly@arm.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
Oliver Upton <oupton@kernel.org>,
Zenghui Yu <yuzenghui@huawei.com>, Fuad Tabba <tabba@google.com>,
Will Deacon <will@kernel.org>,
Quentin Perret <qperret@google.com>
Subject: [PATCH v2 27/30] KVM: arm64: Move device mapping management into kvm_s2_fault_pin_pfn()
Date: Fri, 27 Mar 2026 11:36:15 +0000 [thread overview]
Message-ID: <20260327113618.4051534-28-maz@kernel.org> (raw)
In-Reply-To: <20260327113618.4051534-1-maz@kernel.org>
Attributes computed for devices are computed very late in the fault
handling process, meaning they are mutable for that long.
Introduce both 'device' and 'map_non_cacheable' attributes to the
vma_info structure, allowing that information to be set in stone
earlier, in kvm_s2_fault_pin_pfn().
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/mmu.c | 52 ++++++++++++++++++++++++--------------------
1 file changed, 29 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 61b979365c6ee..23245ee7b1ec2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1656,9 +1656,11 @@ struct kvm_s2_fault_vma_info {
struct page *page;
kvm_pfn_t pfn;
gfn_t gfn;
+ bool device;
bool mte_allowed;
bool is_vma_cacheable;
bool map_writable;
+ bool map_non_cacheable;
};
static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
@@ -1728,7 +1730,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
}
struct kvm_s2_fault {
- bool s2_force_noncacheable;
enum kvm_pgtable_prot prot;
};
@@ -1738,7 +1739,6 @@ static bool kvm_s2_fault_is_perm(const struct kvm_s2_fault_desc *s2fd)
}
static int kvm_s2_fault_get_vma_info(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault,
struct kvm_s2_fault_vma_info *s2vi)
{
struct vm_area_struct *vma;
@@ -1794,12 +1794,11 @@ static gfn_t get_canonical_gfn(const struct kvm_s2_fault_desc *s2fd,
}
static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault,
struct kvm_s2_fault_vma_info *s2vi)
{
int ret;
- ret = kvm_s2_fault_get_vma_info(s2fd, fault, s2vi);
+ ret = kvm_s2_fault_get_vma_info(s2fd, s2vi);
if (ret)
return ret;
@@ -1814,16 +1813,6 @@ static int kvm_s2_fault_pin_pfn(const struct kvm_s2_fault_desc *s2fd,
return -EFAULT;
}
- return 1;
-}
-
-static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
- struct kvm_s2_fault *fault,
- const struct kvm_s2_fault_vma_info *s2vi)
-{
- struct kvm *kvm = s2fd->vcpu->kvm;
- bool writable = s2vi->map_writable;
-
/*
* Check if this is non-struct page memory PFN, and cannot support
* CMOs. It could potentially be unsafe to access as cacheable.
@@ -1842,8 +1831,10 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
* S2FWB and CACHE DIC are mandatory to avoid the need for
* cache maintenance.
*/
- if (!kvm_supports_cacheable_pfnmap())
+ if (!kvm_supports_cacheable_pfnmap()) {
+ kvm_release_faultin_page(s2fd->vcpu->kvm, s2vi->page, true, false);
return -EFAULT;
+ }
} else {
/*
* If the page was identified as device early by looking at
@@ -1855,9 +1846,24 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
* In both cases, we don't let transparent_hugepage_adjust()
* change things at the last minute.
*/
- fault->s2_force_noncacheable = true;
+ s2vi->map_non_cacheable = true;
}
- } else if (memslot_is_logging(s2fd->memslot) && !kvm_is_write_fault(s2fd->vcpu)) {
+
+ s2vi->device = true;
+ }
+
+ return 1;
+}
+
+static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
+ struct kvm_s2_fault *fault,
+ const struct kvm_s2_fault_vma_info *s2vi)
+{
+ struct kvm *kvm = s2fd->vcpu->kvm;
+ bool writable = s2vi->map_writable;
+
+ if (!s2vi->device && memslot_is_logging(s2fd->memslot) &&
+ !kvm_is_write_fault(s2fd->vcpu)) {
/*
* Only actually map the page as writable if this was a write
* fault.
@@ -1865,7 +1871,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
writable = false;
}
- if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && fault->s2_force_noncacheable)
+ if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && s2vi->map_non_cacheable)
return -ENOEXEC;
/*
@@ -1888,7 +1894,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu))
fault->prot |= KVM_PGTABLE_PROT_X;
- if (fault->s2_force_noncacheable)
+ if (s2vi->map_non_cacheable)
fault->prot |= (s2vi->vm_flags & VM_ALLOW_ANY_UNCACHED) ?
KVM_PGTABLE_PROT_NORMAL_NC : KVM_PGTABLE_PROT_DEVICE;
else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
@@ -1897,7 +1903,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd,
if (s2fd->nested)
adjust_nested_exec_perms(kvm, s2fd->nested, &fault->prot);
- if (!kvm_s2_fault_is_perm(s2fd) && !fault->s2_force_noncacheable && kvm_has_mte(kvm)) {
+ if (!kvm_s2_fault_is_perm(s2fd) && !s2vi->map_non_cacheable && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new disallowed VMA */
if (!s2vi->mte_allowed)
return -EFAULT;
@@ -1937,7 +1943,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
* backed by a THP and thus use block mapping if possible.
*/
if (mapping_size == PAGE_SIZE &&
- !(s2vi->max_map_size == PAGE_SIZE || fault->s2_force_noncacheable)) {
+ !(s2vi->max_map_size == PAGE_SIZE || s2vi->map_non_cacheable)) {
if (perm_fault_granule > PAGE_SIZE) {
mapping_size = perm_fault_granule;
} else {
@@ -1951,7 +1957,7 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
}
}
- if (!perm_fault_granule && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
+ if (!perm_fault_granule && !s2vi->map_non_cacheable && kvm_has_mte(kvm))
sanitise_mte_tags(kvm, pfn, mapping_size);
/*
@@ -2020,7 +2026,7 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
* Let's check if we will get back a huge page backed by hugetlbfs, or
* get block mapping for device MMIO region.
*/
- ret = kvm_s2_fault_pin_pfn(s2fd, &fault, &s2vi);
+ ret = kvm_s2_fault_pin_pfn(s2fd, &s2vi);
if (ret != 1)
return ret;
--
2.47.3
next prev parent reply other threads:[~2026-03-27 11:37 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-27 11:35 [PATCH v2 00/30] KVM: arm64: Combined user_mem_abort() rework Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 01/30] KVM: arm64: Extract VMA size resolution in user_mem_abort() Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 02/30] KVM: arm64: Introduce struct kvm_s2_fault to user_mem_abort() Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 03/30] KVM: arm64: Extract PFN resolution in user_mem_abort() Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 04/30] KVM: arm64: Isolate mmap_read_lock inside new kvm_s2_fault_get_vma_info() helper Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 05/30] KVM: arm64: Extract stage-2 permission logic in user_mem_abort() Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 06/30] KVM: arm64: Extract page table mapping " Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 07/30] KVM: arm64: Simplify nested VMA shift calculation Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 08/30] KVM: arm64: Remove redundant state variables from struct kvm_s2_fault Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 09/30] KVM: arm64: Simplify return logic in user_mem_abort() Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 10/30] KVM: arm64: Initialize struct kvm_s2_fault completely at declaration Marc Zyngier
2026-03-27 11:35 ` [PATCH v2 11/30] KVM: arm64: Optimize early exit checks in kvm_s2_fault_pin_pfn() Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 12/30] KVM: arm64: Hoist MTE validation check out of MMU lock path Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 13/30] KVM: arm64: Clean up control flow in kvm_s2_fault_map() Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 14/30] KVM: arm64: Kill fault->ipa Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 15/30] KVM: arm64: Make fault_ipa immutable Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 16/30] KVM: arm64: Move fault context to const structure Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 17/30] KVM: arm64: Replace fault_is_perm with a helper Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 18/30] KVM: arm64: Constrain fault_granule to kvm_s2_fault_map() Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 19/30] KVM: arm64: Kill write_fault from kvm_s2_fault Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 20/30] KVM: arm64: Kill exec_fault " Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 21/30] KVM: arm64: Kill topup_memcache " Marc Zyngier
2026-03-27 14:49 ` Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 22/30] KVM: arm64: Move VMA-related information to kvm_s2_fault_vma_info Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 23/30] KVM: arm64: Kill logging_active from kvm_s2_fault Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 24/30] KVM: arm64: Restrict the scope of the 'writable' attribute Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 25/30] KVM: arm64: Move kvm_s2_fault.{pfn,page} to kvm_s2_vma_info Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 26/30] KVM: arm64: Replace force_pte with a max_map_size attribute Marc Zyngier
2026-03-27 11:36 ` Marc Zyngier [this message]
2026-03-27 11:36 ` [PATCH v2 28/30] KVM: arm64: Directly expose mapping prot and kill kvm_s2_fault Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 29/30] KVM: arm64: Simplify integration of adjust_nested_*_perms() Marc Zyngier
2026-03-27 11:36 ` [PATCH v2 30/30] KVM: arm64: Convert gmem_abort() to struct kvm_s2_fault_desc Marc Zyngier
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260327113618.4051534-28-maz@kernel.org \
--to=maz@kernel.org \
--cc=joey.gouly@arm.com \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.linux.dev \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=oupton@kernel.org \
--cc=qperret@google.com \
--cc=suzuki.poulose@arm.com \
--cc=tabba@google.com \
--cc=will@kernel.org \
--cc=yuzenghui@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox