From: Oliver Upton <oliver.upton@linux.dev>
To: Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>,
Alexandru Elisei <alexandru.elisei@arm.com>
Cc: linux-arm-kernel@lists.infradead.org,
kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org,
Reiji Watanabe <reijiw@google.com>,
Ricardo Koller <ricarkol@google.com>,
David Matlack <dmatlack@google.com>,
Quentin Perret <qperret@google.com>,
Ben Gardon <bgardon@google.com>, Gavin Shan <gshan@redhat.com>,
Peter Xu <peterx@redhat.com>, Will Deacon <will@kernel.org>,
Sean Christopherson <seanjc@google.com>,
kvmarm@lists.linux.dev, Oliver Upton <oliver.upton@linux.dev>
Subject: [PATCH v5 14/14] KVM: arm64: Handle stage-2 faults in parallel
Date: Mon, 7 Nov 2022 22:00:33 +0000
Message-ID: <20221107220033.1895655-1-oliver.upton@linux.dev>
In-Reply-To: <20221107215644.1895162-1-oliver.upton@linux.dev>
The stage-2 map walker has been made parallel-aware, and as such can be
called while only holding the read side of the MMU lock. Rip out the
conditional locking in user_mem_abort() and instead grab the read lock.
Continue to take the write lock from other callsites to
kvm_pgtable_stage2_map().
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
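A note for reviewers: after this change, the fault path in
user_mem_abort() boils down to the pattern sketched below. This is a
simplified sketch assembled from the mmu.c hunks in this patch, not
verbatim kernel code; the surrounding fault-handling logic is elided
and all names are taken from the diff itself.

	read_lock(&kvm->mmu_lock);

	if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
		/* Permission faults only relax permissions in place. */
		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
	else
		/*
		 * Everything else installs a mapping under the read lock;
		 * KVM_PGTABLE_WALK_SHARED tells the walker it may race
		 * with other table walkers.
		 */
		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
					     __pfn_to_phys(pfn), prot,
					     memcache, KVM_PGTABLE_WALK_SHARED);

	read_unlock(&kvm->mmu_lock);

	/* Losing a race surfaces as -EAGAIN, which simply replays the fault. */
	return ret != -EAGAIN ? ret : 0;

The remaining callers (kvm_phys_addr_ioremap(), kvm_set_spte_gfn() and
the hyp __host_stage2_idmap() helper) keep their existing exclusive
locking and pass 0 for the new flags argument, as seen in the hunks
below.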
arch/arm64/include/asm/kvm_pgtable.h | 3 ++-
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 2 +-
arch/arm64/kvm/hyp/pgtable.c | 5 +++--
arch/arm64/kvm/mmu.c | 31 ++++++---------------------
4 files changed, 13 insertions(+), 28 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 7634b6964779..a874ce0ce7b5 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -412,6 +412,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
* @prot: Permissions and attributes for the mapping.
* @mc: Cache of pre-allocated and zeroed memory from which to allocate
* page-table pages.
+ * @flags: Flags to control the page-table walk (e.g. a shared walk)
*
* The offset of @addr within a page is ignored, @size is rounded-up to
* the next page boundary and @phys is rounded-down to the previous page
@@ -433,7 +434,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
*/
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 phys, enum kvm_pgtable_prot prot,
- void *mc);
+ void *mc, enum kvm_pgtable_walk_flags flags);
/**
* kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 735769886b55..f6d82bf33ce1 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -257,7 +257,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
enum kvm_pgtable_prot prot)
{
return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
- prot, &host_s2_pool);
+ prot, &host_s2_pool, 0);
}
/*
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f814422ef795..5bca9610d040 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -912,7 +912,7 @@ static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 phys, enum kvm_pgtable_prot prot,
- void *mc)
+ void *mc, enum kvm_pgtable_walk_flags flags)
{
int ret;
struct stage2_map_data map_data = {
@@ -923,7 +923,8 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_map_walker,
- .flags = KVM_PGTABLE_WALK_TABLE_PRE |
+ .flags = flags |
+ KVM_PGTABLE_WALK_TABLE_PRE |
KVM_PGTABLE_WALK_LEAF,
.arg = &map_data,
};
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 52e042399ba5..410c2a37fe32 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -861,7 +861,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
write_lock(&kvm->mmu_lock);
ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
- &cache);
+ &cache, 0);
write_unlock(&kvm->mmu_lock);
if (ret)
break;
@@ -1156,7 +1156,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active = memslot_is_logging(memslot);
- bool use_read_lock = false;
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
unsigned long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1191,8 +1190,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
- use_read_lock = (fault_status == FSC_PERM && write_fault &&
- fault_granule == PAGE_SIZE);
} else {
vma_shift = get_vma_page_shift(vma, hva);
}
@@ -1291,15 +1288,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault && device)
return -ENOEXEC;
- /*
- * To reduce MMU contentions and enhance concurrency during dirty
- * logging, only acquire read lock for permission relaxation.
- */
- if (use_read_lock)
- read_lock(&kvm->mmu_lock);
- else
- write_lock(&kvm->mmu_lock);
+ read_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
@@ -1343,15 +1332,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* permissions only if vma_pagesize equals fault_granule. Otherwise,
* kvm_pgtable_stage2_map() should be called to change block size.
*/
- if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
+ if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
- } else {
- WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
-
+ else
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
__pfn_to_phys(pfn), prot,
- memcache);
- }
+ memcache, KVM_PGTABLE_WALK_SHARED);
/* Mark the page dirty only if the fault is handled successfully */
if (writable && !ret) {
@@ -1360,10 +1346,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
out_unlock:
- if (use_read_lock)
- read_unlock(&kvm->mmu_lock);
- else
- write_unlock(&kvm->mmu_lock);
+ read_unlock(&kvm->mmu_lock);
kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret != -EAGAIN ? ret : 0;
@@ -1569,7 +1552,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
*/
kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
PAGE_SIZE, __pfn_to_phys(pfn),
- KVM_PGTABLE_PROT_R, NULL);
+ KVM_PGTABLE_PROT_R, NULL, 0);
return false;
}
--
2.38.1.431.g37b22c650d-goog