From: David Stevens <stevensd@chromium.org>
To: Sean Christopherson <seanjc@google.com>
Cc: kvm@vger.kernel.org, Marc Zyngier <maz@kernel.org>,
linux-kernel@vger.kernel.org, Peter Xu <peterx@redhat.com>,
Yu Zhang <yu.c.zhang@linux.intel.com>,
Isaku Yamahata <isaku.yamahata@gmail.com>,
kvmarm@lists.linux.dev, linuxppc-dev@lists.ozlabs.org,
linux-arm-kernel@lists.infradead.org,
David Stevens <stevensd@chromium.org>
Subject: [PATCH v8 7/8] KVM: PPC: Migrate to __kvm_follow_pfn
Date: Thu, 24 Aug 2023 17:04:07 +0900
Message-ID: <20230824080408.2933205-8-stevensd@google.com>
In-Reply-To: <20230824080408.2933205-1-stevensd@google.com>
Migrate from __gfn_to_pfn_memslot to __kvm_follow_pfn. As part of the
refactoring, remove the now-redundant calls to get_user_page_fast_only,
since the !atomic && !async fast-path check was removed from the generic
KVM code by commit b9b33da2aa74. Also remove the kvm_ro parameter, since
the generic KVM code handles read-only memslots.
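For reference, both converted fault paths end up with the same shape. A
condensed, illustrative sketch of the hunks below (declarations, locking
and surrounding logic are trimmed, so this is not a literal excerpt):

        struct kvm_follow_pfn foll = {
                .slot = memslot,
                .gfn = gfn,
                /* Ask for write permission; the common case is writable. */
                .flags = FOLL_GET | (writing ? FOLL_WRITE : 0),
                .try_map_writable = true,
        };
        kvm_pfn_t pfn;

        pfn = __kvm_follow_pfn(&foll);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        /* Whether the mapping may be installed writable, and the backing hva. */
        write_ok = foll.writable;
        hva = foll.hva;

        /* Only valid, non-reserved pfns are backed by refcounted struct pages. */
        page = NULL;
        if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                if (PageReserved(page))
                        page = NULL;
        }

With __kvm_follow_pfn reporting both the hva and whether the mapping can
be made writable, the separate gfn_to_hva_memslot() lookup and the
get_user_page_fast_only() fast path are no longer needed.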
Signed-off-by: David Stevens <stevensd@chromium.org>
---
arch/powerpc/include/asm/kvm_book3s.h | 2 +-
arch/powerpc/kvm/book3s_64_mmu_hv.c | 38 +++++++++-----------
arch/powerpc/kvm/book3s_64_mmu_radix.c | 50 +++++++++++---------------
arch/powerpc/kvm/book3s_hv_nested.c | 4 +--
4 files changed, 38 insertions(+), 56 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bbf5e2c5fe09..bf48c511e700 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -202,7 +202,7 @@ extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
unsigned long gpa,
struct kvm_memory_slot *memslot,
- bool writing, bool kvm_ro,
+ bool writing,
pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7f765d5ad436..4688046626af 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -523,6 +523,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
unsigned long rcbits;
long mmio_update;
pte_t pte, *ptep;
+ struct kvm_follow_pfn foll = {
+ .try_map_writable = true,
+ };
if (kvm_is_radix(kvm))
return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);
@@ -599,29 +602,20 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
page = NULL;
writing = (dsisr & DSISR_ISSTORE) != 0;
/* If writing != 0, then the HPTE must allow writing, if we get here */
- write_ok = writing;
- hva = gfn_to_hva_memslot(memslot, gfn);
- /*
- * Do a fast check first, since __gfn_to_pfn_memslot doesn't
- * do it with !atomic && !async, which is how we call it.
- * We always ask for write permission since the common case
- * is that the page is writable.
- */
- if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
- write_ok = true;
- } else {
- /* Call KVM generic code to do the slow-path check */
- pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
- writing, &write_ok, NULL);
- if (is_error_noslot_pfn(pfn))
- return -EFAULT;
- page = NULL;
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (PageReserved(page))
- page = NULL;
- }
+ foll.slot = memslot;
+ foll.gfn = gfn;
+ foll.flags = FOLL_GET | (writing ? FOLL_WRITE : 0);
+ pfn = __kvm_follow_pfn(&foll);
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+ page = NULL;
+ write_ok = foll.writable;
+ hva = foll.hva;
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ page = NULL;
}
/*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 572707858d65..498f89128c3a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -815,47 +815,39 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
unsigned long gpa,
struct kvm_memory_slot *memslot,
- bool writing, bool kvm_ro,
+ bool writing,
pte_t *inserted_pte, unsigned int *levelp)
{
struct kvm *kvm = vcpu->kvm;
struct page *page = NULL;
unsigned long mmu_seq;
- unsigned long hva, gfn = gpa >> PAGE_SHIFT;
- bool upgrade_write = false;
- bool *upgrade_p = &upgrade_write;
+ unsigned long hva, pfn, gfn = gpa >> PAGE_SHIFT;
+ bool upgrade_write;
pte_t pte, *ptep;
unsigned int shift, level;
int ret;
bool large_enable;
+ struct kvm_follow_pfn foll = {
+ .slot = memslot,
+ .gfn = gfn,
+ .flags = FOLL_GET | (writing ? FOLL_WRITE : 0),
+ .try_map_writable = true,
+ };
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
- /*
- * Do a fast check first, since __gfn_to_pfn_memslot doesn't
- * do it with !atomic && !async, which is how we call it.
- * We always ask for write permission since the common case
- * is that the page is writable.
- */
- hva = gfn_to_hva_memslot(memslot, gfn);
- if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
- upgrade_write = true;
- } else {
- unsigned long pfn;
-
- /* Call KVM generic code to do the slow-path check */
- pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
- writing, upgrade_p, NULL);
- if (is_error_noslot_pfn(pfn))
- return -EFAULT;
- page = NULL;
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (PageReserved(page))
- page = NULL;
- }
+ pfn = __kvm_follow_pfn(&foll);
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+ page = NULL;
+ hva = foll.hva;
+ upgrade_write = foll.writable;
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ page = NULL;
}
/*
@@ -944,7 +936,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot;
long ret;
bool writing = !!(dsisr & DSISR_ISSTORE);
- bool kvm_ro = false;
/* Check for unusual errors */
if (dsisr & DSISR_UNSUPP_MMU) {
@@ -997,7 +988,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
ea, DSISR_ISSTORE | DSISR_PROTFAULT);
return RESUME_GUEST;
}
- kvm_ro = true;
}
/* Failed to set the reference/change bits */
@@ -1015,7 +1005,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
/* Try to insert a pte */
ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
- kvm_ro, NULL, NULL);
+ NULL, NULL);
if (ret == 0 || ret == -EAGAIN)
ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 377d0b4a05ee..6d531051df04 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -1497,7 +1497,6 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
unsigned long n_gpa, gpa, gfn, perm = 0UL;
unsigned int shift, l1_shift, level;
bool writing = !!(dsisr & DSISR_ISSTORE);
- bool kvm_ro = false;
long int ret;
if (!gp->l1_gr_to_hr) {
@@ -1577,7 +1576,6 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
ea, DSISR_ISSTORE | DSISR_PROTFAULT);
return RESUME_GUEST;
}
- kvm_ro = true;
}
/* 2. Find the host pte for this L1 guest real address */
@@ -1599,7 +1597,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
/* No suitable pte found -> try to insert a mapping */
ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
- writing, kvm_ro, &pte, &level);
+ writing, &pte, &level);
if (ret == -EAGAIN)
return RESUME_GUEST;
else if (ret)
--
2.42.0.rc1.204.g551eb34607-goog