From: Ram Pai <linuxram@us.ibm.com>
To: kvm-ppc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Cc: ldufour@linux.ibm.com, linuxram@us.ibm.com,
cclaudio@linux.ibm.com, bharata@linux.ibm.com,
aneesh.kumar@linux.ibm.com, sukadev@linux.vnet.ibm.com,
bauerman@linux.ibm.com, david@gibson.dropbear.id.au
Subject: [PATCH v1 3/4] KVM: PPC: Book3S HV: migrate remaining normal-GFNs to secure-GFNs in H_SVM_INIT_DONE
Date: Sat, 30 May 2020 19:27:50 -0700
Message-ID: <1590892071-25549-4-git-send-email-linuxram@us.ibm.com>
In-Reply-To: <1590892071-25549-1-git-send-email-linuxram@us.ibm.com>

H_SVM_INIT_DONE incorrectly assumes that the Ultravisor has explicitly
called H_SVM_PAGE_IN for all secure pages. Any GFN that was never
explicitly paged in remains a normal GFN associated with a normal PFN,
when in fact it should be a secure GFN associated with a device PFN.

Move all the PFNs associated with the SVM's GFNs to device PFNs in
H_SVM_INIT_DONE. Skip the GFNs that were already paged in or shared
through H_SVM_PAGE_IN.
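
In pseudocode, the new flow is roughly as follows. This is a condensed,
illustrative sketch using this patch's helpers; the real code (see
uv_migrate_mem_slot() below) looks the VMA up once per memslot, runs
ksm_madvise() first, and holds mmap_sem, uvmem_lock and SRCU around the
scan, all of which is elided here:

	unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
	{
		struct kvm_memory_slot *memslot;

		kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
			unsigned long gfn = memslot->base_gfn;
			unsigned long i;

			for (i = 0; i < memslot->npages; i++, gfn++) {
				unsigned long start, end;
				struct vm_area_struct *vma;

				/* Skip GFNs already paged in or shared. */
				if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL) ||
				    kvmppc_gfn_is_uvmem_shared(gfn, kvm))
					continue;

				start = gfn_to_hva(kvm, gfn);
				end = start + PAGE_SIZE;
				vma = find_vma_intersection(kvm->mm, start, end);
				if (!vma)
					return H_STATE;

				/* Move the backing normal PFN to a device PFN. */
				if (kvmppc_svm_migrate_page(vma, start, end,
							gfn << PAGE_SHIFT, kvm,
							PAGE_SHIFT, false))
					return H_STATE;
			}
		}
		return H_SUCCESS;
	}
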
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Bharata B Rao <bharata@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Claudio Carvalho <cclaudio@linux.ibm.com>
Cc: kvm-ppc@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Ram Pai <linuxram@us.ibm.com>
---
Documentation/powerpc/ultravisor.rst | 2 +
arch/powerpc/kvm/book3s_hv_uvmem.c | 219 ++++++++++++++++++++++++-----------
2 files changed, 154 insertions(+), 67 deletions(-)
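
Reviewer note: both the new uv_migrate_mem_slot() and the reworked
H_SVM_PAGE_IN path rely on the same locking pattern, since ksm_madvise()
must run under the mmap_sem write lock while the migrate_vma_*() calls
only need it for read. A minimal sketch of that pattern (illustrative
only, mirroring the calls in this patch):

	down_write(&kvm->mm->mmap_sem);
	ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
			  MADV_UNMERGEABLE, &vma->vm_flags); /* needs write lock */
	downgrade_write(&kvm->mm->mmap_sem);	/* atomically write -> read */
	/* migrate_vma_setup()/_pages()/_finalize() run under the read lock */
	up_read(&kvm->mm->mmap_sem);
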
diff --git a/Documentation/powerpc/ultravisor.rst b/Documentation/powerpc/ultravisor.rst
index 363736d..3bc8957 100644
--- a/Documentation/powerpc/ultravisor.rst
+++ b/Documentation/powerpc/ultravisor.rst
@@ -933,6 +933,8 @@ Return values
* H_UNSUPPORTED if called from the wrong context (e.g.
from an SVM or before an H_SVM_INIT_START
hypercall).
+	* H_STATE		if the hypervisor could not successfully
+			transition the VM to a Secure VM.
Description
~~~~~~~~~~~
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 2ef1e03..36dda1d 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -318,14 +318,149 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
return ret;
}
+static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm);
+
+/*
+ * Alloc a PFN from private device memory pool. If @pagein is true,
+ * copy page from normal memory to secure memory using UV_PAGE_IN uvcall.
+ */
+static int kvmppc_svm_migrate_page(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end, unsigned long gpa, struct kvm *kvm,
+ unsigned long page_shift,
+ bool pagein)
+{
+ unsigned long src_pfn, dst_pfn = 0;
+ struct migrate_vma mig;
+ struct page *dpage;
+ struct page *spage;
+ unsigned long pfn;
+ int ret = 0;
+
+ memset(&mig, 0, sizeof(mig));
+ mig.vma = vma;
+ mig.start = start;
+ mig.end = end;
+ mig.src = &src_pfn;
+ mig.dst = &dst_pfn;
+
+ ret = migrate_vma_setup(&mig);
+ if (ret)
+ return ret;
+
+ if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
+ ret = -1;
+ goto out_finalize;
+ }
+
+ dpage = kvmppc_uvmem_get_page(gpa, kvm);
+ if (!dpage) {
+ ret = -1;
+ goto out_finalize;
+ }
+
+ if (pagein) {
+ pfn = *mig.src >> MIGRATE_PFN_SHIFT;
+ spage = migrate_pfn_to_page(*mig.src);
+ if (spage) {
+ ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
+ gpa, 0, page_shift);
+ if (ret)
+ goto out_finalize;
+ }
+ }
+
+ *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ migrate_vma_pages(&mig);
+out_finalize:
+ migrate_vma_finalize(&mig);
+ return ret;
+}
+
+static int uv_migrate_mem_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ unsigned long gfn = memslot->base_gfn;
+ unsigned long end;
+ bool downgrade = false;
+ struct vm_area_struct *vma;
+ int i, ret = 0;
+ unsigned long start = gfn_to_hva(kvm, gfn);
+
+ if (kvm_is_error_hva(start))
+ return H_STATE;
+
+ end = start + (memslot->npages << PAGE_SHIFT);
+
+ down_write(&kvm->mm->mmap_sem);
+
+ mutex_lock(&kvm->arch.uvmem_lock);
+ vma = find_vma_intersection(kvm->mm, start, end);
+ if (!vma || vma->vm_start > start || vma->vm_end < end) {
+ ret = H_STATE;
+ goto out_unlock;
+ }
+
+ ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
+ MADV_UNMERGEABLE, &vma->vm_flags);
+ downgrade_write(&kvm->mm->mmap_sem);
+ downgrade = true;
+ if (ret) {
+ ret = H_STATE;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < memslot->npages; i++, ++gfn) {
+ /* skip paged-in pages and shared pages */
+ if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL) ||
+ kvmppc_gfn_is_uvmem_shared(gfn, kvm))
+ continue;
+
+ start = gfn_to_hva(kvm, gfn);
+ end = start + (1UL << PAGE_SHIFT);
+ ret = kvmppc_svm_migrate_page(vma, start, end,
+ (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
+
+ if (ret)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&kvm->arch.uvmem_lock);
+ if (downgrade)
+ up_read(&kvm->mm->mmap_sem);
+ else
+ up_write(&kvm->mm->mmap_sem);
+ return ret;
+}
+
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int srcu_idx;
+ long ret = H_SUCCESS;
+
if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
return H_UNSUPPORTED;
+	/* migrate any unmoved normal PFNs to device PFNs */
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ slots = kvm_memslots(kvm);
+ kvm_for_each_memslot(memslot, slots) {
+ ret = uv_migrate_mem_slot(kvm, memslot);
+ if (ret) {
+ ret = H_STATE;
+ goto out;
+ }
+ }
+
kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
pr_info("LPID %d went secure\n", kvm->arch.lpid);
- return H_SUCCESS;
+
+out:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ return ret;
}
/*
@@ -459,68 +594,6 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
}
/*
- * Alloc a PFN from private device memory pool and copy page from normal
- * memory to secure memory using UV_PAGE_IN uvcall.
- */
-static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, unsigned long gpa, struct kvm *kvm,
- unsigned long page_shift, bool *downgrade)
-{
- unsigned long src_pfn, dst_pfn = 0;
- struct migrate_vma mig;
- struct page *spage;
- unsigned long pfn;
- struct page *dpage;
- int ret = 0;
-
- memset(&mig, 0, sizeof(mig));
- mig.vma = vma;
- mig.start = start;
- mig.end = end;
- mig.src = &src_pfn;
- mig.dst = &dst_pfn;
-
- /*
- * We come here with mmap_sem write lock held just for
- * ksm_madvise(), otherwise we only need read mmap_sem.
- * Hence downgrade to read lock once ksm_madvise() is done.
- */
- ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
- MADV_UNMERGEABLE, &vma->vm_flags);
- downgrade_write(&kvm->mm->mmap_sem);
- *downgrade = true;
- if (ret)
- return ret;
-
- ret = migrate_vma_setup(&mig);
- if (ret)
- return ret;
-
- if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
- ret = -1;
- goto out_finalize;
- }
-
- dpage = kvmppc_uvmem_get_page(gpa, kvm);
- if (!dpage) {
- ret = -1;
- goto out_finalize;
- }
-
- pfn = *mig.src >> MIGRATE_PFN_SHIFT;
- spage = migrate_pfn_to_page(*mig.src);
- if (spage)
- uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
- page_shift);
-
- *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
- migrate_vma_pages(&mig);
-out_finalize:
- migrate_vma_finalize(&mig);
- return ret;
-}
-
-/*
* Shares the page with HV, thus making it a normal page.
*
* - If the page is already secure, then provision a new page and share
@@ -623,11 +696,23 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
if (!vma || vma->vm_start > start || vma->vm_end < end)
goto out_unlock;
- if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
- &downgrade)) {
- kvmppc_gfn_uvmem_shared(gfn, kvm, false);
- ret = H_SUCCESS;
+ ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
+ MADV_UNMERGEABLE, &vma->vm_flags);
+ downgrade_write(&kvm->mm->mmap_sem);
+ downgrade = true;
+ if (ret) {
+ ret = H_PARAMETER;
+ goto out_unlock;
}
+
+ ret = H_PARAMETER;
+ if (kvmppc_svm_migrate_page(vma, start, end, gpa, kvm, page_shift,
+ true))
+ goto out_unlock;
+
+ kvmppc_gfn_uvmem_shared(gfn, kvm, false);
+ ret = H_SUCCESS;
+
out_unlock:
mutex_unlock(&kvm->arch.uvmem_lock);
out:
--
1.8.3.1