From: Will Deacon <will@kernel.org>
To: kvmarm@lists.linux.dev
Cc: "Will Deacon" <will@kernel.org>,
"Sean Christopherson" <seanjc@google.com>,
"Vincent Donnefort" <vdonnefort@google.com>,
"Alexandru Elisei" <alexandru.elisei@arm.com>,
"Catalin Marinas" <catalin.marinas@arm.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"James Morse" <james.morse@arm.com>,
"Chao Peng" <chao.p.peng@linux.intel.com>,
"Quentin Perret" <qperret@google.com>,
"Suzuki K Poulose" <suzuki.poulose@arm.com>,
"Mark Rutland" <mark.rutland@arm.com>,
"Fuad Tabba" <tabba@google.com>,
"Oliver Upton" <oliver.upton@linux.dev>,
"Marc Zyngier" <maz@kernel.org>,
kernel-team@android.com, kvm@vger.kernel.org,
linux-arm-kernel@lists.infradead.org
Subject: [PATCH v5 20/25] KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache
Date: Thu, 20 Oct 2022 14:38:22 +0100
Message-ID: <20221020133827.5541-21-will@kernel.org>
In-Reply-To: <20221020133827.5541-1-will@kernel.org>
From: Quentin Perret <qperret@google.com>
Rather than relying on the host to free the previously-donated pKVM
hypervisor VM pages explicitly on teardown, introduce a dedicated
teardown memcache which allows the host to reclaim guest memory
resources without having to keep track of all of the allocations made by
the pKVM hypervisor at EL2.
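For context, the kvm_hyp_memcache used here (added earlier in the series
by "KVM: arm64: Add generic hyp_memcache helpers") is an intrusive,
physically-addressed free list: each page stores the physical address of
the next page in its first word, so arbitrarily many pages can be
tracked without any side allocations. A minimal sketch of the idea,
which may differ in detail from the real helpers:

struct kvm_hyp_memcache {
	phys_addr_t	head;		/* PA of the first page in the list */
	unsigned long	nr_pages;
};

/* Sketch only: thread @p onto the front of the list. */
static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc, void *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*(phys_addr_t *)p = mc->head;	/* link to the previous head */
	mc->head = to_pa(p);
	mc->nr_pages++;
}

/* Sketch only: detach and return the page at the front of the list. */
static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p;

	if (!mc->nr_pages)
		return NULL;

	p = to_va(mc->head);
	mc->head = *p;
	mc->nr_pages--;

	return p;
}

Because the links live in the pages themselves and the head is a
physical address, the same structure can be filled at EL2 and drained at
EL1, with each side supplying its own address-translation callback.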
Tested-by: Vincent Donnefort <vdonnefort@google.com>
Co-developed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
arch/arm64/include/asm/kvm_host.h | 7 +----
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 2 +-
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 17 ++++++----
arch/arm64/kvm/hyp/nvhe/pkvm.c | 20 ++++++++++--
arch/arm64/kvm/pkvm.c | 31 ++++---------------
5 files changed, 36 insertions(+), 41 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 57218f0c449e..63307e7dc9c5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -176,12 +176,7 @@ typedef unsigned int pkvm_handle_t;
struct kvm_protected_vm {
pkvm_handle_t handle;
-
- struct {
- void *pgd;
- void *vm;
- void *vcpus[KVM_MAX_VCPUS];
- } hyp_donations;
+ struct kvm_hyp_memcache teardown_mc;
};
struct kvm_arch {
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 420b87e755a4..b7bdbe63deed 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -76,7 +76,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm);
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc);
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 27b16a6b85bb..ffa56a89acdb 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -260,19 +260,24 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
return 0;
}
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm)
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
{
- void *pgd = vm->pgt.pgd;
- unsigned long nr_pages;
-
- nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
+ void *addr;
+ /* Dump all pgtable pages in the hyp_pool */
guest_lock_component(vm);
kvm_pgtable_stage2_destroy(&vm->pgt);
vm->kvm.arch.mmu.pgd_phys = 0ULL;
guest_unlock_component(vm);
- WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(pgd), nr_pages));
+ /* Drain the hyp_pool into the memcache */
+ addr = hyp_alloc_pages(&vm->pool, 0);
+ while (addr) {
+ memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+ push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
+ addr = hyp_alloc_pages(&vm->pool, 0);
+ }
}
int __pkvm_prot_finalize(void)
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 604505ed7727..0bf9d20b0eeb 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -527,8 +527,21 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
return ret;
}
+static void
+teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
+{
+ size = PAGE_ALIGN(size);
+ memset(addr, 0, size);
+
+ for (void *start = addr; start < addr + size; start += PAGE_SIZE)
+ push_hyp_memcache(mc, start, hyp_virt_to_phys);
+
+ unmap_donated_memory_noclear(addr, size);
+}
+
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
+ struct kvm_hyp_memcache *mc;
struct pkvm_hyp_vm *hyp_vm;
unsigned int idx;
size_t vm_size;
@@ -552,7 +565,8 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
hyp_spin_unlock(&vm_table_lock);
/* Reclaim guest pages (including page-table pages) */
- reclaim_guest_pages(hyp_vm);
+ mc = &hyp_vm->host_kvm->arch.pkvm.teardown_mc;
+ reclaim_guest_pages(hyp_vm, mc);
unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
/* Push the metadata pages to the teardown memcache */
@@ -561,11 +575,11 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
- unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+ teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
}
vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
- unmap_donated_memory(hyp_vm, vm_size);
+ teardown_donated_memory(mc, hyp_vm, vm_size);
return 0;
err_unlock:
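Note the _noclear variant used above: teardown_donated_memory() zeroes
the buffer first and then writes memcache links into the pages via
push_hyp_memcache(), so the final unmap must not scrub the pages again.
A rough sketch of how the unmap helpers could be split, with
hypothetical bodies inferred from the call sites in this patch:

/* Sketch only: hand ownership of the pages back to the host. */
static void __unmap_donated_memory(void *va, size_t size)
{
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	/* Scrub hypervisor state before the host can see the pages. */
	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}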
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 8c443b915e43..cf56958b1492 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -147,8 +147,6 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
handle = ret;
host_kvm->arch.pkvm.handle = handle;
- host_kvm->arch.pkvm.hyp_donations.pgd = pgd;
- host_kvm->arch.pkvm.hyp_donations.vm = hyp_vm;
/* Donate memory for the vcpus at hyp and initialize it. */
hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
@@ -167,12 +165,12 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
goto destroy_vm;
}
- host_kvm->arch.pkvm.hyp_donations.vcpus[idx] = hyp_vcpu;
-
ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
hyp_vcpu);
- if (ret)
+ if (ret) {
+ free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
goto destroy_vm;
+ }
}
return 0;
@@ -201,30 +199,13 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
{
- unsigned long idx, nr_vcpus = host_kvm->created_vcpus;
- size_t pgd_sz, hyp_vm_sz;
-
- if (host_kvm->arch.pkvm.handle)
+ if (host_kvm->arch.pkvm.handle) {
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
host_kvm->arch.pkvm.handle));
-
- host_kvm->arch.pkvm.handle = 0;
-
- for (idx = 0; idx < nr_vcpus; ++idx) {
- void *hyp_vcpu = host_kvm->arch.pkvm.hyp_donations.vcpus[idx];
-
- if (!hyp_vcpu)
- break;
-
- free_pages_exact(hyp_vcpu, PAGE_ALIGN(PKVM_HYP_VCPU_SIZE));
}
- hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
- size_mul(sizeof(void *), nr_vcpus)));
- pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
-
- free_pages_exact(host_kvm->arch.pkvm.hyp_donations.vm, hyp_vm_sz);
- free_pages_exact(host_kvm->arch.pkvm.hyp_donations.pgd, pgd_sz);
+ host_kvm->arch.pkvm.handle = 0;
+ free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
}
int pkvm_init_host_vm(struct kvm *host_kvm)
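With that in place, host-side teardown reduces to a single hypercall
followed by a walk of the memcache that EL2 filled and donated back. A
sketch of what free_hyp_memcache() might boil down to, reusing the pop
helper sketched in the commit message (hyp_mc_to_va() is a hypothetical
phys-to-virt wrapper):

static void *hyp_mc_to_va(phys_addr_t phys)
{
	return __va(phys);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc)
{
	/*
	 * EL2 donated every page in this list back to the host before
	 * tearing the VM down, so they can go straight back to the
	 * page allocator.
	 */
	while (mc->nr_pages)
		free_page((unsigned long)pop_hyp_memcache(mc, hyp_mc_to_va));
}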
--
2.38.0.413.g74048e4d9e-goog