From: Will Deacon <will@kernel.org>
To: kvmarm@lists.cs.columbia.edu
Cc: Will Deacon <will@kernel.org>, Ard Biesheuvel <ardb@kernel.org>,
	Sean Christopherson <seanjc@google.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Andy Lutomirski <luto@amacapital.net>,
	Catalin Marinas <catalin.marinas@arm.com>,
	James Morse <james.morse@arm.com>,
	Chao Peng <chao.p.peng@linux.intel.com>,
	Quentin Perret <qperret@google.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Michael Roth <michael.roth@amd.com>,
	Mark Rutland <mark.rutland@arm.com>,
	Fuad Tabba <tabba@google.com>,
	Oliver Upton <oliver.upton@linux.dev>,
	Marc Zyngier <maz@kernel.org>,
	kernel-team@android.com, kvm@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org
Subject: [PATCH v2 18/24] KVM: arm64: Instantiate guest stage-2 page-tables at EL2
Date: Thu, 30 Jun 2022 14:57:41 +0100	[thread overview]
Message-ID: <20220630135747.26983-19-will@kernel.org> (raw)
In-Reply-To: <20220630135747.26983-1-will@kernel.org>

From: Quentin Perret <qperret@google.com>

Extend the shadow initialisation at EL2 so that we instantiate a memory
pool and a full 'struct kvm_s2_mmu' for each VM, with a stage-2
page-table entirely independent of the one managed by the host at EL1.

For now, the new page-table is unused as there is no way for the host
to map anything into it. Yet.

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
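Note (not part of the commit message): the kvm_pgtable_mm_ops callbacks
installed below take no VM argument, so the per-CPU '__current_vm'
pointer published by guest_lock_component() is what routes
guest_s2_zalloc_page() and friends to the right per-VM hyp_pool. As a
hedged sketch of the expected calling pattern once the host gains a way
to map into the new table (a hypothetical future caller, assuming the
kvm_pgtable_stage2_map() signature in use at the time of this series):

	guest_lock_component(vm);	/* take vm->lock, publish current_vm */
	ret = kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
				     prot, memcache);
	guest_unlock_component(vm);	/* clear current_vm, drop the lock */

While current_vm is set, any page-table pages the walker needs come from
vm->pool (or the supplied memcache), never from the host stage-2 pool.
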
 arch/arm64/kvm/hyp/include/nvhe/pkvm.h |   6 ++
 arch/arm64/kvm/hyp/nvhe/mem_protect.c  | 127 ++++++++++++++++++++++++-
 2 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index 1d0a33f70879..c0e32a750b6e 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -9,6 +9,9 @@
 
 #include <asm/kvm_pkvm.h>
 
+#include <nvhe/gfp.h>
+#include <nvhe/spinlock.h>
+
 /*
  * Holds the relevant data for maintaining the vcpu state completely at hyp.
  */
@@ -37,6 +40,9 @@ struct kvm_shadow_vm {
 	size_t shadow_area_size;
 
 	struct kvm_pgtable pgt;
+	struct kvm_pgtable_mm_ops mm_ops;
+	struct hyp_pool pool;
+	hyp_spinlock_t lock;
 
 	/* Array of the shadow state per vcpu. */
 	struct kvm_shadow_vcpu_state shadow_vcpu_states[0];
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index a0af23de2640..5b22bba77e57 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -25,6 +25,21 @@ struct host_kvm host_kvm;
 
 static struct hyp_pool host_s2_pool;
 
+static DEFINE_PER_CPU(struct kvm_shadow_vm *, __current_vm);
+#define current_vm (*this_cpu_ptr(&__current_vm))
+
+static void guest_lock_component(struct kvm_shadow_vm *vm)
+{
+	hyp_spin_lock(&vm->lock);
+	current_vm = vm;
+}
+
+static void guest_unlock_component(struct kvm_shadow_vm *vm)
+{
+	current_vm = NULL;
+	hyp_spin_unlock(&vm->lock);
+}
+
 static void host_lock_component(void)
 {
 	hyp_spin_lock(&host_kvm.lock);
@@ -140,18 +155,124 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
 	return 0;
 }
 
+static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
+				      enum kvm_pgtable_prot prot)
+{
+	return true;
+}
+
+static void *guest_s2_zalloc_pages_exact(size_t size)
+{
+	void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));
+
+	WARN_ON(size != (PAGE_SIZE << get_order(size)));
+	hyp_split_page(hyp_virt_to_page(addr));
+
+	return addr;
+}
+
+static void guest_s2_free_pages_exact(void *addr, unsigned long size)
+{
+	u8 order = get_order(size);
+	unsigned int i;
+
+	for (i = 0; i < (1 << order); i++)
+		hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
+}
+
+static void *guest_s2_zalloc_page(void *mc)
+{
+	struct hyp_page *p;
+	void *addr;
+
+	addr = hyp_alloc_pages(&current_vm->pool, 0);
+	if (addr)
+		return addr;
+
+	addr = pop_hyp_memcache(mc, hyp_phys_to_virt);
+	if (!addr)
+		return addr;
+
+	memset(addr, 0, PAGE_SIZE);
+	p = hyp_virt_to_page(addr);
+	memset(p, 0, sizeof(*p));
+	p->refcount = 1;
+
+	return addr;
+}
+
+static void guest_s2_get_page(void *addr)
+{
+	hyp_get_page(&current_vm->pool, addr);
+}
+
+static void guest_s2_put_page(void *addr)
+{
+	hyp_put_page(&current_vm->pool, addr);
+}
+
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+	__clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+	hyp_fixmap_unmap();
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+	__invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+	hyp_fixmap_unmap();
+}
+
 int kvm_guest_prepare_stage2(struct kvm_shadow_vm *vm, void *pgd)
 {
-	vm->pgt.pgd = pgd;
+	struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
+	unsigned long nr_pages;
+	int ret;
+
+	nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
+	ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
+	if (ret)
+		return ret;
+
+	hyp_spin_lock_init(&vm->lock);
+	vm->mm_ops = (struct kvm_pgtable_mm_ops) {
+		.zalloc_pages_exact	= guest_s2_zalloc_pages_exact,
+		.free_pages_exact	= guest_s2_free_pages_exact,
+		.zalloc_page		= guest_s2_zalloc_page,
+		.phys_to_virt		= hyp_phys_to_virt,
+		.virt_to_phys		= hyp_virt_to_phys,
+		.page_count		= hyp_page_count,
+		.get_page		= guest_s2_get_page,
+		.put_page		= guest_s2_put_page,
+		.dcache_clean_inval_poc	= clean_dcache_guest_page,
+		.icache_inval_pou	= invalidate_icache_guest_page,
+	};
+
+	guest_lock_component(vm);
+	ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
+					guest_stage2_force_pte_cb);
+	guest_unlock_component(vm);
+	if (ret)
+		return ret;
+
+	vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);
+
 	return 0;
 }
 
 void reclaim_guest_pages(struct kvm_shadow_vm *vm)
 {
-	unsigned long nr_pages;
+	unsigned long nr_pages, pfn;
 
 	nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
-	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(vm->pgt.pgd), nr_pages));
+	pfn = hyp_virt_to_pfn(vm->pgt.pgd);
+
+	guest_lock_component(vm);
+	kvm_pgtable_stage2_destroy(&vm->pgt);
+	vm->kvm.arch.mmu.pgd_phys = 0ULL;
+	guest_unlock_component(vm);
+
+	WARN_ON(__pkvm_hyp_donate_host(pfn, nr_pages));
 }
 
 int __pkvm_prot_finalize(void)
-- 
2.37.0.rc0.161.g10f37bed90-goog

