From: Alexandre Chartre <alexandre.chartre@oracle.com>
To: pbonzini@redhat.com, rkrcmar@redhat.com, tglx@linutronix.de,
mingo@redhat.com, bp@alien8.de, hpa@zytor.com,
dave.hansen@linux.intel.com, luto@kernel.org,
peterz@infradead.org, kvm@vger.kernel.org, x86@kernel.org,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: konrad.wilk@oracle.com, jan.setjeeilers@oracle.com,
liran.alon@oracle.com, jwadams@google.com,
alexandre.chartre@oracle.com
Subject: [RFC KVM 19/27] kvm/isolation: initialize the KVM page table with core mappings
Date: Mon, 13 May 2019 16:38:27 +0200 [thread overview]
Message-ID: <1557758315-12667-20-git-send-email-alexandre.chartre@oracle.com> (raw)
In-Reply-To: <1557758315-12667-1-git-send-email-alexandre.chartre@oracle.com>
The KVM page table is initialized by adding core memory mappings:
the kernel text, the per-cpu memory, the kvm module, the cpu_entry_area,
%esp fixup stacks, IRQ stacks.
Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
arch/x86/kernel/cpu/common.c | 2 +
arch/x86/kvm/isolation.c | 131 ++++++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/isolation.h | 10 +++
include/linux/percpu.h | 2 +
mm/percpu.c | 6 +-
5 files changed, 149 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3764054..0fa44b1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1511,6 +1511,8 @@ static __init int setup_clearcpuid(char *arg)
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirq_stack_ptr);
+
DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
index 2052abf..cf5ee0d 100644
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -10,6 +10,8 @@
#include <linux/printk.h>
#include <linux/slab.h>
+#include <asm/cpu_entry_area.h>
+#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
@@ -88,6 +90,8 @@ struct mm_struct kvm_mm = {
DEFINE_STATIC_KEY_FALSE(kvm_isolation_enabled);
EXPORT_SYMBOL(kvm_isolation_enabled);
+static void kvm_isolation_uninit_page_table(void);
+static void kvm_isolation_uninit_mm(void);
static void kvm_clear_mapping(void *ptr, size_t size,
enum page_table_level level);
@@ -1024,10 +1028,130 @@ int kvm_copy_percpu_mapping(void *percpu_ptr, size_t size)
EXPORT_SYMBOL(kvm_copy_percpu_mapping);
+/*
+ * Populate the KVM page table with the core kernel mappings needed to
+ * run KVM code: kernel text, per-cpu memory, cpu_entry_area, ESPFIX
+ * stacks, vmap'ed IRQ stacks and the kvm module itself.
+ *
+ * Returns 0 on success. On failure, tears down the partially built
+ * page table and returns the error from the first failed copy.
+ */
+static int kvm_isolation_init_page_table(void)
+{
+ void *stack;
+ int cpu, rv;
+
+ /*
+ * Copy the mapping for all the kernel text. We copy at the PMD
+ * level since the PUD is shared with the module mapping space.
+ */
+ rv = kvm_copy_mapping((void *)__START_KERNEL_map, KERNEL_IMAGE_SIZE,
+ PGT_LEVEL_PMD);
+ if (rv)
+ goto out_uninit_page_table;
+
+ /* copy the mapping of per cpu memory */
+ rv = kvm_copy_mapping(pcpu_base_addr, pcpu_unit_size * pcpu_nr_units,
+ PGT_LEVEL_PMD);
+ if (rv)
+ goto out_uninit_page_table;
+
+ /*
+ * Copy the mapping for cpu_entry_area and %esp fixup stacks
+ * (this is based on the PTI userland address space, but probably
+ * not needed because the KVM address space is not directly
+ * entered from userspace). They can both be copied at the P4D
+ * level since they each have a dedicated P4D entry.
+ */
+ rv = kvm_copy_mapping((void *)CPU_ENTRY_AREA_PER_CPU, P4D_SIZE,
+ PGT_LEVEL_P4D);
+ if (rv)
+ goto out_uninit_page_table;
+
+#ifdef CONFIG_X86_ESPFIX64
+ rv = kvm_copy_mapping((void *)ESPFIX_BASE_ADDR, P4D_SIZE,
+ PGT_LEVEL_P4D);
+ if (rv)
+ goto out_uninit_page_table;
+#endif
+
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * Interrupt stacks are vmap'ed with guard pages, so we need to
+ * copy mappings.
+ */
+ for_each_possible_cpu(cpu) {
+ stack = per_cpu(hardirq_stack_ptr, cpu);
+ pr_debug("IRQ Stack %px\n", stack);
+ /* skip CPUs whose IRQ stack is not (yet) allocated */
+ if (!stack)
+ continue;
+ /*
+ * NOTE(review): assumes hardirq_stack_ptr points to the
+ * end (top) of the stack, so the base is ptr - IRQ_STACK_SIZE
+ * -- confirm against irq_init_percpu_irqstack().
+ */
+ rv = kvm_copy_ptes(stack - IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ if (rv)
+ goto out_uninit_page_table;
+ }
+
+#endif
+
+ /* copy mapping of the current module (kvm) */
+ rv = kvm_copy_module_mapping();
+ if (rv)
+ goto out_uninit_page_table;
+
+ return 0;
+
+out_uninit_page_table:
+ kvm_isolation_uninit_page_table();
+ return rv;
+}
+
+/*
+ * Free all buffers used by the kvm page table. These buffers are stored
+ * in the kvm_pgt_dgroup_list. Each tracked directory page is released
+ * with the free function matching the page-table level it was recorded
+ * at; the tracking structures themselves are then unlinked and freed.
+ * The list is walked under kvm_pgt_dgroup_lock.
+ */
+static void kvm_isolation_uninit_page_table(void)
+{
+ struct pgt_directory_group *dgroup, *dgroup_next;
+ enum page_table_level level;
+ void *ptr;
+ int i;
+
+ mutex_lock(&kvm_pgt_dgroup_lock);
+
+ list_for_each_entry_safe(dgroup, dgroup_next,
+ &kvm_pgt_dgroup_list, list) {
+
+ for (i = 0; i < dgroup->count; i++) {
+ ptr = dgroup->directory[i].ptr;
+ level = dgroup->directory[i].level;
+
+ switch (dgroup->directory[i].level) {
+
+ case PGT_LEVEL_PTE:
+ kvm_pte_free(NULL, ptr);
+ break;
+
+ case PGT_LEVEL_PMD:
+ kvm_pmd_free(NULL, ptr);
+ break;
+
+ case PGT_LEVEL_PUD:
+ kvm_pud_free(NULL, ptr);
+ break;
+
+ case PGT_LEVEL_P4D:
+ kvm_p4d_free(NULL, ptr);
+ break;
+
+ default:
+ /*
+ * Unknown level: we cannot pick a free
+ * function, so the page is logged and
+ * intentionally not freed.
+ */
+ pr_err("unexpected page directory %d for %px\n",
+ level, ptr);
+ }
+ }
+
+ list_del(&dgroup->list);
+ kfree(dgroup);
+ }
+
+ mutex_unlock(&kvm_pgt_dgroup_lock);
+}
+
static int kvm_isolation_init_mm(void)
{
pgd_t *kvm_pgd;
gfp_t gfp_mask;
+ int rv;
gfp_mask = GFP_KERNEL | __GFP_ZERO;
kvm_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
@@ -1054,6 +1178,12 @@ static int kvm_isolation_init_mm(void)
mm_init_cpumask(&kvm_mm);
init_new_context(NULL, &kvm_mm);
+ rv = kvm_isolation_init_page_table();
+ if (rv) {
+ kvm_isolation_uninit_mm();
+ return rv;
+ }
+
return 0;
}
@@ -1065,6 +1195,7 @@ static void kvm_isolation_uninit_mm(void)
destroy_context(&kvm_mm);
+ kvm_isolation_uninit_page_table();
kvm_free_all_range_mapping();
#ifdef CONFIG_PAGE_TABLE_ISOLATION
diff --git a/arch/x86/kvm/isolation.h b/arch/x86/kvm/isolation.h
index 3ef2060..1f79e28 100644
--- a/arch/x86/kvm/isolation.h
+++ b/arch/x86/kvm/isolation.h
@@ -3,6 +3,16 @@
#define ARCH_X86_KVM_ISOLATION_H
#include <linux/kvm_host.h>
+#include <linux/export.h>
+
+/*
+ * Copy the memory mapping for the current module. This is defined as a
+ * macro to ensure it is expanded in the module making the call so that
+ * THIS_MODULE has the correct value.
+ * Evaluates to the return value of kvm_copy_ptes() over the calling
+ * module's core layout.
+ */
+#define kvm_copy_module_mapping() \
+ (kvm_copy_ptes(THIS_MODULE->core_layout.base, \
+ THIS_MODULE->core_layout.size))
DECLARE_STATIC_KEY_FALSE(kvm_isolation_enabled);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 70b7123..fb0ab9a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -70,6 +70,8 @@
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;
+extern int pcpu_unit_size;
+extern int pcpu_nr_units;
struct pcpu_group_info {
int nr_units; /* aligned # of units */
diff --git a/mm/percpu.c b/mm/percpu.c
index 68dd2e7..b68b3d8 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -119,8 +119,10 @@
#endif /* CONFIG_SMP */
static int pcpu_unit_pages __ro_after_init;
-static int pcpu_unit_size __ro_after_init;
-static int pcpu_nr_units __ro_after_init;
+int pcpu_unit_size __ro_after_init;
+EXPORT_SYMBOL(pcpu_unit_size);
+int pcpu_nr_units __ro_after_init;
+EXPORT_SYMBOL(pcpu_nr_units);
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;
--
1.7.1
next prev parent reply other threads:[~2019-05-13 14:40 UTC|newest]
Thread overview: 87+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-05-13 14:38 [RFC KVM 00/27] KVM Address Space Isolation Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 01/27] kernel: Export memory-management symbols required for KVM address space isolation Alexandre Chartre
2019-05-13 15:15 ` Peter Zijlstra
2019-05-13 15:17 ` Liran Alon
2019-05-13 14:38 ` [RFC KVM 02/27] KVM: x86: Introduce address_space_isolation module parameter Alexandre Chartre
2019-05-13 15:46 ` Andy Lutomirski
2019-05-13 15:55 ` Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 03/27] KVM: x86: Introduce KVM separate virtual address space Alexandre Chartre
2019-05-13 15:45 ` Andy Lutomirski
2019-05-13 16:04 ` Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 04/27] KVM: x86: Switch to KVM address space on entry to guest Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 05/27] KVM: x86: Add handler to exit kvm isolation Alexandre Chartre
2019-05-13 15:49 ` Andy Lutomirski
2019-05-13 16:10 ` Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 06/27] KVM: x86: Exit KVM isolation on IRQ entry Alexandre Chartre
2019-05-13 15:51 ` Andy Lutomirski
2019-05-13 16:28 ` Alexandre Chartre
2019-05-13 18:13 ` Andy Lutomirski
2019-05-14 7:07 ` Peter Zijlstra
2019-05-14 7:58 ` Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 07/27] KVM: x86: Switch to host address space when may access sensitive data Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 08/27] KVM: x86: Optimize branches which checks if address space isolation enabled Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 09/27] kvm/isolation: function to track buffers allocated for the KVM page table Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 10/27] kvm/isolation: add KVM page table entry free functions Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 11/27] kvm/isolation: add KVM page table entry offset functions Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 12/27] kvm/isolation: add KVM page table entry allocation functions Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 13/27] kvm/isolation: add KVM page table entry set functions Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 14/27] kvm/isolation: functions to copy page table entries for a VA range Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 15/27] kvm/isolation: keep track of VA range mapped in KVM address space Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 16/27] kvm/isolation: functions to clear page table entries for a VA range Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 17/27] kvm/isolation: improve mapping copy when mapping is already present Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 18/27] kvm/isolation: function to copy page table entries for percpu buffer Alexandre Chartre
2019-05-13 18:18 ` Andy Lutomirski
2019-05-14 7:09 ` Peter Zijlstra
2019-05-14 8:25 ` Alexandre Chartre
2019-05-14 8:34 ` Andy Lutomirski
2019-05-14 9:41 ` Alexandre Chartre
2019-05-14 15:23 ` Andy Lutomirski
2019-05-14 16:24 ` Alexandre Chartre
2019-05-14 17:05 ` Peter Zijlstra
2019-05-14 18:09 ` Sean Christopherson
2019-05-14 20:33 ` Andy Lutomirski
2019-05-14 21:06 ` Sean Christopherson
2019-05-14 21:55 ` Andy Lutomirski
2019-05-14 22:38 ` Sean Christopherson
2019-05-18 0:05 ` Jonathan Adams
2019-05-14 20:27 ` Andy Lutomirski
2019-05-13 14:38 ` Alexandre Chartre [this message]
2019-05-13 15:50 ` [RFC KVM 19/27] kvm/isolation: initialize the KVM page table with core mappings Dave Hansen
2019-05-13 16:00 ` Andy Lutomirski
2019-05-13 17:00 ` Alexandre Chartre
2019-05-13 16:46 ` Sean Christopherson
2019-05-13 16:47 ` Alexandre Chartre
2019-05-14 10:26 ` Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 20/27] kvm/isolation: initialize the KVM page table with vmx specific data Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 21/27] kvm/isolation: initialize the KVM page table with vmx VM data Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 22/27] kvm/isolation: initialize the KVM page table with vmx cpu data Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 23/27] kvm/isolation: initialize the KVM page table with the vcpu tasks Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 24/27] kvm/isolation: KVM page fault handler Alexandre Chartre
2019-05-13 15:15 ` Peter Zijlstra
2019-05-13 21:25 ` Liran Alon
2019-05-14 2:02 ` Andy Lutomirski
2019-05-14 7:21 ` Peter Zijlstra
2019-05-14 15:36 ` Alexandre Chartre
2019-05-14 15:43 ` Andy Lutomirski
2019-05-13 16:02 ` Andy Lutomirski
2019-05-13 16:21 ` Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 25/27] kvm/isolation: implement actual KVM isolation enter/exit Alexandre Chartre
2019-05-13 15:16 ` Peter Zijlstra
2019-05-13 16:01 ` Andy Lutomirski
2019-05-13 14:38 ` [RFC KVM 26/27] kvm/isolation: initialize the KVM page table with KVM memslots Alexandre Chartre
2019-05-13 14:38 ` [RFC KVM 27/27] kvm/isolation: initialize the KVM page table with KVM buses Alexandre Chartre
2019-05-13 16:42 ` [RFC KVM 00/27] KVM Address Space Isolation Liran Alon
2019-05-13 18:17 ` Andy Lutomirski
2019-05-13 21:08 ` Liran Alon
2019-05-14 2:07 ` Andy Lutomirski
2019-05-14 7:37 ` Peter Zijlstra
2019-05-14 21:32 ` Jan Setje-Eilers
2019-05-14 8:05 ` Liran Alon
2019-05-14 7:29 ` Peter Zijlstra
2019-05-14 7:57 ` Liran Alon
2019-05-14 8:33 ` Alexandre Chartre
2019-05-13 19:31 ` Nakajima, Jun
2019-05-13 21:16 ` Liran Alon
2019-05-13 21:42 ` Nakajima, Jun
2019-05-13 21:53 ` Liran Alon
2019-05-15 12:52 ` Alexandre Chartre
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1557758315-12667-20-git-send-email-alexandre.chartre@oracle.com \
--to=alexandre.chartre@oracle.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=hpa@zytor.com \
--cc=jan.setjeeilers@oracle.com \
--cc=jwadams@google.com \
--cc=konrad.wilk@oracle.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=liran.alon@oracle.com \
--cc=luto@kernel.org \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=rkrcmar@redhat.com \
--cc=tglx@linutronix.de \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).