xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	andrew.cooper3@citrix.com, dfaggioli@suse.com, jbeulich@suse.com
Subject: [PATCH v3 15/17] x86: fill XPTI shadow pages and keep them in sync with guest L4
Date: Fri,  9 Feb 2018 15:01:49 +0100	[thread overview]
Message-ID: <20180209140151.24714-16-jgross@suse.com> (raw)
In-Reply-To: <20180209140151.24714-1-jgross@suse.com>

To be able to use the XPTI shadow L4 page tables in the hypervisor,
fill them with the related entries of their masters and keep them in
sync when updates are done by the guest.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/arch/x86/mm.c              | 43 ++++++++++++++++++++++++++++++++++++++----
 xen/arch/x86/mm/shadow/multi.c |  2 ++
 xen/arch/x86/pv/dom0_build.c   |  3 +++
 xen/arch/x86/pv/xpti.c         | 35 ++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/pv/mm.h    |  4 ++++
 5 files changed, 83 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 16b004abe6..14dc776a52 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1609,6 +1609,18 @@ void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
                (ROOT_PAGETABLE_FIRST_XEN_SLOT + slots -
                 l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
     }
+
+    if ( is_domain_xpti_active(d) )
+    {
+        unsigned int slot;
+
+        for ( slot = ROOT_PAGETABLE_FIRST_XEN_SLOT;
+              slot <= ROOT_PAGETABLE_LAST_XEN_SLOT;
+              slot++ )
+            xpti_update_l4(d,
+                           mfn_x(mfn_eq(sl4mfn, INVALID_MFN) ? l4mfn : sl4mfn),
+                           slot, l4t[slot]);
+    }
 }
 
 bool fill_ro_mpt(const struct domain *d, mfn_t mfn)
@@ -1621,6 +1633,9 @@ bool fill_ro_mpt(const struct domain *d, mfn_t mfn)
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
             idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
         ret = true;
+        if ( is_domain_xpti_active(d) )
+            xpti_update_l4(d, mfn_x(mfn), l4_table_offset(RO_MPT_VIRT_START),
+                           idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]);
     }
     unmap_domain_page(l4tab);
 
@@ -1632,6 +1647,11 @@ void zap_ro_mpt(const struct domain *d, mfn_t mfn)
     l4_pgentry_t *l4tab = map_domain_page(mfn);
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+
+    if ( is_domain_xpti_active(d) )
+        xpti_update_l4(d, mfn_x(mfn), l4_table_offset(RO_MPT_VIRT_START),
+                       l4e_empty());
+
     unmap_domain_page(l4tab);
 }
 
@@ -1682,6 +1702,8 @@ static int alloc_l4_table(struct page_info *page)
         }
 
         pl4e[i] = adjust_guest_l4e(pl4e[i], d);
+        if ( is_domain_xpti_active(d) )
+            xpti_update_l4(d, pfn, i, pl4e[i]);
     }
 
     if ( rc >= 0 )
@@ -2141,6 +2163,20 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
     return rc;
 }
 
+static bool update_l4pte(l4_pgentry_t *pl4e, l4_pgentry_t ol4e,
+                         l4_pgentry_t nl4e, unsigned long pfn,
+                         struct vcpu *v, bool preserve_ad)
+{
+    bool rc;
+
+    rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, v, preserve_ad);
+    if ( rc && is_vcpu_xpti_active(v) &&
+         (!paging_mode_shadow(v->domain) || !paging_get_hostmode(v)) )
+        xpti_update_l4(v->domain, pfn, pgentry_ptr_to_slot(pl4e), nl4e);
+
+    return rc;
+}
+
 /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
 static int mod_l4_entry(l4_pgentry_t *pl4e,
                         l4_pgentry_t nl4e,
@@ -2175,7 +2211,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
         if ( !l4e_has_changed(ol4e, nl4e, ~FASTPATH_FLAG_WHITELIST) )
         {
             nl4e = adjust_guest_l4e(nl4e, d);
-            rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
+            rc = update_l4pte(pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
             return rc ? 0 : -EFAULT;
         }
 
@@ -2185,14 +2221,13 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
         rc = 0;
 
         nl4e = adjust_guest_l4e(nl4e, d);
-        if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu,
-                                    preserve_ad)) )
+        if ( unlikely(!update_l4pte(pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad)) )
         {
             ol4e = nl4e;
             rc = -EFAULT;
         }
     }
-    else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu,
+    else if ( unlikely(!update_l4pte(pl4e, ol4e, nl4e, pfn, vcpu,
                                      preserve_ad)) )
     {
         return -EFAULT;
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 170163fbcf..110a5449a6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -954,6 +954,8 @@ static int shadow_set_l4e(struct domain *d,
     /* Write the new entry */
     shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
     flags |= SHADOW_SET_CHANGED;
+    if ( is_domain_xpti_active(d) )
+        xpti_update_l4(d, mfn_x(sl4mfn), pgentry_ptr_to_slot(sl4e), new_sl4e);
 
     if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
     {
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 6e7bc435ab..8ef9c87845 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -142,6 +142,9 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
             pl3e = __map_domain_page(page);
             clear_page(pl3e);
             *pl4e = l4e_from_page(page, L4_PROT);
+            if ( is_domain_xpti_active(d) )
+                xpti_update_l4(d, pgtbl_pfn, l4_table_offset(vphysmap_start),
+                               *pl4e);
         } else
             pl3e = map_l3t_from_l4e(*pl4e);
 
diff --git a/xen/arch/x86/pv/xpti.c b/xen/arch/x86/pv/xpti.c
index f663fae806..da83339563 100644
--- a/xen/arch/x86/pv/xpti.c
+++ b/xen/arch/x86/pv/xpti.c
@@ -357,6 +357,18 @@ static unsigned int xpti_shadow_getforce(struct xpti_domain *xd)
     return idx;
 }
 
+static void xpti_init_xen_l4(struct xpti_domain *xd, struct xpti_l4pg *l4pg)
+{
+    unsigned int i;
+    l4_pgentry_t *src, *dest;
+
+    src = map_domain_page(_mfn(l4pg->guest_mfn));
+    dest = mfn_to_virt(l4pg->xen_mfn);
+    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
+        dest[i] = src[i];
+    unmap_domain_page(src);
+}
+
 static unsigned int xpti_shadow_get(struct xpti_domain *xd, unsigned long mfn)
 {
     unsigned int idx;
@@ -385,6 +397,9 @@ static unsigned int xpti_shadow_get(struct xpti_domain *xd, unsigned long mfn)
     l4pg->ref_next = l4ref->idx;
     l4ref->idx = idx;
 
+    /* Fill the shadow page table entries. */
+    xpti_init_xen_l4(xd, l4pg);
+
     return idx;
 }
 
@@ -403,6 +418,26 @@ static unsigned int xpti_shadow_activate(struct xpti_domain *xd,
     return idx;
 }
 
+void xpti_update_l4(const struct domain *d, unsigned long mfn,
+                    unsigned int slot, l4_pgentry_t e)
+{
+    struct xpti_domain *xd = d->arch.pv_domain.xpti;
+    unsigned long flags;
+    unsigned int idx;
+    l4_pgentry_t *l4;
+
+    spin_lock_irqsave(&xd->lock, flags);
+
+    idx = xpti_shadow_from_hashlist(xd, mfn);
+    if ( idx != L4_INVALID )
+    {
+        l4 = mfn_to_virt(xd->l4pg[idx].xen_mfn);
+        l4[slot] = e;
+    }
+
+    spin_unlock_irqrestore(&xd->lock, flags);
+}
+
 void xpti_make_cr3(struct vcpu *v, unsigned long mfn)
 {
     struct xpti_domain *xd = v->domain->arch.pv_domain.xpti;
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
index 25c035988c..8a90af1084 100644
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -36,6 +36,8 @@ int xpti_domain_init(struct domain *d);
 void xpti_domain_destroy(struct domain *d);
 void xpti_make_cr3(struct vcpu *v, unsigned long mfn);
 void xpti_free_l4(struct domain *d, unsigned long mfn);
+void xpti_update_l4(const struct domain *d, unsigned long mfn,
+                    unsigned int slot, l4_pgentry_t e);
 
 static inline bool is_domain_xpti_active(const struct domain *d)
 {
@@ -73,6 +75,8 @@ static inline int xpti_domain_init(struct domain *d) { return 0; }
 static inline void xpti_domain_destroy(struct domain *d) { }
 static inline void xpti_make_cr3(struct vcpu *v, unsigned long mfn) { }
 static inline void xpti_free_l4(struct domain *d, unsigned long mfn) { }
+static inline void xpti_update_l4(const struct domain *d, unsigned long mfn,
+                                  unsigned int slot, l4_pgentry_t e) { }
 
 static inline bool is_domain_xpti_active(const struct domain *d)
 { return false; }
-- 
2.13.6


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

  parent reply	other threads:[~2018-02-09 14:04 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-02-09 14:01 [PATCH v3 00/17] Alternative Meltdown mitigation Juergen Gross
2018-02-09 14:01 ` [PATCH v3 01/17] x86: don't use hypervisor stack size for dumping guest stacks Juergen Gross
2018-02-09 14:01 ` [PATCH v3 02/17] x86: do a revert of e871e80c38547d9faefc6604532ba3e985e65873 Juergen Gross
2018-02-13 10:14   ` Jan Beulich
2018-02-09 14:01 ` [PATCH v3 03/17] x86: revert 5784de3e2067ed73efc2fe42e62831e8ae7f46c4 Juergen Gross
2018-02-09 14:01 ` [PATCH v3 04/17] x86: don't access saved user regs via rsp in trap handlers Juergen Gross
2018-02-09 14:01 ` [PATCH v3 05/17] x86: add a xpti command line parameter Juergen Gross
2018-02-09 14:01 ` [PATCH v3 06/17] x86: allow per-domain mappings without NX bit or with specific mfn Juergen Gross
2018-02-09 14:01 ` [PATCH v3 07/17] xen/x86: split _set_tssldt_desc() into ldt and tss specific functions Juergen Gross
2018-02-09 14:01 ` [PATCH v3 08/17] x86: add support for spectre mitigation with local thunk Juergen Gross
2018-02-09 14:01 ` [PATCH v3 09/17] x86: create syscall stub for per-domain mapping Juergen Gross
2018-02-09 14:01 ` [PATCH v3 10/17] x86: allocate per-vcpu stacks for interrupt entries Juergen Gross
2018-02-09 14:01 ` [PATCH v3 11/17] x86: modify interrupt handlers to support stack switching Juergen Gross
2018-02-09 14:01 ` [PATCH v3 12/17] x86: activate per-vcpu stacks in case of xpti Juergen Gross
2018-02-09 14:01 ` [PATCH v3 13/17] x86: allocate hypervisor L4 page table for XPTI Juergen Gross
2018-02-09 14:01 ` [PATCH v3 14/17] xen: add domain pointer to fill_ro_mpt() and zap_ro_mpt() functions Juergen Gross
2018-02-09 14:01 ` Juergen Gross [this message]
2018-02-09 14:01 ` [PATCH v3 16/17] x86: do page table switching when entering/leaving hypervisor Juergen Gross
2018-02-09 14:01 ` [PATCH v3 17/17] x86: hide most hypervisor mappings in XPTI shadow page tables Juergen Gross
2018-02-12 17:54 ` [PATCH v3 00/17] Alternative Meltdown mitigation Dario Faggioli
2018-02-13 11:36   ` Juergen Gross
2018-02-13 14:16     ` Jan Beulich
     [not found]     ` <5A83014E02000078001A7619@suse.com>
2018-02-13 14:29       ` Juergen Gross

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180209140151.24714-16-jgross@suse.com \
    --to=jgross@suse.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=dfaggioli@suse.com \
    --cc=jbeulich@suse.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).