From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Tim Deegan <tim@xen.org>, Jan Beulich <JBeulich@suse.com>
Subject: [PATCH 3/7] x86/mm: Further restrict permissions on some virtual mappings
Date: Tue, 2 May 2017 19:05:22 +0100 [thread overview]
Message-ID: <1493748326-9582-4-git-send-email-andrew.cooper3@citrix.com> (raw)
In-Reply-To: <1493748326-9582-1-git-send-email-andrew.cooper3@citrix.com>
As originally reported, the Linear Pagetable slot maps 512GB of RAM as RWX,
where the guest has full read access and a lot of direct or indirect control
over the written content. It isn't hard for a PV guest to hide shellcode
here.
Therefore, increase defence in depth by auditing our current pagetable
mappings.
* The regular linear, shadow linear, and per-domain slots have no business
being executable (but need to be writeable), so are updated to be NX.
* The Read Only mappings of the M2P (compat and regular) don't need to be
writeable or executable.
* The PV GDT mappings don't need to be executable.
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
---
xen/arch/x86/domain.c | 2 +-
xen/arch/x86/mm.c | 10 +++++-----
xen/arch/x86/mm/hap/hap.c | 4 ++--
xen/arch/x86/mm/shadow/multi.c | 18 +++++++++---------
xen/arch/x86/x86_64/mm.c | 12 ++++++------
5 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 90e2b1f..ef8c05a 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2040,7 +2040,7 @@ static void __context_switch(void)
for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
l1e_write(pl1e + FIRST_RESERVED_GDT_PAGE + i,
- l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR));
+ l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR_RW));
}
if ( need_full_gdt(pd) &&
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e639ce2..77b0af1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -384,7 +384,7 @@ void __init arch_init_memory(void)
for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
l3tab[i] = l3e_empty();
split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
- __PAGE_HYPERVISOR);
+ __PAGE_HYPERVISOR_RW);
}
else
++root_pgt_pv_xen_slots;
@@ -1588,9 +1588,9 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
split_l4e;
#endif
l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
+ l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
+ l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
if ( zap_ro_mpt || is_pv_32bit_domain(d) || paging_mode_refcounts(d) )
l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
}
@@ -6380,7 +6380,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
}
l2tab = __map_domain_page(pg);
clear_page(l2tab);
- l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
+ l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR_RW);
}
else
l2tab = map_domain_page(_mfn(l3e_get_pfn(l3tab[l3_table_offset(va)])));
@@ -6422,7 +6422,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
l1tab = __map_domain_page(pg);
}
clear_page(l1tab);
- *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR);
+ *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR_RW);
}
else if ( !l1tab )
l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index b981432..8476269 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -409,11 +409,11 @@ static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
/* Install the per-domain mappings for this domain */
l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
l4e_from_pfn(mfn_x(page_to_mfn(d->arch.perdomain_l3_pg)),
- __PAGE_HYPERVISOR);
+ __PAGE_HYPERVISOR_RW);
/* Install a linear mapping */
l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);
+ l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR_RW);
unmap_domain_page(l4e);
}
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 2fb0125..f65ffc6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1475,7 +1475,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
/* Install the per-domain mappings for this domain */
sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
- __PAGE_HYPERVISOR);
+ __PAGE_HYPERVISOR_RW);
if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
!VM_ASSIST(d, m2p_strict) )
@@ -1489,7 +1489,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
* monitor pagetable structure, which is built in make_monitor_table
* and maintained by sh_update_linear_entries. */
sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
- shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR);
+ shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
/* Self linear mapping. */
if ( shadow_mode_translate(d) && !shadow_mode_external(d) )
@@ -1501,7 +1501,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
else
{
sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
- shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
+ shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR_RW);
}
unmap_domain_page(sl4e);
@@ -1654,12 +1654,12 @@ sh_make_monitor_table(struct vcpu *v)
m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
l4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)]
- = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
+ = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR_RW);
m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
l3e = map_domain_page(m3mfn);
- l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR);
+ l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR_RW);
unmap_domain_page(l3e);
if ( is_pv_32bit_domain(d) )
@@ -1668,7 +1668,7 @@ sh_make_monitor_table(struct vcpu *v)
* area into its usual VAs in the monitor tables */
m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
- l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
+ l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR_RW);
m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
@@ -3838,7 +3838,7 @@ sh_update_linear_entries(struct vcpu *v)
{
__linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] =
l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
- __PAGE_HYPERVISOR);
+ __PAGE_HYPERVISOR_RW);
}
else
{
@@ -3846,7 +3846,7 @@ sh_update_linear_entries(struct vcpu *v)
ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
- __PAGE_HYPERVISOR);
+ __PAGE_HYPERVISOR_RW);
unmap_domain_page(ml4e);
}
}
@@ -3902,7 +3902,7 @@ sh_update_linear_entries(struct vcpu *v)
ml2e[i] =
(shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT)
? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(sl3e[i])),
- __PAGE_HYPERVISOR)
+ __PAGE_HYPERVISOR_RW)
: l2e_empty();
}
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 34f3250..ac358a8 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -470,7 +470,7 @@ static int setup_m2p_table(struct mem_hotadd_info *info)
clear_page(l2_ro_mpt);
l3e_write(&l3_ro_mpt[l3_table_offset(va)],
l3e_from_paddr(__pa(l2_ro_mpt),
- __PAGE_HYPERVISOR | _PAGE_USER));
+ __PAGE_HYPERVISOR_RO | _PAGE_USER));
l2_ro_mpt += l2_table_offset(va);
}
@@ -515,7 +515,7 @@ void __init paging_init(void)
l3_ro_mpt = page_to_virt(l3_pg);
clear_page(l3_ro_mpt);
l4e_write(&idle_pg_table[l4_table_offset(va)],
- l4e_from_page(l3_pg, __PAGE_HYPERVISOR));
+ l4e_from_page(l3_pg, __PAGE_HYPERVISOR_RW));
}
}
@@ -525,7 +525,7 @@ void __init paging_init(void)
l3_ro_mpt = page_to_virt(l3_pg);
clear_page(l3_ro_mpt);
l4e_write(&idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)],
- l4e_from_page(l3_pg, __PAGE_HYPERVISOR | _PAGE_USER));
+ l4e_from_page(l3_pg, __PAGE_HYPERVISOR_RO | _PAGE_USER));
/*
* Allocate and map the machine-to-phys table.
@@ -612,7 +612,7 @@ void __init paging_init(void)
l2_ro_mpt = page_to_virt(l2_pg);
clear_page(l2_ro_mpt);
l3e_write(&l3_ro_mpt[l3_table_offset(va)],
- l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
+ l3e_from_page(l2_pg, __PAGE_HYPERVISOR_RO | _PAGE_USER));
ASSERT(!l2_table_offset(va));
}
/* NB. Cannot be GLOBAL: guest user mode should not see it. */
@@ -634,7 +634,7 @@ void __init paging_init(void)
compat_idle_pg_table_l2 = l2_ro_mpt;
clear_page(l2_ro_mpt);
l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
- l3e_from_paddr(__pa(l2_ro_mpt), __PAGE_HYPERVISOR));
+ l3e_from_paddr(__pa(l2_ro_mpt), __PAGE_HYPERVISOR_RO));
l2_ro_mpt += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
/* Allocate and map the compatibility mode machine-to-phys table. */
mpt_size = (mpt_size >> 1) + (1UL << (L2_PAGETABLE_SHIFT - 1));
@@ -679,7 +679,7 @@ void __init paging_init(void)
/* Set up linear page table mapping. */
l4e_write(&idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)],
- l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR));
+ l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR_RW));
return;
nomem:
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2017-05-02 18:05 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-05-02 18:05 [PATCH 0/7] XSAs 213-315 followups Andrew Cooper
2017-05-02 18:05 ` [PATCH 1/7] x86/traps: Drop 32bit fields out of tss_struct Andrew Cooper
2017-05-03 8:10 ` Jan Beulich
2017-05-03 12:33 ` Andrew Cooper
2017-05-03 9:48 ` Wei Liu
2017-05-02 18:05 ` [PATCH 2/7] x86/traps: Poison unused stack pointers in the TSS Andrew Cooper
2017-05-03 8:14 ` Jan Beulich
2017-05-03 12:47 ` Andrew Cooper
2017-05-03 13:29 ` [PATCH v2 " Andrew Cooper
2017-05-03 13:45 ` Jan Beulich
2017-05-02 18:05 ` Andrew Cooper [this message]
2017-05-03 8:49 ` [PATCH 3/7] x86/mm: Further restrict permissions on some virtual mappings Jan Beulich
2017-05-03 13:38 ` Andrew Cooper
2017-05-03 13:48 ` Jan Beulich
2017-05-03 9:48 ` Wei Liu
2017-05-03 10:11 ` Tim Deegan
2017-05-03 11:13 ` George Dunlap
2017-05-02 18:05 ` [PATCH 4/7] x86/traps: Rename compat_hypercall() to entry_int82() Andrew Cooper
2017-05-03 8:55 ` Jan Beulich
2017-05-03 13:41 ` Andrew Cooper
2017-05-02 18:05 ` [PATCH 5/7] x86/traps: Lift all non-entrypoint logic in entry_int82() up into C Andrew Cooper
2017-05-03 9:02 ` Jan Beulich
2017-05-03 11:26 ` Wei Liu
2017-05-03 11:38 ` Andrew Cooper
2017-05-03 11:43 ` Wei Liu
2017-05-03 12:02 ` Jan Beulich
2017-05-03 12:18 ` Andrew Cooper
2017-05-03 12:37 ` Jan Beulich
2017-05-03 18:29 ` Andrew Cooper
2017-05-04 7:27 ` Jan Beulich
2017-05-04 9:27 ` Andrew Cooper
2017-05-04 9:36 ` Jan Beulich
2017-05-04 9:57 ` Andrew Cooper
2017-05-03 12:00 ` Jan Beulich
2017-05-04 10:01 ` [PATCH v2 " Andrew Cooper
2017-05-04 10:16 ` Andrew Cooper
2017-05-04 10:28 ` Jan Beulich
2017-05-04 11:09 ` Andrew Cooper
2017-05-04 10:22 ` Jan Beulich
2017-05-02 18:05 ` [PATCH 6/7] x86/asm: Fold LOAD_C_CLOBBERED into RESTORE_ALL Andrew Cooper
2017-05-03 9:08 ` Jan Beulich
2017-05-03 9:48 ` Wei Liu
2017-05-02 18:05 ` [PATCH 7/7] x86/asm: Clobber %r{8..15} on exit to 32bit PV guests Andrew Cooper
2017-05-03 9:13 ` Jan Beulich
2017-05-03 17:51 ` [PATCH v2 " Andrew Cooper
2017-05-04 8:50 ` Jan Beulich
2017-05-04 11:11 ` [RFC for 4.9] [PATCH 0/7] XSAs 213-315 followups Andrew Cooper
2017-05-04 12:52 ` Julien Grall
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1493748326-9582-4-git-send-email-andrew.cooper3@citrix.com \
--to=andrew.cooper3@citrix.com \
--cc=JBeulich@suse.com \
--cc=george.dunlap@eu.citrix.com \
--cc=tim@xen.org \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).