From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Tim Deegan <tim@xen.org>, Jan Beulich <JBeulich@suse.com>
Subject: [PATCH v2 7/9] x86/shadow: Use the pagewalk reserved bits helpers
Date: Thu, 16 Mar 2017 16:31:41 +0000
Message-ID: <1489681903-28119-8-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1489681903-28119-1-git-send-email-andrew.cooper3@citrix.com>
The shadow logic should not create a valid/present shadow of a guest PTE which
contains reserved bits from the guest's point of view.  It is not guaranteed
that the hardware pagewalk will come to the same conclusion and raise a
pagefault.
Shadows created on demand from the pagefault handler are fine because the
pagewalk over the guest tables will have injected the fault into the guest
rather than creating a shadow.
However, shadows created by sh_resync_l1() and sh_prefetch() haven't undergone
a pagewalk and need to account for reserved bits before creating the shadow.
In practice, this means a 3-level guest could previously cause PTEs with bits
63:52 set to be shadowed (with those bits discarded).  Such a PTE should cause
#PF[RSVD] when encountered by hardware, but the installed shadow is valid, so
hardware doesn't fault.
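
As a worked illustration (not the real Xen helpers; the guest_l?e_rsvd_bits()
helpers from earlier in this series also account for MAXPHYADDR, EFER.NXE,
PSE and the paging level), a minimal reserved-bit check for a 3-level (PAE)
guest L1E with NX unavailable looks like this:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bits 63:52 of a PAE L1E are reserved when NX is unavailable. */
    static bool pae_l1e_rsvd_bits(uint64_t l1e)
    {
        return l1e & (0xfffULL << 52);
    }

    int main(void)
    {
        uint64_t pte = (1ULL << 62) | 0x1; /* present, reserved bit 62 set */

        printf("rsvd: %d\n", pae_l1e_rsvd_bits(pte) ? 1 : 0); /* rsvd: 1 */
        return 0;
    }
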
Reuse the pagewalk reserved bits helpers, and assert in
l?e_propagate_from_guest() that no attempt is made to create a shadow of a
guest entry with reserved bits set.
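
For reference, the shape of the fix at L1, distilled into a standalone sketch
(PAGE_PRESENT, INVALID_MFN_VAL and lookup_mfn() are hypothetical stand-ins
for the real Xen definitions, not actual API): only perform the gfn-to-mfn
lookup when the guest entry is both present and free of reserved bits, and
otherwise propagate with an invalid mfn so the resulting shadow entry is
not-present:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_PRESENT    0x1ULL
    #define INVALID_MFN_VAL (~0ULL)

    static bool pae_l1e_rsvd_bits(uint64_t e) { return e & (0xfffULL << 52); }

    /* Stand-in for get_gfn_query_unlocked(): extract the 40-bit frame. */
    static uint64_t lookup_mfn(uint64_t e) { return (e >> 12) & 0xffffffffffULL; }

    static uint64_t shadow_l1e_of(uint64_t gl1e)
    {
        uint64_t gmfn = INVALID_MFN_VAL;

        if ( (gl1e & PAGE_PRESENT) && !pae_l1e_rsvd_bits(gl1e) )
            gmfn = lookup_mfn(gl1e);

        /* An invalid mfn yields an empty shadow, so hardware raises the
         * #PF the guest expects rather than using a bogus mapping. */
        return (gmfn == INVALID_MFN_VAL) ? 0 : ((gmfn << 12) | PAGE_PRESENT);
    }
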
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
v2:
* Reword commit message, and include sh_resync_l1/sh_prefetch
---
xen/arch/x86/mm/shadow/multi.c | 56 +++++++++++++++++++++++++++++++++---------
1 file changed, 45 insertions(+), 11 deletions(-)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 4762e15..56114c7 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -745,6 +745,10 @@ l4e_propagate_from_guest(struct vcpu *v,
shadow_l4e_t *sl4e,
fetch_type_t ft)
{
+ if ( !mfn_eq(sl3mfn, INVALID_MFN) &&
+ (guest_l4e_get_flags(gl4e) & _PAGE_PRESENT) )
+ ASSERT(!guest_l4e_rsvd_bits(v, gl4e));
+
_sh_propagate(v, gl4e.l4, sl3mfn, sl4e, 4, ft, p2m_ram_rw);
}
@@ -755,6 +759,10 @@ l3e_propagate_from_guest(struct vcpu *v,
shadow_l3e_t *sl3e,
fetch_type_t ft)
{
+ if ( !mfn_eq(sl2mfn, INVALID_MFN) &&
+ (guest_l3e_get_flags(gl3e) & _PAGE_PRESENT) )
+ ASSERT(!guest_l3e_rsvd_bits(v, gl3e));
+
_sh_propagate(v, gl3e.l3, sl2mfn, sl3e, 3, ft, p2m_ram_rw);
}
#endif // GUEST_PAGING_LEVELS >= 4
@@ -766,6 +774,10 @@ l2e_propagate_from_guest(struct vcpu *v,
shadow_l2e_t *sl2e,
fetch_type_t ft)
{
+ if ( !mfn_eq(sl1mfn, INVALID_MFN) &&
+ (guest_l2e_get_flags(gl2e) & _PAGE_PRESENT) )
+ ASSERT(!guest_l2e_rsvd_bits(v, gl2e));
+
_sh_propagate(v, gl2e.l2, sl1mfn, sl2e, 2, ft, p2m_ram_rw);
}
@@ -777,6 +789,10 @@ l1e_propagate_from_guest(struct vcpu *v,
fetch_type_t ft,
p2m_type_t p2mt)
{
+ if ( !mfn_eq(gmfn, INVALID_MFN) &&
+ (guest_l1e_get_flags(gl1e) & _PAGE_PRESENT) )
+ ASSERT(!guest_l1e_rsvd_bits(v, gl1e));
+
_sh_propagate(v, gl1e.l1, gmfn, sl1e, 1, ft, p2mt);
}
@@ -2157,7 +2173,8 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se)
perfc_incr(shadow_validate_gl4e_calls);
- if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT )
+ if ( (guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT) &&
+ !guest_l4e_rsvd_bits(v, new_gl4e) )
{
gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
mfn_t gl3mfn = get_gfn_query_unlocked(d, gfn_x(gl3gfn), &p2mt);
@@ -2215,7 +2232,8 @@ static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se)
perfc_incr(shadow_validate_gl3e_calls);
- if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
+ if ( (guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT) &&
+ !guest_l3e_rsvd_bits(v, new_gl3e) )
{
gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
mfn_t gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
@@ -2248,7 +2266,8 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
perfc_incr(shadow_validate_gl2e_calls);
- if ( guest_l2e_get_flags(new_gl2e) & _PAGE_PRESENT )
+ if ( (guest_l2e_get_flags(new_gl2e) & _PAGE_PRESENT) &&
+ !guest_l2e_rsvd_bits(v, new_gl2e) )
{
gfn_t gl1gfn = guest_l2e_get_gfn(new_gl2e);
if ( guest_supports_l2_superpages(v) &&
@@ -2289,7 +2308,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
guest_l1e_t new_gl1e = *(guest_l1e_t *)new_ge;
shadow_l1e_t *sl1p = se;
gfn_t gfn;
- mfn_t gmfn;
+ mfn_t gmfn = INVALID_MFN;
p2m_type_t p2mt;
int result = 0;
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -2298,8 +2317,12 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
perfc_incr(shadow_validate_gl1e_calls);
- gfn = guest_l1e_get_gfn(new_gl1e);
- gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
+ if ( (guest_l1e_get_flags(new_gl1e) & _PAGE_PRESENT) &&
+ !guest_l1e_rsvd_bits(v, new_gl1e) )
+ {
+ gfn = guest_l1e_get_gfn(new_gl1e);
+ gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
+ }
l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
result |= shadow_set_l1e(d, sl1p, new_sl1e, p2mt, sl1mfn);
@@ -2355,12 +2378,17 @@ void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
if ( memcmp(snpl1p, &gl1e, sizeof(gl1e)) )
{
gfn_t gfn;
- mfn_t gmfn;
+ mfn_t gmfn = INVALID_MFN;
p2m_type_t p2mt;
shadow_l1e_t nsl1e;
- gfn = guest_l1e_get_gfn(gl1e);
- gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
+ if ( (guest_l1e_get_flags(gl1e) & _PAGE_PRESENT) &&
+ !guest_l1e_rsvd_bits(v, gl1e) )
+ {
+ gfn = guest_l1e_get_gfn(gl1e);
+ gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
+ }
+
l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
rc |= shadow_set_l1e(d, sl1p, nsl1e, p2mt, sl1mfn);
*snpl1p = gl1e;
@@ -2686,8 +2714,14 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
}
/* Look at the gfn that the l1e is pointing at */
- gfn = guest_l1e_get_gfn(gl1e);
- gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
+ if ( (guest_l1e_get_flags(gl1e) & _PAGE_PRESENT) &&
+ !guest_l1e_rsvd_bits(v, gl1e) )
+ {
+ gfn = guest_l1e_get_gfn(gl1e);
+ gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
+ }
+ else
+ gmfn = INVALID_MFN;
/* Propagate the entry. */
l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
--
2.1.4