[PATCH] x86/PV: properly populate descriptor tables
From: Jan Beulich @ 2015-09-23 15:34 UTC
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu


Our extending the GDT limit past the Xen descriptors has so far meant
that guests (including user mode programs) accessing any descriptor
table slot above the original OS's limit but below the first Xen
descriptor caused a #PF, which our #PF handler converted to a #GP.
This is quite different from native behavior, where some such accesses
(LAR and LSL) don't fault. Mimic that behavior by mapping a blank page
into the unused slots.
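
For illustration, a minimal user-mode sketch of the native behavior
being mimicked here (not part of the patch; the selector value is
purely hypothetical). On bare metal, LAR on an empty or invalid
descriptor slot merely clears ZF instead of faulting:

    /* Hypothetical ring-3 probe: on native hardware, LAR on an
     * invalid/empty descriptor slot clears ZF rather than faulting. */
    #include <stdio.h>

    static int lar_valid(unsigned long sel, unsigned long *ar)
    {
        unsigned char zf;

        asm volatile ( "lar %[sel], %[ar]\n\t"
                       "setz %[zf]"
                       : [ar] "=r" (*ar), [zf] "=qm" (zf)
                       : [sel] "r" (sel)
                       : "cc" );
        return zf;                  /* ZF set <=> access rights loaded */
    }

    int main(void)
    {
        unsigned long ar = 0;
        unsigned long sel = 0x48;   /* hypothetical slot above the OS limit */

        if ( lar_valid(sel, &ar) )
            printf("selector %#lx: access rights %#lx\n", sel, ar);
        else
            printf("selector %#lx: invalid, ZF cleared, no fault\n", sel);
        return 0;
    }

Under the old Xen layout the same probe would instead have taken the
#PF-turned-#GP path.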

While not strictly required, treat the LDT the same for consistency.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
Not sure about 4.6 here: beyond Andrew noticing it, I don't think
anyone has run into this issue in a real-world environment, so fixing
it doesn't seem particularly critical.

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -502,12 +502,13 @@ void update_cr3(struct vcpu *v)
     make_cr3(v, cr3_mfn);
 }
 
+static const char __section(".bss.page_aligned") zero_page[PAGE_SIZE];
 
 static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
     l1_pgentry_t *pl1e;
-    int i;
-    unsigned long pfn;
+    unsigned int i;
+    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));
     struct page_info *page;
 
     BUG_ON(unlikely(in_irq()));
@@ -523,8 +524,9 @@ static void invalidate_shadow_ldt(struct
     for ( i = 16; i < 32; i++ )
     {
         pfn = l1e_get_pfn(pl1e[i]);
-        if ( pfn == 0 ) continue;
-        l1e_write(&pl1e[i], l1e_empty());
+        if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) || pfn == zero_pfn )
+            continue;
+        l1e_write(&pl1e[i], l1e_from_pfn(zero_pfn, __PAGE_HYPERVISOR_RO));
         page = mfn_to_page(pfn);
         ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
         ASSERT_PAGE_IS_DOMAIN(page, v->domain);
@@ -4420,16 +4422,17 @@ long do_update_va_mapping_otherdomain(un
 void destroy_gdt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e;
-    int i;
-    unsigned long pfn;
+    unsigned int i;
+    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));
 
     v->arch.pv_vcpu.gdt_ents = 0;
     pl1e = gdt_ldt_ptes(v->domain, v);
     for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
     {
-        if ( (pfn = l1e_get_pfn(pl1e[i])) != 0 )
+        pfn = l1e_get_pfn(pl1e[i]);
+        if ( (l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) && pfn != zero_pfn )
             put_page_and_type(mfn_to_page(pfn));
-        l1e_write(&pl1e[i], l1e_empty());
+        l1e_write(&pl1e[i], l1e_from_pfn(zero_pfn, __PAGE_HYPERVISOR_RO));
         v->arch.pv_vcpu.gdt_frames[i] = 0;
     }
 }
@@ -4442,7 +4445,7 @@ long set_gdt(struct vcpu *v, 
     struct domain *d = v->domain;
     l1_pgentry_t *pl1e;
     /* NB. There are 512 8-byte entries per GDT page. */
-    int i, nr_pages = (entries + 511) / 512;
+    unsigned int i, nr_pages = (entries + 511) / 512;
 
     if ( entries > FIRST_RESERVED_GDT_ENTRY )
         return -EINVAL;
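
Aside, on the set_gdt() hunk above: nr_pages is plain ceiling division
over the 512 descriptors that fit in one page. A standalone sketch,
with the 4096/8 constants assumed rather than taken from Xen's headers:

    /* Standalone sketch of the set_gdt() page-count arithmetic; 4096
     * and 8 are assumed x86 constants, not Xen's own macros. */
    #include <assert.h>

    #define DESCS_PER_PAGE (4096 / 8)           /* 512 per GDT page */

    static unsigned int gdt_pages(unsigned int entries)
    {
        return (entries + DESCS_PER_PAGE - 1) / DESCS_PER_PAGE;
    }

    int main(void)
    {
        assert(gdt_pages(1)   == 1);  /* partial page still needs a frame */
        assert(gdt_pages(512) == 1);  /* exactly one full page */
        assert(gdt_pages(513) == 2);  /* one entry spills to a second page */
        return 0;
    }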



