From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Jan Beulich <JBeulich@suse.com>
Subject: [PATCH v2 19/19] x86/hvm: Use system-segment relative memory accesses
Date: Mon, 28 Nov 2016 11:13:36 +0000	[thread overview]
Message-ID: <1480331616-6165-20-git-send-email-andrew.cooper3@citrix.com> (raw)
In-Reply-To: <1480331616-6165-1-git-send-email-andrew.cooper3@citrix.com>

With hvm_virtual_to_linear_addr() capable of doing proper system-segment
relative memory accesses, avoid open-coding the address and limit calculations
locally.
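
For reference, the shape of the transformation (an illustrative before/after
distilled from the hunks below, not an extra change) is that offsets are now
passed relative to the relevant system segment, and hvm_virtual_to_linear_addr()
supplies the base, limit and canonical checks which were previously open-coded:

    /* Before: no segmentation; base and limit handled by each caller. */
    rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
                   &desc, sizeof(desc), ctxt);

    /* After: descriptor-table relative; segmentation applied centrally. */
    rc = ops->read((sel & 4) ? x86_seg_ldtr : x86_seg_gdtr,
                   sel & 0xfff8, &desc, sizeof(desc), ctxt);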

When a table spans the 4GB boundary (32bit) or the canonical address boundary
(64bit), segmentation errors are now raised.  Previously, the use of
x86_seg_none caused segmentation to be skipped entirely; the linear address was
truncated, went through the pagewalk, and could come out valid on the far side.
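
To make the failure mode concrete, a minimal sketch of the wrap case with
hypothetical values (the real checks live in hvm_virtual_to_linear_addr(), not
in this snippet):

    /* 32bit guest: GDT based just below the 4GB boundary. */
    uint32_t base   = 0xfffffffc;
    uint32_t offset = 0;                /* first descriptor: sel & 0xfff8 == 0 */
    uint32_t addr   = base + offset;    /* 0xfffffffc */
    uint32_t last_byte = addr + 8 - 1;  /* wraps to 0x00000003 */

    if ( last_byte < addr )
    {
        /*
         * The 8-byte descriptor access crosses the 4GB boundary.  Previously
         * the truncated address went to the pagewalk and could map something
         * valid; a segmentation error is now raised instead.
         */
    }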

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <JBeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
---
v2:
 * Shorten exception handling
 * Replace ->cmpxchg() assertion with proper exception handling
---
 xen/arch/x86/hvm/hvm.c                 |   8 +++
 xen/arch/x86/x86_emulate/x86_emulate.c | 123 +++++++++++++++++++++------------
 2 files changed, 85 insertions(+), 46 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5abdc3c..dd4df47 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2470,6 +2470,14 @@ bool_t hvm_virtual_to_linear_addr(
     unsigned long addr = offset, last_byte;
     bool_t okay = 0;
 
+    /*
+     * These checks are for a memory access through an active segment.
+     *
+     * It is expected that the access rights of reg are suitable for seg (and
+     * that this is enforced at the point that seg is loaded).
+     */
+    ASSERT(seg < x86_seg_none);
+
     if ( !(current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
     {
         /*
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index fa6fba1..dad696c 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1181,20 +1181,36 @@ static int ioport_access_check(
         return rc;
 
     /* Ensure the TSS has an io-bitmap-offset field. */
-    generate_exception_if(tr.attr.fields.type != 0xb ||
-                          tr.limit < 0x67, EXC_GP, 0);
+    generate_exception_if(tr.attr.fields.type != 0xb, EXC_GP, 0);
 
-    if ( (rc = read_ulong(x86_seg_none, tr.base + 0x66,
-                          &iobmp, 2, ctxt, ops)) )
+    switch ( rc = read_ulong(x86_seg_tr, 0x66, &iobmp, 2, ctxt, ops) )
+    {
+    case X86EMUL_OKAY:
+        break;
+
+    case X86EMUL_EXCEPTION:
+        generate_exception_if(!ctxt->event_pending, EXC_GP, 0);
+        /* fallthrough */
+
+    default:
         return rc;
+    }
 
-    /* Ensure TSS includes two bytes including byte containing first port. */
-    iobmp += first_port / 8;
-    generate_exception_if(tr.limit <= iobmp, EXC_GP, 0);
+    /* Read two bytes including byte containing first port. */
+    switch ( rc = read_ulong(x86_seg_tr, iobmp + first_port / 8,
+                             &iobmp, 2, ctxt, ops) )
+    {
+    case X86EMUL_OKAY:
+        break;
+
+    case X86EMUL_EXCEPTION:
+        generate_exception_if(!ctxt->event_pending, EXC_GP, 0);
+        /* fallthrough */
 
-    if ( (rc = read_ulong(x86_seg_none, tr.base + iobmp,
-                          &iobmp, 2, ctxt, ops)) )
+    default:
         return rc;
+    }
+
     generate_exception_if(iobmp & (((1 << bytes) - 1) << (first_port & 7)),
                           EXC_GP, 0);
 
@@ -1317,9 +1333,12 @@ realmode_load_seg(
     struct x86_emulate_ctxt *ctxt,
     const struct x86_emulate_ops *ops)
 {
-    int rc = ops->read_segment(seg, sreg, ctxt);
+    int rc;
+
+    if ( !ops->read_segment )
+        return X86EMUL_UNHANDLEABLE;
 
-    if ( !rc )
+    if ( (rc = ops->read_segment(seg, sreg, ctxt)) == X86EMUL_OKAY )
     {
         sreg->sel  = sel;
         sreg->base = (uint32_t)sel << 4;
@@ -1336,7 +1355,7 @@ protmode_load_seg(
     struct x86_emulate_ctxt *ctxt,
     const struct x86_emulate_ops *ops)
 {
-    struct segment_register desctab;
+    enum x86_segment sel_seg = (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr;
     struct { uint32_t a, b; } desc;
     uint8_t dpl, rpl;
     int cpl = get_cpl(ctxt, ops);
@@ -1369,21 +1388,19 @@ protmode_load_seg(
     if ( !is_x86_user_segment(seg) && (sel & 4) )
         goto raise_exn;
 
-    if ( (rc = ops->read_segment((sel & 4) ? x86_seg_ldtr : x86_seg_gdtr,
-                                 &desctab, ctxt)) )
-        return rc;
-
-    /* Segment not valid for use (cooked meaning of .p)? */
-    if ( !desctab.attr.fields.p )
-        goto raise_exn;
+    switch ( rc = ops->read(sel_seg, sel & 0xfff8, &desc, sizeof(desc), ctxt) )
+    {
+    case X86EMUL_OKAY:
+        break;
 
-    /* Check against descriptor table limit. */
-    if ( ((sel & 0xfff8) + 7) > desctab.limit )
-        goto raise_exn;
+    case X86EMUL_EXCEPTION:
+        if ( !ctxt->event_pending )
+            goto raise_exn;
+        /* fallthrough */
 
-    if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
-                         &desc, sizeof(desc), ctxt)) )
+    default:
         return rc;
+    }
 
     if ( !is_x86_user_segment(seg) )
     {
@@ -1471,9 +1488,20 @@ protmode_load_seg(
     {
         uint32_t new_desc_b = desc.b | a_flag;
 
-        if ( (rc = ops->cmpxchg(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
-                                &desc.b, &new_desc_b, 4, ctxt)) != 0 )
+        switch ( (rc = ops->cmpxchg(sel_seg, (sel & 0xfff8) + 4, &desc.b,
+                                    &new_desc_b, sizeof(desc.b), ctxt)) )
+        {
+        case X86EMUL_OKAY:
+            break;
+
+        case X86EMUL_EXCEPTION:
+            if ( !ctxt->event_pending )
+                goto raise_exn;
+            /* fallthrough */
+
+        default:
             return rc;
+        }
 
         /* Force the Accessed flag in our local copy. */
         desc.b = new_desc_b;
@@ -1507,8 +1535,7 @@ load_seg(
     struct segment_register reg;
     int rc;
 
-    if ( (ops->read_segment == NULL) ||
-         (ops->write_segment == NULL) )
+    if ( !ops->write_segment )
         return X86EMUL_UNHANDLEABLE;
 
     if ( !sreg )
@@ -1636,8 +1663,7 @@ static int inject_swint(enum x86_swint_type type,
         if ( !in_realmode(ctxt, ops) )
         {
             unsigned int idte_size, idte_offset;
-            struct segment_register idtr;
-            uint32_t idte_ctl;
+            struct { uint32_t a, b, c, d; } idte;
             int lm = in_longmode(ctxt, ops);
 
             if ( lm < 0 )
@@ -1660,24 +1686,30 @@ static int inject_swint(enum x86_swint_type type,
                  ((ctxt->regs->eflags & EFLG_IOPL) != EFLG_IOPL) )
                 goto raise_exn;
 
-            fail_if(ops->read_segment == NULL);
             fail_if(ops->read == NULL);
-            if ( (rc = ops->read_segment(x86_seg_idtr, &idtr, ctxt)) )
-                goto done;
-
-            if ( (idte_offset + idte_size - 1) > idtr.limit )
-                goto raise_exn;
 
             /*
-             * Should strictly speaking read all 8/16 bytes of an entry,
-             * but we currently only care about the dpl and present bits.
+             * Read all 8/16 bytes so the idtr limit check is applied properly
+             * to this entry, even though we only end up looking at the 2nd
+             * word.
              */
-            if ( (rc = ops->read(x86_seg_none, idtr.base + idte_offset + 4,
-                                 &idte_ctl, sizeof(idte_ctl), ctxt)) )
-                goto done;
+            switch ( rc = ops->read(x86_seg_idtr, idte_offset,
+                                    &idte, idte_size, ctxt) )
+            {
+            case X86EMUL_OKAY:
+                break;
+
+            case X86EMUL_EXCEPTION:
+                if ( !ctxt->event_pending )
+                    goto raise_exn;
+                /* fallthrough */
+
+            default:
+                return rc;
+            }
 
             /* Is this entry present? */
-            if ( !(idte_ctl & (1u << 15)) )
+            if ( !(idte.b & (1u << 15)) )
             {
                 fault_type = EXC_NP;
                 goto raise_exn;
@@ -1686,12 +1718,11 @@ static int inject_swint(enum x86_swint_type type,
             /* icebp counts as a hardware event, and bypasses the dpl check. */
             if ( type != x86_swint_icebp )
             {
-                struct segment_register ss;
+                int cpl = get_cpl(ctxt, ops);
 
-                if ( (rc = ops->read_segment(x86_seg_ss, &ss, ctxt)) )
-                    goto done;
+                fail_if(cpl < 0);
 
-                if ( ss.attr.fields.dpl > ((idte_ctl >> 13) & 3) )
+                if ( cpl > ((idte.b >> 13) & 3) )
                     goto raise_exn;
             }
         }
-- 
2.1.4


