From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Petre Pircalabu <ppircalabu@bitdefender.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Jan Beulich <JBeulich@suse.com>
Subject: [PATCH RFC] x86/emul: Fix the handling of unimplemented Grp7 instructions
Date: Mon, 4 Sep 2017 18:21:01 +0100
Message-ID: <1504545661-24626-1-git-send-email-andrew.cooper3@citrix.com>

Grp7 is abnormally complicated to decode, even by x86's standards, with
{s,l}msw being the problematic cases.
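
For context, a rough decode sketch (illustrative only, not part of the patch;
the variable names merely mirror the emulator's modrm/modrm_reg fields):

    /* The usual ModRM field split, applied to Grp7 (0f 01). */
    unsigned int mod = (modrm >> 6) & 3; /* 3 => register operand form */
    unsigned int reg = (modrm >> 3) & 7; /* selects the Grp7 sub-instruction */

    /*
     * Most mod == 3 encodings are fully implicit instructions identified by
     * the whole ModRM byte (0xca clac, 0xcb stac, 0xd1 xsetbv, ...), whereas
     * smsw (/4) and lmsw (/6) take a genuine r/m operand and are therefore
     * valid in both the register and the memory forms.
     */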

Previously, any ModRM value which fell through the first switch statement
(looking for instructions with entirely implicit operands) would be
interpreted by the second switch statement (handling instructions with memory
operands).

Unimplemented instructions would then hit the #UD case for having a non-memory
operand, rather than taking the cannot_emulate path.

Place a big if/else around the two switch statements (accounting for {s,l}msw,
which need handling in the else clause), so both switch statements can have a
default case which takes the cannot_emulate path.
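
In outline, the resulting structure (a condensed sketch of what the diff below
introduces, elided to the control flow):

    if ( (modrm & 0xc0) == 0xc0 &&
         (modrm_reg & 7) != 4 /* smsw */ &&
         (modrm_reg & 7) != 6 /* lmsw */ )
    {
        switch ( modrm )         /* instructions with entirely implicit operands */
        {
            ...
        default:
            goto cannot_emulate;
        }
    }
    else
    {
        switch ( modrm_reg & 7 ) /* instructions taking an r/m operand */
        {
            ...
        default:
            goto cannot_emulate;
        }
    }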

Reported-by: Petre Pircalabu <ppircalabu@bitdefender.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Petre Pircalabu <ppircalabu@bitdefender.com>

RFC as I've only done light testing so far.
---
 xen/arch/x86/x86_emulate/x86_emulate.c | 353 +++++++++++++++++----------------
 1 file changed, 184 insertions(+), 169 deletions(-)

diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 2201852..af3d8da 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -4987,197 +4987,212 @@ x86_emulate(
         }
         break;
 
-    case X86EMUL_OPC(0x0f, 0x01): /* Grp7 */ {
-        unsigned long base, limit, cr0, cr0w;
+    case X86EMUL_OPC(0x0f, 0x01): /* Grp7 */
+    {
+        unsigned long base, limit;
 
-        switch( modrm )
+        if ( (modrm & 0xc0) == 0xc0 &&
+             (modrm_reg & 7) != 4 /* smsw */ &&
+             (modrm_reg & 7) != 6 /* lmsw */ )
         {
-        case 0xca: /* clac */
-        case 0xcb: /* stac */
-            vcpu_must_have(smap);
-            generate_exception_if(vex.pfx || !mode_ring0(), EXC_UD);
-
-            _regs.eflags &= ~X86_EFLAGS_AC;
-            if ( modrm == 0xcb )
-                _regs.eflags |= X86_EFLAGS_AC;
-            goto complete_insn;
+            switch ( modrm )
+            {
+            case 0xca: /* clac */
+            case 0xcb: /* stac */
+                vcpu_must_have(smap);
+                generate_exception_if(vex.pfx || !mode_ring0(), EXC_UD);
+
+                _regs.eflags &= ~X86_EFLAGS_AC;
+                if ( modrm == 0xcb )
+                    _regs.eflags |= X86_EFLAGS_AC;
+                goto complete_insn;
 
 #ifdef __XEN__
-        case 0xd1: /* xsetbv */
-            generate_exception_if(vex.pfx, EXC_UD);
-            if ( !ops->read_cr || ops->read_cr(4, &cr4, ctxt) != X86EMUL_OKAY )
-                cr4 = 0;
-            generate_exception_if(!(cr4 & X86_CR4_OSXSAVE), EXC_UD);
-            generate_exception_if(!mode_ring0() ||
-                                  handle_xsetbv(_regs.ecx,
-                                                _regs.eax | (_regs.rdx << 32)),
-                                  EXC_GP, 0);
-            goto complete_insn;
+            case 0xd1: /* xsetbv */
+                generate_exception_if(vex.pfx, EXC_UD);
+                if ( !ops->read_cr ||
+                     ops->read_cr(4, &cr4, ctxt) != X86EMUL_OKAY )
+                    cr4 = 0;
+                generate_exception_if(!(cr4 & X86_CR4_OSXSAVE), EXC_UD);
+                generate_exception_if(!mode_ring0() ||
+                                      handle_xsetbv(_regs.ecx, _regs.eax |
+                                                    (_regs.rdx << 32)),
+                                      EXC_GP, 0);
+                goto complete_insn;
 #endif
 
-        case 0xd4: /* vmfunc */
-            generate_exception_if(vex.pfx, EXC_UD);
-            fail_if(!ops->vmfunc);
-            if ( (rc = ops->vmfunc(ctxt)) != X86EMUL_OKAY )
-                goto done;
-            goto complete_insn;
-
-        case 0xd5: /* xend */
-            generate_exception_if(vex.pfx, EXC_UD);
-            generate_exception_if(!vcpu_has_rtm(), EXC_UD);
-            generate_exception_if(vcpu_has_rtm(), EXC_GP, 0);
-            break;
-
-        case 0xd6: /* xtest */
-            generate_exception_if(vex.pfx, EXC_UD);
-            generate_exception_if(!vcpu_has_rtm() && !vcpu_has_hle(),
-                                  EXC_UD);
-            /* Neither HLE nor RTM can be active when we get here. */
-            _regs.eflags |= X86_EFLAGS_ZF;
-            goto complete_insn;
+            case 0xd4: /* vmfunc */
+                generate_exception_if(vex.pfx, EXC_UD);
+                fail_if(!ops->vmfunc);
+                if ( (rc = ops->vmfunc(ctxt)) != X86EMUL_OKAY )
+                    goto done;
+                goto complete_insn;
 
-        case 0xdf: /* invlpga */
-            generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
-            generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            fail_if(ops->invlpg == NULL);
-            if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.r(ax)),
-                                   ctxt)) )
-                goto done;
-            goto complete_insn;
+            case 0xd5: /* xend */
+                generate_exception_if(vex.pfx, EXC_UD);
+                generate_exception_if(!vcpu_has_rtm(), EXC_UD);
+                generate_exception_if(vcpu_has_rtm(), EXC_GP, 0);
+                break;
 
-        case 0xf9: /* rdtscp */
-            fail_if(ops->read_msr == NULL);
-            if ( (rc = ops->read_msr(MSR_TSC_AUX,
-                                     &msr_val, ctxt)) != X86EMUL_OKAY )
-                goto done;
-            _regs.r(cx) = (uint32_t)msr_val;
-            goto rdtsc;
+            case 0xd6: /* xtest */
+                generate_exception_if(vex.pfx, EXC_UD);
+                generate_exception_if(!vcpu_has_rtm() && !vcpu_has_hle(),
+                                      EXC_UD);
+                /* Neither HLE nor RTM can be active when we get here. */
+                _regs.eflags |= X86_EFLAGS_ZF;
+                goto complete_insn;
 
-        case 0xfc: /* clzero */
-        {
-            unsigned long zero = 0;
+            case 0xdf: /* invlpga */
+                generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
+                generate_exception_if(!mode_ring0(), EXC_GP, 0);
+                fail_if(ops->invlpg == NULL);
+                if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.r(ax)),
+                                       ctxt)) )
+                    goto done;
+                goto complete_insn;
 
-            vcpu_must_have(clzero);
+            case 0xf9: /* rdtscp */
+                fail_if(ops->read_msr == NULL);
+                if ( (rc = ops->read_msr(MSR_TSC_AUX,
+                                         &msr_val, ctxt)) != X86EMUL_OKAY )
+                    goto done;
+                _regs.r(cx) = (uint32_t)msr_val;
+                goto rdtsc;
 
-            base = ad_bytes == 8 ? _regs.r(ax) :
-                   ad_bytes == 4 ? _regs.eax : _regs.ax;
-            limit = 0;
-            if ( vcpu_has_clflush() &&
-                 ops->cpuid(1, 0, &cpuid_leaf, ctxt) == X86EMUL_OKAY )
-                limit = ((cpuid_leaf.b >> 8) & 0xff) * 8;
-            generate_exception_if(limit < sizeof(long) ||
-                                  (limit & (limit - 1)), EXC_UD);
-            base &= ~(limit - 1);
-            if ( ops->rep_stos )
+            case 0xfc: /* clzero */
             {
-                unsigned long nr_reps = limit / sizeof(zero);
+                unsigned long zero = 0;
+
+                vcpu_must_have(clzero);
+
+                base = ad_bytes == 8 ? _regs.r(ax) :
+                    ad_bytes == 4 ? _regs.eax : _regs.ax;
+                limit = 0;
+                if ( vcpu_has_clflush() &&
+                     ops->cpuid(1, 0, &cpuid_leaf, ctxt) == X86EMUL_OKAY )
+                    limit = ((cpuid_leaf.b >> 8) & 0xff) * 8;
+                generate_exception_if(limit < sizeof(long) ||
+                                      (limit & (limit - 1)), EXC_UD);
+                base &= ~(limit - 1);
+                if ( ops->rep_stos )
+                {
+                    unsigned long nr_reps = limit / sizeof(zero);
 
-                rc = ops->rep_stos(&zero, ea.mem.seg, base, sizeof(zero),
-                                   &nr_reps, ctxt);
-                if ( rc == X86EMUL_OKAY )
+                    rc = ops->rep_stos(&zero, ea.mem.seg, base, sizeof(zero),
+                                       &nr_reps, ctxt);
+                    if ( rc == X86EMUL_OKAY )
+                    {
+                        base += nr_reps * sizeof(zero);
+                        limit -= nr_reps * sizeof(zero);
+                    }
+                    else if ( rc != X86EMUL_UNHANDLEABLE )
+                        goto done;
+                }
+                fail_if(limit && !ops->write);
+                while ( limit )
                 {
-                    base += nr_reps * sizeof(zero);
-                    limit -= nr_reps * sizeof(zero);
+                    rc = ops->write(ea.mem.seg, base, &zero,
+                                    sizeof(zero), ctxt);
+                    if ( rc != X86EMUL_OKAY )
+                        goto done;
+                    base += sizeof(zero);
+                    limit -= sizeof(zero);
                 }
-                else if ( rc != X86EMUL_UNHANDLEABLE )
-                    goto done;
+                goto complete_insn;
             }
-            fail_if(limit && !ops->write);
-            while ( limit )
-            {
-                rc = ops->write(ea.mem.seg, base, &zero, sizeof(zero), ctxt);
-                if ( rc != X86EMUL_OKAY )
-                    goto done;
-                base += sizeof(zero);
-                limit -= sizeof(zero);
+
+            default:
+                goto cannot_emulate;
             }
-            goto complete_insn;
-        }
         }
+        else
+        {
+            unsigned long cr0, cr0w;
 
-        seg = (modrm_reg & 1) ? x86_seg_idtr : x86_seg_gdtr;
+            seg = (modrm_reg & 1) ? x86_seg_idtr : x86_seg_gdtr;
 
-        switch ( modrm_reg & 7 )
-        {
-        case 0: /* sgdt */
-        case 1: /* sidt */
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
-            generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
-            fail_if(!ops->read_segment || !ops->write);
-            if ( (rc = ops->read_segment(seg, &sreg, ctxt)) )
-                goto done;
-            if ( mode_64bit() )
-                op_bytes = 8;
-            else if ( op_bytes == 2 )
-            {
-                sreg.base &= 0xffffff;
-                op_bytes = 4;
-            }
-            if ( (rc = ops->write(ea.mem.seg, ea.mem.off, &sreg.limit,
-                                  2, ctxt)) != X86EMUL_OKAY ||
-                 (rc = ops->write(ea.mem.seg, ea.mem.off + 2, &sreg.base,
-                                  op_bytes, ctxt)) != X86EMUL_OKAY )
-                goto done;
-            break;
-        case 2: /* lgdt */
-        case 3: /* lidt */
-            generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
-            fail_if(ops->write_segment == NULL);
-            memset(&sreg, 0, sizeof(sreg));
-            if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
-                                  &limit, 2, ctxt, ops)) ||
-                 (rc = read_ulong(ea.mem.seg, ea.mem.off+2,
-                                  &base, mode_64bit() ? 8 : 4, ctxt, ops)) )
-                goto done;
-            generate_exception_if(!is_canonical_address(base), EXC_GP, 0);
-            sreg.base = base;
-            sreg.limit = limit;
-            if ( !mode_64bit() && op_bytes == 2 )
-                sreg.base &= 0xffffff;
-            if ( (rc = ops->write_segment(seg, &sreg, ctxt)) )
-                goto done;
-            break;
-        case 4: /* smsw */
-            generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
-            if ( ea.type == OP_MEM )
+            switch ( modrm_reg & 7 )
             {
-                fail_if(!ops->write);
-                d |= Mov; /* force writeback */
-                ea.bytes = 2;
+            case 0: /* sgdt */
+            case 1: /* sidt */
+                generate_exception_if(ea.type != OP_MEM, EXC_UD);
+                generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
+                fail_if(!ops->read_segment || !ops->write);
+                if ( (rc = ops->read_segment(seg, &sreg, ctxt)) )
+                    goto done;
+                if ( mode_64bit() )
+                    op_bytes = 8;
+                else if ( op_bytes == 2 )
+                {
+                    sreg.base &= 0xffffff;
+                    op_bytes = 4;
+                }
+                if ( (rc = ops->write(ea.mem.seg, ea.mem.off, &sreg.limit,
+                                      2, ctxt)) != X86EMUL_OKAY ||
+                     (rc = ops->write(ea.mem.seg, ea.mem.off + 2, &sreg.base,
+                                      op_bytes, ctxt)) != X86EMUL_OKAY )
+                    goto done;
+                break;
+            case 2: /* lgdt */
+            case 3: /* lidt */
+                generate_exception_if(!mode_ring0(), EXC_GP, 0);
+                generate_exception_if(ea.type != OP_MEM, EXC_UD);
+                fail_if(ops->write_segment == NULL);
+                memset(&sreg, 0, sizeof(sreg));
+                if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
+                                      &limit, 2, ctxt, ops)) ||
+                     (rc = read_ulong(ea.mem.seg, ea.mem.off+2,
+                                      &base, mode_64bit() ? 8 : 4, ctxt, ops)) )
+                    goto done;
+                generate_exception_if(!is_canonical_address(base), EXC_GP, 0);
+                sreg.base = base;
+                sreg.limit = limit;
+                if ( !mode_64bit() && op_bytes == 2 )
+                    sreg.base &= 0xffffff;
+                if ( (rc = ops->write_segment(seg, &sreg, ctxt)) )
+                    goto done;
+                break;
+            case 4: /* smsw */
+                generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
+                if ( ea.type == OP_MEM )
+                {
+                    fail_if(!ops->write);
+                    d |= Mov; /* force writeback */
+                    ea.bytes = 2;
+                }
+                else
+                    ea.bytes = op_bytes;
+                dst = ea;
+                fail_if(ops->read_cr == NULL);
+                if ( (rc = ops->read_cr(0, &dst.val, ctxt)) )
+                    goto done;
+                break;
+            case 6: /* lmsw */
+                fail_if(ops->read_cr == NULL);
+                fail_if(ops->write_cr == NULL);
+                generate_exception_if(!mode_ring0(), EXC_GP, 0);
+                if ( (rc = ops->read_cr(0, &cr0, ctxt)) )
+                    goto done;
+                if ( ea.type == OP_REG )
+                    cr0w = *ea.reg;
+                else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
+                                           &cr0w, 2, ctxt, ops)) )
+                    goto done;
+                /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
+                cr0 = (cr0 & ~0xe) | (cr0w & 0xf);
+                if ( (rc = ops->write_cr(0, cr0, ctxt)) )
+                    goto done;
+                break;
+            case 7: /* invlpg */
+                generate_exception_if(!mode_ring0(), EXC_GP, 0);
+                generate_exception_if(ea.type != OP_MEM, EXC_UD);
+                fail_if(ops->invlpg == NULL);
+                if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
+                    goto done;
+                break;
+            default:
+                goto cannot_emulate;
             }
-            else
-                ea.bytes = op_bytes;
-            dst = ea;
-            fail_if(ops->read_cr == NULL);
-            if ( (rc = ops->read_cr(0, &dst.val, ctxt)) )
-                goto done;
-            break;
-        case 6: /* lmsw */
-            fail_if(ops->read_cr == NULL);
-            fail_if(ops->write_cr == NULL);
-            generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            if ( (rc = ops->read_cr(0, &cr0, ctxt)) )
-                goto done;
-            if ( ea.type == OP_REG )
-                cr0w = *ea.reg;
-            else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
-                                       &cr0w, 2, ctxt, ops)) )
-                goto done;
-            /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
-            cr0 = (cr0 & ~0xe) | (cr0w & 0xf);
-            if ( (rc = ops->write_cr(0, cr0, ctxt)) )
-                goto done;
-            break;
-        case 7: /* invlpg */
-            generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
-            fail_if(ops->invlpg == NULL);
-            if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
-                goto done;
-            break;
-        default:
-            goto cannot_emulate;
         }
         break;
     }
-- 
2.1.4

