* [PATCH v2 2/4] x86/emul: Optimise decode_register() somewhat
2018-01-30 15:56 [PATCH v2 1/4] x86/emul: Introduce a test covering legacy byte ops Andrew Cooper
@ 2018-01-30 15:56 ` Andrew Cooper
2018-01-31 11:09 ` Jan Beulich
2018-01-30 15:56 ` [PATCH v2 3/4] x86/hvm: Improvements to external users of decode_register() Andrew Cooper
` (2 subsequent siblings)
3 siblings, 1 reply; 8+ messages in thread
From: Andrew Cooper @ 2018-01-30 15:56 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper, Jan Beulich
The positions of GPRs inside struct cpu_user_regs don't follow any
particular order, so as compiled, decode_register() becomes a jump table to 16
blocks which calculate the appropriate offset, at a total of 207 bytes.
Instead, pre-compute the offsets at build time and use pointer arithmetic to
calculate the result. By observation, most callers in x86_emulate() inline
and constant-propagate the highbyte_regs value of 0.
The splitting of the general and legacy byte-op cases means that we will now
hit an ASSERT if any code path tries to use the legacy byte-op encoding with a
REX prefix.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
v2:
* Move byteop_offsets[] into function scope. Rearrange to have a smaller
byteop_offsets[] array.
---
xen/arch/x86/x86_emulate/x86_emulate.c | 75 ++++++++++++++++++++++++----------
1 file changed, 53 insertions(+), 22 deletions(-)
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index ff0a003..123d941 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1935,36 +1935,67 @@ load_seg(
return rc;
}
+/* Map GPRs by ModRM encoding to their offset within struct cpu_user_regs. */
+static const uint8_t cpu_user_regs_gpr_offsets[] = {
+ offsetof(struct cpu_user_regs, r(ax)),
+ offsetof(struct cpu_user_regs, r(cx)),
+ offsetof(struct cpu_user_regs, r(dx)),
+ offsetof(struct cpu_user_regs, r(bx)),
+ offsetof(struct cpu_user_regs, r(sp)),
+ offsetof(struct cpu_user_regs, r(bp)),
+ offsetof(struct cpu_user_regs, r(si)),
+ offsetof(struct cpu_user_regs, r(di)),
+#ifdef __x86_64__
+ offsetof(struct cpu_user_regs, r8),
+ offsetof(struct cpu_user_regs, r9),
+ offsetof(struct cpu_user_regs, r10),
+ offsetof(struct cpu_user_regs, r11),
+ offsetof(struct cpu_user_regs, r12),
+ offsetof(struct cpu_user_regs, r13),
+ offsetof(struct cpu_user_regs, r14),
+ offsetof(struct cpu_user_regs, r15),
+#endif
+};
+
void *
decode_register(
uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
{
- void *p;
+ static const uint8_t byteop_offsets[] = {
+ offsetof(struct cpu_user_regs, al),
+ offsetof(struct cpu_user_regs, cl),
+ offsetof(struct cpu_user_regs, dl),
+ offsetof(struct cpu_user_regs, bl),
+ offsetof(struct cpu_user_regs, ah),
+ offsetof(struct cpu_user_regs, ch),
+ offsetof(struct cpu_user_regs, dh),
+ offsetof(struct cpu_user_regs, bh),
+ };
- switch ( modrm_reg )
+ if ( !highbyte_regs )
{
- case 0: p = &regs->r(ax); break;
- case 1: p = &regs->r(cx); break;
- case 2: p = &regs->r(dx); break;
- case 3: p = &regs->r(bx); break;
- case 4: p = (highbyte_regs ? &regs->ah : (void *)&regs->r(sp)); break;
- case 5: p = (highbyte_regs ? &regs->ch : (void *)&regs->r(bp)); break;
- case 6: p = (highbyte_regs ? &regs->dh : (void *)&regs->r(si)); break;
- case 7: p = (highbyte_regs ? &regs->bh : (void *)&regs->r(di)); break;
-#if defined(__x86_64__)
- case 8: p = &regs->r8; break;
- case 9: p = &regs->r9; break;
- case 10: p = &regs->r10; break;
- case 11: p = &regs->r11; break;
- case 12: p = &regs->r12; break;
- case 13: p = &regs->r13; break;
- case 14: p = &regs->r14; break;
- case 15: p = &regs->r15; break;
-#endif
- default: BUG(); p = NULL; break;
+ /* Check that the array is a power of two. */
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_user_regs_gpr_offsets) &
+ (ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1));
+
+ ASSERT(modrm_reg < ARRAY_SIZE(cpu_user_regs_gpr_offsets));
+
+ /* For safety in release builds. Debug builds will hit the ASSERT() */
+ modrm_reg &= ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1;
+
+ return (void *)regs + cpu_user_regs_gpr_offsets[modrm_reg];
}
- return p;
+ /* Check that the array is a power of two. */
+ BUILD_BUG_ON(ARRAY_SIZE(byteop_offsets) &
+ (ARRAY_SIZE(byteop_offsets) - 1));
+
+ ASSERT(modrm_reg < ARRAY_SIZE(byteop_offsets));
+
+ /* For safety in release builds. Debug builds will hit the ASSERT() */
+ modrm_reg &= ARRAY_SIZE(byteop_offsets) - 1;
+
+ return (void *)regs + byteop_offsets[modrm_reg];
}
static void *decode_vex_gpr(unsigned int vex_reg, struct cpu_user_regs *regs,
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH v2 3/4] x86/hvm: Improvements to external users of decode_register()
2018-01-30 15:56 [PATCH v2 1/4] x86/emul: Introduce a test covering legacy byte ops Andrew Cooper
2018-01-30 15:56 ` [PATCH v2 2/4] x86/emul: Optimise decode_register() somewhat Andrew Cooper
@ 2018-01-30 15:56 ` Andrew Cooper
2018-01-30 15:56 ` [PATCH v2 4/4] x86/emul: Improvements to internal " Andrew Cooper
2018-01-31 11:06 ` [PATCH v2 1/4] x86/emul: Introduce a test covering legacy byte ops Jan Beulich
3 siblings, 0 replies; 8+ messages in thread
From: Andrew Cooper @ 2018-01-30 15:56 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper
* Rename to decode_gpr() to be more specific as to its purpose
* Drop the highbyte encoding handling, as no users currently care, and it is
unlikely that future users would care.
* Change to a static inline, returning an unsigned long pointer.
Doing so highlights that the "invalid gpr" paths in hvm_mov_{to,from}_cr()
were actually unreachable. All callers already passed in-range GPRs, and
out-of-range GPRs would have hit the BUG() previously.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v2:
* Fix commit message
---
xen/arch/x86/hvm/hvm.c | 17 ++---------------
xen/arch/x86/hvm/vmx/vvmx.c | 16 +++-------------
xen/arch/x86/x86_emulate/x86_emulate.c | 15 ++-------------
xen/arch/x86/x86_emulate/x86_emulate.h | 29 +++++++++++++++++++++++++----
4 files changed, 32 insertions(+), 45 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8d67851..18d721d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2061,16 +2061,9 @@ static void hvm_set_uc_mode(struct vcpu *v, bool_t is_in_uc_mode)
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
{
struct vcpu *curr = current;
- unsigned long val, *reg;
+ unsigned long val = *decode_gpr(guest_cpu_user_regs(), gpr);
int rc;
- if ( (reg = decode_register(gpr, guest_cpu_user_regs(), 0)) == NULL )
- {
- gdprintk(XENLOG_ERR, "invalid gpr: %u\n", gpr);
- goto exit_and_crash;
- }
-
- val = *reg;
HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(val));
HVM_DBG_LOG(DBG_LEVEL_1, "CR%u, value = %lx", cr, val);
@@ -2111,13 +2104,7 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
{
struct vcpu *curr = current;
- unsigned long val = 0, *reg;
-
- if ( (reg = decode_register(gpr, guest_cpu_user_regs(), 0)) == NULL )
- {
- gdprintk(XENLOG_ERR, "invalid gpr: %u\n", gpr);
- goto exit_and_crash;
- }
+ unsigned long val = 0, *reg = decode_gpr(guest_cpu_user_regs(), gpr);
switch ( cr )
{
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 885eab3..dfe97b9 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -347,18 +347,14 @@ enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *v, u32 encoding,
static unsigned long reg_read(struct cpu_user_regs *regs,
unsigned int index)
{
- unsigned long *pval = decode_register(index, regs, 0);
-
- return *pval;
+ return *decode_gpr(regs, index);
}
static void reg_write(struct cpu_user_regs *regs,
unsigned int index,
unsigned long value)
{
- unsigned long *pval = decode_register(index, regs, 0);
-
- *pval = value;
+ *decode_gpr(regs, index) = value;
}
static inline u32 __n2_pin_exec_control(struct vcpu *v)
@@ -2483,14 +2479,8 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
{
unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
- unsigned long *reg;
+ val = *decode_gpr(guest_cpu_user_regs(), gp);
- if ( (reg = decode_register(gp, guest_cpu_user_regs(), 0)) == NULL )
- {
- gdprintk(XENLOG_ERR, "invalid gpr: %lx\n", gp);
- break;
- }
- val = *reg;
if ( cr == 0 )
{
u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 123d941..3766b7a 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1936,7 +1936,7 @@ load_seg(
}
/* Map GPRs by ModRM encoding to their offset within struct cpu_user_regs. */
-static const uint8_t cpu_user_regs_gpr_offsets[] = {
+const uint8_t cpu_user_regs_gpr_offsets[] = {
offsetof(struct cpu_user_regs, r(ax)),
offsetof(struct cpu_user_regs, r(cx)),
offsetof(struct cpu_user_regs, r(dx)),
@@ -1973,18 +1973,7 @@ decode_register(
};
if ( !highbyte_regs )
- {
- /* Check that the array is a power of two. */
- BUILD_BUG_ON(ARRAY_SIZE(cpu_user_regs_gpr_offsets) &
- (ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1));
-
- ASSERT(modrm_reg < ARRAY_SIZE(cpu_user_regs_gpr_offsets));
-
- /* For safety in release builds. Debug builds will hit the ASSERT() */
- modrm_reg &= ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1;
-
- return (void *)regs + cpu_user_regs_gpr_offsets[modrm_reg];
- }
+ return decode_gpr(regs, modrm_reg);
/* Check that the array is a power of two. */
BUILD_BUG_ON(ARRAY_SIZE(byteop_offsets) &
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 0c8c80a..ab5ef41 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -25,6 +25,14 @@
#define MAX_INST_LEN 15
+#if defined(__i386__)
+# define X86_NR_GPRS 8
+#elif defined(__x86_64__)
+# define X86_NR_GPRS 16
+#else
+# error Unknown compilation width
+#endif
+
struct x86_emulate_ctxt;
/*
@@ -601,14 +609,27 @@ int x86_emulate_wrapper(
#define x86_emulate x86_emulate_wrapper
#endif
+/* Map GPRs by ModRM encoding to their offset within struct cpu_user_regs. */
+extern const uint8_t cpu_user_regs_gpr_offsets[X86_NR_GPRS];
+
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
* pointer into the block that addresses the relevant register.
- * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
*/
-void *
-decode_register(
- uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs);
+static inline unsigned long *decode_gpr(struct cpu_user_regs *regs,
+ unsigned int modrm)
+{
+ /* Check that the array is a power of two. */
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_user_regs_gpr_offsets) &
+ (ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1));
+
+ ASSERT(modrm < ARRAY_SIZE(cpu_user_regs_gpr_offsets));
+
+ /* For safety in release builds. Debug builds will hit the ASSERT() */
+ modrm &= ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1;
+
+ return (void *)regs + cpu_user_regs_gpr_offsets[modrm];
+}
/* Unhandleable read, write or instruction fetch */
int
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH v2 4/4] x86/emul: Improvements to internal users of decode_register()
2018-01-30 15:56 [PATCH v2 1/4] x86/emul: Introduce a test covering legacy byte ops Andrew Cooper
2018-01-30 15:56 ` [PATCH v2 2/4] x86/emul: Optimise decode_register() somewhat Andrew Cooper
2018-01-30 15:56 ` [PATCH v2 3/4] x86/hvm: Improvements to external users of decode_register() Andrew Cooper
@ 2018-01-30 15:56 ` Andrew Cooper
2018-01-31 11:17 ` Jan Beulich
2018-01-31 11:06 ` [PATCH v2 1/4] x86/emul: Introduce a test covering legacy byte ops Jan Beulich
3 siblings, 1 reply; 8+ messages in thread
From: Andrew Cooper @ 2018-01-30 15:56 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper, Jan Beulich
Most users of decode_register() can be replaced with decode_gpr() right away.
For the few sites which do care about possibly using the legacy byteop
encoding, rename decode_register() to decode_gpr_byteop(), and adjust its 'int
highbyte_regs' parameter to the more correct 'bool legacy_byteop'.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
v2:
* s/highbyte/legacy_byteop/ across the patch
* Keep decode_gpr_byteop() returning void. It doesn't affect any usecases
---
xen/arch/x86/x86_emulate/x86_emulate.c | 69 +++++++++++++++-------------------
1 file changed, 30 insertions(+), 39 deletions(-)
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 3766b7a..2444417 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1957,9 +1957,8 @@ const uint8_t cpu_user_regs_gpr_offsets[] = {
#endif
};
-void *
-decode_register(
- uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
+static void *decode_gpr_byteop(
+ struct cpu_user_regs *regs, unsigned int modrm_reg, bool legacy_byteop)
{
static const uint8_t byteop_offsets[] = {
offsetof(struct cpu_user_regs, al),
@@ -1972,7 +1971,7 @@ decode_register(
offsetof(struct cpu_user_regs, bh),
};
- if ( !highbyte_regs )
+ if ( !legacy_byteop )
return decode_gpr(regs, modrm_reg);
/* Check that the array is a power of two. */
@@ -1987,10 +1986,11 @@ decode_register(
return (void *)regs + byteop_offsets[modrm_reg];
}
-static void *decode_vex_gpr(unsigned int vex_reg, struct cpu_user_regs *regs,
- const struct x86_emulate_ctxt *ctxt)
+static unsigned long *decode_vex_gpr(
+ unsigned int vex_reg, struct cpu_user_regs *regs,
+ const struct x86_emulate_ctxt *ctxt)
{
- return decode_register(~vex_reg & (mode_64bit() ? 0xf : 7), regs, 0);
+ return decode_gpr(regs, ~vex_reg & (mode_64bit() ? 0xf : 7));
}
static bool is_aligned(enum x86_segment seg, unsigned long offs,
@@ -2799,8 +2799,7 @@ x86_decode(
sib_index = ((sib >> 3) & 7) | ((rex_prefix << 2) & 8);
sib_base = (sib & 7) | ((rex_prefix << 3) & 8);
if ( sib_index != 4 && !(d & vSIB) )
- ea.mem.off = *(long *)decode_register(sib_index,
- state->regs, 0);
+ ea.mem.off = *decode_gpr(state->regs, sib_index);
ea.mem.off <<= (sib >> 6) & 3;
if ( (modrm_mod == 0) && ((sib_base & 7) == 5) )
ea.mem.off += insn_fetch_type(int32_t);
@@ -2819,15 +2818,13 @@ x86_decode(
ea.mem.off += state->regs->r(bp);
}
else
- ea.mem.off += *(long *)decode_register(sib_base,
- state->regs, 0);
+ ea.mem.off += *decode_gpr(state->regs, sib_base);
}
else
{
generate_exception_if(d & vSIB, EXC_UD);
modrm_rm |= (rex_prefix & 1) << 3;
- ea.mem.off = *(long *)decode_register(modrm_rm,
- state->regs, 0);
+ ea.mem.off = *decode_gpr(state->regs, modrm_rm);
if ( (modrm_rm == 5) && (modrm_mod != 0) )
ea.mem.seg = x86_seg_ss;
}
@@ -3052,8 +3049,7 @@ x86_emulate(
generate_exception_if(state->not_64bit && mode_64bit(), EXC_UD);
if ( ea.type == OP_REG )
- ea.reg = decode_register(modrm_rm, &_regs,
- (d & ByteOp) && !rex_prefix);
+ ea.reg = decode_gpr_byteop(&_regs, modrm_rm, (d & ByteOp) && !rex_prefix);
memset(mmvalp, 0xaa /* arbitrary */, sizeof(*mmvalp));
@@ -3067,13 +3063,13 @@ x86_emulate(
src.type = OP_REG;
if ( d & ByteOp )
{
- src.reg = decode_register(modrm_reg, &_regs, (rex_prefix == 0));
+ src.reg = decode_gpr_byteop(&_regs, modrm_reg, !rex_prefix);
src.val = *(uint8_t *)src.reg;
src.bytes = 1;
}
else
{
- src.reg = decode_register(modrm_reg, &_regs, 0);
+ src.reg = decode_gpr(&_regs, modrm_reg);
switch ( (src.bytes = op_bytes) )
{
case 2: src.val = *(uint16_t *)src.reg; break;
@@ -3143,13 +3139,13 @@ x86_emulate(
dst.type = OP_REG;
if ( d & ByteOp )
{
- dst.reg = decode_register(modrm_reg, &_regs, (rex_prefix == 0));
+ dst.reg = decode_gpr_byteop(&_regs, modrm_reg, !rex_prefix);
dst.val = *(uint8_t *)dst.reg;
dst.bytes = 1;
}
else
{
- dst.reg = decode_register(modrm_reg, &_regs, 0);
+ dst.reg = decode_gpr(&_regs, modrm_reg);
switch ( (dst.bytes = op_bytes) )
{
case 2: dst.val = *(uint16_t *)dst.reg; break;
@@ -3339,7 +3335,7 @@ x86_emulate(
case 0x40 ... 0x4f: /* inc/dec reg */
dst.type = OP_REG;
- dst.reg = decode_register(b & 7, &_regs, 0);
+ dst.reg = decode_gpr(&_regs, b & 7);
dst.bytes = op_bytes;
dst.val = *dst.reg;
if ( b & 8 )
@@ -3349,14 +3345,12 @@ x86_emulate(
break;
case 0x50 ... 0x57: /* push reg */
- src.val = *(unsigned long *)decode_register(
- (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
+ src.val = *decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3));
goto push;
case 0x58 ... 0x5f: /* pop reg */
dst.type = OP_REG;
- dst.reg = decode_register(
- (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
+ dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3));
dst.bytes = op_bytes;
if ( mode_64bit() && (dst.bytes == 4) )
dst.bytes = 8;
@@ -3370,7 +3364,7 @@ x86_emulate(
ea.val = _regs.esp;
for ( i = 0; i < 8; i++ )
{
- void *reg = decode_register(i, &_regs, 0);
+ void *reg = decode_gpr(&_regs, i);
if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
reg != &_regs.esp ? reg : &ea.val,
@@ -3382,7 +3376,7 @@ x86_emulate(
case 0x61: /* popa */
for ( i = 0; i < 8; i++ )
{
- void *reg = decode_register(7 - i, &_regs, 0);
+ void *reg = decode_gpr(&_regs, 7 - i);
if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
&dst.val, op_bytes, ctxt, ops)) != 0 )
@@ -3662,8 +3656,7 @@ x86_emulate(
case 0x91 ... 0x97: /* xchg reg,%%rax */
dst.type = OP_REG;
dst.bytes = op_bytes;
- dst.reg = decode_register(
- (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
+ dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3));
dst.val = *dst.reg;
goto xchg;
@@ -3895,14 +3888,13 @@ x86_emulate(
}
case 0xb0 ... 0xb7: /* mov imm8,r8 */
- dst.reg = decode_register(
- (b & 7) | ((rex_prefix & 1) << 3), &_regs, (rex_prefix == 0));
+ dst.reg = decode_gpr_byteop(&_regs, (b & 7) | ((rex_prefix & 1) << 3),
+ !rex_prefix);
dst.val = src.val;
break;
case 0xb8 ... 0xbf: /* mov imm{16,32,64},r{16,32,64} */
- dst.reg = decode_register(
- (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
+ dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3));
dst.val = src.val;
break;
@@ -5684,7 +5676,7 @@ x86_emulate(
opc[2] = 0xc3;
copy_REX_VEX(opc, rex_prefix, vex);
- ea.reg = decode_register(modrm_reg, &_regs, 0);
+ ea.reg = decode_gpr(&_regs, modrm_reg);
invoke_stub("", "", "=a" (*ea.reg), "+m" (fic.exn_raised)
: "c" (mmvalp), "m" (*mmvalp));
@@ -6461,7 +6453,7 @@ x86_emulate(
else
{
shift = src.val;
- src.reg = decode_register(modrm_reg, &_regs, 0);
+ src.reg = decode_gpr(&_regs, modrm_reg);
src.val = truncate_word(*src.reg, dst.bytes);
}
if ( (shift &= width - 1) == 0 )
@@ -6580,7 +6572,7 @@ x86_emulate(
fail_if(!ops->read_segment);
if ( (rc = ops->read_segment(seg, &sreg, ctxt)) != X86EMUL_OKAY )
goto done;
- dst.reg = decode_register(modrm_rm, &_regs, 0);
+ dst.reg = decode_gpr(&_regs, modrm_rm);
if ( !(modrm_reg & 2) )
{
/* rd{f,g}sbase */
@@ -6640,7 +6632,7 @@ x86_emulate(
case X86EMUL_OPC(0x0f, 0xb6): /* movzx rm8,r{16,32,64} */
/* Recompute DstReg as we may have decoded AH/BH/CH/DH. */
- dst.reg = decode_register(modrm_reg, &_regs, 0);
+ dst.reg = decode_gpr(&_regs, modrm_reg);
dst.bytes = op_bytes;
dst.val = (uint8_t)src.val;
break;
@@ -6732,7 +6724,7 @@ x86_emulate(
case X86EMUL_OPC(0x0f, 0xbe): /* movsx rm8,r{16,32,64} */
/* Recompute DstReg as we may have decoded AH/BH/CH/DH. */
- dst.reg = decode_register(modrm_reg, &_regs, 0);
+ dst.reg = decode_gpr(&_regs, modrm_reg);
dst.bytes = op_bytes;
dst.val = (int8_t)src.val;
break;
@@ -6957,8 +6949,7 @@ x86_emulate(
case X86EMUL_OPC(0x0f, 0xc8) ... X86EMUL_OPC(0x0f, 0xcf): /* bswap */
dst.type = OP_REG;
- dst.reg = decode_register(
- (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
+ dst.reg = decode_gpr(&_regs, (b & 7) | ((rex_prefix & 1) << 3));
switch ( dst.bytes = op_bytes )
{
default: /* case 2: */
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 8+ messages in thread