* [PATCH 1/8] x86: re-introduce non-underscore prefixed 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
@ 2017-02-28 13:35 ` Jan Beulich
2017-02-28 13:35 ` [PATCH 2/8] x86: switch away from temporary " Jan Beulich
` (7 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 13:35 UTC
To: xen-devel; +Cc: George Dunlap, Andrew Cooper
For a transitional period (until we've managed to replace all
underscore-prefixed instances), allow both names to co-exist.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/include/public/arch-x86/xen-x86_64.h
+++ b/xen/include/public/arch-x86/xen-x86_64.h
@@ -134,7 +134,7 @@ struct iret_context {
/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax/rax). */
#define __DECL_REG_LOHI(which) union { \
uint64_t r ## which ## x; \
- uint32_t _e ## which ## x; \
+ uint32_t e ## which ## x, _e ## which ## x; \
uint16_t which ## x; \
struct { \
uint8_t which ## l; \
@@ -143,13 +143,13 @@ struct iret_context {
}
#define __DECL_REG_LO8(name) union { \
uint64_t r ## name; \
- uint32_t _e ## name; \
+ uint32_t e ## name, _e ## name; \
uint16_t name; \
uint8_t name ## l; \
}
#define __DECL_REG_LO16(name) union { \
uint64_t r ## name; \
- uint32_t _e ## name; \
+ uint32_t e ## name, _e ## name; \
uint16_t name; \
}
#define __DECL_REG_HI(num) union { \
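For reference, a minimal sketch of what __DECL_REG_LOHI(a) expands to once
this change is applied -- the ah member is assumed, since the hunk above cuts
off mid-struct. Both eax and _eax alias the low 32 bits of rax, which is what
lets the old and new spellings co-exist during the transition:

    union {
        uint64_t rax;
        uint32_t eax, _eax;   /* new name alongside the temporary one */
        uint16_t ax;
        struct {
            uint8_t al;
            uint8_t ah;       /* assumed: truncated from the hunk above */
        };
    };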
* [PATCH 2/8] x86: switch away from temporary 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
2017-02-28 13:35 ` [PATCH 1/8] x86: re-introduce non-underscore prefixed " Jan Beulich
@ 2017-02-28 13:35 ` Jan Beulich
2017-02-28 13:36 ` [PATCH 3/8] x86/HVM: " Jan Beulich
` (6 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 13:35 UTC
To: xen-devel, Jan Beulich; +Cc: George Dunlap, Andrew Cooper
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1015,11 +1015,11 @@ int arch_set_info_guest(
init_int80_direct_trap(v);
/* IOPL privileges are virtualised. */
- v->arch.pv_vcpu.iopl = v->arch.user_regs._eflags & X86_EFLAGS_IOPL;
- v->arch.user_regs._eflags &= ~X86_EFLAGS_IOPL;
+ v->arch.pv_vcpu.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
+ v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
/* Ensure real hardware interrupts are enabled. */
- v->arch.user_regs._eflags |= X86_EFLAGS_IF;
+ v->arch.user_regs.eflags |= X86_EFLAGS_IF;
if ( !v->is_initialised )
{
@@ -1776,14 +1776,14 @@ static void load_segments(struct vcpu *n
if ( !ring_1(regs) )
{
ret = put_user(regs->ss, esp-1);
- ret |= put_user(regs->_esp, esp-2);
+ ret |= put_user(regs->esp, esp-2);
esp -= 2;
}
if ( ret |
put_user(rflags, esp-1) |
put_user(cs_and_mask, esp-2) |
- put_user(regs->_eip, esp-3) |
+ put_user(regs->eip, esp-3) |
put_user(uregs->gs, esp-4) |
put_user(uregs->fs, esp-5) |
put_user(uregs->es, esp-6) |
@@ -1798,12 +1798,12 @@ static void load_segments(struct vcpu *n
vcpu_info(n, evtchn_upcall_mask) = 1;
regs->entry_vector |= TRAP_syscall;
- regs->_eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT|
+ regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT|
X86_EFLAGS_IOPL|X86_EFLAGS_TF);
regs->ss = FLAT_COMPAT_KERNEL_SS;
- regs->_esp = (unsigned long)(esp-7);
+ regs->esp = (unsigned long)(esp-7);
regs->cs = FLAT_COMPAT_KERNEL_CS;
- regs->_eip = pv->failsafe_callback_eip;
+ regs->eip = pv->failsafe_callback_eip;
return;
}
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -1667,7 +1667,7 @@ int __init construct_dom0(
regs->rip = parms.virt_entry;
regs->rsp = vstack_end;
regs->rsi = vstartinfo_start;
- regs->_eflags = X86_EFLAGS_IF;
+ regs->eflags = X86_EFLAGS_IF;
#ifdef CONFIG_SHADOW_PAGING
if ( opt_dom0_shadow )
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1587,8 +1587,8 @@ void arch_get_info_guest(struct vcpu *v,
}
/* IOPL privileges are virtualised: merge back into returned eflags. */
- BUG_ON((c(user_regs._eflags) & X86_EFLAGS_IOPL) != 0);
- c(user_regs._eflags |= v->arch.pv_vcpu.iopl);
+ BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
+ c(user_regs.eflags |= v->arch.pv_vcpu.iopl);
if ( !compat )
{
--- a/xen/arch/x86/gdbstub.c
+++ b/xen/arch/x86/gdbstub.c
@@ -68,14 +68,14 @@ gdb_arch_resume(struct cpu_user_regs *re
if ( addr != -1UL )
regs->rip = addr;
- regs->_eflags &= ~X86_EFLAGS_TF;
+ regs->eflags &= ~X86_EFLAGS_TF;
/* Set eflags.RF to ensure we do not re-enter. */
- regs->_eflags |= X86_EFLAGS_RF;
+ regs->eflags |= X86_EFLAGS_RF;
/* Set the trap flag if we are single stepping. */
if ( type == GDB_STEP )
- regs->_eflags |= X86_EFLAGS_TF;
+ regs->eflags |= X86_EFLAGS_TF;
}
/*
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -626,7 +626,7 @@ void fatal_trap(const struct cpu_user_re
panic("FATAL TRAP: vector = %d (%s)\n"
"[error_code=%04x] %s",
trapnr, trapstr(trapnr), regs->error_code,
- (regs->_eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
+ (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
}
void pv_inject_event(const struct x86_event *event)
@@ -703,8 +703,8 @@ static inline void do_guest_trap(unsigne
static void instruction_done(struct cpu_user_regs *regs, unsigned long rip)
{
regs->rip = rip;
- regs->_eflags &= ~X86_EFLAGS_RF;
- if ( regs->_eflags & X86_EFLAGS_TF )
+ regs->eflags &= ~X86_EFLAGS_RF;
+ if ( regs->eflags & X86_EFLAGS_TF )
{
current->arch.debugreg[6] |= DR_STEP | DR_STATUS_RESERVED_ONE;
do_guest_trap(TRAP_debug, regs);
@@ -1070,7 +1070,7 @@ static int emulate_forced_invalid_op(str
eip += sizeof(instr);
- guest_cpuid(current, regs->_eax, regs->_ecx, &res);
+ guest_cpuid(current, regs->eax, regs->ecx, &res);
regs->rax = res.a;
regs->rbx = res.b;
@@ -1395,7 +1395,7 @@ leaf:
* - Page fault in kernel mode
*/
if ( (cr4 & X86_CR4_SMAP) && !(error_code & PFEC_user_mode) &&
- (((regs->cs & 3) == 3) || !(regs->_eflags & X86_EFLAGS_AC)) )
+ (((regs->cs & 3) == 3) || !(regs->eflags & X86_EFLAGS_AC)) )
return smap_fault;
}
@@ -1425,7 +1425,7 @@ static int fixup_page_fault(unsigned lon
struct domain *d = v->domain;
/* No fixups in interrupt context or when interrupts are disabled. */
- if ( in_irq() || !(regs->_eflags & X86_EFLAGS_IF) )
+ if ( in_irq() || !(regs->eflags & X86_EFLAGS_IF) )
return 0;
if ( !(regs->error_code & PFEC_page_present) &&
@@ -2290,7 +2290,7 @@ static int priv_op_rep_ins(uint16_t port
break;
/* x86_emulate() clips the repetition count to ensure we don't wrap. */
- if ( unlikely(ctxt->regs->_eflags & X86_EFLAGS_DF) )
+ if ( unlikely(ctxt->regs->eflags & X86_EFLAGS_DF) )
offset -= bytes_per_rep;
else
offset += bytes_per_rep;
@@ -2358,7 +2358,7 @@ static int priv_op_rep_outs(enum x86_seg
break;
/* x86_emulate() clips the repetition count to ensure we don't wrap. */
- if ( unlikely(ctxt->regs->_eflags & X86_EFLAGS_DF) )
+ if ( unlikely(ctxt->regs->eflags & X86_EFLAGS_DF) )
offset -= bytes_per_rep;
else
offset += bytes_per_rep;
@@ -3004,14 +3004,14 @@ static int emulate_privileged_op(struct
return 0;
/* Mirror virtualized state into EFLAGS. */
- ASSERT(regs->_eflags & X86_EFLAGS_IF);
+ ASSERT(regs->eflags & X86_EFLAGS_IF);
if ( vcpu_info(curr, evtchn_upcall_mask) )
- regs->_eflags &= ~X86_EFLAGS_IF;
+ regs->eflags &= ~X86_EFLAGS_IF;
else
- regs->_eflags |= X86_EFLAGS_IF;
- ASSERT(!(regs->_eflags & X86_EFLAGS_IOPL));
- regs->_eflags |= curr->arch.pv_vcpu.iopl;
- eflags = regs->_eflags;
+ regs->eflags |= X86_EFLAGS_IF;
+ ASSERT(!(regs->eflags & X86_EFLAGS_IOPL));
+ regs->eflags |= curr->arch.pv_vcpu.iopl;
+ eflags = regs->eflags;
ctxt.ctxt.addr_size = ar & _SEGMENT_L ? 64 : ar & _SEGMENT_DB ? 32 : 16;
/* Leave zero in ctxt.ctxt.sp_size, as it's not needed. */
@@ -3025,10 +3025,10 @@ static int emulate_privileged_op(struct
* Nothing we allow to be emulated can change anything other than the
* arithmetic bits, and the resume flag.
*/
- ASSERT(!((regs->_eflags ^ eflags) &
+ ASSERT(!((regs->eflags ^ eflags) &
~(X86_EFLAGS_RF | X86_EFLAGS_ARITH_MASK)));
- regs->_eflags |= X86_EFLAGS_IF;
- regs->_eflags &= ~X86_EFLAGS_IOPL;
+ regs->eflags |= X86_EFLAGS_IF;
+ regs->eflags &= ~X86_EFLAGS_IOPL;
/* More strict than x86_emulate_wrapper(). */
ASSERT(ctxt.ctxt.event_pending == (rc == X86EMUL_EXCEPTION));
@@ -3348,7 +3348,8 @@ static void emulate_gate_op(struct cpu_u
!(ar & _SEGMENT_WR) ||
!check_stack_limit(ar, limit, esp + nparm * 4, nparm * 4) )
return do_guest_trap(TRAP_gp_fault, regs);
- ustkp = (unsigned int *)(unsigned long)((unsigned int)base + regs->_esp + nparm * 4);
+ ustkp = (unsigned int *)(unsigned long)
+ ((unsigned int)base + regs->esp + nparm * 4);
if ( !compat_access_ok(ustkp - nparm, nparm * 4) )
{
do_guest_trap(TRAP_gp_fault, regs);
@@ -3728,20 +3729,20 @@ void do_debug(struct cpu_user_regs *regs
if ( !guest_mode(regs) )
{
- if ( regs->_eflags & X86_EFLAGS_TF )
+ if ( regs->eflags & X86_EFLAGS_TF )
{
/* In SYSENTER entry path we can't zap TF until EFLAGS is saved. */
if ( (regs->rip >= (unsigned long)sysenter_entry) &&
(regs->rip <= (unsigned long)sysenter_eflags_saved) )
{
if ( regs->rip == (unsigned long)sysenter_eflags_saved )
- regs->_eflags &= ~X86_EFLAGS_TF;
+ regs->eflags &= ~X86_EFLAGS_TF;
goto out;
}
if ( !debugger_trap_fatal(TRAP_debug, regs) )
{
WARN();
- regs->_eflags &= ~X86_EFLAGS_TF;
+ regs->eflags &= ~X86_EFLAGS_TF;
}
}
else
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -327,7 +327,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PA
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct mc_state *mcs = &current->mc_state;
unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
- ? regs->_ecx
+ ? regs->ecx
: mcs->call.args[1];
unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;
@@ -341,7 +341,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PA
BUG_ON(!hypercall_xlat_continuation(&left, 4, 0x01, nat_ops,
cmp_uops));
if ( !(mcs->flags & MCSF_in_multicall) )
- regs->_ecx += count - i;
+ regs->ecx += count - i;
else
mcs->compat_call.args[1] += count - i;
}
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ b/xen/arch/x86/x86_64/compat/traps.c
@@ -8,7 +8,7 @@ void compat_show_guest_stack(struct vcpu
{
unsigned int i, *stack, addr, mask = STACK_SIZE;
- stack = (unsigned int *)(unsigned long)regs->_esp;
+ stack = (unsigned int *)(unsigned long)regs->esp;
printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
@@ -76,14 +76,14 @@ unsigned int compat_iret(void)
regs->rsp = (u32)regs->rsp;
/* Restore EAX (clobbered by hypercall). */
- if ( unlikely(__get_user(regs->_eax, (u32 *)regs->rsp)) )
+ if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
{
domain_crash(v->domain);
return 0;
}
/* Restore CS and EIP. */
- if ( unlikely(__get_user(regs->_eip, (u32 *)regs->rsp + 1)) ||
+ if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
{
domain_crash(v->domain);
@@ -103,7 +103,7 @@ unsigned int compat_iret(void)
if ( VM_ASSIST(v->domain, architectural_iopl) )
v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
- regs->_eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
+ regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
if ( unlikely(eflags & X86_EFLAGS_VM) )
{
@@ -121,8 +121,8 @@ unsigned int compat_iret(void)
int rc = 0;
gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
- regs->_esp, ksp);
- if ( ksp < regs->_esp )
+ regs->esp, ksp);
+ if ( ksp < regs->esp )
{
for (i = 1; i < 10; ++i)
{
@@ -130,7 +130,7 @@ unsigned int compat_iret(void)
rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
}
}
- else if ( ksp > regs->_esp )
+ else if ( ksp > regs->esp )
{
for ( i = 9; i > 0; --i )
{
@@ -143,20 +143,20 @@ unsigned int compat_iret(void)
domain_crash(v->domain);
return 0;
}
- regs->_esp = ksp;
+ regs->esp = ksp;
regs->ss = v->arch.pv_vcpu.kernel_ss;
ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
if ( TI_GET_IF(ti) )
eflags &= ~X86_EFLAGS_IF;
- regs->_eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
- X86_EFLAGS_NT|X86_EFLAGS_TF);
+ regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
+ X86_EFLAGS_NT|X86_EFLAGS_TF);
if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
{
domain_crash(v->domain);
return 0;
}
- regs->_eip = ti->address;
+ regs->eip = ti->address;
regs->cs = ti->cs;
}
else if ( unlikely(ring_0(regs)) )
@@ -165,10 +165,10 @@ unsigned int compat_iret(void)
return 0;
}
else if ( ring_1(regs) )
- regs->_esp += 16;
+ regs->esp += 16;
/* Return to ring 2/3: restore ESP and SS. */
else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
- __get_user(regs->_esp, (u32 *)regs->rsp + 4) )
+ __get_user(regs->esp, (u32 *)regs->rsp + 4) )
{
domain_crash(v->domain);
return 0;
@@ -183,7 +183,7 @@ unsigned int compat_iret(void)
* The hypercall exit path will overwrite EAX with this return
* value.
*/
- return regs->_eax;
+ return regs->eax;
}
static long compat_register_guest_callback(
--- a/xen/arch/x86/x86_64/gdbstub.c
+++ b/xen/arch/x86/x86_64/gdbstub.c
@@ -44,7 +44,7 @@ gdb_arch_read_reg_array(struct cpu_user_
GDB_REG64(regs->r15);
GDB_REG64(regs->rip);
- GDB_REG32(regs->_eflags);
+ GDB_REG32(regs->eflags);
GDB_REG32(regs->cs);
GDB_REG32(regs->ss);
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -73,7 +73,7 @@ static inline int wrmsr_safe(unsigned in
static inline uint64_t msr_fold(const struct cpu_user_regs *regs)
{
- return (regs->rdx << 32) | regs->_eax;
+ return (regs->rdx << 32) | regs->eax;
}
static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)
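A hedged aside on the converted writes above (plain C union semantics, not
anything this patch changes): unlike a hardware write to a 32-bit register,
a store through the 32-bit union member does not zero the upper half of the
64-bit member.

    /* Illustrative only -- C aliasing, not hardware zero-extension: */
    regs->rax = 0xffffffff00000000UL;
    regs->eax = 0x12345678;    /* rax is now 0xffffffff12345678 */

This is why compat_iret above truncates explicitly with
"regs->rsp = (u32)regs->rsp;" before the 32-bit stores that follow.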
* [PATCH 3/8] x86/HVM: switch away from temporary 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
2017-02-28 13:35 ` [PATCH 1/8] x86: re-introduce non-underscore prefixed " Jan Beulich
2017-02-28 13:35 ` [PATCH 2/8] x86: switch away from temporary " Jan Beulich
@ 2017-02-28 13:36 ` Jan Beulich
2017-02-28 13:36 ` [PATCH 4/8] x86/HVMemul: " Jan Beulich
` (5 subsequent siblings)
8 siblings, 0 replies; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 13:36 UTC
To: xen-devel; +Cc: George Dunlap, Andrew Cooper
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2968,20 +2968,20 @@ void hvm_task_switch(
if ( rc != HVMCOPY_okay )
goto out;
- eflags = regs->_eflags;
+ eflags = regs->eflags;
if ( taskswitch_reason == TSW_iret )
eflags &= ~X86_EFLAGS_NT;
- tss.eip = regs->_eip;
+ tss.eip = regs->eip;
tss.eflags = eflags;
- tss.eax = regs->_eax;
- tss.ecx = regs->_ecx;
- tss.edx = regs->_edx;
- tss.ebx = regs->_ebx;
- tss.esp = regs->_esp;
- tss.ebp = regs->_ebp;
- tss.esi = regs->_esi;
- tss.edi = regs->_edi;
+ tss.eax = regs->eax;
+ tss.ecx = regs->ecx;
+ tss.edx = regs->edx;
+ tss.ebx = regs->ebx;
+ tss.esp = regs->esp;
+ tss.ebp = regs->ebp;
+ tss.esi = regs->esi;
+ tss.edi = regs->edi;
hvm_get_segment_register(v, x86_seg_es, &segr);
tss.es = segr.sel;
@@ -3047,7 +3047,7 @@ void hvm_task_switch(
if ( taskswitch_reason == TSW_call_or_int )
{
- regs->_eflags |= X86_EFLAGS_NT;
+ regs->eflags |= X86_EFLAGS_NT;
tss.back_link = prev_tr.sel;
rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
@@ -3084,7 +3084,7 @@ void hvm_task_switch(
opsz = segr.attr.fields.db ? 4 : 2;
hvm_get_segment_register(v, x86_seg_ss, &segr);
if ( segr.attr.fields.db )
- sp = regs->_esp -= opsz;
+ sp = regs->esp -= opsz;
else
sp = regs->sp -= opsz;
if ( hvm_virtual_to_linear_addr(x86_seg_ss, &segr, sp, opsz,
@@ -3370,7 +3370,7 @@ void hvm_rdtsc_intercept(struct cpu_user
{
msr_split(regs, _hvm_rdtsc_intercept());
- HVMTRACE_2D(RDTSC, regs->_eax, regs->_edx);
+ HVMTRACE_2D(RDTSC, regs->eax, regs->edx);
}
int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
@@ -3684,11 +3684,11 @@ void hvm_ud_intercept(struct cpu_user_re
(memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
{
regs->rip += sizeof(sig);
- regs->_eflags &= ~X86_EFLAGS_RF;
+ regs->eflags &= ~X86_EFLAGS_RF;
/* Zero the upper 32 bits of %rip if not in 64bit mode. */
if ( !(hvm_long_mode_enabled(cur) && cs->attr.fields.l) )
- regs->rip = regs->_eip;
+ regs->rip = regs->eip;
add_taint(TAINT_HVM_FEP);
@@ -3732,7 +3732,7 @@ enum hvm_intblk hvm_interrupt_blocked(st
}
if ( (intack.source != hvm_intsrc_nmi) &&
- !(guest_cpu_user_regs()->_eflags & X86_EFLAGS_IF) )
+ !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
return hvm_intblk_rflags_ie;
intr_shadow = hvm_funcs.get_interrupt_shadow(v);
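The "regs->rip = regs->eip" conversion above deserves a note: reading the
32-bit alias yields only the low half of rip, and assigning that back to the
64-bit member zero-extends it, so the single statement clears rip's upper 32
bits. An illustrative spelled-out equivalent:

    regs->rip = regs->eip;             /* reads low 32 bits, zero-extends */
    regs->rip = (uint32_t)regs->rip;   /* the same effect, via a cast */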
* [PATCH 4/8] x86/HVMemul: switch away from temporary 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
` (2 preceding siblings ...)
2017-02-28 13:36 ` [PATCH 3/8] x86/HVM: " Jan Beulich
@ 2017-02-28 13:36 ` Jan Beulich
2017-02-28 13:46 ` Paul Durrant
2017-02-28 13:37 ` [PATCH 5/8] x86/mm: " Jan Beulich
` (4 subsequent siblings)
8 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 13:36 UTC
To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Paul Durrant
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -442,7 +442,7 @@ static int hvmemul_linear_to_phys(
}
/* Reverse mode if this is a backwards multi-iteration string operation. */
- reverse = (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1);
+ reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
{
@@ -539,7 +539,7 @@ static int hvmemul_virtual_to_linear(
if ( IS_ERR(reg) )
return -PTR_ERR(reg);
- if ( (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1) )
+ if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
{
/*
* x86_emulate() clips the repetition count to ensure we don't wrap
@@ -1085,7 +1085,7 @@ static int hvmemul_rep_ins(
return X86EMUL_UNHANDLEABLE;
return hvmemul_do_pio_addr(src_port, reps, bytes_per_rep, IOREQ_READ,
- !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
+ !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
}
static int hvmemul_rep_outs_set_context(
@@ -1154,7 +1154,7 @@ static int hvmemul_rep_outs(
return X86EMUL_UNHANDLEABLE;
return hvmemul_do_pio_addr(dst_port, reps, bytes_per_rep, IOREQ_WRITE,
- !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
+ !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
}
static int hvmemul_rep_movs(
@@ -1173,7 +1173,7 @@ static int hvmemul_rep_movs(
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
p2m_type_t sp2mt, dp2mt;
- int rc, df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
+ int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
char *buf;
rc = hvmemul_virtual_to_linear(
@@ -1327,7 +1327,7 @@ static int hvmemul_rep_stos(
unsigned long addr, bytes;
paddr_t gpa;
p2m_type_t p2mt;
- bool_t df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
+ bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
hvm_access_write, hvmemul_ctxt, &addr);
@@ -1775,7 +1775,7 @@ static int _hvm_emulate_one(struct hvm_e
if ( hvmemul_ctxt->ctxt.retire.hlt &&
!hvm_local_events_need_delivery(curr) )
{
- hvm_hlt(regs->_eflags);
+ hvm_hlt(regs->eflags);
}
return X86EMUL_OKAY;
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -136,7 +136,7 @@ bool handle_pio(uint16_t port, unsigned
ASSERT((size - 1) < 4 && size != 3);
if ( dir == IOREQ_WRITE )
- data = guest_cpu_user_regs()->_eax;
+ data = guest_cpu_user_regs()->eax;
rc = hvmemul_do_pio_buffer(port, size, dir, &data);
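The X86_EFLAGS_DF tests converted above all follow one pattern; as a hedged
summary (names as in the hunks), EFLAGS.DF selects the direction string
instructions move in, so multi-iteration rep operations step their buffer
offset downwards when DF is set:

    int df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);

    if ( df )
        offset -= bytes_per_rep;   /* DF = 1: walk backwards */
    else
        offset += bytes_per_rep;   /* DF = 0: walk forwards */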
* Re: [PATCH 4/8] x86/HVMemul: switch away from temporary 32-bit register names
2017-02-28 13:36 ` [PATCH 4/8] x86/HVMemul: " Jan Beulich
@ 2017-02-28 13:46 ` Paul Durrant
0 siblings, 0 replies; 16+ messages in thread
From: Paul Durrant @ 2017-02-28 13:46 UTC
To: 'Jan Beulich', xen-devel; +Cc: Andrew Cooper, George Dunlap
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 28 February 2017 13:37
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>; George Dunlap <George.Dunlap@citrix.com>
> Subject: [PATCH 4/8] x86/HVMemul: switch away from temporary 32-bit
> register names
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -442,7 +442,7 @@ static int hvmemul_linear_to_phys(
> }
>
> /* Reverse mode if this is a backwards multi-iteration string operation. */
> - reverse = (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) &&
> (*reps > 1);
> + reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) &&
> (*reps > 1);
>
> if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
> {
> @@ -539,7 +539,7 @@ static int hvmemul_virtual_to_linear(
> if ( IS_ERR(reg) )
> return -PTR_ERR(reg);
>
> - if ( (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1)
> )
> + if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
> {
> /*
> * x86_emulate() clips the repetition count to ensure we don't wrap
> @@ -1085,7 +1085,7 @@ static int hvmemul_rep_ins(
> return X86EMUL_UNHANDLEABLE;
>
> return hvmemul_do_pio_addr(src_port, reps, bytes_per_rep,
> IOREQ_READ,
> - !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
> + !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
> }
>
> static int hvmemul_rep_outs_set_context(
> @@ -1154,7 +1154,7 @@ static int hvmemul_rep_outs(
> return X86EMUL_UNHANDLEABLE;
>
> return hvmemul_do_pio_addr(dst_port, reps, bytes_per_rep,
> IOREQ_WRITE,
> - !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
> + !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
> }
>
> static int hvmemul_rep_movs(
> @@ -1173,7 +1173,7 @@ static int hvmemul_rep_movs(
> paddr_t sgpa, dgpa;
> uint32_t pfec = PFEC_page_present;
> p2m_type_t sp2mt, dp2mt;
> - int rc, df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
> + int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
> char *buf;
>
> rc = hvmemul_virtual_to_linear(
> @@ -1327,7 +1327,7 @@ static int hvmemul_rep_stos(
> unsigned long addr, bytes;
> paddr_t gpa;
> p2m_type_t p2mt;
> - bool_t df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
> + bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
> int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
> hvm_access_write, hvmemul_ctxt, &addr);
>
> @@ -1775,7 +1775,7 @@ static int _hvm_emulate_one(struct hvm_e
> if ( hvmemul_ctxt->ctxt.retire.hlt &&
> !hvm_local_events_need_delivery(curr) )
> {
> - hvm_hlt(regs->_eflags);
> + hvm_hlt(regs->eflags);
> }
>
> return X86EMUL_OKAY;
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -136,7 +136,7 @@ bool handle_pio(uint16_t port, unsigned
> ASSERT((size - 1) < 4 && size != 3);
>
> if ( dir == IOREQ_WRITE )
> - data = guest_cpu_user_regs()->_eax;
> + data = guest_cpu_user_regs()->eax;
>
> rc = hvmemul_do_pio_buffer(port, size, dir, &data);
>
>
>
* [PATCH 5/8] x86/mm: switch away from temporary 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
` (3 preceding siblings ...)
2017-02-28 13:36 ` [PATCH 4/8] x86/HVMemul: " Jan Beulich
@ 2017-02-28 13:37 ` Jan Beulich
2017-03-01 12:16 ` George Dunlap
2017-02-28 13:38 ` [PATCH 6/8] x86/SVM: " Jan Beulich
` (3 subsequent siblings)
8 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 13:37 UTC
To: xen-devel; +Cc: George Dunlap, Andrew Cooper
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -196,7 +196,7 @@ guest_walk_tables(struct vcpu *v, struct
* - Page fault in kernel mode
*/
smap = hvm_smap_enabled(v) &&
- ((hvm_get_cpl(v) == 3) || !(regs->_eflags & X86_EFLAGS_AC));
+ ((hvm_get_cpl(v) == 3) || !(regs->eflags & X86_EFLAGS_AC));
break;
case SMAP_CHECK_ENABLED:
smap = hvm_smap_enabled(v);
* [PATCH 6/8] x86/SVM: switch away from temporary 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
` (4 preceding siblings ...)
2017-02-28 13:37 ` [PATCH 5/8] x86/mm: " Jan Beulich
@ 2017-02-28 13:38 ` Jan Beulich
2017-02-28 13:55 ` Boris Ostrovsky
2017-02-28 13:38 ` [PATCH 5/8] x86/Viridian: " Jan Beulich
` (2 subsequent siblings)
8 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 13:38 UTC
To: xen-devel
Cc: George Dunlap, Andrew Cooper, Boris Ostrovsky,
Suravee Suthikulpanit
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -975,7 +975,7 @@ nsvm_vmcb_guest_intercepts_exitcode(stru
break;
ns_vmcb = nv->nv_vvmcx;
vmexits = nsvm_vmcb_guest_intercepts_msr(svm->ns_cached_msrpm,
- regs->_ecx, ns_vmcb->exitinfo1 != 0);
+ regs->ecx, ns_vmcb->exitinfo1 != 0);
if (vmexits == NESTEDHVM_VMEXIT_HOST)
return 0;
break;
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -111,11 +111,11 @@ void __update_guest_eip(struct cpu_user_
ASSERT(regs == guest_cpu_user_regs());
regs->rip += inst_len;
- regs->_eflags &= ~X86_EFLAGS_RF;
+ regs->eflags &= ~X86_EFLAGS_RF;
curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
- if ( regs->_eflags & X86_EFLAGS_TF )
+ if ( regs->eflags & X86_EFLAGS_TF )
hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
@@ -515,7 +515,7 @@ static int svm_guest_x86_mode(struct vcp
if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
- if ( unlikely(guest_cpu_user_regs()->_eflags & X86_EFLAGS_VM) )
+ if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
return 8;
@@ -1223,7 +1223,7 @@ static void svm_inject_event(const struc
switch ( _event.vector )
{
case TRAP_debug:
- if ( regs->_eflags & X86_EFLAGS_TF )
+ if ( regs->eflags & X86_EFLAGS_TF )
{
__restore_debug_registers(vmcb, curr);
vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | 0x4000);
@@ -1313,7 +1313,7 @@ static void svm_inject_event(const struc
*/
if ( !((vmcb->_efer & EFER_LMA) && vmcb->cs.attr.fields.l) )
{
- regs->rip = regs->_eip;
+ regs->rip = regs->eip;
vmcb->nextrip = (uint32_t)vmcb->nextrip;
}
@@ -1595,8 +1595,8 @@ static void svm_vmexit_do_cpuid(struct c
if ( (inst_len = __get_instruction_length(curr, INSTR_CPUID)) == 0 )
return;
- guest_cpuid(curr, regs->_eax, regs->_ecx, &res);
- HVMTRACE_5D(CPUID, regs->_eax, res.a, res.b, res.c, res.d);
+ guest_cpuid(curr, regs->eax, regs->ecx, &res);
+ HVMTRACE_5D(CPUID, regs->eax, res.a, res.b, res.c, res.d);
regs->rax = res.a;
regs->rbx = res.b;
@@ -1973,12 +1973,12 @@ static void svm_do_msr_access(struct cpu
{
uint64_t msr_content = 0;
- rc = hvm_msr_read_intercept(regs->_ecx, &msr_content);
+ rc = hvm_msr_read_intercept(regs->ecx, &msr_content);
if ( rc == X86EMUL_OKAY )
msr_split(regs, msr_content);
}
else
- rc = hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1);
+ rc = hvm_msr_write_intercept(regs->ecx, msr_fold(regs), 1);
if ( rc == X86EMUL_OKAY )
__update_guest_eip(regs, inst_len);
@@ -1993,7 +1993,7 @@ static void svm_vmexit_do_hlt(struct vmc
return;
__update_guest_eip(regs, inst_len);
- hvm_hlt(regs->_eflags);
+ hvm_hlt(regs->eflags);
}
static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs)
@@ -2338,11 +2338,11 @@ void svm_vmexit_handler(struct cpu_user_
if ( hvm_long_mode_enabled(v) )
HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
1/*cycles*/, 3, exit_reason,
- regs->_eip, regs->rip >> 32, 0, 0, 0);
+ regs->eip, regs->rip >> 32, 0, 0, 0);
else
HVMTRACE_ND(VMEXIT, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
1/*cycles*/, 2, exit_reason,
- regs->_eip, 0, 0, 0, 0);
+ regs->eip, 0, 0, 0, 0);
if ( vcpu_guestmode ) {
enum nestedhvm_vmexits nsret;
@@ -2621,7 +2621,7 @@ void svm_vmexit_handler(struct cpu_user_
case VMEXIT_INVLPGA:
if ( (inst_len = __get_instruction_length(v, INSTR_INVLPGA)) == 0 )
break;
- svm_invlpga_intercept(v, regs->rax, regs->_ecx);
+ svm_invlpga_intercept(v, regs->rax, regs->ecx);
__update_guest_eip(regs, inst_len);
break;
@@ -2629,7 +2629,7 @@ void svm_vmexit_handler(struct cpu_user_
if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
break;
BUG_ON(vcpu_guestmode);
- HVMTRACE_1D(VMMCALL, regs->_eax);
+ HVMTRACE_1D(VMMCALL, regs->eax);
if ( hvm_hypercall(regs) == HVM_HCALL_completed )
__update_guest_eip(regs, inst_len);
@@ -2687,7 +2687,7 @@ void svm_vmexit_handler(struct cpu_user_
if ( vmcb_get_cpl(vmcb) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
else if ( (inst_len = __get_instruction_length(v, INSTR_XSETBV)) &&
- hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
+ hvm_handle_xsetbv(regs->ecx, msr_fold(regs)) == 0 )
__update_guest_eip(regs, inst_len);
break;
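The MSR and XSETBV paths above pair msr_fold() with msr_split(); the msr.h
hunk in patch 2 shows the former, and the latter is presumed to be its
mirror, roughly:

    /* Presumed shape of msr_split() (sketch, not quoted from the tree): */
    static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)
    {
        regs->rdx = val >> 32;       /* high half into edx */
        regs->rax = (uint32_t)val;   /* low half into eax */
    }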
* [PATCH 5/8] x86/Viridian: switch away from temporary 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
` (5 preceding siblings ...)
2017-02-28 13:38 ` [PATCH 6/8] x86/SVM: " Jan Beulich
@ 2017-02-28 13:38 ` Jan Beulich
2017-02-28 13:47 ` Paul Durrant
2017-02-28 13:39 ` [PATCH 8/8] x86/VMX: " Jan Beulich
2017-02-28 13:47 ` [PATCH 0/8] x86: " Andrew Cooper
8 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 13:38 UTC
To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Paul Durrant
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -666,9 +666,9 @@ int viridian_hypercall(struct cpu_user_r
output_params_gpa = regs->r8;
break;
case 4:
- input.raw = (regs->rdx << 32) | regs->_eax;
- input_params_gpa = (regs->rbx << 32) | regs->_ecx;
- output_params_gpa = (regs->rdi << 32) | regs->_esi;
+ input.raw = (regs->rdx << 32) | regs->eax;
+ input_params_gpa = (regs->rbx << 32) | regs->ecx;
+ output_params_gpa = (regs->rdi << 32) | regs->esi;
break;
default:
goto out;
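As a readability aside, the 32-bit aliases spare an explicit truncating cast
on each low half; without them the same folding would read roughly
(illustrative):

    input.raw         = (regs->rdx << 32) | (uint32_t)regs->rax;
    input_params_gpa  = (regs->rbx << 32) | (uint32_t)regs->rcx;
    output_params_gpa = (regs->rdi << 32) | (uint32_t)regs->rsi;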
* Re: [PATCH 5/8] x86/Viridian: switch away from temporary 32-bit register names
2017-02-28 13:38 ` [PATCH 5/8] x86/Viridian: " Jan Beulich
@ 2017-02-28 13:47 ` Paul Durrant
0 siblings, 0 replies; 16+ messages in thread
From: Paul Durrant @ 2017-02-28 13:47 UTC
To: 'Jan Beulich', xen-devel; +Cc: Andrew Cooper, George Dunlap
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 28 February 2017 13:39
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>; George Dunlap <George.Dunlap@citrix.com>
> Subject: [PATCH 5/8] x86/Viridian: switch away from temporary 32-bit
> register names
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -666,9 +666,9 @@ int viridian_hypercall(struct cpu_user_r
> output_params_gpa = regs->r8;
> break;
> case 4:
> - input.raw = (regs->rdx << 32) | regs->_eax;
> - input_params_gpa = (regs->rbx << 32) | regs->_ecx;
> - output_params_gpa = (regs->rdi << 32) | regs->_esi;
> + input.raw = (regs->rdx << 32) | regs->eax;
> + input_params_gpa = (regs->rbx << 32) | regs->ecx;
> + output_params_gpa = (regs->rdi << 32) | regs->esi;
> break;
> default:
> goto out;
>
>
* [PATCH 8/8] x86/VMX: switch away from temporary 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
` (6 preceding siblings ...)
2017-02-28 13:38 ` [PATCH 5/8] x86/Viridian: " Jan Beulich
@ 2017-02-28 13:39 ` Jan Beulich
2017-03-02 6:21 ` Tian, Kevin
2017-02-28 13:47 ` [PATCH 0/8] x86: " Andrew Cooper
8 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 13:39 UTC (permalink / raw)
To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Kevin Tian, Jun Nakajima
[-- Attachment #1: Type: text/plain, Size: 8949 bytes --]
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -72,7 +72,7 @@ static void realmode_deliver_exception(
/* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db )
- pstk = regs->_esp -= 6;
+ pstk = regs->esp -= 6;
else
pstk = regs->sp -= 6;
@@ -82,7 +82,7 @@ static void realmode_deliver_exception(
csr->sel = cs_eip >> 16;
csr->base = (uint32_t)csr->sel << 4;
regs->ip = (uint16_t)cs_eip;
- regs->_eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
+ regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
/* Exception delivery clears STI and MOV-SS blocking. */
if ( hvmemul_ctxt->intr_shadow &
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -607,7 +607,7 @@ int vmx_guest_x86_mode(struct vcpu *v)
if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
- if ( unlikely(guest_cpu_user_regs()->_eflags & X86_EFLAGS_VM) )
+ if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
__vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
if ( hvm_long_mode_enabled(v) &&
@@ -1753,7 +1753,7 @@ static void vmx_inject_event(const struc
switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) )
{
case TRAP_debug:
- if ( guest_cpu_user_regs()->_eflags & X86_EFLAGS_TF )
+ if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
{
__restore_debug_registers(curr);
write_debugreg(6, read_debugreg(6) | DR_STEP);
@@ -1853,7 +1853,7 @@ static void vmx_set_info_guest(struct vc
*/
__vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
if ( v->domain->debugger_attached &&
- (v->arch.user_regs._eflags & X86_EFLAGS_TF) &&
+ (v->arch.user_regs.eflags & X86_EFLAGS_TF) &&
(intr_shadow & VMX_INTR_SHADOW_STI) )
{
intr_shadow &= ~VMX_INTR_SHADOW_STI;
@@ -2092,8 +2092,8 @@ static int vmx_vcpu_emulate_vmfunc(const
struct vcpu *curr = current;
if ( !cpu_has_vmx_vmfunc && altp2m_active(curr->domain) &&
- regs->_eax == 0 &&
- p2m_switch_vcpu_altp2m_by_id(curr, regs->_ecx) )
+ regs->eax == 0 &&
+ p2m_switch_vcpu_altp2m_by_id(curr, regs->ecx) )
rc = X86EMUL_OKAY;
return rc;
@@ -2416,7 +2416,7 @@ void update_guest_eip(void)
unsigned long x;
regs->rip += get_instruction_length(); /* Safe: callers audited */
- regs->_eflags &= ~X86_EFLAGS_RF;
+ regs->eflags &= ~X86_EFLAGS_RF;
__vmread(GUEST_INTERRUPTIBILITY_INFO, &x);
if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
@@ -2425,7 +2425,7 @@ void update_guest_eip(void)
__vmwrite(GUEST_INTERRUPTIBILITY_INFO, x);
}
- if ( regs->_eflags & X86_EFLAGS_TF )
+ if ( regs->eflags & X86_EFLAGS_TF )
hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
@@ -2446,7 +2446,7 @@ static void vmx_fpu_dirty_intercept(void
static int vmx_do_cpuid(struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
- uint32_t leaf = regs->_eax, subleaf = regs->_ecx;
+ uint32_t leaf = regs->eax, subleaf = regs->ecx;
struct cpuid_leaf res;
if ( hvm_check_cpuid_faulting(current) )
@@ -3204,8 +3204,8 @@ void vmx_enter_realmode(struct cpu_user_
/* Adjust RFLAGS to enter virtual 8086 mode with IOPL == 3. Since
* we have CR4.VME == 1 and our own TSS with an empty interrupt
* redirection bitmap, all software INTs will be handled by vm86 */
- v->arch.hvm_vmx.vm86_saved_eflags = regs->_eflags;
- regs->_eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
+ v->arch.hvm_vmx.vm86_saved_eflags = regs->eflags;
+ regs->eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
}
static int vmx_handle_eoi_write(void)
@@ -3347,10 +3347,10 @@ void vmx_vmexit_handler(struct cpu_user_
if ( hvm_long_mode_enabled(v) )
HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
- regs->_eip, regs->rip >> 32, 0, 0, 0);
+ regs->eip, regs->rip >> 32, 0, 0, 0);
else
HVMTRACE_ND(VMEXIT, 0, 1/*cycles*/, 2, exit_reason,
- regs->_eip, 0, 0, 0, 0);
+ regs->eip, 0, 0, 0, 0);
perfc_incra(vmexits, exit_reason);
@@ -3435,8 +3435,8 @@ void vmx_vmexit_handler(struct cpu_user_
if ( v->arch.hvm_vmx.vmx_realmode )
{
/* Put RFLAGS back the way the guest wants it */
- regs->_eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IOPL);
- regs->_eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
+ regs->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IOPL);
+ regs->eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
/* Unless this exit was for an interrupt, we've hit something
* vm86 can't handle. Try again, using the emulator. */
@@ -3681,7 +3681,7 @@ void vmx_vmexit_handler(struct cpu_user_
}
case EXIT_REASON_HLT:
update_guest_eip(); /* Safe: HLT */
- hvm_hlt(regs->_eflags);
+ hvm_hlt(regs->eflags);
break;
case EXIT_REASON_INVLPG:
update_guest_eip(); /* Safe: INVLPG */
@@ -3698,7 +3698,7 @@ void vmx_vmexit_handler(struct cpu_user_
break;
case EXIT_REASON_VMCALL:
- HVMTRACE_1D(VMMCALL, regs->_eax);
+ HVMTRACE_1D(VMMCALL, regs->eax);
if ( hvm_hypercall(regs) == HVM_HCALL_completed )
update_guest_eip(); /* Safe: VMCALL */
@@ -3722,7 +3722,7 @@ void vmx_vmexit_handler(struct cpu_user_
{
uint64_t msr_content = 0;
- switch ( hvm_msr_read_intercept(regs->_ecx, &msr_content) )
+ switch ( hvm_msr_read_intercept(regs->ecx, &msr_content) )
{
case X86EMUL_OKAY:
msr_split(regs, msr_content);
@@ -3731,7 +3731,7 @@ void vmx_vmexit_handler(struct cpu_user_
}
case EXIT_REASON_MSR_WRITE:
- switch ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) )
+ switch ( hvm_msr_write_intercept(regs->ecx, msr_fold(regs), 1) )
{
case X86EMUL_OKAY:
update_guest_eip(); /* Safe: WRMSR */
@@ -3894,7 +3894,7 @@ void vmx_vmexit_handler(struct cpu_user_
break;
case EXIT_REASON_XSETBV:
- if ( hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
+ if ( hvm_handle_xsetbv(regs->ecx, msr_fold(regs)) == 0 )
update_guest_eip(); /* Safe: XSETBV */
break;
@@ -3952,7 +3952,7 @@ out:
*/
mode = vmx_guest_x86_mode(v);
if ( mode == 8 ? !is_canonical_address(regs->rip)
- : regs->rip != regs->_eip )
+ : regs->rip != regs->eip )
{
gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode);
@@ -3966,7 +3966,7 @@ out:
regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >>
(64 - VADDR_BITS);
else
- regs->rip = regs->_eip;
+ regs->rip = regs->eip;
}
else
domain_crash(v->domain);
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -462,23 +462,23 @@ gp_fault:
static void vmsucceed(struct cpu_user_regs *regs)
{
- regs->_eflags &= ~X86_EFLAGS_ARITH_MASK;
+ regs->eflags &= ~X86_EFLAGS_ARITH_MASK;
}
static void vmfail_valid(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
{
struct vcpu *v = current;
- unsigned int eflags = regs->_eflags;
+ unsigned int eflags = regs->eflags;
- regs->_eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_ZF;
+ regs->eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_ZF;
set_vvmcs(v, VM_INSTRUCTION_ERROR, errno);
}
static void vmfail_invalid(struct cpu_user_regs *regs)
{
- unsigned int eflags = regs->_eflags;
+ unsigned int eflags = regs->eflags;
- regs->_eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_CF;
+ regs->eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_CF;
}
static void vmfail(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
@@ -2187,7 +2187,7 @@ int nvmx_n2_vmexit_handler(struct cpu_us
ctrl = __n2_exec_control(v);
if ( ctrl & CPU_BASED_ACTIVATE_MSR_BITMAP )
{
- status = vmx_check_msr_bitmap(nvmx->msrbitmap, regs->_ecx,
+ status = vmx_check_msr_bitmap(nvmx->msrbitmap, regs->ecx,
!!(exit_reason == EXIT_REASON_MSR_WRITE));
if ( status )
nvcpu->nv_vmexit_pending = 1;
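Regarding the out: path in vmx.c above: for a non-64-bit guest, %rip is
simply truncated back to its low 32 bits (regs->eip), while in 64-bit mode a
non-canonical %rip is repaired by sign-extending from bit VADDR_BITS-1. A
sketch of that sign extension, assuming VADDR_BITS == 48 as on current
hardware:

/* Sketch only: replicate bit 47 through bits 63:48, i.e. the
 * (long)(rip << 16) >> 16 idiom from the hunk above. */
static inline uint64_t canonicalise_rip(uint64_t rip)
{
    return (uint64_t)((int64_t)(rip << 16) >> 16);
}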
* Re: [PATCH 0/8] x86: switch away from temporary 32-bit register names
2017-02-28 13:27 [PATCH 0/8] x86: switch away from temporary 32-bit register names Jan Beulich
` (7 preceding siblings ...)
2017-02-28 13:39 ` [PATCH 8/8] x86/VMX: " Jan Beulich
@ 2017-02-28 13:47 ` Andrew Cooper
2017-02-28 14:19 ` Jan Beulich
8 siblings, 1 reply; 16+ messages in thread
From: Andrew Cooper @ 2017-02-28 13:47 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: George Dunlap
On 28/02/17 13:27, Jan Beulich wrote:
> This is only part of the necessary changes. Some needed to be
> dropped due to code having changed recently, and the biggest
> missing part is the adjustment of the insn emulator, when I'd
> prefer to do this work only after the non-RFC parts of
> https://lists.xenproject.org/archives/html/xen-devel/2017-02/msg03474.html
> have gone in (in order to avoid having to ping-pong re-base
> that and this series).
>
> 1: re-introduce non-underscore prefixed 32-bit register names
> 2: switch away from temporary 32-bit register names
> 3: HVM: switch away from temporary 32-bit register names
> 4: HVMemul: switch away from temporary 32-bit register names
> 5: mm: switch away from temporary 32-bit register names
> 6: SVM: switch away from temporary 32-bit register names
> 7: Viridian: switch away from temporary 32-bit register names
> 8: VMX: switch away from temporary 32-bit register names
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Your Viridian patch is labelled 7 here, but 5 in the email. I guess
that is just an oversight?
All Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
* Re: [PATCH 0/8] x86: switch away from temporary 32-bit register names
2017-02-28 13:47 ` [PATCH 0/8] x86: " Andrew Cooper
@ 2017-02-28 14:19 ` Jan Beulich
0 siblings, 0 replies; 16+ messages in thread
From: Jan Beulich @ 2017-02-28 14:19 UTC (permalink / raw)
To: Andrew Cooper, xen-devel; +Cc: George Dunlap
>>> On 28.02.17 at 14:47, <andrew.cooper3@citrix.com> wrote:
> On 28/02/17 13:27, Jan Beulich wrote:
>> This is only part of the necessary changes. Some needed to be
>> dropped due to code having changed recently, and the biggest
>> missing part is the adjustment of the insn emulator, when I'd
>> prefer to do this work only after the non-RFC parts of
>> https://lists.xenproject.org/archives/html/xen-devel/2017-02/msg03474.html
>> have gone in (in order to avoid having to ping-pong re-base
>> that and this series).
>>
>> 1: re-introduce non-underscore prefixed 32-bit register names
>> 2: switch away from temporary 32-bit register names
>> 3: HVM: switch away from temporary 32-bit register names
>> 4: HVMemul: switch away from temporary 32-bit register names
>> 5: mm: switch away from temporary 32-bit register names
>> 6: SVM: switch away from temporary 32-bit register names
>> 7: Viridian: switch away from temporary 32-bit register names
>> 8: VMX: switch away from temporary 32-bit register names
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>
>
> Your Viridian patch is labelled 7 here, but 5 in the email. I guess
> that is just an oversight?
Indeed - I don't know how that happened.
> All Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Thanks, Jan