From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Kevin Tian <kevin.tian@intel.com>,
Jan Beulich <JBeulich@suse.com>,
George Dunlap <george.dunlap@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Tim Deegan <tim@xen.org>, Julien Grall <julien.grall@arm.com>,
Paul Durrant <paul.durrant@citrix.com>,
Jun Nakajima <jun.nakajima@intel.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Subject: [PATCH for 4.9 2/6] x86/hvm: Correct long mode predicate
Date: Fri, 31 Mar 2017 20:50:49 +0100
Message-ID: <1490989853-21879-3-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1490989853-21879-1-git-send-email-andrew.cooper3@citrix.com>
hvm_long_mode_enabled() tests for EFER.LMA, which is specifically different from
EFER.LME.  LME indicates that the guest has requested long mode, whereas LMA is
set by hardware only once long mode is actually active, i.e. once paging has
been enabled on top of LME.
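To illustrate the window in which the two bits differ, here is a minimal
sketch of the architectural enable sequence (illustrative C, not patch code;
the bit positions match the AMD64 manual, the function name is made up):

    #include <stdint.h>

    #define X86_CR0_PG  (1UL << 31)  /* paging enable */
    #define X86_CR4_PAE (1UL << 5)   /* PAE paging, prerequisite for long mode */
    #define EFER_LME    (1UL << 8)   /* Long Mode Enable: requested by software */
    #define EFER_LMA    (1UL << 10)  /* Long Mode Active: reported by hardware */

    /*
     * Between steps 2 and 3 the guest has LME set but LMA clear, so a
     * predicate which tests LMA must not be named as if it tested LME.
     */
    static void enable_long_mode_sketch(uint64_t *cr0, uint64_t *cr4,
                                        uint64_t *efer)
    {
        *cr4  |= X86_CR4_PAE;  /* 1: enable PAE */
        *efer |= EFER_LME;     /* 2: request long mode; LMA is still clear */
        *cr0  |= X86_CR0_PG;   /* 3: enable paging; hardware now sets LMA */
        *efer |= EFER_LMA;     /*    (modelled explicitly in this sketch) */
    }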
Rename it to match its behaviour, and have it strictly return a boolean value
(although all its callers already use it in implicitly-boolean contexts, so no
functional change).
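The shape of the change to the predicate itself, simplified from the final
hunk below:

    /* Before: evaluates to the raw bit value, i.e. 0 or 0x400. */
    #define hvm_long_mode_enabled(v) \
        ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)

    /* After: !! collapses any nonzero value to 1, a strict boolean. */
    #define hvm_long_mode_active(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_LMA))

The rename from "enabled" to "active" mirrors the architectural LME/LMA
naming.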
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Paul Durrant <paul.durrant@citrix.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
CC: Julien Grall <julien.grall@arm.com>
---
xen/arch/x86/cpuid.c | 2 +-
xen/arch/x86/hvm/emulate.c | 2 +-
xen/arch/x86/hvm/hvm.c | 10 +++++-----
xen/arch/x86/hvm/svm/svm.c | 6 +++---
xen/arch/x86/hvm/vmx/vmx.c | 6 +++---
xen/arch/x86/hvm/vmx/vvmx.c | 8 ++++----
xen/arch/x86/mm/hap/hap.c | 8 ++++----
xen/arch/x86/mm/shadow/common.c | 4 ++--
xen/arch/x86/oprofile/backtrace.c | 2 +-
xen/include/asm-x86/hvm/hvm.h | 3 +--
10 files changed, 25 insertions(+), 26 deletions(-)
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 1c6a6c6..d359e09 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -911,7 +911,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
case 0x80000001:
/* SYSCALL is hidden outside of long mode on Intel. */
if ( p->x86_vendor == X86_VENDOR_INTEL &&
- is_hvm_domain(d) && !hvm_long_mode_enabled(v) )
+ is_hvm_domain(d) && !hvm_long_mode_active(v) )
res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
common_leaf1_adjustments:
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 4073715..3d084ca 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2051,7 +2051,7 @@ void hvm_emulate_init_per_insn(
unsigned int pfec = PFEC_page_present;
unsigned long addr;
- if ( hvm_long_mode_enabled(curr) &&
+ if ( hvm_long_mode_active(curr) &&
hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
else
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0282986..9f83cd8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2227,7 +2227,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
}
/* When CR0.PG is cleared, LMA is cleared immediately. */
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
{
v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
hvm_update_guest_efer(v);
@@ -2321,7 +2321,7 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
if ( !(value & X86_CR4_PAE) )
{
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
"EFER.LMA is set");
@@ -2332,7 +2332,7 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
old_cr = v->arch.hvm_vcpu.guest_cr[4];
if ( (value & X86_CR4_PCIDE) && !(old_cr & X86_CR4_PCIDE) &&
- (!hvm_long_mode_enabled(v) ||
+ (!hvm_long_mode_active(v) ||
(v->arch.hvm_vcpu.guest_cr[3] & 0xfff)) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to change CR4.PCIDE from "
@@ -3605,7 +3605,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
sizeof(sig), hvm_access_insn_fetch,
- (hvm_long_mode_enabled(cur) &&
+ (hvm_long_mode_active(cur) &&
cs->attr.fields.l) ? 64 :
cs->attr.fields.db ? 32 : 16, &addr) &&
(hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
@@ -3616,7 +3616,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
regs->eflags &= ~X86_EFLAGS_RF;
/* Zero the upper 32 bits of %rip if not in 64bit mode. */
- if ( !(hvm_long_mode_enabled(cur) && cs->attr.fields.l) )
+ if ( !(hvm_long_mode_active(cur) && cs->attr.fields.l) )
regs->rip = regs->eip;
add_taint(TAINT_HVM_FEP);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b69789b..4d7e49f 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -516,7 +516,7 @@ static int svm_guest_x86_mode(struct vcpu *v)
return 0;
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
- if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
+ if ( hvm_long_mode_active(v) && likely(vmcb->cs.attr.fields.l) )
return 8;
return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
}
@@ -2279,7 +2279,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
exit_reason = vmcb->exitcode;
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
1/*cycles*/, 3, exit_reason,
regs->eip, regs->rip >> 32, 0, 0, 0);
@@ -2429,7 +2429,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
{
if ( trace_will_trace_event(TRC_SHADOW) )
break;
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
else
HVMTRACE_2D(PF_XEN, regs->error_code, va);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d201956..b6526c9 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -611,7 +611,7 @@ int vmx_guest_x86_mode(struct vcpu *v)
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
__vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
- if ( hvm_long_mode_enabled(v) &&
+ if ( hvm_long_mode_active(v) &&
likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
return 8;
return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
@@ -3392,7 +3392,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
__vmread(VM_EXIT_REASON, &exit_reason);
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
regs->eip, regs->rip >> 32, 0, 0, 0);
else
@@ -3632,7 +3632,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
if ( trace_will_trace_event(TRC_SHADOW) )
break;
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
HVMTRACE_LONG_2D(PF_XEN, regs->error_code,
TRC_PAR_LONG(exit_qualification) );
else
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 09e4250..e9860f7 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -392,7 +392,7 @@ static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
else if ( !nvmx_vcpu_in_vmx(v) )
goto invalid_op;
- if ( vmx_guest_x86_mode(v) < (hvm_long_mode_enabled(v) ? 8 : 2) )
+ if ( vmx_guest_x86_mode(v) < (hvm_long_mode_active(v) ? 8 : 2) )
goto invalid_op;
else if ( nestedhvm_vcpu_in_guestmode(v) )
goto vmexit;
@@ -1154,13 +1154,13 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
/*
* EFER handling:
* hvm_set_efer won't work if CR0.PG = 1, so we change the value
- * directly to make hvm_long_mode_enabled(v) work in L2.
+ * directly to make hvm_long_mode_active(v) work in L2.
* An additional update_paging_modes is also needed if
* there is 32/64 switch. v->arch.hvm_vcpu.guest_efer doesn't
* need to be saved, since its value on vmexit is determined by
* L1 exit_controls
*/
- lm_l1 = !!hvm_long_mode_enabled(v);
+ lm_l1 = !!hvm_long_mode_active(v);
lm_l2 = !!(get_vvmcs(v, VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
if ( lm_l2 )
@@ -1359,7 +1359,7 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
nvcpu->nv_vmexit_pending = 0;
nvcpu->nv_vmswitch_in_progress = 1;
- lm_l2 = !!hvm_long_mode_enabled(v);
+ lm_l2 = !!hvm_long_mode_active(v);
lm_l1 = !!(get_vvmcs(v, VM_EXIT_CONTROLS) & VM_EXIT_IA32E_MODE);
if ( lm_l1 )
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index a57b385..283d4b7 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -690,10 +690,10 @@ static void hap_update_cr3(struct vcpu *v, int do_locking)
const struct paging_mode *
hap_paging_get_mode(struct vcpu *v)
{
- return !hvm_paging_enabled(v) ? &hap_paging_real_mode :
- hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
- hvm_pae_enabled(v) ? &hap_paging_pae_mode :
- &hap_paging_protected_mode;
+ return !hvm_paging_enabled(v) ? &hap_paging_real_mode :
+ hvm_long_mode_active(v) ? &hap_paging_long_mode :
+ hvm_pae_enabled(v) ? &hap_paging_pae_mode :
+ &hap_paging_protected_mode;
}
static void hap_update_paging_modes(struct vcpu *v)
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 03cb24d..14a07dd 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -331,7 +331,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
/* Work out the emulation mode. */
- if ( hvm_long_mode_enabled(v) && creg->attr.fields.l )
+ if ( hvm_long_mode_active(v) && creg->attr.fields.l )
{
sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
}
@@ -2921,7 +2921,7 @@ static void sh_update_paging_modes(struct vcpu *v)
v->arch.guest_table = d->arch.paging.shadow.unpaged_pagetable;
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
}
- else if ( hvm_long_mode_enabled(v) )
+ else if ( hvm_long_mode_active(v) )
{
// long mode guest...
v->arch.paging.mode =
diff --git a/xen/arch/x86/oprofile/backtrace.c b/xen/arch/x86/oprofile/backtrace.c
index f0fbb42..316821f 100644
--- a/xen/arch/x86/oprofile/backtrace.c
+++ b/xen/arch/x86/oprofile/backtrace.c
@@ -47,7 +47,7 @@ dump_hypervisor_backtrace(struct vcpu *vcpu, const struct frame_head *head,
static inline int is_32bit_vcpu(struct vcpu *vcpu)
{
if (is_hvm_vcpu(vcpu))
- return !hvm_long_mode_enabled(vcpu);
+ return !hvm_long_mode_active(vcpu);
else
return is_pv_32bit_vcpu(vcpu);
}
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index c854183..49c8001 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -302,8 +302,7 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))
-#define hvm_long_mode_enabled(v) \
- ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
+#define hvm_long_mode_active(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_LMA))
enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);
--
2.1.4