From: Paul Durrant <Paul.Durrant@citrix.com>
To: 'Jan Beulich' <JBeulich@suse.com>,
xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>
Subject: Re: [PATCH 10/16] x86/HVM: patch indirect calls through hvm_funcs to direct ones
Date: Fri, 13 Jul 2018 10:06:18 +0000
Message-ID: <e1ed1d7f5cba4af0bfe36c24066bc1fa@AMSPEX02CL02.citrite.net>
In-Reply-To: <5B46094902000078001D32B0@prv1-mh.provo.novell.com>
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xenproject.org] On Behalf
> Of Jan Beulich
> Sent: 11 July 2018 14:43
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>
> Subject: [Xen-devel] [PATCH 10/16] x86/HVM: patch indirect calls through
> hvm_funcs to direct ones
>
> This is intentionally not touching hooks used rarely (or not at all)
> during the lifetime of a VM, like {domain,vcpu}_initialise or cpu_up,
> as well as nested, VM event, and altp2m ones (they can all be done
> later, if so desired). Virtual Interrupt delivery ones will be dealt
> with in a subsequent patch.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> Needless to say that I'm pretty unhappy about the workaround I had to
> add to hvm_set_rdtsc_exiting(). Improvement suggestions welcome.
Presumably the workaround is needed because of the ?: in ALT_CALL_ARG(), and will occur for any bool argument to an alternatives-patched call? If so, an ad hoc workaround seems undesirable. Is it possible to suppress the warning by dropping the gcc-ism from the ternary operator?
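
A minimal sketch of what I mean (assuming ALT_CALL_ARG() derives the
register variable's type via typeof over a "?:" with its middle operand
omitted - I haven't re-checked the exact definition in patch 9, so the
names below are illustrative only, not the actual macro):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool enable = true;

        /* gcc-ism: middle operand omitted. With a _Bool controlling
         * expression, gcc 7+ emits "the omitted middle operand in ?:
         * will always be 'true', suggest explicit middle operand
         * [-Wparentheses]". */
        int gnu = enable ?: 0;

        /* Explicit middle operand: same value (and, when used inside
         * typeof, the same type) without triggering the warning. */
        int std = enable ? enable : 0;

        printf("%d %d\n", gnu, std);
        return 0;
    }

Building the above with gcc -Wparentheses should flag only the first
assignment, which is why dropping the gcc-ism looks like it would make
the pragma dance in hvm_set_rdtsc_exiting() unnecessary.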
Paul
>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -2017,7 +2017,7 @@ static int hvmemul_write_msr(
> static int hvmemul_wbinvd(
> struct x86_emulate_ctxt *ctxt)
> {
> - hvm_funcs.wbinvd_intercept();
> + alternative_vcall0(hvm_funcs.wbinvd_intercept);
> return X86EMUL_OKAY;
> }
>
> @@ -2035,7 +2035,7 @@ static int hvmemul_get_fpu(
> struct vcpu *curr = current;
>
> if ( !curr->fpu_dirtied )
> - hvm_funcs.fpu_dirty_intercept();
> + alternative_vcall0(hvm_funcs.fpu_dirty_intercept);
> else if ( type == X86EMUL_FPU_fpu )
> {
> const typeof(curr->arch.xsave_area->fpu_sse) *fpu_ctxt =
> @@ -2152,7 +2152,7 @@ static void hvmemul_put_fpu(
> {
> curr->fpu_dirtied = false;
> stts();
> - hvm_funcs.fpu_leave(curr);
> + alternative_vcall1(hvm_funcs.fpu_leave, curr);
> }
> }
> }
> @@ -2314,7 +2314,8 @@ static int _hvm_emulate_one(struct hvm_e
> if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
> {
> hvmemul_ctxt->intr_shadow = new_intr_shadow;
> - hvm_funcs.set_interrupt_shadow(curr, new_intr_shadow);
> + alternative_vcall2(hvm_funcs.set_interrupt_shadow,
> + curr, new_intr_shadow);
> }
>
> if ( hvmemul_ctxt->ctxt.retire.hlt &&
> @@ -2451,7 +2452,8 @@ void hvm_emulate_init_once(
>
> memset(hvmemul_ctxt, 0, sizeof(*hvmemul_ctxt));
>
> - hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(curr);
> + hvmemul_ctxt->intr_shadow =
> + alternative_call1(hvm_funcs.get_interrupt_shadow, curr);
> hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
> hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
>
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -271,13 +271,24 @@ void hvm_set_rdtsc_exiting(struct domain
> {
> struct vcpu *v;
>
> +#if __GNUC__ >= 7
> +/*
> + * gcc from 7.x onwards warns about ternary operators with their middle operand
> + * omitted and the controlling expression itself being of _Bool type.
> + */
> +# pragma GCC diagnostic push
> +# pragma GCC diagnostic ignored "-Wparentheses"
> +#endif
> for_each_vcpu ( d, v )
> - hvm_funcs.set_rdtsc_exiting(v, enable);
> + alternative_vcall2(hvm_funcs.set_rdtsc_exiting, v, enable);
> +#if __GNUC__ >= 7
> +# pragma GCC diagnostic pop
> +#endif
> }
>
> void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
> {
> - if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
> + if ( !alternative_call2(hvm_funcs.get_guest_pat, v, guest_pat) )
> *guest_pat = v->arch.hvm_vcpu.pat_cr;
> }
>
> @@ -302,7 +313,7 @@ int hvm_set_guest_pat(struct vcpu *v, u6
> return 0;
> }
>
> - if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
> + if ( !alternative_call2(hvm_funcs.set_guest_pat, v, guest_pat) )
> v->arch.hvm_vcpu.pat_cr = guest_pat;
>
> return 1;
> @@ -342,7 +353,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *
> /* nothing, best effort only */;
> }
>
> - return hvm_funcs.set_guest_bndcfgs(v, val);
> + return alternative_call2(hvm_funcs.set_guest_bndcfgs, v, val);
> }
>
> /*
> @@ -502,7 +513,8 @@ void hvm_migrate_pirqs(struct vcpu *v)
> static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
> {
> info->cr2 = v->arch.hvm_vcpu.guest_cr[2];
> - return hvm_funcs.get_pending_event(v, info);
> +
> + return alternative_call2(hvm_funcs.get_pending_event, v, info);
> }
>
> void hvm_do_resume(struct vcpu *v)
> @@ -1684,7 +1696,7 @@ void hvm_inject_event(const struct x86_e
> }
> }
>
> - hvm_funcs.inject_event(event);
> + alternative_vcall1(hvm_funcs.inject_event, event);
> }
>
> int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
> @@ -2271,7 +2283,7 @@ int hvm_set_cr0(unsigned long value, boo
> (!rangeset_is_empty(d->iomem_caps) ||
> !rangeset_is_empty(d->arch.ioport_caps) ||
> has_arch_pdevs(d)) )
> - hvm_funcs.handle_cd(v, value);
> + alternative_vcall2(hvm_funcs.handle_cd, v, value);
>
> hvm_update_cr(v, 0, value);
>
> @@ -3515,7 +3527,8 @@ int hvm_msr_read_intercept(unsigned int
> goto gp_fault;
> /* If ret == 0 then this is not an MCE MSR, see other MSRs. */
> ret = ((ret == 0)
> - ? hvm_funcs.msr_read_intercept(msr, msr_content)
> + ? alternative_call2(hvm_funcs.msr_read_intercept,
> + msr, msr_content)
> : X86EMUL_OKAY);
> break;
> }
> @@ -3672,7 +3685,8 @@ int hvm_msr_write_intercept(unsigned int
> goto gp_fault;
> /* If ret == 0 then this is not an MCE MSR, see other MSRs. */
> ret = ((ret == 0)
> - ? hvm_funcs.msr_write_intercept(msr, msr_content)
> + ? alternative_call2(hvm_funcs.msr_write_intercept,
> + msr, msr_content)
> : X86EMUL_OKAY);
> break;
> }
> @@ -3864,7 +3878,7 @@ void hvm_hypercall_page_initialise(struc
> void *hypercall_page)
> {
> hvm_latch_shinfo_size(d);
> - hvm_funcs.init_hypercall_page(d, hypercall_page);
> + alternative_vcall2(hvm_funcs.init_hypercall_page, d, hypercall_page);
> }
>
> void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
> @@ -4988,7 +5002,7 @@ void hvm_domain_soft_reset(struct domain
> void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
> struct segment_register *reg)
> {
> - hvm_funcs.get_segment_register(v, seg, reg);
> + alternative_vcall3(hvm_funcs.get_segment_register, v, seg, reg);
>
> switch ( seg )
> {
> @@ -5134,7 +5148,7 @@ void hvm_set_segment_register(struct vcp
> return;
> }
>
> - hvm_funcs.set_segment_register(v, seg, reg);
> + alternative_vcall3(hvm_funcs.set_segment_register, v, seg, reg);
> }
>
> /*
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5785,7 +5785,7 @@ void paging_invlpg(struct vcpu *v, unsig
> if ( is_pv_vcpu(v) )
> flush_tlb_one_local(va);
> else
> - hvm_funcs.invlpg(v, va);
> + alternative_vcall2(hvm_funcs.invlpg, v, va);
> }
>
> /* Build a 32bit PSE page table using 4MB pages. */
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -317,42 +317,42 @@ static inline int
> hvm_guest_x86_mode(struct vcpu *v)
> {
> ASSERT(v == current);
> - return hvm_funcs.guest_x86_mode(v);
> + return alternative_call1(hvm_funcs.guest_x86_mode, v);
> }
>
> static inline void
> hvm_update_host_cr3(struct vcpu *v)
> {
> if ( hvm_funcs.update_host_cr3 )
> - hvm_funcs.update_host_cr3(v);
> + alternative_vcall1(hvm_funcs.update_host_cr3, v);
> }
>
> static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
> {
> - hvm_funcs.update_guest_cr(v, cr, 0);
> + alternative_vcall3(hvm_funcs.update_guest_cr, v, cr, 0);
> }
>
> static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
> {
> unsigned int flags = noflush ? HVM_UPDATE_GUEST_CR3_NOFLUSH : 0;
>
> - hvm_funcs.update_guest_cr(v, 3, flags);
> + alternative_vcall3(hvm_funcs.update_guest_cr, v, 3, flags);
> }
>
> static inline void hvm_update_guest_efer(struct vcpu *v)
> {
> - hvm_funcs.update_guest_efer(v);
> + alternative_vcall1(hvm_funcs.update_guest_efer, v);
> }
>
> static inline void hvm_cpuid_policy_changed(struct vcpu *v)
> {
> - hvm_funcs.cpuid_policy_changed(v);
> + alternative_vcall1(hvm_funcs.cpuid_policy_changed, v);
> }
>
> static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
> uint64_t at_tsc)
> {
> - hvm_funcs.set_tsc_offset(v, offset, at_tsc);
> + alternative_vcall3(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
> }
>
> /*
> @@ -372,7 +372,7 @@ void hvm_hypercall_page_initialise(struc
> static inline unsigned int
> hvm_get_cpl(struct vcpu *v)
> {
> - return hvm_funcs.get_cpl(v);
> + return alternative_call1(hvm_funcs.get_cpl, v);
> }
>
> void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
> @@ -382,13 +382,13 @@ void hvm_set_segment_register(struct vcp
>
> static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
> {
> - return hvm_funcs.get_shadow_gs_base(v);
> + return alternative_call1(hvm_funcs.get_shadow_gs_base, v);
> }
>
> static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
> {
> return hvm_funcs.get_guest_bndcfgs &&
> - hvm_funcs.get_guest_bndcfgs(v, val);
> + alternative_call2(hvm_funcs.get_guest_bndcfgs, v, val);
> }
>
> bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
> @@ -454,7 +454,7 @@ static inline void hvm_inject_page_fault
>
> static inline int hvm_event_pending(struct vcpu *v)
> {
> - return hvm_funcs.event_pending(v);
> + return alternative_call1(hvm_funcs.event_pending, v);
> }
>
> /* These bits in CR4 are owned by the host. */
> @@ -485,7 +485,8 @@ static inline void hvm_cpu_down(void)
>
> static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
> {
> - return (hvm_funcs.get_insn_bytes ? hvm_funcs.get_insn_bytes(v, buf) : 0);
> + return (hvm_funcs.get_insn_bytes
> + ? alternative_call2(hvm_funcs.get_insn_bytes, v, buf) : 0);
> }
>
> enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
> @@ -517,7 +518,7 @@ void hvm_mapped_guest_frames_mark_dirty(
> static inline void hvm_set_info_guest(struct vcpu *v)
> {
> if ( hvm_funcs.set_info_guest )
> - return hvm_funcs.set_info_guest(v);
> + alternative_vcall1(hvm_funcs.set_info_guest, v);
> }
>
> int hvm_debug_op(struct vcpu *v, int32_t op);
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel