From: Paul Durrant <Paul.Durrant@citrix.com>
To: Jan Beulich <JBeulich@suse.com>,
xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>
Subject: Re: [PATCH v4 2/5] x86emul: generalize exception handling for rep_* hooks
Date: Tue, 13 Dec 2016 15:15:42 +0000 [thread overview]
Message-ID: <e54d412f6cb248fabe8a8881723e41fb@AMSPEX02CL03.citrite.net> (raw)
In-Reply-To: <584FE9390200007800128882@prv-mh.provo.novell.com>
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 13 December 2016 11:28
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>
> Subject: [PATCH v4 2/5] x86emul: generalize exception handling for rep_*
> hooks
>
> If any of those hooks returns X86EMUL_EXCEPTION, some register state
> should still get updated if some iterations have been performed (but
> the rIP update will get suppressed if not all of them did get handled).
> This updating is done by register_address_increment() and
> __put_rep_prefix() (which hence must no longer be bypassed). As a
> result put_rep_prefix() can then skip most of the writeback, but needs
> to ensure proper completion of the executed instruction.
>
> While on the HVM side the VA -> LA -> PA translation process ensures
> that an exception would be raised on the first iteration only, doing so
> would unduly complicate the PV side code about to be added.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> v4: Move changes to xen/arch/x86/hvm/emulate.c here (from a later
> patch). Re-base.
> v3: Broken out from a later patch.
>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -459,6 +459,7 @@ static int hvmemul_linear_to_phys(
> {
> if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
> return X86EMUL_RETRY;
> + *reps = 0;
> x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
> return X86EMUL_EXCEPTION;
> }
> @@ -478,6 +479,7 @@ static int hvmemul_linear_to_phys(
> if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
> return X86EMUL_RETRY;
> done /= bytes_per_rep;
> + *reps = done;
> if ( done == 0 )
> {
> ASSERT(!reverse);
> @@ -486,7 +488,6 @@ static int hvmemul_linear_to_phys(
> x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
> return X86EMUL_EXCEPTION;
> }
> - *reps = done;
> break;
> }
>
> @@ -572,6 +573,7 @@ static int hvmemul_virtual_to_linear(
> * neither know the exact error code to be used, nor can we easily
> * determine the kind of exception (#GP or #TS) in that case.
> */
> + *reps = 0;
> if ( is_x86_user_segment(seg) )
> x86_emul_hw_exception((seg == x86_seg_ss)
> ? TRAP_stack_error
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -971,7 +971,11 @@ static void __put_rep_prefix(
>
> #define put_rep_prefix(reps_completed) ({ \
> if ( rep_prefix() ) \
> + { \
> __put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed); \
> + if ( unlikely(rc == X86EMUL_EXCEPTION) ) \
> + goto no_writeback; \
> + } \
> })
>
> /* Clip maximum repetitions so that the index register at most just wraps. */
> @@ -2919,14 +2923,9 @@ x86_emulate(
> dst.mem.off = truncate_ea_and_reps(_regs.edi, nr_reps, dst.bytes);
> if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
> goto done;
> - if ( (nr_reps > 1) && (ops->rep_ins != NULL) &&
> + if ( (nr_reps == 1) || !ops->rep_ins ||
> ((rc = ops->rep_ins(port, dst.mem.seg, dst.mem.off, dst.bytes,
> - &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
> - {
> - if ( rc != 0 )
> - goto done;
> - }
> - else
> + &nr_reps, ctxt)) == X86EMUL_UNHANDLEABLE) )
> {
> fail_if(ops->read_io == NULL);
> if ( (rc = ops->read_io(port, dst.bytes, &dst.val, ctxt)) != 0 )
> @@ -2936,6 +2935,8 @@ x86_emulate(
> }
> register_address_adjust(_regs.edi, nr_reps * dst.bytes);
> put_rep_prefix(nr_reps);
> + if ( rc != X86EMUL_OKAY )
> + goto done;
> break;
> }
>
> @@ -2946,14 +2947,9 @@ x86_emulate(
> ea.mem.off = truncate_ea_and_reps(_regs.esi, nr_reps, dst.bytes);
> if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
> goto done;
> - if ( (nr_reps > 1) && (ops->rep_outs != NULL) &&
> + if ( (nr_reps == 1) || !ops->rep_outs ||
> ((rc = ops->rep_outs(ea.mem.seg, ea.mem.off, port, dst.bytes,
> - &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
> - {
> - if ( rc != 0 )
> - goto done;
> - }
> - else
> + &nr_reps, ctxt)) == X86EMUL_UNHANDLEABLE) )
> {
> if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
> &dst.val, dst.bytes, ctxt, ops)) != 0 )
> @@ -2965,6 +2961,8 @@ x86_emulate(
> }
> register_address_adjust(_regs.esi, nr_reps * dst.bytes);
> put_rep_prefix(nr_reps);
> + if ( rc != X86EMUL_OKAY )
> + goto done;
> break;
> }
>
> @@ -3187,15 +3185,10 @@ x86_emulate(
> dst.mem.seg = x86_seg_es;
> dst.mem.off = truncate_ea_and_reps(_regs.edi, nr_reps, dst.bytes);
> src.mem.off = truncate_ea_and_reps(_regs.esi, nr_reps, dst.bytes);
> - if ( (nr_reps > 1) && (ops->rep_movs != NULL) &&
> + if ( (nr_reps == 1) || !ops->rep_movs ||
> ((rc = ops->rep_movs(ea.mem.seg, src.mem.off,
> dst.mem.seg, dst.mem.off, dst.bytes,
> - &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
> - {
> - if ( rc != 0 )
> - goto done;
> - }
> - else
> + &nr_reps, ctxt)) == X86EMUL_UNHANDLEABLE) )
> {
> if ( (rc = read_ulong(ea.mem.seg, src.mem.off,
> &dst.val, dst.bytes, ctxt, ops)) != 0 )
> @@ -3206,6 +3199,8 @@ x86_emulate(
> register_address_adjust(_regs.esi, nr_reps * dst.bytes);
> register_address_adjust(_regs.edi, nr_reps * dst.bytes);
> put_rep_prefix(nr_reps);
> + if ( rc != X86EMUL_OKAY )
> + goto done;
> break;
> }
>
> @@ -3244,11 +3239,12 @@ x86_emulate(
> dst.val = src.val;
> dst.type = OP_MEM;
> nr_reps = 1;
> + rc = X86EMUL_OKAY;
> }
> - else if ( rc != X86EMUL_OKAY )
> - goto done;
> register_address_adjust(_regs.edi, nr_reps * dst.bytes);
> put_rep_prefix(nr_reps);
> + if ( rc != X86EMUL_OKAY )
> + goto done;
> break;
> }
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-12-13 15:28 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-12-13 11:08 [PATCH v4 0/5] x86: extend use of generic insn emulator for PV Jan Beulich
2016-12-13 11:26 ` [PATCH v4 1/5] x86/32on64: use generic instruction decoding for call gate emulation Jan Beulich
2016-12-13 15:50 ` Andrew Cooper
2016-12-13 11:27 ` [PATCH v4 2/5] x86emul: generalize exception handling for rep_* hooks Jan Beulich
2016-12-13 15:15 ` Paul Durrant [this message]
2016-12-13 11:28 ` [PATCH v4 3/5] x86emul: make write and cmpxchg hooks optional Jan Beulich
2016-12-13 15:52 ` Andrew Cooper
2016-12-13 11:28 ` [PATCH v4 4/5] x86/PV: use generic emulator for privileged instruction handling Jan Beulich
2016-12-13 16:53 ` Andrew Cooper
2016-12-13 17:16 ` Jan Beulich
2016-12-14 7:35 ` Jan Beulich
2016-12-14 9:26 ` Andrew Cooper
2016-12-13 11:29 ` [PATCH v4 5/5] x86/PV: prefer pv_inject_hw_exception() Jan Beulich
2016-12-13 15:54 ` Andrew Cooper
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=e54d412f6cb248fabe8a8881723e41fb@AMSPEX02CL03.citrite.net \
--to=paul.durrant@citrix.com \
--cc=Andrew.Cooper3@citrix.com \
--cc=JBeulich@suse.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).