From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Jan Beulich <JBeulich@suse.com>
Cc: xen-devel <xen-devel@lists.xenproject.org>, Keir Fraser <keir@xen.org>
Subject: Re: [PATCH 3/5] x86/HVM: don't ignore hvm_copy_to_guest_phys() errors during I/O intercept
Date: Tue, 8 Oct 2013 19:20:03 +0100
Message-ID: <52544CD3.5010709@citrix.com>
In-Reply-To: <5249918B02000078000F807F@nat28.tlf.novell.com>
On 30/09/13 13:58, Jan Beulich wrote:
> Building upon the extended retry logic we can now also make sure to
> not ignore errors resulting from writing data back to guest memory.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/hvm/intercept.c
> +++ b/xen/arch/x86/hvm/intercept.c
> @@ -47,6 +47,7 @@ static int hvm_mmio_access(struct vcpu *
> hvm_mmio_read_t read_handler,
> hvm_mmio_write_t write_handler)
> {
> + struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
A struct vcpu *v is passed into this function, and can be used in
preference to current (and is possibly fractionally faster, unless the
compiler can prove that v is always current).
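i.e. something along these lines (untested sketch, simply reusing the
function's existing parameter):

    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;

This also avoids re-deriving current on each call.
(process_portio_intercept() below takes no vcpu parameter, so current is
still needed there.)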
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> unsigned long data;
> int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
>
> @@ -54,7 +55,16 @@ static int hvm_mmio_access(struct vcpu *
> {
> if ( p->dir == IOREQ_READ )
> {
> - rc = read_handler(v, p->addr, p->size, &data);
> + if ( vio->mmio_retrying )
> + {
> + if ( vio->mmio_large_read_bytes != p->size )
> + return X86EMUL_UNHANDLEABLE;
> + memcpy(&data, vio->mmio_large_read, p->size);
> + vio->mmio_large_read_bytes = 0;
> + vio->mmio_retrying = 0;
> + }
> + else
> + rc = read_handler(v, p->addr, p->size, &data);
> p->data = data;
> }
> else /* p->dir == IOREQ_WRITE */
> @@ -66,18 +76,48 @@ static int hvm_mmio_access(struct vcpu *
> {
> for ( i = 0; i < p->count; i++ )
> {
> - int ret;
> -
> - rc = read_handler(v, p->addr + step * i, p->size, &data);
> - if ( rc != X86EMUL_OKAY )
> - break;
> - ret = hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
> - if ( (ret == HVMCOPY_gfn_paged_out) ||
> - (ret == HVMCOPY_gfn_shared) )
> + if ( vio->mmio_retrying )
> + {
> + if ( vio->mmio_large_read_bytes != p->size )
> + return X86EMUL_UNHANDLEABLE;
> + memcpy(&data, vio->mmio_large_read, p->size);
> + vio->mmio_large_read_bytes = 0;
> + vio->mmio_retrying = 0;
> + }
> + else
> {
> + rc = read_handler(v, p->addr + step * i, p->size, &data);
> + if ( rc != X86EMUL_OKAY )
> + break;
> + }
> + switch ( hvm_copy_to_guest_phys(p->data + step * i,
> + &data, p->size) )
> + {
> + case HVMCOPY_okay:
> + break;
> + case HVMCOPY_gfn_paged_out:
> + case HVMCOPY_gfn_shared:
> rc = X86EMUL_RETRY;
> break;
> + case HVMCOPY_bad_gfn_to_mfn:
> + /* Drop the write as real hardware would. */
> + continue;
> + case HVMCOPY_bad_gva_to_gfn:
> + ASSERT(0);
> + /* fall through */
> + default:
> + rc = X86EMUL_UNHANDLEABLE;
> + break;
> }
> + if ( rc != X86EMUL_OKAY)
> + break;
> + }
> +
> + if ( rc == X86EMUL_RETRY )
> + {
> + vio->mmio_retry = 1;
> + vio->mmio_large_read_bytes = p->size;
> + memcpy(vio->mmio_large_read, &data, p->size);
> }
> }
> else
> @@ -109,6 +149,9 @@ static int hvm_mmio_access(struct vcpu *
> if ( rc != X86EMUL_OKAY )
> break;
> }
> +
> + if ( rc == X86EMUL_RETRY )
> + vio->mmio_retry = 1;
> }
>
> if ( i != 0 )
> @@ -137,6 +180,7 @@ int hvm_mmio_intercept(ioreq_t *p)
>
> static int process_portio_intercept(portio_action_t action, ioreq_t *p)
> {
> + struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
> int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
> uint32_t data;
>
> @@ -144,7 +188,16 @@ static int process_portio_intercept(port
> {
> if ( p->dir == IOREQ_READ )
> {
> - rc = action(IOREQ_READ, p->addr, p->size, &data);
> + if ( vio->mmio_retrying )
> + {
> + if ( vio->mmio_large_read_bytes != p->size )
> + return X86EMUL_UNHANDLEABLE;
> + memcpy(&data, vio->mmio_large_read, p->size);
> + vio->mmio_large_read_bytes = 0;
> + vio->mmio_retrying = 0;
> + }
> + else
> + rc = action(IOREQ_READ, p->addr, p->size, &data);
> p->data = data;
> }
> else
> @@ -159,10 +212,48 @@ static int process_portio_intercept(port
> {
> for ( i = 0; i < p->count; i++ )
> {
> - rc = action(IOREQ_READ, p->addr, p->size, &data);
> - if ( rc != X86EMUL_OKAY )
> + if ( vio->mmio_retrying )
> + {
> + if ( vio->mmio_large_read_bytes != p->size )
> + return X86EMUL_UNHANDLEABLE;
> + memcpy(&data, vio->mmio_large_read, p->size);
> + vio->mmio_large_read_bytes = 0;
> + vio->mmio_retrying = 0;
> + }
> + else
> + {
> + rc = action(IOREQ_READ, p->addr, p->size, &data);
> + if ( rc != X86EMUL_OKAY )
> + break;
> + }
> + switch ( hvm_copy_to_guest_phys(p->data + step * i,
> + &data, p->size) )
> + {
> + case HVMCOPY_okay:
> + break;
> + case HVMCOPY_gfn_paged_out:
> + case HVMCOPY_gfn_shared:
> + rc = X86EMUL_RETRY;
> break;
> - (void)hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
> + case HVMCOPY_bad_gfn_to_mfn:
> + /* Drop the write as real hardware would. */
> + continue;
> + case HVMCOPY_bad_gva_to_gfn:
> + ASSERT(0);
> + /* fall through */
> + default:
> + rc = X86EMUL_UNHANDLEABLE;
> + break;
> + }
> + if ( rc != X86EMUL_OKAY)
> + break;
> + }
> +
> + if ( rc == X86EMUL_RETRY )
> + {
> + vio->mmio_retry = 1;
> + vio->mmio_large_read_bytes = p->size;
> + memcpy(vio->mmio_large_read, &data, p->size);
> }
> }
> else /* p->dir == IOREQ_WRITE */
> @@ -195,6 +286,9 @@ static int process_portio_intercept(port
> if ( rc != X86EMUL_OKAY )
> break;
> }
> +
> + if ( rc == X86EMUL_RETRY )
> + vio->mmio_retry = 1;
> }
>
> if ( i != 0 )