* [PATCH v2] x86/hvm: add support for broadcast of buffered ioreqs...
From: Paul Durrant @ 2015-07-10 16:07 UTC
To: xen-devel; +Cc: Andrew Cooper, Paul Durrant, Keir Fraser, Jan Beulich
...and make RTC timeoffset ioreqs use it.
Without this patch, RTC timeoffset updates go nowhere and Xen complains
with a (non-rate-limited) printk.
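For reference, the intended calling convention after this patch; this is
a sketch for reviewers, not part of the diff. The ioreq_t initializer
mirrors the existing send_timeoffset_req(), and the semantics described
in the comment are as implemented below:

  /*
   * hvm_send_ioreq(s, p, buffered):
   *   buffered == 0 -> synchronous path (formerly hvm_send_assist_req()),
   *   buffered == 1 -> buffered path (formerly hvm_buffered_io_send()),
   *   returning X86EMUL_OKAY on success or X86EMUL_UNHANDLEABLE on failure.
   *
   * hvm_broadcast_ioreq(p, buffered):
   *   sends p to every registered ioreq server and returns the number
   *   of servers that failed the request, so 0 means complete success.
   */
  ioreq_t p = {
      .type  = IOREQ_TYPE_TIMEOFFSET,
      .size  = 8,
      .count = 1,
      .dir   = IOREQ_WRITE,
      .data  = timeoff,                /* the new RTC offset */
      .state = STATE_IOREQ_READY,
  };

  if ( hvm_broadcast_ioreq(&p, 1) != 0 )   /* 1 == buffered */
      gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");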
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
v2:
- Add the (g)printk back in, as requested by Jan. It will be emitted if
one or more time-offset ioreqs fail.
- Also added code to flag up a map-cache invalidation ioreq failure
in a similar fashion.
---
xen/arch/x86/hvm/emulate.c | 2 +-
xen/arch/x86/hvm/hvm.c | 29 +++++++++++++++++------------
xen/arch/x86/hvm/io.c | 7 ++++---
xen/arch/x86/hvm/stdvga.c | 8 +++++---
xen/include/asm-x86/hvm/hvm.h | 4 ++--
xen/include/asm-x86/hvm/io.h | 1 -
6 files changed, 29 insertions(+), 22 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 01ee972..795321c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -161,7 +161,7 @@ static int hvmemul_do_io(
}
else
{
- rc = hvm_send_assist_req(s, &p);
+ rc = hvm_send_ioreq(s, &p, 0);
if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
vio->io_req.state = STATE_IOREQ_NONE;
else if ( data_is_addr )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ebcf7a9..36b7408 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2561,10 +2561,9 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
return d->arch.hvm_domain.default_ioreq_server;
}
-int hvm_buffered_io_send(ioreq_t *p)
+static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
{
struct domain *d = current->domain;
- struct hvm_ioreq_server *s = hvm_select_ioreq_server(d, p);
struct hvm_ioreq_page *iorp;
buffered_iopage_t *pg;
buf_ioreq_t bp = { .data = p->data,
@@ -2577,14 +2576,11 @@ int hvm_buffered_io_send(ioreq_t *p)
/* Ensure buffered_iopage fits in a page */
BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
- if ( !s )
- return 0;
-
iorp = &s->bufioreq;
pg = iorp->va;
if ( !pg )
- return 0;
+ return X86EMUL_UNHANDLEABLE;
/*
* Return 0 for the cases we can't deal with:
@@ -2614,7 +2610,7 @@ int hvm_buffered_io_send(ioreq_t *p)
break;
default:
gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
- return 0;
+ return X86EMUL_UNHANDLEABLE;
}
spin_lock(&s->bufioreq_lock);
@@ -2624,7 +2620,7 @@ int hvm_buffered_io_send(ioreq_t *p)
{
/* The queue is full: send the iopacket through the normal path. */
spin_unlock(&s->bufioreq_lock);
- return 0;
+ return X86EMUL_UNHANDLEABLE;
}
pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
@@ -2654,16 +2650,21 @@ int hvm_buffered_io_send(ioreq_t *p)
notify_via_xen_event_channel(d, s->bufioreq_evtchn);
spin_unlock(&s->bufioreq_lock);
- return 1;
+ return X86EMUL_OKAY;
}
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
+ bool_t buffered)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
struct hvm_ioreq_vcpu *sv;
ASSERT(s);
+
+ if ( buffered )
+ return hvm_send_buffered_ioreq(s, proto_p);
+
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
return X86EMUL_RETRY;
@@ -2710,17 +2711,21 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
return X86EMUL_UNHANDLEABLE;
}
-void hvm_broadcast_assist_req(ioreq_t *p)
+int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
{
struct domain *d = current->domain;
struct hvm_ioreq_server *s;
+ unsigned int failed = 0;
ASSERT(p->type == IOREQ_TYPE_INVALIDATE);
list_for_each_entry ( s,
&d->arch.hvm_domain.ioreq_server.list,
list_entry )
- (void) hvm_send_assist_req(s, p);
+ if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+ failed++;
+
+ return failed;
}
void hvm_hlt(unsigned long rflags)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 3b51d59..9dc6c71 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -60,8 +60,8 @@ void send_timeoffset_req(unsigned long timeoff)
if ( timeoff == 0 )
return;
- if ( !hvm_buffered_io_send(&p) )
- printk("Unsuccessful timeoffset update\n");
+ if ( hvm_broadcast_ioreq(&p, 1) != 0 )
+ gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");
}
/* Ask ioemu mapcache to invalidate mappings. */
@@ -74,7 +74,8 @@ void send_invalidate_req(void)
.data = ~0UL, /* flush all */
};
- hvm_broadcast_assist_req(&p);
+ if ( hvm_broadcast_ioreq(&p, 0) != 0 )
+ gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
}
int handle_mmio(void)
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 4a7593d..8222af3 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -439,6 +439,7 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
.dir = IOREQ_WRITE,
.data = data,
};
+ struct hvm_ioreq_server *srv;
if ( !s->cache )
goto done;
@@ -479,10 +480,11 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
}
done:
- if ( hvm_buffered_io_send(&p) )
- return X86EMUL_OKAY;
+ srv = hvm_select_ioreq_server(current->domain, &p);
+ if ( !srv )
+ return X86EMUL_UNHANDLEABLE;
- return X86EMUL_UNHANDLEABLE;
+ return hvm_send_ioreq(srv, &p, 1);
}
static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 35f1300..29a282b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -226,8 +226,8 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
ioreq_t *p);
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
-void hvm_broadcast_assist_req(ioreq_t *p);
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *p, bool_t buffered);
+int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered);
void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 577b21a..cf46689 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -117,7 +117,6 @@ void relocate_portio_handler(
struct domain *d, unsigned int old_port, unsigned int new_port,
unsigned int size);
-int hvm_buffered_io_send(ioreq_t *p);
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
int handle_mmio(void);
--
1.7.10.4
* Re: [PATCH v2] x86/hvm: add support for broadcast of buffered ioreqs...
From: Jan Beulich @ 2015-07-13 8:43 UTC
To: Paul Durrant; +Cc: Andrew Cooper, Keir Fraser, xen-devel
>>> On 10.07.15 at 18:07, <paul.durrant@citrix.com> wrote:
> @@ -2710,17 +2711,21 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
> return X86EMUL_UNHANDLEABLE;
> }
>
> -void hvm_broadcast_assist_req(ioreq_t *p)
> +int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
> {
> struct domain *d = current->domain;
> struct hvm_ioreq_server *s;
> + unsigned int failed = 0;
>
> ASSERT(p->type == IOREQ_TYPE_INVALIDATE);
>
> list_for_each_entry ( s,
> &d->arch.hvm_domain.ioreq_server.list,
> list_entry )
> - (void) hvm_send_assist_req(s, p);
> + if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
> + failed++;
> +
> + return failed;
I'll try to remember to fix up, upon commit, the mismatch between the
function's return type (int) and the type of the return expression
('failed' is unsigned int). Looks good beyond that.
Thanks, Jan
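(A sketch of what that fixup might look like, assuming the failure count
stays unsigned; this is illustrative, not necessarily the committed change:

  unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered);

with the definition in hvm.c adjusted to match.)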
* Re: [PATCH v2] x86/hvm: add support for broadcast of buffered ioreqs...
From: Paul Durrant @ 2015-07-13 8:52 UTC
To: Jan Beulich; +Cc: Andrew Cooper, Keir (Xen.org), xen-devel@lists.xen.org
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 13 July 2015 09:44
> To: Paul Durrant
> Cc: Andrew Cooper; xen-devel@lists.xen.org; Keir (Xen.org)
> Subject: Re: [PATCH v2] x86/hvm: add support for broadcast of buffered
> ioreqs...
>
> >>> On 10.07.15 at 18:07, <paul.durrant@citrix.com> wrote:
> > @@ -2710,17 +2711,21 @@ int hvm_send_assist_req(struct
> hvm_ioreq_server *s, ioreq_t *proto_p)
> > return X86EMUL_UNHANDLEABLE;
> > }
> >
> > -void hvm_broadcast_assist_req(ioreq_t *p)
> > +int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
> > {
> > struct domain *d = current->domain;
> > struct hvm_ioreq_server *s;
> > + unsigned int failed = 0;
> >
> > ASSERT(p->type == IOREQ_TYPE_INVALIDATE);
> >
> > list_for_each_entry ( s,
> > &d->arch.hvm_domain.ioreq_server.list,
> > list_entry )
> > - (void) hvm_send_assist_req(s, p);
> > + if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
> > + failed++;
> > +
> > + return failed;
>
> I'll try to remember fixing up the mismatch between function return
> type and return expression upon commit. Looks good beyond that.
>
Ok. Thanks,
Paul
> Thanks, Jan