From: <stefano.stabellini@eu.citrix.com>
To: xen-devel@lists.xensource.com
Cc: qemu-devel@nongnu.org, agraf@suse.de,
Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [PATCH 3/4] xen: introduce an event channel for buffered io event notifications
Date: Tue, 15 Nov 2011 14:51:10 +0000
Message-ID: <1321368671-1134-3-git-send-email-stefano.stabellini@eu.citrix.com>
In-Reply-To: <alpine.DEB.2.00.1111151354000.3519@kaball-desktop>
From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Use the newly introduced HVM_PARAM_BUFIOREQ_EVTCHN to receive
notifications for buffered io events.
After the first notification is received, leave the event channel masked
and set up a timer to process the rest of the batch.
Once we have completed processing the batch, unmask the event channel
and delete the timer.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
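[Not part of the patch: a minimal sketch of the notification flow described
in the commit message, for reviewers' reference. It only reuses calls that
appear in the hunks below (qemu_mod_timer, qemu_del_timer, xc_evtchn_unmask,
handle_buffered_iopage); the helper names bufioreq_event and bufioreq_timer
are illustrative and do not exist in the patch.

    /* First notification of a batch: the event channel port stays masked
     * (it is not unmasked here); just arm the timer that drains the ring. */
    static void bufioreq_event(XenIOState *state)
    {
        qemu_mod_timer(state->buffered_io_timer,
                       BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
    }

    /* Timer callback: keep draining while buffered requests are pending,
     * then stop the timer and unmask the port so the next guest write
     * wakes us up again. */
    static void bufioreq_timer(void *opaque)
    {
        XenIOState *state = opaque;

        if (handle_buffered_iopage(state)) {
            qemu_mod_timer(state->buffered_io_timer,
                           BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        } else {
            qemu_del_timer(state->buffered_io_timer);
            xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
        }
    }

The intent, per the cover letter, is that QEMU takes at most one wakeup per
batch of buffered io requests, instead of running the buffered io timer
unconditionally.]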
xen-all.c | 38 ++++++++++++++++++++++++++++++++------
1 files changed, 32 insertions(+), 6 deletions(-)
diff --git a/xen-all.c b/xen-all.c
index b5e28ab..b28d7e7 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -70,6 +70,8 @@ typedef struct XenIOState {
QEMUTimer *buffered_io_timer;
/* the evtchn port for polling the notification, */
evtchn_port_t *ioreq_local_port;
+ /* evtchn local port for buffered io */
+ evtchn_port_t bufioreq_local_port;
/* the evtchn fd for polling */
XenEvtchn xce_handle;
/* which vcpu we are serving */
@@ -516,6 +518,12 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
evtchn_port_t port;
port = xc_evtchn_pending(state->xce_handle);
+ if (port == state->bufioreq_local_port) {
+ qemu_mod_timer(state->buffered_io_timer,
+ BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+ return NULL;
+ }
+
if (port != -1) {
for (i = 0; i < smp_cpus; i++) {
if (state->ioreq_local_port[i] == port) {
@@ -664,16 +672,18 @@ static void handle_ioreq(ioreq_t *req)
}
}
-static void handle_buffered_iopage(XenIOState *state)
+static int handle_buffered_iopage(XenIOState *state)
{
buf_ioreq_t *buf_req = NULL;
ioreq_t req;
int qw;
if (!state->buffered_io_page) {
- return;
+ return 0;
}
+ memset(&req, 0x00, sizeof(req));
+
while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
buf_req = &state->buffered_io_page->buf_ioreq[
state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
@@ -698,15 +708,21 @@ static void handle_buffered_iopage(XenIOState *state)
xen_mb();
state->buffered_io_page->read_pointer += qw ? 2 : 1;
}
+
+ return req.count;
}
static void handle_buffered_io(void *opaque)
{
XenIOState *state = opaque;
- handle_buffered_iopage(state);
- qemu_mod_timer(state->buffered_io_timer,
- BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+ if (handle_buffered_iopage(state)) {
+ qemu_mod_timer(state->buffered_io_timer,
+ BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+ } else {
+ qemu_del_timer(state->buffered_io_timer);
+ xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+ }
}
static void cpu_handle_ioreq(void *opaque)
@@ -836,7 +852,6 @@ static void xen_main_loop_prepare(XenIOState *state)
state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
state);
- qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));
if (evtchn_fd != -1) {
qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
@@ -888,6 +903,7 @@ int xen_hvm_init(void)
{
int i, rc;
unsigned long ioreq_pfn;
+ unsigned long bufioreq_evtchn;
XenIOState *state;
state = g_malloc0(sizeof (XenIOState));
@@ -937,6 +953,16 @@ int xen_hvm_init(void)
state->ioreq_local_port[i] = rc;
}
+ xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
+ &bufioreq_evtchn);
+ rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+ (uint32_t)bufioreq_evtchn);
+ if (rc == -1) {
+ fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
+ return -1;
+ }
+ state->bufioreq_local_port = rc;
+
/* Init RAM management */
xen_map_cache_init();
xen_ram_init(ram_size);
--
1.7.2.3