From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
To: qemu-devel@nongnu.org
Cc: xen-devel@lists.xensource.com,
	Stefano Stabellini <stefano.stabellini@eu.citrix.com>,
	jan.kiszka@siemens.com, avi@redhat.com, pbonzini@redhat.com
Subject: [Qemu-devel] [PATCH v5 3/6] xen: introduce an event channel for buffered io event notifications
Date: Tue, 14 Feb 2012 15:07:15 +0000
Message-ID: <1329232038-12349-3-git-send-email-stefano.stabellini@eu.citrix.com>
In-Reply-To: <alpine.DEB.2.00.1202141455160.7456@kaball-desktop>

Use the newly introduced HVM_PARAM_BUFIOREQ_EVTCHN to receive
notifications for buffered io events.
After the first notification is received, leave the event channel masked
and set up a timer to process the rest of the batch.
Once the whole batch has been processed, unmask the event channel
and delete the timer.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
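Note for reviewers: the runtime flow is sketched below. This is an
illustration, not code from the patch; drain_buffered_ring() is a
hypothetical stand-in for handle_buffered_iopage() (which returns
non-zero if it processed any requests), and the handler names are
invented. All other calls appear in the diff itself.

    static void on_evtchn_pending(XenIOState *state)
    {
        evtchn_port_t port = xc_evtchn_pending(state->xce_handle);

        if (port == state->bufioreq_local_port) {
            /* First notification: leave the port masked and let a
             * timer drain the batch at BUFFER_IO_MAX_DELAY pace. */
            qemu_mod_timer(state->buffered_io_timer,
                           BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        }
    }

    static void on_buffered_io_timer(XenIOState *state)
    {
        if (drain_buffered_ring(state)) {
            /* Work was done; more may arrive, so keep polling. */
            qemu_mod_timer(state->buffered_io_timer,
                           BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        } else {
            /* Ring is empty: stop the timer and re-enable
             * event channel notifications. */
            qemu_del_timer(state->buffered_io_timer);
            xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
        }
    }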
 xen-all.c |   45 +++++++++++++++++++++++++++++++++++++++------
 1 files changed, 39 insertions(+), 6 deletions(-)
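Setup note: xen_hvm_init() wires the new port up in two steps, mirroring
the per-vcpu ioreq ports. In outline (error handling elided; only calls
already used by the patch):

    unsigned long bufioreq_evtchn;

    /* Ask Xen which remote event channel carries buffered-io
     * notifications for this domain. */
    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
                     &bufioreq_evtchn);

    /* Bind it to a local port, polled on the same xce_handle as
     * the per-vcpu ioreq ports. */
    state->bufioreq_local_port =
        xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                   (uint32_t)bufioreq_evtchn);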

diff --git a/xen-all.c b/xen-all.c
index 101c962..0ce8002 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -59,6 +59,9 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
 }
 #  define FMT_ioreq_size "u"
 #endif
+#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
+#define HVM_PARAM_BUFIOREQ_EVTCHN 26
+#endif
 
 #define BUFFER_IO_MAX_DELAY  100
 
@@ -77,6 +80,8 @@ typedef struct XenIOState {
     QEMUTimer *buffered_io_timer;
     /* the evtchn port for polling the notification, */
     evtchn_port_t *ioreq_local_port;
+    /* evtchn local port for buffered io */
+    evtchn_port_t bufioreq_local_port;
     /* the evtchn fd for polling */
     XenEvtchn xce_handle;
     /* which vcpu we are serving */
@@ -549,6 +554,12 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
     evtchn_port_t port;
 
     port = xc_evtchn_pending(state->xce_handle);
+    if (port == state->bufioreq_local_port) {
+        qemu_mod_timer(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+        return NULL;
+    }
+
     if (port != -1) {
         for (i = 0; i < smp_cpus; i++) {
             if (state->ioreq_local_port[i] == port) {
@@ -697,16 +708,18 @@ static void handle_ioreq(ioreq_t *req)
     }
 }
 
-static void handle_buffered_iopage(XenIOState *state)
+static int handle_buffered_iopage(XenIOState *state)
 {
     buf_ioreq_t *buf_req = NULL;
     ioreq_t req;
     int qw;
 
     if (!state->buffered_io_page) {
-        return;
+        return 0;
     }
 
+    memset(&req, 0x00, sizeof(req));
+
     while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
         buf_req = &state->buffered_io_page->buf_ioreq[
             state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
@@ -731,15 +744,21 @@ static void handle_buffered_iopage(XenIOState *state)
         xen_mb();
         state->buffered_io_page->read_pointer += qw ? 2 : 1;
     }
+
+    return req.count;
 }
 
 static void handle_buffered_io(void *opaque)
 {
     XenIOState *state = opaque;
 
-    handle_buffered_iopage(state);
-    qemu_mod_timer(state->buffered_io_timer,
-                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+    if (handle_buffered_iopage(state)) {
+        qemu_mod_timer(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+    } else {
+        qemu_del_timer(state->buffered_io_timer);
+        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+    }
 }
 
 static void cpu_handle_ioreq(void *opaque)
@@ -869,7 +888,6 @@ static void xen_main_loop_prepare(XenIOState *state)
 
     state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                  state);
-    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));
 
     if (evtchn_fd != -1) {
         qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
@@ -921,6 +939,7 @@ int xen_hvm_init(void)
 {
     int i, rc;
     unsigned long ioreq_pfn;
+    unsigned long bufioreq_evtchn;
     XenIOState *state;
 
     state = g_malloc0(sizeof (XenIOState));
@@ -970,6 +989,20 @@ int xen_hvm_init(void)
         state->ioreq_local_port[i] = rc;
     }
 
+    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
+            &bufioreq_evtchn);
+    if (rc < 0) {
+        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
+        return -1;
+    }
+    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+            (uint32_t)bufioreq_evtchn);
+    if (rc == -1) {
+        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
+        return -1;
+    }
+    state->bufioreq_local_port = rc;
+
     /* Init RAM management */
     xen_map_cache_init();
     xen_ram_init(ram_size);
-- 
1.7.2.5
