xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: <stefano.stabellini@eu.citrix.com>
To: xen-devel@lists.xensource.com
Cc: Ian.Jackson@eu.citrix.com,
	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [PATCH 1/3] xen: introduce an event channel for buffered io event notifications
Date: Fri, 18 Nov 2011 14:05:23 +0000	[thread overview]
Message-ID: <1321625125-30726-1-git-send-email-stefano.stabellini@eu.citrix.com> (raw)
In-Reply-To: <alpine.DEB.2.00.1111181351190.3519@kaball-desktop>

From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Use the newly introduced HVM_PARAM_BUFIOREQ_EVTCHN to receive
notifications for buffered io events.
After the first notification is received, leave the event channel masked
and set up a timer to process the rest of the batch.
Once we have completed processing the batch, unmask the event channel
and delete the timer.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 i386-dm/helper2.c |   41 +++++++++++++++++++++++++++++++++++------
 1 files changed, 35 insertions(+), 6 deletions(-)

diff --git a/i386-dm/helper2.c b/i386-dm/helper2.c
index 481c620..b121e30 100644
--- a/i386-dm/helper2.c
+++ b/i386-dm/helper2.c
@@ -111,12 +111,15 @@ int send_vcpu = 0;
 
 //the evtchn port for polling the notification,
 evtchn_port_t *ioreq_local_port;
+/* evtchn local port for buffered io */
+evtchn_port_t bufioreq_local_port;
 
 CPUX86State *cpu_x86_init(const char *cpu_model)
 {
     CPUX86State *env;
     static int inited;
     int i, rc;
+    unsigned long bufioreq_evtchn;
 
     env = qemu_mallocz(sizeof(CPUX86State));
     if (!env)
@@ -154,6 +157,19 @@ CPUX86State *cpu_x86_init(const char *cpu_model)
             }
             ioreq_local_port[i] = rc;
         }
+        rc = xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_EVTCHN,
+                &bufioreq_evtchn);
+        if (rc < 0) {
+            fprintf(logfile, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN error=%d\n",
+                    errno);
+            return NULL;
+        }
+        rc = xc_evtchn_bind_interdomain(xce_handle, domid, (uint32_t)bufioreq_evtchn);
+        if (rc == -1) {
+            fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
+            return NULL;
+        }
+        bufioreq_local_port = rc;
     }
 
     return env;
@@ -263,6 +279,12 @@ static ioreq_t *cpu_get_ioreq(void)
     evtchn_port_t port;
 
     port = xc_evtchn_pending(xce_handle);
+    if (port == bufioreq_local_port) {
+        qemu_mod_timer(buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_get_clock(rt_clock));
+        return NULL;
+    }
+ 
     if (port != -1) {
         for ( i = 0; i < vcpus; i++ )
             if ( ioreq_local_port[i] == port )
@@ -459,14 +481,16 @@ static void __handle_ioreq(CPUState *env, ioreq_t *req)
     }
 }
 
-static void __handle_buffered_iopage(CPUState *env)
+static int __handle_buffered_iopage(CPUState *env)
 {
     buf_ioreq_t *buf_req = NULL;
     ioreq_t req;
     int qw;
 
     if (!buffered_io_page)
-        return;
+        return 0;
+
+    memset(&req, 0x00, sizeof(req));
 
     while (buffered_io_page->read_pointer !=
            buffered_io_page->write_pointer) {
@@ -493,15 +517,21 @@ static void __handle_buffered_iopage(CPUState *env)
         xen_mb();
         buffered_io_page->read_pointer += qw ? 2 : 1;
     }
+
+    return req.count;
 }
 
 static void handle_buffered_io(void *opaque)
 {
     CPUState *env = opaque;
 
-    __handle_buffered_iopage(env);
-    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
-		   qemu_get_clock(rt_clock));
+    if (__handle_buffered_iopage(env)) {
+        qemu_mod_timer(buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_get_clock(rt_clock));
+    } else {
+        qemu_del_timer(buffered_io_timer);
+        xc_evtchn_unmask(xce_handle, bufioreq_local_port);
+    }
 }
 
 static void cpu_handle_ioreq(void *opaque)
@@ -561,7 +591,6 @@ int main_loop(void)
 
     buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
 				       cpu_single_env);
-    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
 
     if (evtchn_fd != -1)
         qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
-- 
1.7.2.5

  reply	other threads:[~2011-11-18 14:05 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2011-11-18 14:04 [PATCH 0/3] prevent qemu-xen-traditional from waking up needlessly Stefano Stabellini
2011-11-18 14:05 ` stefano.stabellini [this message]
2012-04-03 14:43   ` [PATCH 1/3] xen: introduce an event channel for buffered io event notifications Ian Jackson
2011-11-18 14:05 ` [PATCH 2/3] xen: don't initialize the RTC timers if xen is available stefano.stabellini
2011-11-18 14:05 ` [PATCH 3/3] increase minimum timeout to 1h stefano.stabellini
2011-12-01 17:56 ` [PATCH 0/3] prevent qemu-xen-traditional from waking up needlessly Ian Jackson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1321625125-30726-1-git-send-email-stefano.stabellini@eu.citrix.com \
    --to=stefano.stabellini@eu.citrix.com \
    --cc=Ian.Jackson@eu.citrix.com \
    --cc=xen-devel@lists.xensource.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).