kvm.vger.kernel.org archive mirror
* [PATCH] qemu-kvm: Flush icache after dma operations for ia64
@ 2009-05-11 10:20 Zhang, Xiantao
  2009-05-11 11:11 ` Avi Kivity
From: Zhang, Xiantao @ 2009-05-11 10:20 UTC
  To: Avi Kivity
  Cc: kvm@vger.kernel.org, kvm-ia64@vger.kernel.org, Hollis Blanchard


Avi,
    This is the new patch for the icache flush after DMA emulation for ia64; it should address Hollis's comments.
Xiantao

From 60a27e2ea9758c97e974aa5bb1925ad4ed045c5f Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@intel.com>
Date: Mon, 11 May 2009 18:04:15 +0800
Subject: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
 
ia64 systems depend on the platform issuing snoop cycles to flush
the icache for memory touched by DMA write operations, but virtual
DMA is emulated with memcpy, so use explicit instructions to flush
the affected icache lines; otherwise the guest may execute stale
icache contents.
 
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
---
 cache-utils.h           |   10 ++++++++++
 cutils.c                |   14 ++++++++++++++
 dma-helpers.c           |   21 +++++++++++++++++++++
 exec.c                  |    9 +++++++--
 target-ia64/cpu.h       |    1 -
 target-ia64/fake-exec.c |    9 ---------
 6 files changed, 52 insertions(+), 12 deletions(-)
 
diff --git a/cache-utils.h b/cache-utils.h
index b45fde4..db850ba 100644
--- a/cache-utils.h
+++ b/cache-utils.h
@@ -34,6 +34,16 @@ static inline void flush_icache_range(unsigned long start, unsigned long stop)
     asm volatile ("isync" : : : "memory");
 }
 
+#elif defined(__ia64__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+    while (start < stop) {
+	asm volatile ("fc %0" :: "r"(start));
+	start += 32;
+    }
+    asm volatile (";;sync.i;;srlz.i;;");
+}
+#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
 #else
 #define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
 #endif
diff --git a/cutils.c b/cutils.c
index a1652ab..6b7d506 100644
--- a/cutils.c
+++ b/cutils.c
@@ -25,6 +25,10 @@
 #include "host-utils.h"
 #include <assert.h>
 
+#ifdef __ia64__
+#include "cache-utils.h"
+#endif
+
 void pstrcpy(char *buf, int buf_size, const char *str)
 {
     int c;
@@ -176,6 +180,16 @@ void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
         if (copy > qiov->iov[i].iov_len)
             copy = qiov->iov[i].iov_len;
         memcpy(qiov->iov[i].iov_base, p, copy);
+
+	/* ia64 systems depend on the platform issuing snoop cycles to flush
+	 * the icache for memory touched by DMA writes, but virtual DMA is
+	 * emulated with memcpy, so flush the affected icache lines explicitly;
+	 * otherwise the guest may execute stale icache contents. */
+#ifdef __ia64__
+	flush_icache_range((unsigned long)qiov->iov[i].iov_base,
+	                   (unsigned long)(qiov->iov[i].iov_base + copy));
+#endif
+
         p     += copy;
         count -= copy;
     }
diff --git a/dma-helpers.c b/dma-helpers.c
index f9eb224..b895099 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -10,6 +10,10 @@
 #include "dma.h"
 #include "block_int.h"
 
+#ifdef __ia64__
+#include "cache-utils.h"
+#endif
+
 static AIOPool dma_aio_pool;
 
 void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
@@ -149,6 +153,23 @@ static BlockDriverAIOCB *dma_bdrv_io(
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_bdrv_cb(dbs, 0);
+
+    /* ia64 systems depend on the platform issuing snoop cycles to flush
+     * the icache for memory touched by DMA writes, but virtual DMA is
+     * emulated with memcpy, so flush the affected icache lines explicitly;
+     * otherwise the guest may execute stale icache contents. */
+#ifdef __ia64__
+    int i;
+    QEMUIOVector *qiov;
+    if (!is_write) {
+        qiov = &dbs->iov;
+        for (i = 0; i < qiov->niov; ++i) {
+            flush_icache_range((unsigned long)qiov->iov[i].iov_base,
+                (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len));
+	}
+    }
+#endif
+
     if (!dbs->acb) {
         qemu_aio_release(dbs);
         return NULL;
diff --git a/exec.c b/exec.c
index 29c91fb..170ede1 100644
--- a/exec.c
+++ b/exec.c
@@ -35,6 +35,7 @@
 #include "cpu.h"
 #include "exec-all.h"
 #include "qemu-common.h"
+#include "cache-utils.h"
 
 #if !defined(TARGET_IA64)
 #include "tcg.h"
@@ -3402,8 +3403,12 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                 }
                 addr1 += l;
                 access_len -= l;
-            }
-        }
+	    }
+#ifdef TARGET_IA64
+	    flush_icache_range((unsigned long)buffer,
+			    (unsigned long)buffer + access_len);
+#endif
+       }
         return;
     }
     if (is_write) {
diff --git a/target-ia64/cpu.h b/target-ia64/cpu.h
index e002d56..fb51463 100644
--- a/target-ia64/cpu.h
+++ b/target-ia64/cpu.h
@@ -71,7 +71,6 @@ static inline int cpu_mmu_index (CPUState *env)
  * These ones really should go to the appropriate tcg header file, if/when
  * tcg support is added for ia64.
  */
-void flush_icache_range(unsigned long start, unsigned long stop);
 void tcg_dump_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
 
diff --git a/target-ia64/fake-exec.c b/target-ia64/fake-exec.c
index c11cc32..8d6ded0 100644
--- a/target-ia64/fake-exec.c
+++ b/target-ia64/fake-exec.c
@@ -41,15 +41,6 @@ void tcg_dump_info(FILE *f,
     return;
 }
 
-void flush_icache_range(unsigned long start, unsigned long stop)
-{
-    while (start < stop) {
- asm volatile ("fc %0" :: "r"(start));
- start += 32;
-    }
-    asm volatile (";;sync.i;;srlz.i;;");
-}
-
 int cpu_restore_state(TranslationBlock *tb,
                       CPUState *env, unsigned long searched_pc,
                       void *puc)
-- 
1.6.0
 
 



* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-05-11 10:20 [PATCH] qemu-kvm: Flush icache after dma operations for ia64 Zhang, Xiantao
@ 2009-05-11 11:11 ` Avi Kivity
  2009-05-25 10:55   ` Jes Sorensen
From: Avi Kivity @ 2009-05-11 11:11 UTC
  To: Zhang, Xiantao
  Cc: kvm@vger.kernel.org, kvm-ia64@vger.kernel.org, Hollis Blanchard

Zhang, Xiantao wrote:
> Avi,
>     This is the new patch for the icache flush after DMA emulation for ia64; it should address Hollis's comments.
> Xiantao
>
> From 60a27e2ea9758c97e974aa5bb1925ad4ed045c5f Mon Sep 17 00:00:00 2001
> From: Xiantao Zhang <xiantao.zhang@intel.com>
> Date: Mon, 11 May 2009 18:04:15 +0800
> Subject: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
>  
> ia64 systems depend on the platform issuing snoop cycles to flush
> the icache for memory touched by DMA write operations, but virtual
> DMA is emulated with memcpy, so use explicit instructions to flush
> the affected icache lines; otherwise the guest may execute stale
> icache contents.
>  
>  
> +#elif defined(__ia64__)
> +static inline void flush_icache_range(unsigned long start, unsigned long stop)
> +{
> +    while (start < stop) {
> +	asm volatile ("fc %0" :: "r"(start));
> +	start += 32;
> +    }
> +    asm volatile (";;sync.i;;srlz.i;;");
> +}
> +#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
>  #else
>  #define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
>  #endif
> diff --git a/cutils.c b/cutils.c
> index a1652ab..6b7d506 100644
> --- a/cutils.c
> +++ b/cutils.c
> @@ -25,6 +25,10 @@
>  #include "host-utils.h"
>  #include <assert.h>
>  
> +#ifdef __ia64__
> +#include "cache-utils.h"
> +#endif
>   

#includes should be unconditional.

> +
>  void pstrcpy(char *buf, int buf_size, const char *str)
>  {
>      int c;
> @@ -176,6 +180,16 @@ void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
>          if (copy > qiov->iov[i].iov_len)
>              copy = qiov->iov[i].iov_len;
>          memcpy(qiov->iov[i].iov_base, p, copy);
> +
> +	/* ia64 systems depend on the platform issuing snoop cycles to flush
> +	 * the icache for memory touched by DMA writes, but virtual DMA is
> +	 * emulated with memcpy, so flush the affected icache lines explicitly;
> +	 * otherwise the guest may execute stale icache contents. */
> +#ifdef __ia64__
> +	flush_icache_range((unsigned long)qiov->iov[i].iov_base,
> +	                   (unsigned long)(qiov->iov[i].iov_base + copy));
> +#endif
>   

Instead of the #ifdef, please add a flush_icache_range_after_dma() 
function which does flush_icache_range() for ia64 and nothing for other 
architectures.  This avoids the #ifdef everywhere, and you will only 
need one copy of the comment.

I don't think you need to flush here.  Instead, put the flush in 
cpu_physical_memory_unmap().  Every dma access should invoke that.  If 
you find a path which doesn't, it should be fixed.

>  
> +#ifdef __ia64__
> +#include "cache-utils.h"
> +#endif
>   

Unconditional #include.

> +
>  static AIOPool dma_aio_pool;
>  
>  void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
> @@ -149,6 +153,23 @@ static BlockDriverAIOCB *dma_bdrv_io(
>      dbs->bh = NULL;
>      qemu_iovec_init(&dbs->iov, sg->nsg);
>      dma_bdrv_cb(dbs, 0);
> +
> +    /* ia64 systems depend on the platform issuing snoop cycles to flush
> +     * the icache for memory touched by DMA writes, but virtual DMA is
> +     * emulated with memcpy, so flush the affected icache lines explicitly;
> +     * otherwise the guest may execute stale icache contents. */
> +#ifdef __ia64__
> +    int i;
> +    QEMUIOVector *qiov;
> +    if (!is_write) {
> +        qiov = &dbs->iov;
> +        for (i = 0; i < qiov->niov; ++i) {
> +            flush_icache_range((unsigned long)qiov->iov[i].iov_base,
> +                (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len));
> +	}
> +    }
> +#endif
>   

If you move the flush to cpu_physical_memory_unmap(), this can go away.

> +
>      if (!dbs->acb) {
>          qemu_aio_release(dbs);
>          return NULL;
> diff --git a/exec.c b/exec.c
> index 29c91fb..170ede1 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -35,6 +35,7 @@
>  #include "cpu.h"
>  #include "exec-all.h"
>  #include "qemu-common.h"
> +#include "cache-utils.h"
>  
>  #if !defined(TARGET_IA64)
>  #include "tcg.h"
> @@ -3402,8 +3403,12 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
>                  }
>                  addr1 += l;
>                  access_len -= l;
> -            }
> -        }
> +	    }
> +#ifdef TARGET_IA64
> +	    flush_icache_range((unsigned long)buffer,
> +			    (unsigned long)buffer + access_len);
> +#endif
> +       }
>   

Whitespace damage.

access_len here is zero, so this does nothing.


-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-05-11 11:11 ` Avi Kivity
@ 2009-05-25 10:55   ` Jes Sorensen
  2009-05-25 10:56     ` Jes Sorensen
From: Jes Sorensen @ 2009-05-25 10:55 UTC
  To: Avi Kivity
  Cc: Zhang, Xiantao, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Hi,

Looking at Avi's comments, I came up with a slightly modified version
of this patch, which avoids the #ifdef __ia64__ problem by introducing
dma_flush_range().

I have made it a no-op on non-ia64 hosts, but it may need to be changed
for PPC.  Hollis, would you check whether you need this too?

It also fixes the problem of trying to flush a zero-sized block in
cpu_physical_memory_unmap().

Xiantao, are you ok with this version?

Cheers,
Jes

[...]



* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-05-25 10:55   ` Jes Sorensen
@ 2009-05-25 10:56     ` Jes Sorensen
  2009-05-25 11:25       ` Avi Kivity
From: Jes Sorensen @ 2009-05-25 10:56 UTC
  To: Avi Kivity
  Cc: Zhang, Xiantao, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

ARGH!

I think the attachment was lost :-( Trying again.

Jes



>>>>> "Jes" == Jes Sorensen <jes@sgi.com> writes:

Jes> Hi, Looking at Avi's comments, I came up with a slightly modified
Jes> version of this patch, which avoids the #ifdef __ia64__ problem
Jes> by introducing dma_flush_range().

Jes> I have made it a no-op on non-ia64 hosts, but it may need to be
Jes> changed for PPC.  Hollis, would you check whether you need this too?

Jes> It also fixes the problem of trying to flush a zero-sized block
Jes> in cpu_physical_memory_unmap().

Jes> Xiantao, are you ok with this version?

Jes> Cheers, Jes

ia64 systems depend on the platform issuing snoop cycles to flush
the icache for memory touched by DMA write operations, but virtual
DMA is emulated with memcpy, so use explicit instructions to flush
the affected icache lines; otherwise the guest may execute stale
icache contents.

Slightly modified version of Xiantao's patch, which avoids the #ifdefs
for ia64 by introducing a dma_flush_range() function defined as a no-op
on architectures which do not need it.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>

---
 cache-utils.h           |   19 +++++++++++++++++++
 cutils.c                |    5 +++++
 dma-helpers.c           |   12 ++++++++++++
 exec.c                  |    7 ++++++-
 target-ia64/cpu.h       |    1 -
 target-ia64/fake-exec.c |    9 ---------
 6 files changed, 42 insertions(+), 11 deletions(-)

Index: qemu-kvm/cache-utils.h
===================================================================
--- qemu-kvm.orig/cache-utils.h
+++ qemu-kvm/cache-utils.h
@@ -34,7 +34,26 @@
     asm volatile ("isync" : : : "memory");
 }
 
+/*
+ * Is this correct for PPC?
+ */
+#define dma_flush_range(start, end)                  \
+    do { (void) (start); (void) (end); } while (0)
+
+#elif defined(__ia64__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+    while (start < stop) {
+	asm volatile ("fc %0" :: "r"(start));
+	start += 32;
+    }
+    asm volatile (";;sync.i;;srlz.i;;");
+}
+#define dma_flush_range(start, end) flush_icache_range(start, end)
+#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
 #else
+#define dma_flush_range(start, end)              \
+    do { (void) (start); (void) (end); } while (0)
 #define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
 #endif
 
Index: qemu-kvm/cutils.c
===================================================================
--- qemu-kvm.orig/cutils.c
+++ qemu-kvm/cutils.c
@@ -24,6 +24,7 @@
 #include "qemu-common.h"
 #include "host-utils.h"
 #include <assert.h>
+#include "cache-utils.h"
 
 void pstrcpy(char *buf, int buf_size, const char *str)
 {
@@ -176,6 +177,10 @@
         if (copy > qiov->iov[i].iov_len)
             copy = qiov->iov[i].iov_len;
         memcpy(qiov->iov[i].iov_base, p, copy);
+
+        dma_flush_range((unsigned long)qiov->iov[i].iov_base,
+                        (unsigned long)qiov->iov[i].iov_base + copy);
+
         p     += copy;
         count -= copy;
     }
Index: qemu-kvm/dma-helpers.c
===================================================================
--- qemu-kvm.orig/dma-helpers.c
+++ qemu-kvm/dma-helpers.c
@@ -9,6 +9,7 @@
 
 #include "dma.h"
 #include "block_int.h"
+#include "cache-utils.h"
 
 static AIOPool dma_aio_pool;
 
@@ -149,6 +150,17 @@
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_bdrv_cb(dbs, 0);
+
+    if (!is_write) {
+        int i;
+        QEMUIOVector *qiov;
+        qiov = &dbs->iov;
+        for (i = 0; i < qiov->niov; ++i) {
+            dma_flush_range((unsigned long)qiov->iov[i].iov_base,
+                (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len));
+	}
+    }
+
     if (!dbs->acb) {
         qemu_aio_release(dbs);
         return NULL;
Index: qemu-kvm/exec.c
===================================================================
--- qemu-kvm.orig/exec.c
+++ qemu-kvm/exec.c
@@ -35,6 +35,7 @@
 #include "cpu.h"
 #include "exec-all.h"
 #include "qemu-common.h"
+#include "cache-utils.h"
 
 #if !defined(TARGET_IA64)
 #include "tcg.h"
@@ -3385,6 +3386,8 @@
 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                                int is_write, target_phys_addr_t access_len)
 {
+    unsigned long flush_len = (unsigned long)access_len;
+
     if (buffer != bounce.buffer) {
         if (is_write) {
             ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
@@ -3402,7 +3405,9 @@
                 }
                 addr1 += l;
                 access_len -= l;
-            }
+	    }
+	    dma_flush_range((unsigned long)buffer,
+			    (unsigned long)buffer + flush_len);
         }
         return;
     }
Index: qemu-kvm/target-ia64/cpu.h
===================================================================
--- qemu-kvm.orig/target-ia64/cpu.h
+++ qemu-kvm/target-ia64/cpu.h
@@ -73,7 +73,6 @@
  * These ones really should go to the appropriate tcg header file, if/when
  * tcg support is added for ia64.
  */
-void flush_icache_range(unsigned long start, unsigned long stop);
 void tcg_dump_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
 
Index: qemu-kvm/target-ia64/fake-exec.c
===================================================================
--- qemu-kvm.orig/target-ia64/fake-exec.c
+++ qemu-kvm/target-ia64/fake-exec.c
@@ -41,15 +41,6 @@
     return;
 }
 
-void flush_icache_range(unsigned long start, unsigned long stop)
-{
-    while (start < stop) {
-	asm volatile ("fc %0" :: "r"(start));
-	start += 32;
-    }
-    asm volatile (";;sync.i;;srlz.i;;");
-}
-
 int cpu_restore_state(TranslationBlock *tb,
                       CPUState *env, unsigned long searched_pc,
                       void *puc)


* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-05-25 10:56     ` Jes Sorensen
@ 2009-05-25 11:25       ` Avi Kivity
  2009-05-25 13:12         ` Jes Sorensen
From: Avi Kivity @ 2009-05-25 11:25 UTC
  To: Jes Sorensen
  Cc: Zhang, Xiantao, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Jes Sorensen wrote:
>  
> +/*
> + * Is this correct for PPC?
> + */
> +#define dma_flush_range(start, end)                  \
> +    do { (void) (start); (void) (end); } while (0)
>   

Correct or not, it should be a function.

> +#define dma_flush_range(start, end)              \
> +    do { (void) (start); (void) (end); } while (0)
>  
>   
Another function.

> @@ -24,6 +24,7 @@
>  #include "qemu-common.h"
>  #include "host-utils.h"
>  #include <assert.h>
> +#include "cache-utils.h"
>  
>  void pstrcpy(char *buf, int buf_size, const char *str)
>  {
> @@ -176,6 +177,10 @@
>          if (copy > qiov->iov[i].iov_len)
>              copy = qiov->iov[i].iov_len;
>          memcpy(qiov->iov[i].iov_base, p, copy);
> +
> +        dma_flush_range((unsigned long)qiov->iov[i].iov_base,
> +                        (unsigned long)qiov->iov[i].iov_base + copy);
> +
>   

Bogus.

>  static AIOPool dma_aio_pool;
>  
> @@ -149,6 +150,17 @@
>      dbs->bh = NULL;
>      qemu_iovec_init(&dbs->iov, sg->nsg);
>      dma_bdrv_cb(dbs, 0);
> +
> +    if (!is_write) {
> +        int i;
> +        QEMUIOVector *qiov;
> +        qiov = &dbs->iov;
> +        for (i = 0; i < qiov->niov; ++i) {
> +            dma_flush_range((unsigned long)qiov->iov[i].iov_base,
> +                (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len));
> +	}
> +    }
> +
>   

cpu_physical_memory_unmap() will do this for you...

>  
>  #if !defined(TARGET_IA64)
>  #include "tcg.h"
> @@ -3385,6 +3386,8 @@
>  void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
>                                 int is_write, target_phys_addr_t access_len)
>  {
> +    unsigned long flush_len = (unsigned long)access_len;
> +
>      if (buffer != bounce.buffer) {
>          if (is_write) {
>              ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
> @@ -3402,7 +3405,9 @@
>                  }
>                  addr1 += l;
>                  access_len -= l;
> -            }
> +	    }
> +	    dma_flush_range((unsigned long)buffer,
> +			    (unsigned long)buffer + flush_len);
>          }
>   

... here.

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-05-25 11:25       ` Avi Kivity
@ 2009-05-25 13:12         ` Jes Sorensen
  2009-05-26 12:30           ` Avi Kivity
From: Jes Sorensen @ 2009-05-25 13:12 UTC
  To: Avi Kivity
  Cc: Zhang, Xiantao, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard


Ok,

Trying once more. After spending a couple of hours following the QEMU
DMA code flow, I have convinced myself that Avi is right and those two
functions don't need to do the flushing, as they all end up calling
dma_bdrv_cb(), which calls dma_bdrv_unmap(). I have added a couple of
comments to the code, which will hopefully save the next person the
'pleasure' of figuring this out too.

Cheers,
Jes


[-- Attachment #2: 0001-qemu-kvm-Flush-icache-after-dma-operations-for-ia64-v3.patch --]
[-- Type: text/x-patch, Size: 4678 bytes --]

ia64 systems depend on the platform issuing snoop cycles to flush
the icache for memory touched by DMA write operations, but virtual
DMA is emulated with memcpy, so use explicit instructions to flush
the affected icache lines; otherwise the guest may execute stale
icache contents.

Slightly modified version of Xiantao's patch, which avoids the #ifdefs
for ia64 by introducing a dma_flush_range() function defined as a no-op
on architectures which do not need it.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>

---
 cache-utils.h           |   21 +++++++++++++++++++++
 cutils.c                |    5 +++++
 dma-helpers.c           |    4 ++++
 exec.c                  |    7 ++++++-
 target-ia64/cpu.h       |    1 -
 target-ia64/fake-exec.c |    9 ---------
 6 files changed, 36 insertions(+), 11 deletions(-)

Index: qemu-kvm/cache-utils.h
===================================================================
--- qemu-kvm.orig/cache-utils.h
+++ qemu-kvm/cache-utils.h
@@ -34,7 +34,28 @@
     asm volatile ("isync" : : : "memory");
 }
 
+/*
+ * Is this correct for PPC?
+ */
+static inline void dma_flush_range(unsigned long start, unsigned long stop)
+{
+}
+
+#elif defined(__ia64__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+    while (start < stop) {
+	asm volatile ("fc %0" :: "r"(start));
+	start += 32;
+    }
+    asm volatile (";;sync.i;;srlz.i;;");
+}
+#define dma_flush_range(start, end) flush_icache_range(start, end)
+#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
 #else
+static inline void dma_flush_range(unsigned long start, unsigned long stop)
+{
+}
 #define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
 #endif
 
Index: qemu-kvm/cutils.c
===================================================================
--- qemu-kvm.orig/cutils.c
+++ qemu-kvm/cutils.c
@@ -165,6 +165,11 @@
     }
 }
 
+/*
+ * No DMA flushing is needed here, as the aio code will call dma_bdrv_cb()
+ * on completion as well, which results in a call to dma_bdrv_unmap(),
+ * which does the flushing.
+ */
 void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
 {
     const uint8_t *p = (const uint8_t *)buf;
Index: qemu-kvm/dma-helpers.c
===================================================================
--- qemu-kvm.orig/dma-helpers.c
+++ qemu-kvm/dma-helpers.c
@@ -148,6 +148,10 @@
     dbs->is_write = is_write;
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
+    /*
+     * DMA flushing is handled by dma_bdrv_cb() calling dma_bdrv_unmap(),
+     * so we don't need to do it here.
+     */
     dma_bdrv_cb(dbs, 0);
     if (!dbs->acb) {
         qemu_aio_release(dbs);
Index: qemu-kvm/exec.c
===================================================================
--- qemu-kvm.orig/exec.c
+++ qemu-kvm/exec.c
@@ -35,6 +35,7 @@
 #include "cpu.h"
 #include "exec-all.h"
 #include "qemu-common.h"
+#include "cache-utils.h"
 
 #if !defined(TARGET_IA64)
 #include "tcg.h"
@@ -3385,6 +3386,8 @@
 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                                int is_write, target_phys_addr_t access_len)
 {
+    unsigned long flush_len = (unsigned long)access_len;
+
     if (buffer != bounce.buffer) {
         if (is_write) {
             ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
@@ -3402,7 +3405,9 @@
                 }
                 addr1 += l;
                 access_len -= l;
-            }
+	    }
+	    dma_flush_range((unsigned long)buffer,
+			    (unsigned long)buffer + flush_len);
         }
         return;
     }
Index: qemu-kvm/target-ia64/cpu.h
===================================================================
--- qemu-kvm.orig/target-ia64/cpu.h
+++ qemu-kvm/target-ia64/cpu.h
@@ -73,7 +73,6 @@
  * These ones really should go to the appropriate tcg header file, if/when
  * tcg support is added for ia64.
  */
-void flush_icache_range(unsigned long start, unsigned long stop);
 void tcg_dump_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
 
Index: qemu-kvm/target-ia64/fake-exec.c
===================================================================
--- qemu-kvm.orig/target-ia64/fake-exec.c
+++ qemu-kvm/target-ia64/fake-exec.c
@@ -41,15 +41,6 @@
     return;
 }
 
-void flush_icache_range(unsigned long start, unsigned long stop)
-{
-    while (start < stop) {
-	asm volatile ("fc %0" :: "r"(start));
-	start += 32;
-    }
-    asm volatile (";;sync.i;;srlz.i;;");
-}
-
 int cpu_restore_state(TranslationBlock *tb,
                       CPUState *env, unsigned long searched_pc,
                       void *puc)


* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-05-25 13:12         ` Jes Sorensen
@ 2009-05-26 12:30           ` Avi Kivity
  2009-06-01  5:40             ` Zhang, Xiantao
From: Avi Kivity @ 2009-05-26 12:30 UTC
  To: Jes Sorensen
  Cc: Zhang, Xiantao, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Jes Sorensen wrote:
> Ok,
>
> Trying once more. After spending a couple of hours following the QEMU
> DMA code flow, I have convinced myself that Avi is right and those
> two functions don't need to do the flushing, as they all end up calling
> dma_bdrv_cb(), which calls dma_bdrv_unmap(). I have added a couple
> of comments to the code, which will hopefully save the next person the
> 'pleasure' of figuring this out too.
>

It looks right to me.  Xiantao?

>                  access_len -= l;
> -            }
> +	    }
> +	    dma_flush_range((unsigned long)buffer,
> +			    (unsigned long)buffer + flush_len);
>          }
>       

Detab your code, please.

-- 
error compiling committee.c: too many arguments to function



* RE: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-05-26 12:30           ` Avi Kivity
@ 2009-06-01  5:40             ` Zhang, Xiantao
  2009-06-01  7:45               ` Avi Kivity
From: Zhang, Xiantao @ 2009-06-01  5:40 UTC
  To: Avi Kivity, Jes Sorensen
  Cc: kvm@vger.kernel.org, kvm-ia64@vger.kernel.org, Hollis Blanchard

Avi Kivity wrote:
> Jes Sorensen wrote:
>> Ok,
>> 
>> Trying once more. After spending a couple of hours following the
>> QEMU DMA code flow, I have convinced myself that Avi is right and
>> those two functions don't need to do the flushing, as they all end up
>> calling dma_bdrv_cb(), which calls dma_bdrv_unmap(). I have added a
>> couple of comments to the code, which will hopefully save the next
>> person the 'pleasure' of figuring this out too.
>> 
> 
> It looks right to me.  Xiantao?

Fine by me.  But it seems the change in qemu_iovec_from_buffer() was dropped from this patch; or is that change also unnecessary?

Xiantao


>>                  access_len -= l;
>> -            }
>> +	    }
>> +	    dma_flush_range((unsigned long)buffer,
>> +			    (unsigned long)buffer + flush_len);
>>          }
>> 
> 
> Detab your code, please.



* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-06-01  5:40             ` Zhang, Xiantao
@ 2009-06-01  7:45               ` Avi Kivity
  2009-06-02 10:56                 ` Jes Sorensen
From: Avi Kivity @ 2009-06-01  7:45 UTC
  To: Zhang, Xiantao
  Cc: Jes Sorensen, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Zhang, Xiantao wrote:
> Avi Kivity wrote:
>   
>> Jes Sorensen wrote:
>>     
>>> Ok,
>>>
>>> Trying once more. After spending a couple of hours following the
>>> QEMU DMA code flow, I have convinced myself that Avi is right and
>>> those two functions don't need to do the flushing, as they all end
>>> up calling dma_bdrv_cb(), which calls dma_bdrv_unmap(). I have added
>>> a couple of comments to the code, which will hopefully save the next
>>> person the 'pleasure' of figuring this out too.
>>>
>>>       
>> It looks right to me.  Xiantao?
>>     
>
> Fine by me.  But it seems the change in qemu_iovec_from_buffer() was dropped from this patch; or is that change also unnecessary?
>   


I think the fixed unmap handles that case.  Can you test to make sure?

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.



* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-06-01  7:45               ` Avi Kivity
@ 2009-06-02 10:56                 ` Jes Sorensen
  2009-06-02 15:20                   ` Zhang, Xiantao
From: Jes Sorensen @ 2009-06-02 10:56 UTC
  To: Avi Kivity
  Cc: Zhang, Xiantao, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Avi Kivity wrote:
> Zhang, Xiantao wrote:
>> Fine by me.  But it seems the change in qemu_iovec_from_buffer() was
>> dropped from this patch; or is that change also unnecessary?
> 
> I think the fixed unmap handles that case.  Can you test to make sure?
> 

Avi and I went through the code and verified that it is all covered by
the unmap case. It was pretty messy to reach that conclusion, which is
why I tried to document it in the code and in the patch description.

Cheers,
Jes


* RE: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-06-02 10:56                 ` Jes Sorensen
@ 2009-06-02 15:20                   ` Zhang, Xiantao
  2009-06-04 13:09                     ` Jes Sorensen
From: Zhang, Xiantao @ 2009-06-02 15:20 UTC
  To: Jes Sorensen, Avi Kivity
  Cc: kvm@vger.kernel.org, kvm-ia64@vger.kernel.org, Hollis Blanchard

Hi, Jes
    Have you verified that it works for you?  You could run a kernel build in the guest with 4 vcpus; if it completes without any errors, I think it should be okay. Otherwise, we may need to investigate further. :)
Xiantao
 


	 

-----Original Message-----
From: Jes Sorensen [mailto:jes@sgi.com] 
Sent: Tuesday, June 02, 2009 6:57 PM
To: Avi Kivity
Cc: Zhang, Xiantao; kvm@vger.kernel.org; kvm-ia64@vger.kernel.org; Hollis Blanchard
Subject: Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64

Avi Kivity wrote:
> Zhang, Xiantao wrote:
>> Fine by me.  But it seems the change in qemu_iovec_from_buffer() was
>> dropped from this patch; or is that change also unnecessary?
> 
> I think the fixed unmap handles that case.  Can you test to make sure?
> 

Avi and I went through the code and verified that it is all covered by
the unmap case. It was pretty messy to reach that conclusion, which is
why I tried to document it in the code and in the patch description.

Cheers,
Jes


* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-06-02 15:20                   ` Zhang, Xiantao
@ 2009-06-04 13:09                     ` Jes Sorensen
  2009-06-05  1:38                       ` Zhang, Xiantao
From: Jes Sorensen @ 2009-06-04 13:09 UTC
  To: Zhang, Xiantao
  Cc: Avi Kivity, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Zhang, Xiantao wrote:
> Hi, Jes
>     Have you verified that it works for you?  You could run a kernel build in the guest with 4 vcpus; if it completes without any errors, I think it should be okay. Otherwise, we may need to investigate further. :)
> Xiantao 

Hi Xiantao,

I was able to run a 16 vCPU guest and build the kernel using make -j 16.
How quickly did the problem show up for you? On every run, or should I
run more tests?

Cheers,
Jes


* RE: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-06-04 13:09                     ` Jes Sorensen
@ 2009-06-05  1:38                       ` Zhang, Xiantao
  2009-06-05 11:13                         ` Jes Sorensen
From: Zhang, Xiantao @ 2009-06-05  1:38 UTC
  To: Jes Sorensen
  Cc: Avi Kivity, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Jes Sorensen wrote:
> Zhang, Xiantao wrote:
>> Hi, Jes
>>     Have you verified that it works for you?  You could run a kernel
>> build in the guest with 4 vcpus; if it completes without any errors,
>> I think it should be okay. Otherwise, we may need to investigate
>> further. :) Xiantao
> 
> Hi Xiantao,
> 
> I was able to run a 16 vCPU guest and build the kernel using make -j
> 16. How quickly did the problem show up for you? On every run, or
> should I run more tests?

Hi Jes, 
 Good news! On my machine, without the patch, an SMP guest can't complete a kernel build at all. So if you can build a kernel without errors and use it to boot the guest, it should work fine.
Xiantao





* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-06-05  1:38                       ` Zhang, Xiantao
@ 2009-06-05 11:13                         ` Jes Sorensen
  2009-06-07  6:28                           ` Avi Kivity
From: Jes Sorensen @ 2009-06-05 11:13 UTC
  To: Zhang, Xiantao
  Cc: Avi Kivity, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Zhang, Xiantao wrote:
> Hi Jes, 
>  Good news! On my machine, without the patch, an SMP guest can't complete a kernel build at all. So if you can build a kernel without errors and use it to boot the guest, it should work fine.
> Xiantao

Yep, compiles, boots, the works ...

Avi, I think we can conclude this patch is fine to go in. If it turns out
I did something wrong, you can all attack me later :-)


Cheers,
Jes


* Re: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
  2009-06-05 11:13                         ` Jes Sorensen
@ 2009-06-07  6:28                           ` Avi Kivity
From: Avi Kivity @ 2009-06-07  6:28 UTC
  To: Jes Sorensen
  Cc: Zhang, Xiantao, kvm@vger.kernel.org, kvm-ia64@vger.kernel.org,
	Hollis Blanchard

Jes Sorensen wrote:
> Zhang, Xiantao wrote:
>> Hi Jes,  Good news! On my machine, without the patch, an SMP guest can't
>> complete a kernel build at all. So if you can build a kernel without errors
>> and use it to boot the guest, it should work fine.  Xiantao
>
> Yep, compiles, boots, the works ...
>
> Avi, I think we can conclude this patch is fine to go in. If it turns
> out I did something wrong, you can all attack me later :-)

It's in, thanks.

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.


