* [PATCH] kvm: Flush coalesced MMIO buffer periodly
@ 2010-01-21 9:37 Sheng Yang
2010-01-21 10:19 ` Avi Kivity
0 siblings, 1 reply; 7+ messages in thread
From: Sheng Yang @ 2010-01-21 9:37 UTC (permalink / raw)
To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang
The default behavior of coalesced MMIO is to cache writes in a buffer until:
1. The buffer is full.
2. Or there is an exit to QEmu for other reasons.
But this can result in a very late write under some conditions:
1. Each individual MMIO write is small.
2. The interval between writes is long.
3. There is no need for input or frequent access to other devices.
This issue was observed in an experimental embedded system. The test image
simply prints "test" every second. The output in QEmu meets expectations,
but the output in KVM is delayed for seconds.
Per Avi's suggestion, I added periodic flushing of the coalesced MMIO buffer
in the QEmu IO thread. This way, we don't need the vcpu to explicitly exit to
QEmu to handle this issue. The current synchronization rate is 1/25s.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
qemu-kvm.c | 47 +++++++++++++++++++++++++++++++++++++++++++++--
qemu-kvm.h | 2 ++
2 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 599c3d6..38f890c 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -463,6 +463,12 @@ static void kvm_create_vcpu(CPUState *env, int id)
goto err_fd;
}
+#ifdef KVM_CAP_COALESCED_MMIO
+ if (kvm_state->coalesced_mmio && !kvm_state->coalesced_mmio_ring)
+ kvm_state->coalesced_mmio_ring = (void *) env->kvm_run +
+ kvm_state->coalesced_mmio * PAGE_SIZE;
+#endif
+
return;
err_fd:
close(env->kvm_fd);
@@ -927,8 +933,7 @@ int kvm_run(CPUState *env)
#if defined(KVM_CAP_COALESCED_MMIO)
if (kvm_state->coalesced_mmio) {
- struct kvm_coalesced_mmio_ring *ring =
- (void *) run + kvm_state->coalesced_mmio * PAGE_SIZE;
+ struct kvm_coalesced_mmio_ring *ring = kvm_state->coalesced_mmio_ring;
while (ring->first != ring->last) {
cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
&ring->coalesced_mmio[ring->first].data[0],
@@ -2073,6 +2078,29 @@ static void io_thread_wakeup(void *opaque)
}
}
+#ifdef KVM_CAP_COALESCED_MMIO
+
+/* flush interval is 1/25 second */
+#define KVM_COALESCED_MMIO_FLUSH_INTERVAL 40000000LL
+
+static void flush_coalesced_mmio_buffer(void *opaque)
+{
+ if (kvm_state->coalesced_mmio_ring) {
+ struct kvm_coalesced_mmio_ring *ring =
+ kvm_state->coalesced_mmio_ring;
+ while (ring->first != ring->last) {
+ cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
+ &ring->coalesced_mmio[ring->first].data[0],
+ ring->coalesced_mmio[ring->first].len, 1);
+ smp_wmb();
+ ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
+ }
+ }
+ qemu_mod_timer(kvm_state->coalesced_mmio_timer,
+ qemu_get_clock(host_clock) + KVM_COALESCED_MMIO_FLUSH_INTERVAL);
+}
+#endif
+
int kvm_main_loop(void)
{
int fds[2];
@@ -2117,6 +2145,15 @@ int kvm_main_loop(void)
io_thread_sigfd = sigfd;
cpu_single_env = NULL;
+#ifdef KVM_CAP_COALESCED_MMIO
+ if (kvm_state->coalesced_mmio) {
+ kvm_state->coalesced_mmio_timer =
+ qemu_new_timer(host_clock, flush_coalesced_mmio_buffer, NULL);
+ qemu_mod_timer(kvm_state->coalesced_mmio_timer,
+ qemu_get_clock(host_clock) + KVM_COALESCED_MMIO_FLUSH_INTERVAL);
+ }
+#endif
+
while (1) {
main_loop_wait(1000);
if (qemu_shutdown_requested()) {
@@ -2135,6 +2172,12 @@ int kvm_main_loop(void)
}
}
+#ifdef KVM_CAP_COALESCED_MMIO
+ if (kvm_state->coalesced_mmio) {
+ qemu_del_timer(kvm_state->coalesced_mmio_timer);
+ qemu_free_timer(kvm_state->coalesced_mmio_timer);
+ }
+#endif
pause_all_threads();
pthread_mutex_unlock(&qemu_mutex);
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 6b3e5a1..17f9d1b 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -1144,6 +1144,8 @@ typedef struct KVMState {
int fd;
int vmfd;
int coalesced_mmio;
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+ struct QEMUTimer *coalesced_mmio_timer;
int broken_set_mem_region;
int migration_log;
int vcpu_events;
--
1.5.4.5
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH] kvm: Flush coalesced MMIO buffer periodly
2010-01-21 9:37 [PATCH] kvm: Flush coalesced MMIO buffer periodly Sheng Yang
@ 2010-01-21 10:19 ` Avi Kivity
2010-01-22 2:22 ` Sheng Yang
0 siblings, 1 reply; 7+ messages in thread
From: Avi Kivity @ 2010-01-21 10:19 UTC (permalink / raw)
To: Sheng Yang; +Cc: Marcelo Tosatti, kvm
On 01/21/2010 11:37 AM, Sheng Yang wrote:
> The default action of coalesced MMIO is, cache the writing in buffer, until:
> 1. The buffer is full.
> 2. Or the exit to QEmu due to other reasons.
>
> But this would result in a very late writing in some condition.
> 1. The each time write to MMIO content is small.
> 2. The writing interval is big.
> 3. No need for input or accessing other devices frequently.
>
> This issue was observed in a experimental embbed system. The test image
> simply print "test" every 1 seconds. The output in QEmu meets expectation,
> but the output in KVM is delayed for seconds.
>
> Per Avi's suggestion, I add a periodly flushing coalesced MMIO buffer in
> QEmu IO thread. By this way, We don't need vcpu explicit exit to QEmu to
> handle this issue. Current synchronize rate is 1/25s.
>
>
I'm not sure that a new timer is needed. If the only problem case is
the display, maybe we can flush coalesced mmio from the vga refresh
timer. That ensures that we flash exactly when needed, and don't have
extra timers.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH] kvm: Flush coalesced MMIO buffer periodly
2010-01-21 10:19 ` Avi Kivity
@ 2010-01-22 2:22 ` Sheng Yang
2010-01-24 7:35 ` Avi Kivity
0 siblings, 1 reply; 7+ messages in thread
From: Sheng Yang @ 2010-01-22 2:22 UTC (permalink / raw)
To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang
The default action of coalesced MMIO is, cache the writing in buffer, until:
1. The buffer is full.
2. Or the exit to QEmu due to other reasons.
But this would result in a very late writing in some condition.
1. The each time write to MMIO content is small.
2. The writing interval is big.
3. No need for input or accessing other devices frequently.
This issue was observed in an experimental embedded system. The test image
simply prints "test" every second. The output in QEmu meets expectations,
but the output in KVM is delayed for seconds.
Per Avi's suggestion, I hooked a flush of the coalesced MMIO buffer into the
VGA update handler. This way, we don't need the vcpu to explicitly exit to
QEmu to handle this issue.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
Like this?
qemu-kvm.c | 26 ++++++++++++++++++++++++--
qemu-kvm.h | 6 ++++++
vl.c | 2 ++
3 files changed, 32 insertions(+), 2 deletions(-)
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 599c3d6..a9b5107 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -463,6 +463,12 @@ static void kvm_create_vcpu(CPUState *env, int id)
goto err_fd;
}
+#ifdef KVM_CAP_COALESCED_MMIO
+ if (kvm_state->coalesced_mmio && !kvm_state->coalesced_mmio_ring)
+ kvm_state->coalesced_mmio_ring = (void *) env->kvm_run +
+ kvm_state->coalesced_mmio * PAGE_SIZE;
+#endif
+
return;
err_fd:
close(env->kvm_fd);
@@ -927,8 +933,7 @@ int kvm_run(CPUState *env)
#if defined(KVM_CAP_COALESCED_MMIO)
if (kvm_state->coalesced_mmio) {
- struct kvm_coalesced_mmio_ring *ring =
- (void *) run + kvm_state->coalesced_mmio * PAGE_SIZE;
+ struct kvm_coalesced_mmio_ring *ring = kvm_state->coalesced_mmio_ring;
while (ring->first != ring->last) {
cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
&ring->coalesced_mmio[ring->first].data[0],
@@ -2073,6 +2078,23 @@ static void io_thread_wakeup(void *opaque)
}
}
+#ifdef KVM_CAP_COALESCED_MMIO
+void kvm_flush_coalesced_mmio_buffer(void)
+{
+ if (kvm_state->coalesced_mmio_ring) {
+ struct kvm_coalesced_mmio_ring *ring =
+ kvm_state->coalesced_mmio_ring;
+ while (ring->first != ring->last) {
+ cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
+ &ring->coalesced_mmio[ring->first].data[0],
+ ring->coalesced_mmio[ring->first].len, 1);
+ smp_wmb();
+ ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
+ }
+ }
+}
+#endif
+
int kvm_main_loop(void)
{
int fds[2];
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 6b3e5a1..8188ff6 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -1125,6 +1125,11 @@ static inline int kvm_set_migration_log(int enable)
return kvm_physical_memory_set_dirty_tracking(enable);
}
+#ifdef KVM_CAP_COALESCED_MMIO
+void kvm_flush_coalesced_mmio_buffer(void);
+#else
+void kvm_flush_coalesced_mmio_buffer(void) {}
+#endif
int kvm_irqchip_in_kernel(void);
#ifdef CONFIG_KVM
@@ -1144,6 +1149,7 @@ typedef struct KVMState {
int fd;
int vmfd;
int coalesced_mmio;
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
int broken_set_mem_region;
int migration_log;
int vcpu_events;
diff --git a/vl.c b/vl.c
index 9edea10..64902f2 100644
--- a/vl.c
+++ b/vl.c
@@ -3235,6 +3235,7 @@ static void gui_update(void *opaque)
interval = dcl->gui_timer_interval;
dcl = dcl->next;
}
+ kvm_flush_coalesced_mmio_buffer();
qemu_mod_timer(ds->gui_timer, interval + qemu_get_clock(rt_clock));
}
@@ -3242,6 +3243,7 @@ static void nographic_update(void *opaque)
{
uint64_t interval = GUI_REFRESH_INTERVAL;
+ kvm_flush_coalesced_mmio_buffer();
qemu_mod_timer(nographic_timer, interval + qemu_get_clock(rt_clock));
}
--
1.5.4.5
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH] kvm: Flush coalesced MMIO buffer periodly
2010-01-22 2:22 ` Sheng Yang
@ 2010-01-24 7:35 ` Avi Kivity
2010-01-25 7:45 ` Sheng Yang
0 siblings, 1 reply; 7+ messages in thread
From: Avi Kivity @ 2010-01-24 7:35 UTC (permalink / raw)
To: Sheng Yang; +Cc: Marcelo Tosatti, kvm
On 01/22/2010 04:22 AM, Sheng Yang wrote:
> The default action of coalesced MMIO is, cache the writing in buffer, until:
> 1. The buffer is full.
> 2. Or the exit to QEmu due to other reasons.
>
> But this would result in a very late writing in some condition.
> 1. The each time write to MMIO content is small.
> 2. The writing interval is big.
> 3. No need for input or accessing other devices frequently.
>
> This issue was observed in a experimental embbed system. The test image
> simply print "test" every 1 seconds. The output in QEmu meets expectation,
> but the output in KVM is delayed for seconds.
>
> Per Avi's suggestion, I hooked a flushing for coalesced MMIO buffer in VGA
> update handler. By this way, We don't need vcpu explicit exit to QEmu to
> handle this issue.
>
> Signed-off-by: Sheng Yang<sheng@linux.intel.com>
> ---
>
> Like this?
>
> qemu-kvm.c | 26 ++++++++++++++++++++++++--
> qemu-kvm.h | 6 ++++++
> vl.c | 2 ++
> 3 files changed, 32 insertions(+), 2 deletions(-)
>
>
>
> +#ifdef KVM_CAP_COALESCED_MMIO
> +void kvm_flush_coalesced_mmio_buffer(void)
> +{
> + if (kvm_state->coalesced_mmio_ring) {
> + struct kvm_coalesced_mmio_ring *ring =
> + kvm_state->coalesced_mmio_ring;
> + while (ring->first != ring->last) {
> + cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
> +&ring->coalesced_mmio[ring->first].data[0],
> + ring->coalesced_mmio[ring->first].len, 1);
> + smp_wmb();
> + ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
> + }
> + }
> +}
> +#endif
> +
>
qemu has a coalesced mmio api, (qemu_register_colaesced_mmio), so please
follow it (stubs in exec.c, and implementation in kvm-specific files).
Please send the patch against the uq/master branch (which follow upstream).
> diff --git a/vl.c b/vl.c
> index 9edea10..64902f2 100644
> --- a/vl.c
> +++ b/vl.c
> @@ -3235,6 +3235,7 @@ static void gui_update(void *opaque)
> interval = dcl->gui_timer_interval;
> dcl = dcl->next;
> }
> + kvm_flush_coalesced_mmio_buffer();
> qemu_mod_timer(ds->gui_timer, interval + qemu_get_clock(rt_clock));
> }
>
Better to do that before the call to dpy_refresh().
>
> @@ -3242,6 +3243,7 @@ static void nographic_update(void *opaque)
> {
> uint64_t interval = GUI_REFRESH_INTERVAL;
>
> + kvm_flush_coalesced_mmio_buffer();
> qemu_mod_timer(nographic_timer, interval + qemu_get_clock(rt_clock));
> }
>
Any need to do it here?
(why does nographic_update use a timer?)
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] kvm: Flush coalesced MMIO buffer periodly
2010-01-24 7:35 ` Avi Kivity
@ 2010-01-25 7:45 ` Sheng Yang
0 siblings, 0 replies; 7+ messages in thread
From: Sheng Yang @ 2010-01-25 7:45 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm
On Sunday 24 January 2010 15:35:58 Avi Kivity wrote:
> On 01/22/2010 04:22 AM, Sheng Yang wrote:
> > The default action of coalesced MMIO is, cache the writing in buffer,
> > until: 1. The buffer is full.
> > 2. Or the exit to QEmu due to other reasons.
> >
> > But this would result in a very late writing in some condition.
> > 1. The each time write to MMIO content is small.
> > 2. The writing interval is big.
> > 3. No need for input or accessing other devices frequently.
> >
> > This issue was observed in a experimental embbed system. The test image
> > simply print "test" every 1 seconds. The output in QEmu meets
> > expectation, but the output in KVM is delayed for seconds.
> >
> > Per Avi's suggestion, I hooked a flushing for coalesced MMIO buffer in
> > VGA update handler. By this way, We don't need vcpu explicit exit to QEmu
> > to handle this issue.
> >
> > Signed-off-by: Sheng Yang<sheng@linux.intel.com>
> > ---
> >
> > Like this?
> >
> > qemu-kvm.c | 26 ++++++++++++++++++++++++--
> > qemu-kvm.h | 6 ++++++
> > vl.c | 2 ++
> > 3 files changed, 32 insertions(+), 2 deletions(-)
> >
> >
> >
> > +#ifdef KVM_CAP_COALESCED_MMIO
> > +void kvm_flush_coalesced_mmio_buffer(void)
> > +{
> > + if (kvm_state->coalesced_mmio_ring) {
> > + struct kvm_coalesced_mmio_ring *ring =
> > + kvm_state->coalesced_mmio_ring;
> > + while (ring->first != ring->last) {
> > +
> > cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
> > +&ring->coalesced_mmio[ring->first].data[0],
> > + ring->coalesced_mmio[ring->first].len, 1);
> > + smp_wmb();
> > + ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
> > + }
> > + }
> > +}
> > +#endif
> > +
>
> qemu has a coalesced mmio api, (qemu_register_colaesced_mmio), so please
> follow it (stubs in exec.c, and implementation in kvm-specific files).
> Please send the patch against the uq/master branch (which follow upstream).
You means put it in the kvm-all.c?(see the latest patch) Um, seems I have to
import "libkvm.h" in with a #ifndef KVM_UPSTREAM, due to smp_wmb() and
PAGE_SIZE macro. I am not sure about which is the proper way to get it work...
(the relationship between qemu-kvm and upstream qemu's kvm often confuse me. I
supposed kvm-all.c should belong to upstream kvm, and qemu-kvm.* and libkvm
should belong to qemu-kvm?)
Another issue is, upstream QEmu compile error with "TARGET_PHYS_ADDR_BITS
redefined". I would like to wait for that to be fixed.
>
> > diff --git a/vl.c b/vl.c
> > index 9edea10..64902f2 100644
> > --- a/vl.c
> > +++ b/vl.c
> > @@ -3235,6 +3235,7 @@ static void gui_update(void *opaque)
> > interval = dcl->gui_timer_interval;
> > dcl = dcl->next;
> > }
> > + kvm_flush_coalesced_mmio_buffer();
> > qemu_mod_timer(ds->gui_timer, interval + qemu_get_clock(rt_clock));
> > }
>
> Better to do that before the call to dpy_refresh().
OK.
>
> > @@ -3242,6 +3243,7 @@ static void nographic_update(void *opaque)
> > {
> > uint64_t interval = GUI_REFRESH_INTERVAL;
> >
> > + kvm_flush_coalesced_mmio_buffer();
> > qemu_mod_timer(nographic_timer, interval +
> > qemu_get_clock(rt_clock)); }
>
> Any need to do it here?
>
> (why does nographic_update use a timer?)
>
VNC would need nographic_update().
--
regards
Yang, Sheng
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH] kvm: Flush coalesced MMIO buffer periodly
@ 2010-01-25 7:46 Sheng Yang
2010-01-25 16:08 ` Marcelo Tosatti
0 siblings, 1 reply; 7+ messages in thread
From: Sheng Yang @ 2010-01-25 7:46 UTC (permalink / raw)
To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Sheng Yang
The default action of coalesced MMIO is, cache the writing in buffer, until:
1. The buffer is full.
2. Or the exit to QEmu due to other reasons.
But this would result in a very late writing in some condition.
1. The each time write to MMIO content is small.
2. The writing interval is big.
3. No need for input or accessing other devices frequently.
This issue was observed in an experimental embedded system. The test image
simply prints "test" every second. The output in QEmu meets expectations,
but the output in KVM is delayed for seconds.
Per Avi's suggestion, I hooked flushing of the coalesced MMIO buffer into the
VGA update handler. This way, we don't need the vcpu to explicitly exit to
QEmu to handle this issue.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
cpu-all.h | 2 ++
exec.c | 6 ++++++
kvm-all.c | 20 ++++++++++++++++++++
qemu-kvm.c | 9 +++++++--
qemu-kvm.h | 2 ++
vl.c | 2 ++
6 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/cpu-all.h b/cpu-all.h
index 8ed76c7..51effc0 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -916,6 +916,8 @@ void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
+void qemu_flush_coalesced_mmio_buffer(void);
+
/*******************************************/
/* host CPU ticks (if available) */
diff --git a/exec.c b/exec.c
index 99e88e1..40c01a1 100644
--- a/exec.c
+++ b/exec.c
@@ -2424,6 +2424,12 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
kvm_uncoalesce_mmio_region(addr, size);
}
+void qemu_flush_coalesced_mmio_buffer(void)
+{
+ if (kvm_enabled())
+ kvm_flush_coalesced_mmio_buffer();
+}
+
#ifdef __linux__
#include <sys/vfs.h>
diff --git a/kvm-all.c b/kvm-all.c
index 0423fff..3d9fcc0 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -25,6 +25,9 @@
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"
+#ifndef KVM_UPSTREAM
+#include "libkvm.h"
+#endif
#ifdef KVM_UPSTREAM
/* KVM uses PAGE_SIZE in it's definition of COALESCED_MMIO_MAX */
@@ -385,6 +388,23 @@ int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
return ret;
}
+void kvm_flush_coalesced_mmio_buffer(void)
+{
+#ifdef KVM_CAP_COALESCED_MMIO
+ if (kvm_state->coalesced_mmio_ring) {
+ struct kvm_coalesced_mmio_ring *ring =
+ kvm_state->coalesced_mmio_ring;
+ while (ring->first != ring->last) {
+ cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
+ &ring->coalesced_mmio[ring->first].data[0],
+ ring->coalesced_mmio[ring->first].len, 1);
+ smp_wmb();
+ ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
+ }
+ }
+#endif
+}
+
int kvm_check_extension(KVMState *s, unsigned int extension)
{
int ret;
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 599c3d6..70d7658 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -463,6 +463,12 @@ static void kvm_create_vcpu(CPUState *env, int id)
goto err_fd;
}
+#ifdef KVM_CAP_COALESCED_MMIO
+ if (kvm_state->coalesced_mmio && !kvm_state->coalesced_mmio_ring)
+ kvm_state->coalesced_mmio_ring = (void *) env->kvm_run +
+ kvm_state->coalesced_mmio * PAGE_SIZE;
+#endif
+
return;
err_fd:
close(env->kvm_fd);
@@ -927,8 +933,7 @@ int kvm_run(CPUState *env)
#if defined(KVM_CAP_COALESCED_MMIO)
if (kvm_state->coalesced_mmio) {
- struct kvm_coalesced_mmio_ring *ring =
- (void *) run + kvm_state->coalesced_mmio * PAGE_SIZE;
+ struct kvm_coalesced_mmio_ring *ring = kvm_state->coalesced_mmio_ring;
while (ring->first != ring->last) {
cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
&ring->coalesced_mmio[ring->first].data[0],
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 6b3e5a1..066e1c0 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -435,6 +435,7 @@ int kvm_register_coalesced_mmio(kvm_context_t kvm, uint64_t addr,
uint32_t size);
int kvm_unregister_coalesced_mmio(kvm_context_t kvm, uint64_t addr,
uint32_t size);
+void kvm_flush_coalesced_mmio_buffer(void);
/*!
* \brief Create a memory alias
@@ -1144,6 +1145,7 @@ typedef struct KVMState {
int fd;
int vmfd;
int coalesced_mmio;
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
int broken_set_mem_region;
int migration_log;
int vcpu_events;
diff --git a/vl.c b/vl.c
index 9edea10..f39ad56 100644
--- a/vl.c
+++ b/vl.c
@@ -3227,6 +3227,7 @@ static void gui_update(void *opaque)
DisplayState *ds = opaque;
DisplayChangeListener *dcl = ds->listeners;
+ qemu_flush_coalesced_mmio_buffer();
dpy_refresh(ds);
while (dcl != NULL) {
@@ -3242,6 +3243,7 @@ static void nographic_update(void *opaque)
{
uint64_t interval = GUI_REFRESH_INTERVAL;
+ qemu_flush_coalesced_mmio_buffer();
qemu_mod_timer(nographic_timer, interval + qemu_get_clock(rt_clock));
}
--
1.5.4.5
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH] kvm: Flush coalesced MMIO buffer periodly
2010-01-25 7:46 Sheng Yang
@ 2010-01-25 16:08 ` Marcelo Tosatti
0 siblings, 0 replies; 7+ messages in thread
From: Marcelo Tosatti @ 2010-01-25 16:08 UTC (permalink / raw)
To: Sheng Yang; +Cc: Avi Kivity, kvm
On Mon, Jan 25, 2010 at 03:46:44PM +0800, Sheng Yang wrote:
> The default action of coalesced MMIO is, cache the writing in buffer, until:
> 1. The buffer is full.
> 2. Or the exit to QEmu due to other reasons.
>
> But this would result in a very late writing in some condition.
> 1. The each time write to MMIO content is small.
> 2. The writing interval is big.
> 3. No need for input or accessing other devices frequently.
>
> This issue was observed in a experimental embbed system. The test image
> simply print "test" every 1 seconds. The output in QEmu meets expectation,
> but the output in KVM is delayed for seconds.
>
> Per Avi's suggestion, I hooked flushing coalesced MMIO buffer in VGA update
> handler. By this way, We don't need vcpu explicit exit to QEmu to
> handle this issue.
Sheng,
Can you send this to QEMU upstream first, since the feature is present
there.
> Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> ---
> cpu-all.h | 2 ++
> exec.c | 6 ++++++
> kvm-all.c | 20 ++++++++++++++++++++
> qemu-kvm.c | 9 +++++++--
> qemu-kvm.h | 2 ++
> vl.c | 2 ++
> 6 files changed, 39 insertions(+), 2 deletions(-)
>
> diff --git a/cpu-all.h b/cpu-all.h
> index 8ed76c7..51effc0 100644
> --- a/cpu-all.h
> +++ b/cpu-all.h
> @@ -916,6 +916,8 @@ void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
>
> void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
>
> +void qemu_flush_coalesced_mmio_buffer(void);
> +
> /*******************************************/
> /* host CPU ticks (if available) */
>
> diff --git a/exec.c b/exec.c
> index 99e88e1..40c01a1 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2424,6 +2424,12 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
> kvm_uncoalesce_mmio_region(addr, size);
> }
>
> +void qemu_flush_coalesced_mmio_buffer(void)
> +{
> + if (kvm_enabled())
> + kvm_flush_coalesced_mmio_buffer();
> +}
> +
> #ifdef __linux__
>
> #include <sys/vfs.h>
> diff --git a/kvm-all.c b/kvm-all.c
> index 0423fff..3d9fcc0 100644
> --- a/kvm-all.c
> +++ b/kvm-all.c
> @@ -25,6 +25,9 @@
> #include "hw/hw.h"
> #include "gdbstub.h"
> #include "kvm.h"
> +#ifndef KVM_UPSTREAM
> +#include "libkvm.h"
> +#endif
>
> #ifdef KVM_UPSTREAM
> /* KVM uses PAGE_SIZE in it's definition of COALESCED_MMIO_MAX */
> @@ -385,6 +388,23 @@ int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
> return ret;
> }
>
> +void kvm_flush_coalesced_mmio_buffer(void)
> +{
> +#ifdef KVM_CAP_COALESCED_MMIO
> + if (kvm_state->coalesced_mmio_ring) {
> + struct kvm_coalesced_mmio_ring *ring =
> + kvm_state->coalesced_mmio_ring;
> + while (ring->first != ring->last) {
> + cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
> + &ring->coalesced_mmio[ring->first].data[0],
> + ring->coalesced_mmio[ring->first].len, 1);
> + smp_wmb();
Tab breakage.
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2010-01-25 16:08 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-01-21 9:37 [PATCH] kvm: Flush coalesced MMIO buffer periodly Sheng Yang
2010-01-21 10:19 ` Avi Kivity
2010-01-22 2:22 ` Sheng Yang
2010-01-24 7:35 ` Avi Kivity
2010-01-25 7:45 ` Sheng Yang
-- strict thread matches above, loose matches on Subject: below --
2010-01-25 7:46 Sheng Yang
2010-01-25 16:08 ` Marcelo Tosatti
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox