* [Qemu-devel] [PATCH] spice/qxl: locking fix for qemu-kvm
@ 2011-02-27 20:40 Alon Levy
0 siblings, 0 replies; 3+ messages in thread
From: Alon Levy @ 2011-02-27 20:40 UTC (permalink / raw)
To: qemu-devel
From: Gerd Hoffmann <kraxel@redhat.com>
qxl needs to release the qemu lock before calling some libspice
functions (and re-acquire it later). In upstream qemu qxl can just
use qemu_mutex_{unlock,lock}_iothread. In qemu-kvm this doesn't
work, qxl needs additionally save+restore the cpu_single_env pointer
on unlock+lock.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
This patch fixes the segmentation faults reported from gentoo and fedora
on cpu_single_env asserts.
---
hw/qxl.c | 37 +++++++++++++++++++++++++++++--------
ui/spice-display.c | 12 ++++++------
ui/spice-display.h | 6 ++++++
3 files changed, 41 insertions(+), 14 deletions(-)
diff --git a/hw/qxl.c b/hw/qxl.c
index fe4212b..117f7c8 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -125,6 +125,27 @@ static void qxl_reset_memslots(PCIQXLDevice *d);
static void qxl_reset_surfaces(PCIQXLDevice *d);
static void qxl_ring_set_dirty(PCIQXLDevice *qxl);
+/* qemu-kvm locking ... */
+void qxl_unlock_iothread(SimpleSpiceDisplay *ssd)
+{
+ if (cpu_single_env) {
+ assert(ssd->env == NULL);
+ ssd->env = cpu_single_env;
+ cpu_single_env = NULL;
+ }
+ qemu_mutex_unlock_iothread();
+}
+
+void qxl_lock_iothread(SimpleSpiceDisplay *ssd)
+{
+ qemu_mutex_lock_iothread();
+ if (ssd->env) {
+ assert(cpu_single_env == NULL);
+ cpu_single_env = ssd->env;
+ ssd->env = NULL;
+ }
+}
+
static inline uint32_t msb_mask(uint32_t val)
{
uint32_t mask;
@@ -662,10 +683,10 @@ static void qxl_hard_reset(PCIQXLDevice *d, int loadvm)
dprint(d, 1, "%s: start%s\n", __FUNCTION__,
loadvm ? " (loadvm)" : "");
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->reset_cursor(d->ssd.worker);
d->ssd.worker->reset_image_cache(d->ssd.worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
qxl_reset_surfaces(d);
qxl_reset_memslots(d);
@@ -795,9 +816,9 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
{
dprint(d, 1, "%s:\n", __FUNCTION__);
d->mode = QXL_MODE_UNDEFINED;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->destroy_surfaces(d->ssd.worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds));
}
@@ -866,9 +887,9 @@ static void qxl_destroy_primary(PCIQXLDevice *d)
dprint(d, 1, "%s\n", __FUNCTION__);
d->mode = QXL_MODE_UNDEFINED;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->destroy_primary_surface(d->ssd.worker, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
}
static void qxl_set_mode(PCIQXLDevice *d, int modenr, int loadvm)
@@ -938,10 +959,10 @@ static void ioport_write(void *opaque, uint32_t addr, uint32_t val)
case QXL_IO_UPDATE_AREA:
{
QXLRect update = d->ram->update_area;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->update_area(d->ssd.worker, d->ram->update_surface,
&update, NULL, 0, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
break;
}
case QXL_IO_NOTIFY_CMD:
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 020b423..defe652 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -186,18 +186,18 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd)
surface.mem = (intptr_t)ssd->buf;
surface.group_id = MEMSLOT_GROUP_HOST;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->create_primary_surface(ssd->worker, 0, &surface);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
void qemu_spice_destroy_host_primary(SimpleSpiceDisplay *ssd)
{
dprint(1, "%s:\n", __FUNCTION__);
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->destroy_primary_surface(ssd->worker, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
@@ -207,9 +207,9 @@ void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
if (running) {
ssd->worker->start(ssd->worker);
} else {
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->stop(ssd->worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
ssd->running = running;
}
diff --git a/ui/spice-display.h b/ui/spice-display.h
index aef0464..df74828 100644
--- a/ui/spice-display.h
+++ b/ui/spice-display.h
@@ -43,6 +43,9 @@ typedef struct SimpleSpiceDisplay {
QXLRect dirty;
int notify;
int running;
+
+ /* qemu-kvm locking ... */
+ void *env;
} SimpleSpiceDisplay;
typedef struct SimpleSpiceUpdate {
@@ -52,6 +55,9 @@ typedef struct SimpleSpiceUpdate {
uint8_t *bitmap;
} SimpleSpiceUpdate;
+void qxl_unlock_iothread(SimpleSpiceDisplay *ssd);
+void qxl_lock_iothread(SimpleSpiceDisplay *ssd);
+
int qemu_spice_rect_is_empty(const QXLRect* r);
void qemu_spice_rect_union(QXLRect *dest, const QXLRect *r);
--
1.7.4.1
^ permalink raw reply related [flat|nested] 3+ messages in thread
* [Qemu-devel] [PATCH] spice/qxl: locking fix for qemu-kvm
@ 2011-03-02 12:32 Alon Levy
2011-03-02 12:40 ` Alon Levy
0 siblings, 1 reply; 3+ messages in thread
From: Alon Levy @ 2011-03-02 12:32 UTC (permalink / raw)
To: qemu-devel
From: Gerd Hoffmann <kraxel@redhat.com>
qxl needs to release the qemu lock before calling some libspice
functions (and re-acquire it later). In upstream qemu qxl can just
use qemu_mutex_{unlock,lock}_iothread. In qemu-kvm this doesn't
work, qxl needs additionally save+restore the cpu_single_env pointer
on unlock+lock.
This fixes the following assertion in kvm_mutex_unlock that happened in the
released qemu-kvm 0.14.0 on gentoo when using spice's qxl device:
> /var/tmp/portage/app-emulation/qemu-kvm-0.14.0/work/qemu-kvm-0.14.0/qemu-kvm.c:1724:
> kvm_mutex_unlock: Assertion `!cpu_single_env' failed.
Happening as a result of io from the guest (qxl reset):
> (gdb) bt
> #0 0x00007ffff5daa165 in raise () from /lib/libc.so.6
> #1 0x00007ffff5dab580 in abort () from /lib/libc.so.6
> #2 0x00007ffff5da3201 in __assert_fail () from /lib/libc.so.6
> #3 0x0000000000436f7e in kvm_mutex_unlock ()
> at /var/tmp/portage/app-emulation/qemu-kvm-0.14.0/work/qemu-kvm-0.14.0/qemu-kvm.c:1724
> #4 qemu_mutex_unlock_iothread ()
> at /var/tmp/portage/app-emulation/qemu-kvm-0.14.0/work/qemu-kvm-0.14.0/qemu-kvm.c:1737
> #5 0x00000000005e84ee in qxl_hard_reset (d=0x15d3080, loadvm=0)
> at /var/tmp/portage/app-emulation/qemu-kvm-0.14.0/work/qemu-kvm-0.14.0/hw/qxl.c:665
> #6 0x00000000005e9f9a in ioport_write (opaque=0x15d3080, addr=<value
According to Jan, this bug (the wrong value for cpu_single_env) is also present
in qemu, but no abort is triggered because it isn't asserted.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
hw/qxl.c | 37 +++++++++++++++++++++++++++++--------
ui/spice-display.c | 12 ++++++------
ui/spice-display.h | 6 ++++++
3 files changed, 41 insertions(+), 14 deletions(-)
diff --git a/hw/qxl.c b/hw/qxl.c
index fe4212b..117f7c8 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -125,6 +125,27 @@ static void qxl_reset_memslots(PCIQXLDevice *d);
static void qxl_reset_surfaces(PCIQXLDevice *d);
static void qxl_ring_set_dirty(PCIQXLDevice *qxl);
+/* qemu-kvm locking ... */
+void qxl_unlock_iothread(SimpleSpiceDisplay *ssd)
+{
+ if (cpu_single_env) {
+ assert(ssd->env == NULL);
+ ssd->env = cpu_single_env;
+ cpu_single_env = NULL;
+ }
+ qemu_mutex_unlock_iothread();
+}
+
+void qxl_lock_iothread(SimpleSpiceDisplay *ssd)
+{
+ qemu_mutex_lock_iothread();
+ if (ssd->env) {
+ assert(cpu_single_env == NULL);
+ cpu_single_env = ssd->env;
+ ssd->env = NULL;
+ }
+}
+
static inline uint32_t msb_mask(uint32_t val)
{
uint32_t mask;
@@ -662,10 +683,10 @@ static void qxl_hard_reset(PCIQXLDevice *d, int loadvm)
dprint(d, 1, "%s: start%s\n", __FUNCTION__,
loadvm ? " (loadvm)" : "");
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->reset_cursor(d->ssd.worker);
d->ssd.worker->reset_image_cache(d->ssd.worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
qxl_reset_surfaces(d);
qxl_reset_memslots(d);
@@ -795,9 +816,9 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
{
dprint(d, 1, "%s:\n", __FUNCTION__);
d->mode = QXL_MODE_UNDEFINED;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->destroy_surfaces(d->ssd.worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds));
}
@@ -866,9 +887,9 @@ static void qxl_destroy_primary(PCIQXLDevice *d)
dprint(d, 1, "%s\n", __FUNCTION__);
d->mode = QXL_MODE_UNDEFINED;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->destroy_primary_surface(d->ssd.worker, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
}
static void qxl_set_mode(PCIQXLDevice *d, int modenr, int loadvm)
@@ -938,10 +959,10 @@ static void ioport_write(void *opaque, uint32_t addr, uint32_t val)
case QXL_IO_UPDATE_AREA:
{
QXLRect update = d->ram->update_area;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->update_area(d->ssd.worker, d->ram->update_surface,
&update, NULL, 0, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
break;
}
case QXL_IO_NOTIFY_CMD:
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 020b423..defe652 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -186,18 +186,18 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd)
surface.mem = (intptr_t)ssd->buf;
surface.group_id = MEMSLOT_GROUP_HOST;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->create_primary_surface(ssd->worker, 0, &surface);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
void qemu_spice_destroy_host_primary(SimpleSpiceDisplay *ssd)
{
dprint(1, "%s:\n", __FUNCTION__);
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->destroy_primary_surface(ssd->worker, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
@@ -207,9 +207,9 @@ void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
if (running) {
ssd->worker->start(ssd->worker);
} else {
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->stop(ssd->worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
ssd->running = running;
}
diff --git a/ui/spice-display.h b/ui/spice-display.h
index aef0464..df74828 100644
--- a/ui/spice-display.h
+++ b/ui/spice-display.h
@@ -43,6 +43,9 @@ typedef struct SimpleSpiceDisplay {
QXLRect dirty;
int notify;
int running;
+
+ /* qemu-kvm locking ... */
+ void *env;
} SimpleSpiceDisplay;
typedef struct SimpleSpiceUpdate {
@@ -52,6 +55,9 @@ typedef struct SimpleSpiceUpdate {
uint8_t *bitmap;
} SimpleSpiceUpdate;
+void qxl_unlock_iothread(SimpleSpiceDisplay *ssd);
+void qxl_lock_iothread(SimpleSpiceDisplay *ssd);
+
int qemu_spice_rect_is_empty(const QXLRect* r);
void qemu_spice_rect_union(QXLRect *dest, const QXLRect *r);
--
1.7.4.1
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [Qemu-devel] [PATCH] spice/qxl: locking fix for qemu-kvm
2011-03-02 12:32 Alon Levy
@ 2011-03-02 12:40 ` Alon Levy
0 siblings, 0 replies; 3+ messages in thread
From: Alon Levy @ 2011-03-02 12:40 UTC (permalink / raw)
To: qemu-devel
On Wed, Mar 02, 2011 at 02:32:03PM +0200, Alon Levy wrote:
> From: Gerd Hoffmann <kraxel@redhat.com>
Err, that "From" got there by mistake, and the title should of course
not say "for qemu-kvm"..
>
> qxl needs to release the qemu lock before calling some libspice
> functions (and re-acquire it later). In upstream qemu qxl can just
> use qemu_mutex_{unlock,lock}_iothread. In qemu-kvm this doesn't
> work, qxl needs additionally save+restore the cpu_single_env pointer
> on unlock+lock.
>
> This fixes the following assertion in kvm_mutex_unlock that happened in the
> released qemu-kvm 0.14.0 on gentoo when using spice's qxl device:
>
> > /var/tmp/portage/app-emulation/qemu-kvm-0.14.0/work/qemu-kvm-0.14.0/qemu-kvm.c:1724:
> > kvm_mutex_unlock: Assertion `!cpu_single_env' failed.
>
> Happening as a result of io from the guest (qxl reset):
> > (gdb) bt
> > #0 0x00007ffff5daa165 in raise () from /lib/libc.so.6
> > #1 0x00007ffff5dab580 in abort () from /lib/libc.so.6
> > #2 0x00007ffff5da3201 in __assert_fail () from /lib/libc.so.6
> > #3 0x0000000000436f7e in kvm_mutex_unlock ()
> > at /var/tmp/portage/app-emulation/qemu-kvm-0.14.0/work/qemu-kvm-0.14.0/qemu-kvm.c:1724
> > #4 qemu_mutex_unlock_iothread ()
> > at /var/tmp/portage/app-emulation/qemu-kvm-0.14.0/work/qemu-kvm-0.14.0/qemu-kvm.c:1737
> > #5 0x00000000005e84ee in qxl_hard_reset (d=0x15d3080, loadvm=0)
> > at /var/tmp/portage/app-emulation/qemu-kvm-0.14.0/work/qemu-kvm-0.14.0/hw/qxl.c:665
> > #6 0x00000000005e9f9a in ioport_write (opaque=0x15d3080, addr=<value
>
> According to Jan, this bug (the wrong value for cpu_single_env) is also present
> in qemu, but no abort is triggered because it isn't asserted.
>
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> ---
> hw/qxl.c | 37 +++++++++++++++++++++++++++++--------
> ui/spice-display.c | 12 ++++++------
> ui/spice-display.h | 6 ++++++
> 3 files changed, 41 insertions(+), 14 deletions(-)
>
> diff --git a/hw/qxl.c b/hw/qxl.c
> index fe4212b..117f7c8 100644
> --- a/hw/qxl.c
> +++ b/hw/qxl.c
> @@ -125,6 +125,27 @@ static void qxl_reset_memslots(PCIQXLDevice *d);
> static void qxl_reset_surfaces(PCIQXLDevice *d);
> static void qxl_ring_set_dirty(PCIQXLDevice *qxl);
>
> +/* qemu-kvm locking ... */
> +void qxl_unlock_iothread(SimpleSpiceDisplay *ssd)
> +{
> + if (cpu_single_env) {
> + assert(ssd->env == NULL);
> + ssd->env = cpu_single_env;
> + cpu_single_env = NULL;
> + }
> + qemu_mutex_unlock_iothread();
> +}
> +
> +void qxl_lock_iothread(SimpleSpiceDisplay *ssd)
> +{
> + qemu_mutex_lock_iothread();
> + if (ssd->env) {
> + assert(cpu_single_env == NULL);
> + cpu_single_env = ssd->env;
> + ssd->env = NULL;
> + }
> +}
> +
> static inline uint32_t msb_mask(uint32_t val)
> {
> uint32_t mask;
> @@ -662,10 +683,10 @@ static void qxl_hard_reset(PCIQXLDevice *d, int loadvm)
> dprint(d, 1, "%s: start%s\n", __FUNCTION__,
> loadvm ? " (loadvm)" : "");
>
> - qemu_mutex_unlock_iothread();
> + qxl_unlock_iothread(&d->ssd);
> d->ssd.worker->reset_cursor(d->ssd.worker);
> d->ssd.worker->reset_image_cache(d->ssd.worker);
> - qemu_mutex_lock_iothread();
> + qxl_lock_iothread(&d->ssd);
> qxl_reset_surfaces(d);
> qxl_reset_memslots(d);
>
> @@ -795,9 +816,9 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
> {
> dprint(d, 1, "%s:\n", __FUNCTION__);
> d->mode = QXL_MODE_UNDEFINED;
> - qemu_mutex_unlock_iothread();
> + qxl_unlock_iothread(&d->ssd);
> d->ssd.worker->destroy_surfaces(d->ssd.worker);
> - qemu_mutex_lock_iothread();
> + qxl_lock_iothread(&d->ssd);
> memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds));
> }
>
> @@ -866,9 +887,9 @@ static void qxl_destroy_primary(PCIQXLDevice *d)
> dprint(d, 1, "%s\n", __FUNCTION__);
>
> d->mode = QXL_MODE_UNDEFINED;
> - qemu_mutex_unlock_iothread();
> + qxl_unlock_iothread(&d->ssd);
> d->ssd.worker->destroy_primary_surface(d->ssd.worker, 0);
> - qemu_mutex_lock_iothread();
> + qxl_lock_iothread(&d->ssd);
> }
>
> static void qxl_set_mode(PCIQXLDevice *d, int modenr, int loadvm)
> @@ -938,10 +959,10 @@ static void ioport_write(void *opaque, uint32_t addr, uint32_t val)
> case QXL_IO_UPDATE_AREA:
> {
> QXLRect update = d->ram->update_area;
> - qemu_mutex_unlock_iothread();
> + qxl_unlock_iothread(&d->ssd);
> d->ssd.worker->update_area(d->ssd.worker, d->ram->update_surface,
> &update, NULL, 0, 0);
> - qemu_mutex_lock_iothread();
> + qxl_lock_iothread(&d->ssd);
> break;
> }
> case QXL_IO_NOTIFY_CMD:
> diff --git a/ui/spice-display.c b/ui/spice-display.c
> index 020b423..defe652 100644
> --- a/ui/spice-display.c
> +++ b/ui/spice-display.c
> @@ -186,18 +186,18 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd)
> surface.mem = (intptr_t)ssd->buf;
> surface.group_id = MEMSLOT_GROUP_HOST;
>
> - qemu_mutex_unlock_iothread();
> + qxl_unlock_iothread(ssd);
> ssd->worker->create_primary_surface(ssd->worker, 0, &surface);
> - qemu_mutex_lock_iothread();
> + qxl_lock_iothread(ssd);
> }
>
> void qemu_spice_destroy_host_primary(SimpleSpiceDisplay *ssd)
> {
> dprint(1, "%s:\n", __FUNCTION__);
>
> - qemu_mutex_unlock_iothread();
> + qxl_unlock_iothread(ssd);
> ssd->worker->destroy_primary_surface(ssd->worker, 0);
> - qemu_mutex_lock_iothread();
> + qxl_lock_iothread(ssd);
> }
>
> void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
> @@ -207,9 +207,9 @@ void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
> if (running) {
> ssd->worker->start(ssd->worker);
> } else {
> - qemu_mutex_unlock_iothread();
> + qxl_unlock_iothread(ssd);
> ssd->worker->stop(ssd->worker);
> - qemu_mutex_lock_iothread();
> + qxl_lock_iothread(ssd);
> }
> ssd->running = running;
> }
> diff --git a/ui/spice-display.h b/ui/spice-display.h
> index aef0464..df74828 100644
> --- a/ui/spice-display.h
> +++ b/ui/spice-display.h
> @@ -43,6 +43,9 @@ typedef struct SimpleSpiceDisplay {
> QXLRect dirty;
> int notify;
> int running;
> +
> + /* qemu-kvm locking ... */
> + void *env;
> } SimpleSpiceDisplay;
>
> typedef struct SimpleSpiceUpdate {
> @@ -52,6 +55,9 @@ typedef struct SimpleSpiceUpdate {
> uint8_t *bitmap;
> } SimpleSpiceUpdate;
>
> +void qxl_unlock_iothread(SimpleSpiceDisplay *ssd);
> +void qxl_lock_iothread(SimpleSpiceDisplay *ssd);
> +
> int qemu_spice_rect_is_empty(const QXLRect* r);
> void qemu_spice_rect_union(QXLRect *dest, const QXLRect *r);
>
> --
> 1.7.4.1
>
>
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2011-03-02 12:40 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-02-27 20:40 [Qemu-devel] [PATCH] spice/qxl: locking fix for qemu-kvm Alon Levy
-- strict thread matches above, loose matches on Subject: below --
2011-03-02 12:32 Alon Levy
2011-03-02 12:40 ` Alon Levy
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).