From: Alon Levy <alevy@redhat.com>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH] spice/qxl: locking fix for qemu-kvm
Date: Sun, 27 Feb 2011 22:40:18 +0200
Message-ID: <1298839218-28412-1-git-send-email-alevy@redhat.com>
From: Gerd Hoffmann <kraxel@redhat.com>
qxl needs to release the qemu iothread lock before calling some
libspice functions (and re-acquire it later). In upstream qemu, qxl
can simply use qemu_mutex_{unlock,lock}_iothread. In qemu-kvm this
doesn't work: qxl additionally needs to save and restore the
cpu_single_env pointer on unlock and lock.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
This patch fixes the segmentation faults reported on Gentoo and Fedora,
triggered by the cpu_single_env assertions.
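
For reviewers who want the idea in isolation: below is a minimal,
self-contained model of the save/restore pattern the patch introduces.
It is a sketch only (a pthread mutex standing in for the iothread lock,
a dummy CPUState, and a trivial main() in place of the real libspice
worker calls), not code from the patch itself.

    #include <assert.h>
    #include <pthread.h>
    #include <stddef.h>

    typedef struct CPUState { int dummy; } CPUState;

    static CPUState *cpu_single_env;          /* owned by the running vcpu */
    static pthread_mutex_t iothread_lock = PTHREAD_MUTEX_INITIALIZER;

    typedef struct {
        CPUState *env;                        /* saved across unlock/lock */
    } SimpleSpiceDisplay;

    /* Drop the iothread lock; if we are a vcpu thread, stash our
     * cpu_single_env so another thread taking the lock sees NULL. */
    static void qxl_unlock_iothread(SimpleSpiceDisplay *ssd)
    {
        if (cpu_single_env) {
            assert(ssd->env == NULL);
            ssd->env = cpu_single_env;
            cpu_single_env = NULL;
        }
        pthread_mutex_unlock(&iothread_lock);
    }

    /* Reacquire the iothread lock and restore the stashed pointer. */
    static void qxl_lock_iothread(SimpleSpiceDisplay *ssd)
    {
        pthread_mutex_lock(&iothread_lock);
        if (ssd->env) {
            assert(cpu_single_env == NULL);
            cpu_single_env = ssd->env;
            ssd->env = NULL;
        }
    }

    int main(void)
    {
        static CPUState vcpu;
        SimpleSpiceDisplay ssd = { NULL };

        cpu_single_env = &vcpu;               /* pretend to be a vcpu thread */
        pthread_mutex_lock(&iothread_lock);

        qxl_unlock_iothread(&ssd);            /* before a blocking spice call */
        /* ... libspice worker call would run here without the lock ... */
        qxl_lock_iothread(&ssd);              /* after it returns */

        assert(cpu_single_env == &vcpu);      /* pointer restored intact */
        return 0;
    }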
---
hw/qxl.c | 37 +++++++++++++++++++++++++++++--------
ui/spice-display.c | 12 ++++++------
ui/spice-display.h | 6 ++++++
3 files changed, 41 insertions(+), 14 deletions(-)
diff --git a/hw/qxl.c b/hw/qxl.c
index fe4212b..117f7c8 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -125,6 +125,27 @@ static void qxl_reset_memslots(PCIQXLDevice *d);
static void qxl_reset_surfaces(PCIQXLDevice *d);
static void qxl_ring_set_dirty(PCIQXLDevice *qxl);
+/* qemu-kvm locking ... */
+void qxl_unlock_iothread(SimpleSpiceDisplay *ssd)
+{
+ if (cpu_single_env) {
+ assert(ssd->env == NULL);
+ ssd->env = cpu_single_env;
+ cpu_single_env = NULL;
+ }
+ qemu_mutex_unlock_iothread();
+}
+
+void qxl_lock_iothread(SimpleSpiceDisplay *ssd)
+{
+ qemu_mutex_lock_iothread();
+ if (ssd->env) {
+ assert(cpu_single_env == NULL);
+ cpu_single_env = ssd->env;
+ ssd->env = NULL;
+ }
+}
+
static inline uint32_t msb_mask(uint32_t val)
{
uint32_t mask;
@@ -662,10 +683,10 @@ static void qxl_hard_reset(PCIQXLDevice *d, int loadvm)
dprint(d, 1, "%s: start%s\n", __FUNCTION__,
loadvm ? " (loadvm)" : "");
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->reset_cursor(d->ssd.worker);
d->ssd.worker->reset_image_cache(d->ssd.worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
qxl_reset_surfaces(d);
qxl_reset_memslots(d);
@@ -795,9 +816,9 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
{
dprint(d, 1, "%s:\n", __FUNCTION__);
d->mode = QXL_MODE_UNDEFINED;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->destroy_surfaces(d->ssd.worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds));
}
@@ -866,9 +887,9 @@ static void qxl_destroy_primary(PCIQXLDevice *d)
dprint(d, 1, "%s\n", __FUNCTION__);
d->mode = QXL_MODE_UNDEFINED;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->destroy_primary_surface(d->ssd.worker, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
}
static void qxl_set_mode(PCIQXLDevice *d, int modenr, int loadvm)
@@ -938,10 +959,10 @@ static void ioport_write(void *opaque, uint32_t addr, uint32_t val)
case QXL_IO_UPDATE_AREA:
{
QXLRect update = d->ram->update_area;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(&d->ssd);
d->ssd.worker->update_area(d->ssd.worker, d->ram->update_surface,
&update, NULL, 0, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(&d->ssd);
break;
}
case QXL_IO_NOTIFY_CMD:
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 020b423..defe652 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -186,18 +186,18 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd)
surface.mem = (intptr_t)ssd->buf;
surface.group_id = MEMSLOT_GROUP_HOST;
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->create_primary_surface(ssd->worker, 0, &surface);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
void qemu_spice_destroy_host_primary(SimpleSpiceDisplay *ssd)
{
dprint(1, "%s:\n", __FUNCTION__);
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->destroy_primary_surface(ssd->worker, 0);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
@@ -207,9 +207,9 @@ void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
if (running) {
ssd->worker->start(ssd->worker);
} else {
- qemu_mutex_unlock_iothread();
+ qxl_unlock_iothread(ssd);
ssd->worker->stop(ssd->worker);
- qemu_mutex_lock_iothread();
+ qxl_lock_iothread(ssd);
}
ssd->running = running;
}
diff --git a/ui/spice-display.h b/ui/spice-display.h
index aef0464..df74828 100644
--- a/ui/spice-display.h
+++ b/ui/spice-display.h
@@ -43,6 +43,9 @@ typedef struct SimpleSpiceDisplay {
QXLRect dirty;
int notify;
int running;
+
+ /* qemu-kvm locking ... */
+ void *env;
} SimpleSpiceDisplay;
typedef struct SimpleSpiceUpdate {
@@ -52,6 +55,9 @@ typedef struct SimpleSpiceUpdate {
uint8_t *bitmap;
} SimpleSpiceUpdate;
+void qxl_unlock_iothread(SimpleSpiceDisplay *ssd);
+void qxl_lock_iothread(SimpleSpiceDisplay *ssd);
+
int qemu_spice_rect_is_empty(const QXLRect* r);
void qemu_spice_rect_union(QXLRect *dest, const QXLRect *r);
--
1.7.4.1