From: Alon Levy <alevy@redhat.com>
To: qemu-devel@nongnu.org
Cc: yhalperi@redhat.com, kraxel@redhat.com
Subject: [Qemu-devel] [PATCH] qxl: fix surface tracking & locking
Date: Wed, 6 Jul 2011 14:19:18 +0200 [thread overview]
Message-ID: <1309954766-9728-10-git-send-email-alevy@redhat.com> (raw)
In-Reply-To: <1309954766-9728-1-git-send-email-alevy@redhat.com>
From: Gerd Hoffmann <kraxel@redhat.com>
Surface tracking needs proper locking, since some commands that affect
surfaces run in a (spice server) thread; add it. Also reset the surface
counter when zapping all surfaces.
[ alon: use track_lock instead of wlock for guest_surfaces ]
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
hw/qxl.c | 13 ++++++++++++-
hw/qxl.h | 2 ++
2 files changed, 14 insertions(+), 1 deletions(-)
diff --git a/hw/qxl.c b/hw/qxl.c
index 830ddae..241c27c 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -153,7 +153,12 @@ void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id,
void qxl_spice_destroy_surface_wait(PCIQXLDevice *qxl, uint32_t id)
{
qemu_mutex_lock(&qxl->ssd.wlock);
+ qemu_mutex_lock(&qxl->track_lock);
+ PANIC_ON(id >= NUM_SURFACES);
qxl->ssd.worker->destroy_surface_wait(qxl->ssd.worker, id);
+ qxl->guest_surfaces.cmds[id] = 0;
+ qxl->guest_surfaces.count--;
+ qemu_mutex_unlock(&qxl->track_lock);
qemu_mutex_unlock(&qxl->ssd.wlock);
}
@@ -182,7 +187,11 @@ void qxl_spice_reset_memslots(PCIQXLDevice *qxl)
void qxl_spice_destroy_surfaces(PCIQXLDevice *qxl)
{
qemu_mutex_lock(&qxl->ssd.wlock);
+ qemu_mutex_lock(&qxl->track_lock);
qxl->ssd.worker->destroy_surfaces(qxl->ssd.worker);
+ memset(&qxl->guest_surfaces.cmds, 0, sizeof(qxl->guest_surfaces.cmds));
+ qxl->guest_surfaces.count = 0;
+ qemu_mutex_unlock(&qxl->track_lock);
qemu_mutex_unlock(&qxl->ssd.wlock);
}
@@ -346,6 +355,7 @@ static void qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
uint32_t id = le32_to_cpu(cmd->surface_id);
PANIC_ON(id >= NUM_SURFACES);
+ qemu_mutex_lock(&qxl->track_lock);
if (cmd->type == QXL_SURFACE_CMD_CREATE) {
qxl->guest_surfaces.cmds[id] = ext->cmd.data;
qxl->guest_surfaces.count++;
@@ -356,6 +366,7 @@ static void qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
qxl->guest_surfaces.cmds[id] = 0;
qxl->guest_surfaces.count--;
}
+ qemu_mutex_unlock(&qxl->track_lock);
break;
}
case QXL_CMD_CURSOR:
@@ -897,7 +908,6 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
dprint(d, 1, "%s:\n", __FUNCTION__);
d->mode = QXL_MODE_UNDEFINED;
qxl_spice_destroy_surfaces(d);
- memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds));
}
/* called from spice server thread context only */
@@ -1321,6 +1331,7 @@ static int qxl_init_common(PCIQXLDevice *qxl)
qxl->generation = 1;
qxl->num_memslots = NUM_MEMSLOTS;
qxl->num_surfaces = NUM_SURFACES;
+ qemu_mutex_init(&qxl->track_lock);
switch (qxl->revision) {
case 1: /* spice 0.4 -- qxl-1 */
diff --git a/hw/qxl.h b/hw/qxl.h
index 489d518..087ef6b 100644
--- a/hw/qxl.h
+++ b/hw/qxl.h
@@ -55,6 +55,8 @@ typedef struct PCIQXLDevice {
} guest_surfaces;
QXLPHYSICAL guest_cursor;
+ QemuMutex track_lock;
+
/* thread signaling */
pthread_t main;
int pipe[2];
--
1.7.5.4
next prev parent reply other threads:[~2011-07-06 12:19 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-07-06 12:19 [Qemu-devel] [PATCH] async + suspend reworked Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: add defines from latest spice-protocol Alon Levy
2011-07-07 7:33 ` Gerd Hoffmann
2011-07-07 7:43 ` Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] spice: add worker wrapper functions Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] spice: add qemu_spice_display_init_common Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] spice: lock spice worker calls Alon Levy
2011-07-07 7:40 ` Gerd Hoffmann
2011-07-07 7:52 ` Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: move qemu_spice_add_memslot call out of qxl_add_memslot Alon Levy
2011-07-07 7:39 ` Gerd Hoffmann
2011-07-07 8:45 ` Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: move qemu_spice_create_primary_surface call out of qxl_create_guest_primary Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: remove qxl_destroy_primary() Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] spice/qxl: move worker wrappers Alon Levy
2011-07-06 12:19 ` Alon Levy [this message]
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: add io_port_to_string Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: error handling fixes and cleanups Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: make qxl_guest_bug take variable arguments Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: async I/O Alon Levy
2011-07-07 7:47 ` Gerd Hoffmann
2011-07-07 8:11 ` Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: bump pci rev Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: only disallow specific io's in vga mode Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: add QXL_IO_FLUSH_{SURFACES, RELEASE} for guest S3&S4 support Alon Levy
2011-07-06 12:19 ` [Qemu-devel] [PATCH] qxl: use QXL_REVISION_* Alon Levy
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1309954766-9728-10-git-send-email-alevy@redhat.com \
--to=alevy@redhat.com \
--cc=kraxel@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=yhalperi@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).