From: Alon Levy <alevy@redhat.com>
To: qemu-devel@nongnu.org
Cc: yhalperi@redhat.com, kraxel@redhat.com
Subject: [Qemu-devel] [PATCH v2] qxl: fix surface tracking & locking
Date: Thu,  7 Jul 2011 18:50:46 +0200	[thread overview]
Message-ID: <1310057455-18570-6-git-send-email-alevy@redhat.com> (raw)
In-Reply-To: <1310057455-18570-1-git-send-email-alevy@redhat.com>

From: Gerd Hoffmann <kraxel@redhat.com>

Surface tracking needs proper locking since it is used from both the vcpu
and spice worker threads; add it.  Also reset the surface counter when
zapping all surfaces.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 hw/qxl.c |   13 ++++++++++++-
 hw/qxl.h |    2 ++
 2 files changed, 14 insertions(+), 1 deletions(-)
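
For reference, a minimal standalone sketch of the locking pattern this
patch applies to the guest_surfaces tracking: two threads, standing in
for the vcpu and spice worker threads, update a shared surface counter,
and every access is wrapped in a mutex.  This is plain pthreads with
made-up names, not QEMU code; QEMU's qemu_mutex_* helpers wrap the same
pthread primitives on POSIX hosts.

/*
 * Standalone illustration only -- all identifiers below are invented
 * for this sketch and do not appear in hw/qxl.c.
 */
#include <pthread.h>
#include <stdio.h>

#define ITERATIONS 100000

static pthread_mutex_t track_lock = PTHREAD_MUTEX_INITIALIZER;
static int surface_count;

static void *vcpu_side(void *arg)       /* "vcpu" thread: creates surfaces */
{
    int i;
    (void)arg;
    for (i = 0; i < ITERATIONS; i++) {
        pthread_mutex_lock(&track_lock);
        surface_count++;                /* track one more surface */
        pthread_mutex_unlock(&track_lock);
    }
    return NULL;
}

static void *worker_side(void *arg)     /* "spice worker" thread: destroys them */
{
    int i;
    (void)arg;
    for (i = 0; i < ITERATIONS; i++) {
        pthread_mutex_lock(&track_lock);
        surface_count--;                /* track one less surface */
        pthread_mutex_unlock(&track_lock);
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, vcpu_side, NULL);
    pthread_create(&b, NULL, worker_side, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);

    /* With the lock the count always returns to 0; without it the
     * unsynchronized ++/-- can race and leave a bogus value behind. */
    printf("surface_count = %d\n", surface_count);
    return 0;
}

Built with something like "cc -pthread sketch.c", it should always print
0; drop the lock/unlock pairs and the final count is no longer guaranteed,
which is the kind of corruption track_lock is meant to prevent for
guest_surfaces.count.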

diff --git a/hw/qxl.c b/hw/qxl.c
index def128d..9116c99 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -135,7 +135,12 @@ void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id,
 
 void qxl_spice_destroy_surface_wait(PCIQXLDevice *qxl, uint32_t id)
 {
+    qemu_mutex_lock(&qxl->track_lock);
+    PANIC_ON(id >= NUM_SURFACES);
     qxl->ssd.worker->destroy_surface_wait(qxl->ssd.worker, id);
+    qxl->guest_surfaces.cmds[id] = 0;
+    qxl->guest_surfaces.count--;
+    qemu_mutex_unlock(&qxl->track_lock);
 }
 
 void qxl_spice_loadvm_commands(PCIQXLDevice *qxl, struct QXLCommandExt *ext,
@@ -156,7 +161,11 @@ void qxl_spice_reset_memslots(PCIQXLDevice *qxl)
 
 void qxl_spice_destroy_surfaces(PCIQXLDevice *qxl)
 {
+    qemu_mutex_lock(&qxl->track_lock);
     qxl->ssd.worker->destroy_surfaces(qxl->ssd.worker);
+    memset(&qxl->guest_surfaces.cmds, 0, sizeof(qxl->guest_surfaces.cmds));
+    qxl->guest_surfaces.count = 0;
+    qemu_mutex_unlock(&qxl->track_lock);
 }
 
 void qxl_spice_reset_image_cache(PCIQXLDevice *qxl)
@@ -315,6 +324,7 @@ static void qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
         QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
         uint32_t id = le32_to_cpu(cmd->surface_id);
         PANIC_ON(id >= NUM_SURFACES);
+        qemu_mutex_lock(&qxl->track_lock);
         if (cmd->type == QXL_SURFACE_CMD_CREATE) {
             qxl->guest_surfaces.cmds[id] = ext->cmd.data;
             qxl->guest_surfaces.count++;
@@ -325,6 +335,7 @@ static void qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
             qxl->guest_surfaces.cmds[id] = 0;
             qxl->guest_surfaces.count--;
         }
+        qemu_mutex_unlock(&qxl->track_lock);
         break;
     }
     case QXL_CMD_CURSOR:
@@ -867,7 +878,6 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
     dprint(d, 1, "%s:\n", __FUNCTION__);
     d->mode = QXL_MODE_UNDEFINED;
     qxl_spice_destroy_surfaces(d);
-    memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds));
 }
 
 /* called from spice server thread context only */
@@ -1278,6 +1288,7 @@ static int qxl_init_common(PCIQXLDevice *qxl)
     qxl->generation = 1;
     qxl->num_memslots = NUM_MEMSLOTS;
     qxl->num_surfaces = NUM_SURFACES;
+    qemu_mutex_init(&qxl->track_lock);
 
     switch (qxl->revision) {
     case 1: /* spice 0.4 -- qxl-1 */
diff --git a/hw/qxl.h b/hw/qxl.h
index 489d518..087ef6b 100644
--- a/hw/qxl.h
+++ b/hw/qxl.h
@@ -55,6 +55,8 @@ typedef struct PCIQXLDevice {
     } guest_surfaces;
     QXLPHYSICAL        guest_cursor;
 
+    QemuMutex          track_lock;
+
     /* thread signaling */
     pthread_t          main;
     int                pipe[2];
-- 
1.7.5.4
