From: Gerd Hoffmann <kraxel@redhat.com>
To: qemu-devel@nongnu.org
Cc: Gerd Hoffmann <kraxel@redhat.com>
Subject: [Qemu-devel] [PATCH 3/4] spice: don't call displaystate callbacks from spice server context.
Date: Fri, 29 Apr 2011 11:38:31 +0200
Message-ID: <1304069912-21629-4-git-send-email-kraxel@redhat.com>
In-Reply-To: <1304069912-21629-1-git-send-email-kraxel@redhat.com>

This patch moves the displaystate callback calls which set the cursor
and the mouse pointer out of spice server context and into qemu
(iothread) context: the spice worker thread now only records the new
cursor and pointer state under ssd->lock, and the actual
cursor_define() and mouse_set() callbacks are invoked later from the
display refresh handler.  This allows us to simplify locking.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 hw/qxl-render.c    |   25 ++++++++++++-------------
 hw/qxl.c           |    2 ++
 ui/spice-display.c |   12 ++++++++++++
 ui/spice-display.h |    3 +++
 4 files changed, 29 insertions(+), 13 deletions(-)
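
Not part of the patch, but for review: the hand-off pattern this patch
switches to, reduced to a self-contained sketch.  The spice server
thread only records the new state under a lock; the iothread applies it
during the next display refresh.  The pthread scaffolding and the
record_pointer()/refresh() names are invented for this example; the
mouse_x/mouse_y == -1 "nothing pending" convention is the one the patch
uses.

/* standalone sketch of the deferred-callback pattern (illustrative only) */
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    int mouse_x, mouse_y;          /* -1, -1: no pointer update pending */
} State;

/* producer, would run in spice server context: record state only,
 * never call display callbacks from here */
static void record_pointer(State *s, int x, int y)
{
    pthread_mutex_lock(&s->lock);
    s->mouse_x = x;
    s->mouse_y = y;
    pthread_mutex_unlock(&s->lock);
}

/* consumer, would run in qemu iothread context (display refresh):
 * apply and clear any pending state */
static void refresh(State *s)
{
    pthread_mutex_lock(&s->lock);
    if (s->mouse_x != -1 && s->mouse_y != -1) {
        printf("mouse_set(%d, %d)\n", s->mouse_x, s->mouse_y);  /* callback */
        s->mouse_x = -1;
        s->mouse_y = -1;
    }
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    State s = { PTHREAD_MUTEX_INITIALIZER, -1, -1 };
    record_pointer(&s, 100, 50);   /* spice server thread side */
    refresh(&s);                   /* qemu iothread side */
    return 0;
}
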
diff --git a/hw/qxl-render.c b/hw/qxl-render.c
index 58965e0..1316066 100644
--- a/hw/qxl-render.c
+++ b/hw/qxl-render.c
@@ -185,7 +185,6 @@ void qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
     QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
     QXLCursor *cursor;
     QEMUCursor *c;
-    int x = -1, y = -1;
 
     if (!qxl->ssd.ds->mouse_set || !qxl->ssd.ds->cursor_define) {
         return;
@@ -198,8 +197,6 @@ void qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
     }
     switch (cmd->type) {
     case QXL_CURSOR_SET:
-        x = cmd->u.set.position.x;
-        y = cmd->u.set.position.y;
         cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
         if (cursor->chunk.data_size != cursor->data_size) {
             fprintf(stderr, "%s: multiple chunks\n", __FUNCTION__);
@@ -209,18 +206,20 @@ void qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
         if (c == NULL) {
             c = cursor_builtin_left_ptr();
         }
-        qemu_mutex_lock_iothread();
-        qxl->ssd.ds->cursor_define(c);
-        qxl->ssd.ds->mouse_set(x, y, 1);
-        qemu_mutex_unlock_iothread();
-        cursor_put(c);
+        qemu_mutex_lock(&qxl->ssd.lock);
+        if (qxl->ssd.cursor) {
+            cursor_put(qxl->ssd.cursor);
+        }
+        qxl->ssd.cursor = c;
+        qxl->ssd.mouse_x = cmd->u.set.position.x;
+        qxl->ssd.mouse_y = cmd->u.set.position.y;
+        qemu_mutex_unlock(&qxl->ssd.lock);
         break;
     case QXL_CURSOR_MOVE:
-        x = cmd->u.position.x;
-        y = cmd->u.position.y;
-        qemu_mutex_lock_iothread();
-        qxl->ssd.ds->mouse_set(x, y, 1);
-        qemu_mutex_unlock_iothread();
+        qemu_mutex_lock(&qxl->ssd.lock);
+        qxl->ssd.mouse_x = cmd->u.position.x;
+        qxl->ssd.mouse_y = cmd->u.position.y;
+        qemu_mutex_unlock(&qxl->ssd.lock);
         break;
     }
 }
diff --git a/hw/qxl.c b/hw/qxl.c
index bd250db..4dfddf0 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -1309,6 +1309,8 @@ static int qxl_init_primary(PCIDevice *dev)
                                  qxl_hw_screen_dump, qxl_hw_text_update, qxl);
     qxl->ssd.ds = vga->ds;
     qemu_mutex_init(&qxl->ssd.lock);
+    qxl->ssd.mouse_x = -1;
+    qxl->ssd.mouse_y = -1;
     qxl->ssd.bufsize = (16 * 1024 * 1024);
     qxl->ssd.buf = qemu_malloc(qxl->ssd.bufsize);
 
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 39c0ba1..1e1a35d 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -254,6 +254,16 @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
         ssd->update = qemu_spice_create_update(ssd);
         ssd->notify++;
     }
+    if (ssd->cursor) {
+        ssd->ds->cursor_define(ssd->cursor);
+        cursor_put(ssd->cursor);
+        ssd->cursor = NULL;
+    }
+    if (ssd->mouse_x != -1 && ssd->mouse_y != -1) {
+        ssd->ds->mouse_set(ssd->mouse_x, ssd->mouse_y, 1);
+        ssd->mouse_x = -1;
+        ssd->mouse_y = -1;
+    }
     qemu_mutex_unlock(&ssd->lock);
 
     if (ssd->notify) {
@@ -409,6 +419,8 @@ void qemu_spice_display_init(DisplayState *ds)
     assert(sdpy.ds == NULL);
     sdpy.ds = ds;
     qemu_mutex_init(&sdpy.lock);
+    sdpy.mouse_x = -1;
+    sdpy.mouse_y = -1;
     sdpy.bufsize = (16 * 1024 * 1024);
     sdpy.buf = qemu_malloc(sdpy.bufsize);
     register_displaychangelistener(ds, &display_listener);
diff --git a/ui/spice-display.h b/ui/spice-display.h
index e0cc46e..2f95f68 100644
--- a/ui/spice-display.h
+++ b/ui/spice-display.h
@@ -20,6 +20,7 @@
 #include <spice/qxl_dev.h>
 
 #include "qemu-thread.h"
+#include "console.h"
 #include "pflib.h"
 
 #define NUM_MEMSLOTS 8
@@ -55,6 +56,8 @@ struct SimpleSpiceDisplay {
      */
     QemuMutex lock;
     SimpleSpiceUpdate *update;
+    QEMUCursor *cursor;
+    int mouse_x, mouse_y;
 };
 
 struct SimpleSpiceUpdate {
--
1.7.1