From: Hans de Goede <hdegoede@redhat.com>
To: Alon Levy <alevy@redhat.com>
Cc: qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 3/4] qxl/spice: remove qemu_mutex_{un, }lock_iothread around dispatcher
Date: Wed, 16 Mar 2011 10:20:10 +0100 [thread overview]
Message-ID: <4D8080CA.6010804@redhat.com> (raw)
In-Reply-To: <1300220228-27423-4-git-send-email-alevy@redhat.com>
Ack (assuming the issues with the previous 2 patches are fixed):
Acked-by: Hans de Goede <hdegoede@redhat.com>
On 03/15/2011 09:17 PM, Alon Levy wrote:
> with the previous patch making sure get_command no longer needs to lock,
> there is no reason to drop the qemu iothread mutex in qxl.c and in
> ui/spice-display.c
>
> The only locations where the lock remains are the cursor-related callbacks;
> that path is currently broken. It is only triggered when running spice and sdl,
> which was already broken before this change.
> ---
> hw/qxl.c | 8 --------
> ui/spice-display.c | 11 ++---------
> 2 files changed, 2 insertions(+), 17 deletions(-)
>
> diff --git a/hw/qxl.c b/hw/qxl.c
> index 2419236..72f204b 100644
> --- a/hw/qxl.c
> +++ b/hw/qxl.c
> @@ -707,10 +707,8 @@ static void qxl_hard_reset(PCIQXLDevice *d, int loadvm)
> dprint(d, 1, "%s: start%s\n", __FUNCTION__,
> loadvm ? " (loadvm)" : "");
>
> - qemu_mutex_unlock_iothread();
> d->ssd.worker->reset_cursor(d->ssd.worker);
> d->ssd.worker->reset_image_cache(d->ssd.worker);
> - qemu_mutex_lock_iothread();
> qxl_reset_surfaces(d);
> qxl_reset_memslots(d);
>
> @@ -840,9 +838,7 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
> {
> dprint(d, 1, "%s:\n", __FUNCTION__);
> d->mode = QXL_MODE_UNDEFINED;
> - qemu_mutex_unlock_iothread();
> d->ssd.worker->destroy_surfaces(d->ssd.worker);
> - qemu_mutex_lock_iothread();
> memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds));
> }
>
> @@ -911,9 +907,7 @@ static void qxl_destroy_primary(PCIQXLDevice *d)
> dprint(d, 1, "%s\n", __FUNCTION__);
>
> d->mode = QXL_MODE_UNDEFINED;
> - qemu_mutex_unlock_iothread();
> d->ssd.worker->destroy_primary_surface(d->ssd.worker, 0);
> - qemu_mutex_lock_iothread();
> }
>
> static void qxl_set_mode(PCIQXLDevice *d, int modenr, int loadvm)
> @@ -983,10 +977,8 @@ static void ioport_write(void *opaque, uint32_t addr, uint32_t val)
> case QXL_IO_UPDATE_AREA:
> {
> QXLRect update = d->ram->update_area;
> - qemu_mutex_unlock_iothread();
> d->ssd.worker->update_area(d->ssd.worker, d->ram->update_surface,
> &update, NULL, 0, 0);
> - qemu_mutex_lock_iothread();
> break;
> }
> case QXL_IO_NOTIFY_CMD:
> diff --git a/ui/spice-display.c b/ui/spice-display.c
> index a9ecee0..f3dfba8 100644
> --- a/ui/spice-display.c
> +++ b/ui/spice-display.c
> @@ -78,9 +78,7 @@ SimpleSpiceUpdate *qemu_spice_create_update(SimpleSpiceDisplay *ssd)
> uint8_t *src, *dst;
> int by, bw, bh;
>
> - qemu_mutex_lock_iothread();
> if (qemu_spice_rect_is_empty(&ssd->dirty)) {
> - qemu_mutex_unlock_iothread();
> return NULL;
> };
>
> @@ -141,7 +139,6 @@ SimpleSpiceUpdate *qemu_spice_create_update(SimpleSpiceDisplay *ssd)
> cmd->data = (intptr_t)drawable;
>
> memset(&ssd->dirty, 0, sizeof(ssd->dirty));
> - qemu_mutex_unlock_iothread();
> return update;
> }
>
> @@ -169,6 +166,7 @@ void qemu_spice_create_host_memslot(SimpleSpiceDisplay *ssd)
> ssd->worker->add_memslot(ssd->worker,&memslot);
> }
>
> +/* called from iothread (main) or a vcpu-thread */
> void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd)
> {
> QXLDevSurfaceCreate surface;
> @@ -186,18 +184,14 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd)
> surface.mem = (intptr_t)ssd->buf;
> surface.group_id = MEMSLOT_GROUP_HOST;
>
> - qemu_mutex_unlock_iothread();
> ssd->worker->create_primary_surface(ssd->worker, 0,&surface);
> - qemu_mutex_lock_iothread();
> }
>
> void qemu_spice_destroy_host_primary(SimpleSpiceDisplay *ssd)
> {
> dprint(1, "%s:\n", __FUNCTION__);
>
> - qemu_mutex_unlock_iothread();
> ssd->worker->destroy_primary_surface(ssd->worker, 0);
> - qemu_mutex_lock_iothread();
> }
>
> void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
> @@ -207,9 +201,7 @@ void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
> if (running) {
> ssd->worker->start(ssd->worker);
> } else {
> - qemu_mutex_unlock_iothread();
> ssd->worker->stop(ssd->worker);
> - qemu_mutex_lock_iothread();
> }
> ssd->running = running;
> }
> @@ -233,6 +225,7 @@ void qemu_spice_display_update(SimpleSpiceDisplay *ssd,
> qemu_spice_rect_union(&ssd->dirty,&update_area);
> }
>
> +/* called only from iothread (main) */
> void qemu_spice_display_resize(SimpleSpiceDisplay *ssd)
> {
> dprint(1, "%s:\n", __FUNCTION__);
next prev parent reply other threads:[~2011-03-16 9:18 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-03-15 20:17 [Qemu-devel] [PATCH 0/4] qxl: implement vga mode without locks Alon Levy
2011-03-15 20:17 ` [Qemu-devel] [PATCH 1/4] qxl/spice-display: move pipe to ssd Alon Levy
2011-03-16 9:17 ` Hans de Goede
2011-03-15 20:17 ` [Qemu-devel] [PATCH 2/4] qxl: implement get_command in vga mode without locks Alon Levy
2011-03-16 9:18 ` Hans de Goede
2011-03-15 20:17 ` [Qemu-devel] [PATCH 3/4] qxl/spice: remove qemu_mutex_{un, }lock_iothread around dispatcher Alon Levy
2011-03-16 9:20 ` Hans de Goede [this message]
2011-03-15 20:17 ` [Qemu-devel] [PATCH 4/4] hw/qxl-render: drop cursor locks, add TODO's Alon Levy
2011-03-16 9:22 ` Hans de Goede
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4D8080CA.6010804@redhat.com \
--to=hdegoede@redhat.com \
--cc=alevy@redhat.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).