Date: Wed, 17 Oct 2012 17:32:33 +0800
From: Bing Bu Cao
To: Gerd Hoffmann
Cc: qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 4/4] spice: drop obsolete iothread locking
Message-ID: <507E7B31.3010905@gmail.com>
In-Reply-To: <1304069912-21629-5-git-send-email-kraxel@redhat.com>
Hi, Gerd

I found that this locking change has been reverted in the qemu-kvm
repository, and qemu-kvm on RHEL 6.3 with spice/qxl has a problem:

Doing some display configuration in the guest (such as changing the
resolution) can deadlock the guest, and meanwhile the host hangs for a
long while.

I wonder whether this patch should be brought back into qemu-kvm again?
Actually, I have no idea why it was added in QEMU but not in qemu-kvm. :)
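
To make the scenario concrete, below is a minimal, self-contained pthreads
sketch of the pattern I suspect (hypothetical names such as global_mutex,
worker_request and worker_thread; this is not QEMU or spice code): one
thread holds a global lock and waits synchronously for a worker, while the
worker needs that same lock to finish. With the temporary unlock around the
call (the old qxl pattern) it completes; built with -DDROP_UNLOCK it hangs,
which matches the behaviour I see when the locking calls are dropped while
the worker still takes the lock.

/* Hypothetical deadlock sketch -- not QEMU code.
 * Thread A (the "vcpu/iothread") holds global_mutex and waits for the
 * worker to finish a request.  The worker needs global_mutex to complete
 * the request.  Without the temporary unlock, both threads wait forever. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t req_lock     = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  req_cond     = PTHREAD_COND_INITIALIZER;
static bool req_pending, req_done;

/* Worker thread: takes global_mutex while servicing the request. */
static void *worker_thread(void *arg)
{
    pthread_mutex_lock(&req_lock);
    while (!req_pending)
        pthread_cond_wait(&req_cond, &req_lock);
    pthread_mutex_unlock(&req_lock);

    pthread_mutex_lock(&global_mutex);   /* blocks if the caller still holds it */
    /* ... touch shared state ... */
    pthread_mutex_unlock(&global_mutex);

    pthread_mutex_lock(&req_lock);
    req_done = true;
    pthread_cond_signal(&req_cond);
    pthread_mutex_unlock(&req_lock);
    return NULL;
}

/* Synchronous call into the worker, like d->ssd.worker->update_area(). */
static void worker_request(void)
{
    pthread_mutex_lock(&req_lock);
    req_pending = true;
    pthread_cond_signal(&req_cond);
    while (!req_done)
        pthread_cond_wait(&req_cond, &req_lock);
    pthread_mutex_unlock(&req_lock);
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, worker_thread, NULL);

    pthread_mutex_lock(&global_mutex);        /* "iothread" lock held */
#ifdef DROP_UNLOCK
    worker_request();                         /* deadlocks: worker needs the lock */
#else
    pthread_mutex_unlock(&global_mutex);      /* old pattern: release around the call */
    worker_request();
    pthread_mutex_lock(&global_mutex);
#endif
    pthread_mutex_unlock(&global_mutex);

    pthread_join(tid, NULL);
    printf("finished without deadlock\n");
    return 0;
}

Built with "gcc -pthread sketch.c" it prints and exits; built with
"gcc -pthread -DDROP_UNLOCK sketch.c" it hangs with each thread waiting on
the other, which is the hang I described above.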



On 04/29/2011 05:38 PM, Gerd Hoffmann wrote:
We don't use qemu internals from spice server context any more.
Thus we don't also need to grab the iothread mutex from spice
server context.  And we don't have to temporarily release the
lock to avoid deadlocks.  Drop all the calls.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 hw/qxl.c           |    8 --------
 ui/spice-display.c |    6 ------
 2 files changed, 0 insertions(+), 14 deletions(-)

diff --git a/hw/qxl.c b/hw/qxl.c
index 4dfddf0..2bb36c6 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -666,10 +666,8 @@ static void qxl_hard_reset(PCIQXLDevice *d, int loadvm)
     dprint(d, 1, "%s: start%s\n", __FUNCTION__,
            loadvm ? " (loadvm)" : "");
 
-    qemu_mutex_unlock_iothread();
     d->ssd.worker->reset_cursor(d->ssd.worker);
     d->ssd.worker->reset_image_cache(d->ssd.worker);
-    qemu_mutex_lock_iothread();
     qxl_reset_surfaces(d);
     qxl_reset_memslots(d);
 
@@ -799,9 +797,7 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
 {
     dprint(d, 1, "%s:\n", __FUNCTION__);
     d->mode = QXL_MODE_UNDEFINED;
-    qemu_mutex_unlock_iothread();
     d->ssd.worker->destroy_surfaces(d->ssd.worker);
-    qemu_mutex_lock_iothread();
     memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds));
 }
 
@@ -870,9 +866,7 @@ static void qxl_destroy_primary(PCIQXLDevice *d)
     dprint(d, 1, "%s\n", __FUNCTION__);
 
     d->mode = QXL_MODE_UNDEFINED;
-    qemu_mutex_unlock_iothread();
     d->ssd.worker->destroy_primary_surface(d->ssd.worker, 0);
-    qemu_mutex_lock_iothread();
 }
 
 static void qxl_set_mode(PCIQXLDevice *d, int modenr, int loadvm)
@@ -942,10 +936,8 @@ static void ioport_write(void *opaque, uint32_t addr, uint32_t val)
     case QXL_IO_UPDATE_AREA:
     {
         QXLRect update = d->ram->update_area;
-        qemu_mutex_unlock_iothread();
         d->ssd.worker->update_area(d->ssd.worker, d->ram->update_surface,
                                    &update, NULL, 0, 0);
-        qemu_mutex_lock_iothread();
         break;
     }
     case QXL_IO_NOTIFY_CMD:
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 1e1a35d..34201cd 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -176,18 +176,14 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd)
     surface.mem        = (intptr_t)ssd->buf;
     surface.group_id   = MEMSLOT_GROUP_HOST;
 
-    qemu_mutex_unlock_iothread();
     ssd->worker->create_primary_surface(ssd->worker, 0, &surface);
-    qemu_mutex_lock_iothread();
 }
 
 void qemu_spice_destroy_host_primary(SimpleSpiceDisplay *ssd)
 {
     dprint(1, "%s:\n", __FUNCTION__);
 
-    qemu_mutex_unlock_iothread();
     ssd->worker->destroy_primary_surface(ssd->worker, 0);
-    qemu_mutex_lock_iothread();
 }
 
 void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
@@ -197,9 +193,7 @@ void qemu_spice_vm_change_state_handler(void *opaque, int running, int reason)
     if (running) {
         ssd->worker->start(ssd->worker);
     } else {
-        qemu_mutex_unlock_iothread();
         ssd->worker->stop(ssd->worker);
-        qemu_mutex_lock_iothread();
     }
     ssd->running = running;
 }
