From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: dgilbert@redhat.com, Anna Melekhova <annam@virtuozzo.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	amit.shah@redhat.com, "Denis V. Lunev" <den@openvz.org>
Subject: [Qemu-devel] [PULL 3/3] migration: fix deadlock
Date: Thu, 15 Oct 2015 08:28:00 +0200	[thread overview]
Message-ID: <1444890480-6371-4-git-send-email-quintela@redhat.com> (raw)
In-Reply-To: <1444890480-6371-1-git-send-email-quintela@redhat.com>

From: "Denis V. Lunev" <den@openvz.org>

Release the QEMU global mutex before calling synchronize_rcu().
synchronize_rcu() waits for all readers to finish their critical
sections. There is at least one critical section in which we try to
take the QGM: it is in address_space_rw(), where prepare_mmio_access()
tries to acquire the QGM.
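
For reference, a minimal sketch of the two halves of that dependency,
expressed with QEMU's RCU and big-lock primitives. The function names
reader_side()/writer_side() are illustrative only and are not code from
this patch:

    /* Illustration only: shows the lock/RCU ordering, not patch code. */
    #include <glib.h>
    #include "qemu/rcu.h"
    #include "qemu/main-loop.h"

    /* vCPU thread, e.g. a KVM_EXIT_IO handler going through
     * address_space_rw(): */
    static void reader_side(void)
    {
        rcu_read_lock();               /* enter RCU read-side critical section */
        qemu_mutex_lock_iothread();    /* prepare_mmio_access() needs the QGM;
                                        * blocks while the main thread holds it */
        /* ... perform the MMIO access ... */
        qemu_mutex_unlock_iothread();
        rcu_read_unlock();
    }

    /* Main thread, already holding the QGM (migration cleanup bh): */
    static void writer_side(unsigned long *bitmap)
    {
        synchronize_rcu();             /* waits for reader_side() to leave its
                                        * critical section, which never happens
                                        * while it blocks on the QGM -> deadlock */
        g_free(bitmap);
    }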

Both functions (migration_end() and migration_bitmap_extend()) are
called from the main thread, which holds the QGM.

Thus there is a race condition that ends in a deadlock:
main thread     working thread
Lock QGM                |
|             Call KVM_EXIT_IO handler
|                       |
|        Open rcu reader's critical section
Migration cleanup bh    |
|                       |
synchronize_rcu() is    |
waiting for readers     |
|            prepare_mmio_access() is waiting for QGM
  \                   /
         deadlock

The patch changes the bitmap freeing from a direct g_free() after
synchronize_rcu() to freeing inside a call_rcu() callback.
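
Condensed, the pattern applied by the diff below looks as follows (this
is a paraphrase of the patch for readability, not a drop-in excerpt):

    #include <glib.h>
    #include "qemu/atomic.h"
    #include "qemu/rcu.h"

    static struct BitmapRcu {
        struct rcu_head rcu;
        unsigned long *bmap;
    } *migration_bitmap_rcu;

    static void migration_bitmap_free(struct BitmapRcu *bmap)
    {
        g_free(bmap->bmap);
        g_free(bmap);
    }

    /* Updater, runs with the QGM held: unpublish the old bitmap and let an
     * RCU callback free it once all readers have left their critical
     * sections.  No synchronize_rcu(), so the QGM is never held across a
     * wait for readers. */
    static void drop_bitmap(void)
    {
        struct BitmapRcu *old = migration_bitmap_rcu;

        atomic_rcu_set(&migration_bitmap_rcu, NULL);
        if (old) {
            call_rcu(old, migration_bitmap_free, rcu);
        }
    }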

Signed-off-by: Denis V. Lunev <den@openvz.org>
Reported-by: Igor Redko <redkoi@virtuozzo.com>
Tested-by: Igor Redko <redkoi@virtuozzo.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>

CC: Anna Melekhova <annam@virtuozzo.com>
CC: Juan Quintela <quintela@redhat.com>
CC: Amit Shah <amit.shah@redhat.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Wen Congyang <wency@cn.fujitsu.com>
---
 migration/ram.c | 44 +++++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 17 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 2d1d0b9..a25bcc7 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -219,7 +219,6 @@ static RAMBlock *last_seen_block;
 /* This is the last block from where we have sent data */
 static RAMBlock *last_sent_block;
 static ram_addr_t last_offset;
-static unsigned long *migration_bitmap;
 static QemuMutex migration_bitmap_mutex;
 static uint64_t migration_dirty_pages;
 static uint32_t last_version;
@@ -236,6 +235,11 @@ struct PageSearchStatus {
 };
 typedef struct PageSearchStatus PageSearchStatus;

+static struct BitmapRcu {
+    struct rcu_head rcu;
+    unsigned long *bmap;
+} *migration_bitmap_rcu;
+
 struct CompressParam {
     bool start;
     bool done;
@@ -540,7 +544,7 @@ ram_addr_t migration_bitmap_find_and_reset_dirty(RAMBlock *rb,

     unsigned long next;

-    bitmap = atomic_rcu_read(&migration_bitmap);
+    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
     if (ram_bulk_stage && nr > base) {
         next = nr + 1;
     } else {
@@ -558,7 +562,7 @@ ram_addr_t migration_bitmap_find_and_reset_dirty(RAMBlock *rb,
 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
 {
     unsigned long *bitmap;
-    bitmap = atomic_rcu_read(&migration_bitmap);
+    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
     migration_dirty_pages +=
         cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
 }
@@ -1090,17 +1094,22 @@ void free_xbzrle_decoded_buf(void)
     xbzrle_decoded_buf = NULL;
 }

+static void migration_bitmap_free(struct BitmapRcu *bmap)
+{
+    g_free(bmap->bmap);
+    g_free(bmap);
+}
+
 static void migration_end(void)
 {
     /* caller have hold iothread lock or is in a bh, so there is
      * no writing race against this migration_bitmap
      */
-    unsigned long *bitmap = migration_bitmap;
-    atomic_rcu_set(&migration_bitmap, NULL);
+    struct BitmapRcu *bitmap = migration_bitmap_rcu;
+    atomic_rcu_set(&migration_bitmap_rcu, NULL);
     if (bitmap) {
         memory_global_dirty_log_stop();
-        synchronize_rcu();
-        g_free(bitmap);
+        call_rcu(bitmap, migration_bitmap_free, rcu);
     }

     XBZRLE_cache_lock();
@@ -1136,9 +1145,10 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
     /* called in qemu main thread, so there is
      * no writing race against this migration_bitmap
      */
-    if (migration_bitmap) {
-        unsigned long *old_bitmap = migration_bitmap, *bitmap;
-        bitmap = bitmap_new(new);
+    if (migration_bitmap_rcu) {
+        struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
+        bitmap = g_new(struct BitmapRcu, 1);
+        bitmap->bmap = bitmap_new(new);

         /* prevent migration_bitmap content from being set bit
          * by migration_bitmap_sync_range() at the same time.
@@ -1146,13 +1156,12 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
          * at the same time.
          */
         qemu_mutex_lock(&migration_bitmap_mutex);
-        bitmap_copy(bitmap, old_bitmap, old);
-        bitmap_set(bitmap, old, new - old);
-        atomic_rcu_set(&migration_bitmap, bitmap);
+        bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
+        bitmap_set(bitmap->bmap, old, new - old);
+        atomic_rcu_set(&migration_bitmap_rcu, bitmap);
         qemu_mutex_unlock(&migration_bitmap_mutex);
         migration_dirty_pages += new - old;
-        synchronize_rcu();
-        g_free(old_bitmap);
+        call_rcu(old_bitmap, migration_bitmap_free, rcu);
     }
 }

@@ -1210,8 +1219,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     reset_ram_globals();

     ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
-    migration_bitmap = bitmap_new(ram_bitmap_pages);
-    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);
+    migration_bitmap_rcu = g_new(struct BitmapRcu, 1);
+    migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
+    bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);

     /*
      * Count the total number of pages used by ram blocks not including any
-- 
2.4.3

Thread overview: 9+ messages
2015-10-15  6:27 [Qemu-devel] [PULL 0/3] Migration pull Juan Quintela
2015-10-15  6:27 ` [Qemu-devel] [PULL 1/3] Migration: Generate the completed event only when we complete Juan Quintela
2015-10-15 14:46   ` Eric Blake
2015-10-15 15:07   ` Christian Borntraeger
2015-10-15 16:12     ` Dr. David Alan Gilbert
2015-10-16  8:13     ` Juan Quintela
2015-10-15  6:27 ` [Qemu-devel] [PULL 2/3] migration: announce VM's new home just before VM is runnable Juan Quintela
2015-10-15  6:28 ` Juan Quintela [this message]
2015-10-16 15:23 ` [Qemu-devel] [PULL 0/3] Migration pull Peter Maydell
