From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Kevin Wolf" <kwolf@redhat.com>,
	"Peter Maydell" <peter.maydell@linaro.org>,
	"Ming Lei" <ming.lei@canonical.com>,
	"Marcin Gibuła" <m.gibula@beyond.pl>,
	"Stefan Hajnoczi" <stefanha@redhat.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>
Subject: [Qemu-devel] [PULL 28/35] linux-aio: avoid deadlock in nested aio_poll() calls
Date: Fri, 29 Aug 2014 17:29:56 +0100	[thread overview]
Message-ID: <1409329803-20744-29-git-send-email-stefanha@redhat.com> (raw)
In-Reply-To: <1409329803-20744-1-git-send-email-stefanha@redhat.com>

If two Linux AIO request completions are fetched in the same
io_getevents() call, QEMU will deadlock if request A's callback waits
for request B to complete using an aio_poll() loop: B's completion
event is stranded in the outer handler's on-stack events[] array where
the nested event loop cannot see it.  This was reported to happen with
the mirror blockjob.

This patch moves completion processing into a bottom half (BH) and
makes it resumable.
Nested event loops can resume completion processing so that request B
will complete and the deadlock will not occur.

Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Ming Lei <ming.lei@canonical.com>
Cc: Marcin Gibuła <m.gibula@beyond.pl>
Reported-by: Marcin Gibuła <m.gibula@beyond.pl>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Marcin Gibuła <m.gibula@beyond.pl>
---
 block/linux-aio.c | 71 ++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 55 insertions(+), 16 deletions(-)
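
For illustration, here is a minimal sketch of the caller pattern that
triggers the bug (not taken from the QEMU tree; request_a_cb and the
request_b_done flag are hypothetical names): request A's callback runs a
nested event loop until request B finishes.

    #include <stdbool.h>
    #include "block/aio.h"   /* aio_poll(), qemu_get_aio_context() */

    /* Hypothetical completion callback for "request A"; the opaque pointer
     * is assumed to point at a flag that request B's own callback sets.
     * Spinning in a nested aio_poll() loop like this hangs when B's
     * completion was fetched by the same io_getevents() call that
     * delivered A's.
     */
    static void request_a_cb(void *opaque, int ret)
    {
        bool *request_b_done = opaque;

        if (ret < 0) {
            return; /* error handling elided */
        }

        while (!*request_b_done) {
            aio_poll(qemu_get_aio_context(), true);  /* nested event loop */
        }
    }

With this patch, the nested aio_poll() runs the rescheduled completion BH,
so request B's callback still fires and the loop terminates.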

diff --git a/block/linux-aio.c b/block/linux-aio.c
index 7ac7e8c..9aca758 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -51,6 +51,12 @@ struct qemu_laio_state {
 
     /* io queue for submit at batch */
     LaioQueue io_q;
+
+    /* I/O completion processing */
+    QEMUBH *completion_bh;
+    struct io_event events[MAX_EVENTS];
+    int event_idx;
+    int event_max;
 };
 
 static inline ssize_t io_event_ret(struct io_event *ev)
@@ -86,27 +92,58 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s,
     qemu_aio_release(laiocb);
 }
 
-static void qemu_laio_completion_cb(EventNotifier *e)
+/* The completion BH fetches completed I/O requests and invokes their
+ * callbacks.
+ *
+ * The function is somewhat tricky because it supports nested event loops, for
+ * example when a request callback invokes aio_poll().  In order to do this,
+ * the completion events array and index are kept in qemu_laio_state.  The BH
+ * reschedules itself as long as there are completions pending so it will
+ * either be called again in a nested event loop or will be called after all
+ * events have been completed.  When there are no events left to complete, the
+ * BH returns without rescheduling.
+ */
+static void qemu_laio_completion_bh(void *opaque)
 {
-    struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);
-
-    while (event_notifier_test_and_clear(&s->e)) {
-        struct io_event events[MAX_EVENTS];
-        struct timespec ts = { 0 };
-        int nevents, i;
+    struct qemu_laio_state *s = opaque;
 
+    /* Fetch more completion events when empty */
+    if (s->event_idx == s->event_max) {
         do {
-            nevents = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS, events, &ts);
-        } while (nevents == -EINTR);
+            struct timespec ts = { 0 };
+            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
+                                        s->events, &ts);
+        } while (s->event_max == -EINTR);
+
+        s->event_idx = 0;
+        if (s->event_max <= 0) {
+            s->event_max = 0;
+            return; /* no more events */
+        }
+    }
 
-        for (i = 0; i < nevents; i++) {
-            struct iocb *iocb = events[i].obj;
-            struct qemu_laiocb *laiocb =
-                    container_of(iocb, struct qemu_laiocb, iocb);
+    /* Reschedule so nested event loops see currently pending completions */
+    qemu_bh_schedule(s->completion_bh);
 
-            laiocb->ret = io_event_ret(&events[i]);
-            qemu_laio_process_completion(s, laiocb);
-        }
+    /* Process completion events */
+    while (s->event_idx < s->event_max) {
+        struct iocb *iocb = s->events[s->event_idx].obj;
+        struct qemu_laiocb *laiocb =
+                container_of(iocb, struct qemu_laiocb, iocb);
+
+        laiocb->ret = io_event_ret(&s->events[s->event_idx]);
+        s->event_idx++;
+
+        qemu_laio_process_completion(s, laiocb);
+    }
+}
+
+static void qemu_laio_completion_cb(EventNotifier *e)
+{
+    struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);
+
+    if (event_notifier_test_and_clear(&s->e)) {
+        qemu_bh_schedule(s->completion_bh);
     }
 }
 
@@ -272,12 +309,14 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
     struct qemu_laio_state *s = s_;
 
     aio_set_event_notifier(old_context, &s->e, NULL);
+    qemu_bh_delete(s->completion_bh);
 }
 
 void laio_attach_aio_context(void *s_, AioContext *new_context)
 {
     struct qemu_laio_state *s = s_;
 
+    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
     aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
 }
 
-- 
1.9.3

