From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: famz@redhat.com, stefanha@redhat.com
Subject: [Qemu-devel] [PATCH 11/18] aio: push aio_context_acquire/release down to dispatching
Date: Wed,  1 Feb 2017 04:05:26 -0800
Message-ID: <20170201120533.13838-12-pbonzini@redhat.com>
In-Reply-To: <20170201120533.13838-1-pbonzini@redhat.com>

The AioContext data structures are now protected by list_lock and/or
walked with FOREACH_RCU primitives, so there is no longer any need to
hold the AioContext lock for the entire duration of aio_dispatch.
Instead, acquire it just around each callback invocation.  The next
step is to push it further down, into the individual callbacks.
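
The resulting shape of the dispatch loop is roughly the following (a
minimal sketch, simplified from aio_dispatch_handlers() in
util/aio-posix.c; not the exact code in this patch):

    /* Walk the handler list under RCU; the AioContext lock is taken
     * only around the callback itself, not around the whole walk.
     */
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_read) {
            aio_context_acquire(ctx);
            node->io_read(node->opaque);
            aio_context_release(ctx);
        }
    }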

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 util/aio-posix.c | 25 +++++++++++--------------
 util/aio-win32.c | 15 +++++++--------
 util/async.c     |  2 ++
 3 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index a8d7090..b590c5a 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -402,7 +402,9 @@ static bool aio_dispatch_handlers(AioContext *ctx)
             (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
             aio_node_check(ctx, node->is_external) &&
             node->io_read) {
+            aio_context_acquire(ctx);
             node->io_read(node->opaque);
+            aio_context_release(ctx);
 
             /* aio_notify() does not count as progress */
             if (node->opaque != &ctx->notifier) {
@@ -413,7 +415,9 @@ static bool aio_dispatch_handlers(AioContext *ctx)
             (revents & (G_IO_OUT | G_IO_ERR)) &&
             aio_node_check(ctx, node->is_external) &&
             node->io_write) {
+            aio_context_acquire(ctx);
             node->io_write(node->opaque);
+            aio_context_release(ctx);
             progress = true;
         }
 
@@ -450,7 +454,9 @@ bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
     }
 
     /* Run our timers */
+    aio_context_acquire(ctx);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
+    aio_context_release(ctx);
 
     return progress;
 }
@@ -597,9 +603,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int64_t timeout;
     int64_t start = 0;
 
-    aio_context_acquire(ctx);
-    progress = false;
-
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
@@ -617,9 +620,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
         start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     }
 
-    if (try_poll_mode(ctx, blocking)) {
-        progress = true;
-    } else {
+    aio_context_acquire(ctx);
+    progress = try_poll_mode(ctx, blocking);
+    aio_context_release(ctx);
+
+    if (!progress) {
         assert(npfd == 0);
 
         /* fill pollfds */
@@ -636,9 +641,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
         timeout = blocking ? aio_compute_timeout(ctx) : 0;
 
         /* wait until next event */
-        if (timeout) {
-            aio_context_release(ctx);
-        }
         if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
             AioHandler epoll_handler;
 
@@ -650,9 +652,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
         } else  {
             ret = qemu_poll_ns(pollfds, npfd, timeout);
         }
-        if (timeout) {
-            aio_context_acquire(ctx);
-        }
     }
 
     if (blocking) {
@@ -717,8 +716,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
         progress = true;
     }
 
-    aio_context_release(ctx);
-
     return progress;
 }
 
diff --git a/util/aio-win32.c b/util/aio-win32.c
index 900524c..ab6d0e5 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -266,7 +266,9 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
             (revents || event_notifier_get_handle(node->e) == event) &&
             node->io_notify) {
             node->pfd.revents = 0;
+            aio_context_acquire(ctx);
             node->io_notify(node->e);
+            aio_context_release(ctx);
 
             /* aio_notify() does not count as progress */
             if (node->e != &ctx->notifier) {
@@ -278,11 +280,15 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
             (node->io_read || node->io_write)) {
             node->pfd.revents = 0;
             if ((revents & G_IO_IN) && node->io_read) {
+                aio_context_acquire(ctx);
                 node->io_read(node->opaque);
+                aio_context_release(ctx);
                 progress = true;
             }
             if ((revents & G_IO_OUT) && node->io_write) {
+                aio_context_acquire(ctx);
                 node->io_write(node->opaque);
+                aio_context_release(ctx);
                 progress = true;
             }
 
@@ -329,7 +335,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
-    aio_context_acquire(ctx);
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -371,17 +376,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
         timeout = blocking && !have_select_revents
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
-        if (timeout) {
-            aio_context_release(ctx);
-        }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
         if (blocking) {
             assert(first);
             atomic_sub(&ctx->notify_me, 2);
         }
-        if (timeout) {
-            aio_context_acquire(ctx);
-        }
 
         if (first) {
             aio_notify_accept(ctx);
@@ -404,8 +403,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
         progress |= aio_dispatch_handlers(ctx, event);
     } while (count > 0);
 
+    aio_context_acquire(ctx);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
-
     aio_context_release(ctx);
     return progress;
 }
diff --git a/util/async.c b/util/async.c
index 44c9c3b..8e65e4b 100644
--- a/util/async.c
+++ b/util/async.c
@@ -114,7 +114,9 @@ int aio_bh_poll(AioContext *ctx)
                 ret = 1;
             }
             bh->idle = 0;
+            aio_context_acquire(ctx);
             aio_bh_call(bh);
+            aio_context_release(ctx);
         }
         if (bh->deleted) {
             deleted = true;
-- 
2.9.3
