From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
	qemu-block@nongnu.org, pbonzini@redhat.com,
	Hanna Reitz <hreitz@redhat.com>, Fam Zheng <fam@euphon.net>,
	Fiona Ebner <f.ebner@proxmox.com>,
	Stefan Hajnoczi <stefanha@redhat.com>
Subject: [RFC 1/3] aio-posix: run aio_set_fd_handler() in target AioContext
Date: Wed, 13 Dec 2023 16:15:42 -0500
Message-ID: <20231213211544.1601971-2-stefanha@redhat.com>
In-Reply-To: <20231213211544.1601971-1-stefanha@redhat.com>

TODO
- What about Windows?

Most of the event loop code runs in the AioContext's home thread. The
exceptions are aio_notify(), aio_bh_schedule_oneshot(),
aio_set_fd_handler(), etc. Amongst them, aio_set_fd_handler() is the
most complicated because the aio_handlers list must be both thread-safe
and able to cope with nested aio_poll() calls.
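
As a reminder of the invariant, here is a minimal sketch of how code
typically dispatches on the home thread. in_aio_context_home_thread()
is the real helper from include/block/aio.h, while dispatch_work() and
do_work() are made-up names for illustration:

    #include "qemu/osdep.h"
    #include "block/aio.h"

    static void do_work(AioContext *ctx);  /* hypothetical payload */

    static void dispatch_work(AioContext *ctx)
    {
        if (in_aio_context_home_thread(ctx)) {
            /* In ctx's home thread: touch event loop state directly. */
            do_work(ctx);
        } else {
            /* Must hand the work over to ctx's home thread, e.g. via a BH. */
        }
    }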

This patch eliminates the multi-threading concerns by moving the actual
work into a BH in the AioContext's home thread.
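
The handoff uses the familiar schedule-a-BH-and-wait pattern. A minimal
sketch of that pattern follows, with a hypothetical WorkData/work_bh()
payload standing in for the real aio_set_fd_handler_remote() below:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"
    #include "block/aio.h"

    typedef struct {
        QemuEvent done;
        /* ... arguments for the deferred work ... */
    } WorkData;

    static void work_bh(void *opaque)
    {
        WorkData *data = opaque;

        /* Runs in the AioContext's home thread, so no locking is needed. */
        qemu_event_set(&data->done);  /* wake the requesting thread */
    }

    static void run_in_home_thread(AioContext *ctx)
    {
        WorkData data;

        qemu_event_init(&data.done, false);
        aio_bh_schedule_oneshot(ctx, work_bh, &data);
        qemu_event_wait(&data.done);  /* data stays alive until the BH ran */
        qemu_event_destroy(&data.done);
    }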

This change is required to call the AioHandler's io_poll_end() callback
from the AioContext's home thread in a later patch.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/aio-posix.c | 106 +++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 99 insertions(+), 7 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index 7f2c99729d..c5e944f30b 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -97,13 +97,14 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
     return true;
 }
 
-void aio_set_fd_handler(AioContext *ctx,
-                        int fd,
-                        IOHandler *io_read,
-                        IOHandler *io_write,
-                        AioPollFn *io_poll,
-                        IOHandler *io_poll_ready,
-                        void *opaque)
+/* Perform aio_set_fd_handler() in this thread's AioContext */
+static void aio_set_fd_handler_local(AioContext *ctx,
+                                     int fd,
+                                     IOHandler *io_read,
+                                     IOHandler *io_write,
+                                     AioPollFn *io_poll,
+                                     IOHandler *io_poll_ready,
+                                     void *opaque)
 {
     AioHandler *node;
     AioHandler *new_node = NULL;
@@ -178,6 +179,97 @@ void aio_set_fd_handler(AioContext *ctx,
     }
 }
 
+typedef struct {
+    AioContext *ctx;
+    int fd;
+    IOHandler *io_read;
+    IOHandler *io_write;
+    AioPollFn *io_poll;
+    IOHandler *io_poll_ready;
+    void *opaque;
+    QemuEvent done;
+} AioSetFdHandlerRemote;
+
+static void aio_set_fd_handler_remote_bh(void *opaque)
+{
+    AioSetFdHandlerRemote *data = opaque;
+
+    aio_set_fd_handler_local(data->ctx, data->fd, data->io_read,
+                             data->io_write, data->io_poll,
+                             data->io_poll_ready, data->opaque);
+    qemu_event_set(&data->done);
+}
+
+/* Perform aio_set_fd_handler() in another thread's AioContext */
+static void aio_set_fd_handler_remote(AioContext *ctx,
+                                      int fd,
+                                      IOHandler *io_read,
+                                      IOHandler *io_write,
+                                      AioPollFn *io_poll,
+                                      IOHandler *io_poll_ready,
+                                      void *opaque)
+{
+    AioSetFdHandlerRemote data = {
+        .ctx = ctx,
+        .fd = fd,
+        .io_read = io_read,
+        .io_write = io_write,
+        .io_poll = io_poll,
+        .io_poll_ready = io_poll_ready,
+        .opaque = opaque,
+    };
+
+    /*
+     * Arbitrary threads waiting for each other can deadlock, so only allow
+     * cross-thread aio_set_fd_handler() when the BQL is held.
+     */
+    assert(qemu_in_main_thread());
+
+    qemu_event_init(&data.done, false);
+
+    aio_bh_schedule_oneshot(ctx, aio_set_fd_handler_remote_bh, &data);
+
+    /*
+     * The BQL is not dropped when run from the main loop thread so the
+     * assumption is that this wait is fast.
+     */
+    qemu_event_wait(&data.done);
+
+    qemu_event_destroy(&data.done);
+}
+
+void aio_set_fd_handler(AioContext *ctx,
+                        int fd,
+                        IOHandler *io_read,
+                        IOHandler *io_write,
+                        AioPollFn *io_poll,
+                        IOHandler *io_poll_ready,
+                        void *opaque)
+{
+    /*
+     * Special case for ctx->notifier: it's not possible to rely on
+     * in_aio_context_home_thread() or iohandler_get_aio_context() below when
+     * aio_context_new() calls aio_set_fd_handler() on ctx->notifier.
+     * qemu_set_current_aio_context() and iohandler_ctx haven't been set up yet
+     * at this point. Treat ctx as local when dealing with ctx->notifier.
+     */
+    bool is_ctx_notifier = fd == event_notifier_get_fd(&ctx->notifier);
+
+    /*
+     * iohandler_ctx is special in that it runs in the main thread, but that
+     * thread's context is qemu_aio_context.
+     */
+    if (is_ctx_notifier ||
+        in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
+                                   qemu_get_aio_context() : ctx)) {
+        aio_set_fd_handler_local(ctx, fd, io_read, io_write, io_poll,
+                                 io_poll_ready, opaque);
+    } else {
+        aio_set_fd_handler_remote(ctx, fd, io_read, io_write, io_poll,
+                                  io_poll_ready, opaque);
+    }
+}
+
 static void aio_set_fd_poll(AioContext *ctx, int fd,
                             IOHandler *io_poll_begin,
                             IOHandler *io_poll_end)
-- 
2.43.0
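
For reference, a cross-thread caller of the new code path could look
like this. This is a sketch assuming an existing IOThread; event_fd,
handle_read() and my_opaque are hypothetical names:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "sysemu/iothread.h"

    /* Hypothetical caller: runs in the main loop thread with the BQL held. */
    static void watch_event_fd(IOThread *iothread, int event_fd,
                               IOHandler *handle_read, void *my_opaque)
    {
        AioContext *ctx = iothread_get_aio_context(iothread);

        /*
         * When ctx belongs to another thread, the registration is now
         * forwarded to that thread via a BH instead of mutating the
         * aio_handlers list from here.
         */
        aio_set_fd_handler(ctx, event_fd,
                           handle_read, /* io_read */
                           NULL,        /* io_write */
                           NULL,        /* io_poll */
                           NULL,        /* io_poll_ready */
                           my_opaque);
    }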




Thread overview: 30+ messages
2023-12-13 21:15 [RFC 0/3] aio-posix: call ->poll_end() when removing AioHandler Stefan Hajnoczi
2023-12-13 21:15 ` Stefan Hajnoczi [this message]
2023-12-13 21:15 ` [RFC 2/3] aio: use counter instead of ctx->list_lock Stefan Hajnoczi
2023-12-13 21:15 ` [RFC 3/3] aio-posix: call ->poll_end() when removing AioHandler Stefan Hajnoczi
2023-12-13 21:52   ` Paolo Bonzini
2023-12-14 20:12     ` Stefan Hajnoczi
2023-12-14 20:39       ` Paolo Bonzini
2023-12-18 14:27         ` Stefan Hajnoczi
2023-12-13 21:52 ` [RFC 0/3] " Stefan Hajnoczi
2023-12-13 23:10 ` Paolo Bonzini
2023-12-14 19:52   ` Stefan Hajnoczi
2023-12-14 13:38 ` Fiona Ebner
2023-12-14 19:53   ` Stefan Hajnoczi
2023-12-18 12:41     ` Fiona Ebner
2023-12-18 14:25       ` Stefan Hajnoczi
2023-12-18 14:49       ` Paolo Bonzini
2023-12-19  8:40         ` Fiona Ebner
2024-01-02 15:24 ` Hanna Czenczek
2024-01-02 15:53   ` Paolo Bonzini
2024-01-02 16:55     ` Hanna Czenczek
2024-01-03 11:40   ` Fiona Ebner
2024-01-03 13:35     ` Paolo Bonzini
2024-01-05 13:43       ` Fiona Ebner
2024-01-05 14:30         ` Fiona Ebner
2024-01-22 17:41           ` Hanna Czenczek
2024-01-22 17:52             ` Hanna Czenczek
2024-01-23 11:12               ` Fiona Ebner
2024-01-23 11:25                 ` Hanna Czenczek
2024-01-23 11:15               ` Hanna Czenczek
2024-01-23 16:28   ` Hanna Czenczek
