* [PATCH v2 1/4] block: rename blk_io_plug_call() API to defer_call()
2023-08-17 15:58 [PATCH v2 0/4] virtio-blk: use blk_io_plug_call() instead of notification BH Stefan Hajnoczi
@ 2023-08-17 15:58 ` Stefan Hajnoczi
2023-08-18 8:33 ` Philippe Mathieu-Daudé
2023-08-18 11:06 ` Paul Durrant
2023-08-17 15:58 ` [PATCH v2 2/4] util/defer-call: move defer_call() to util/ Stefan Hajnoczi
` (2 subsequent siblings)
3 siblings, 2 replies; 12+ messages in thread
From: Stefan Hajnoczi @ 2023-08-17 15:58 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-block, Stefan Hajnoczi, Fam Zheng, xen-devel, Anthony Perard,
Hanna Reitz, Kevin Wolf, Julia Suvorova, Stefano Garzarella,
Paolo Bonzini, Aarushi Mehta, Philippe Mathieu-Daudé,
Michael S. Tsirkin, Stefano Stabellini, Paul Durrant,
Ilya Maximets
Prepare to move the blk_io_plug_call() API out of the block layer so
that other subsystems can use this deferred call mechanism. Rename it
to defer_call() but leave the code in block/plug.c.
The next commit will move the code out of the block layer.
Suggested-by: Ilya Maximets <i.maximets@ovn.org>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/sysemu/block-backend-io.h | 6 +-
block/blkio.c | 8 +--
block/io_uring.c | 4 +-
block/linux-aio.c | 4 +-
block/nvme.c | 4 +-
block/plug.c | 109 +++++++++++++++---------------
hw/block/dataplane/xen-block.c | 10 +--
hw/block/virtio-blk.c | 4 +-
hw/scsi/virtio-scsi.c | 6 +-
9 files changed, 76 insertions(+), 79 deletions(-)
diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h
index be4dcef59d..cfcfd85c1d 100644
--- a/include/sysemu/block-backend-io.h
+++ b/include/sysemu/block-backend-io.h
@@ -100,9 +100,9 @@ void blk_iostatus_set_err(BlockBackend *blk, int error);
int blk_get_max_iov(BlockBackend *blk);
int blk_get_max_hw_iov(BlockBackend *blk);
-void blk_io_plug(void);
-void blk_io_unplug(void);
-void blk_io_plug_call(void (*fn)(void *), void *opaque);
+void defer_call_begin(void);
+void defer_call_end(void);
+void defer_call(void (*fn)(void *), void *opaque);
AioContext *blk_get_aio_context(BlockBackend *blk);
BlockAcctStats *blk_get_stats(BlockBackend *blk);
diff --git a/block/blkio.c b/block/blkio.c
index 1dd495617c..7cf6d61f47 100644
--- a/block/blkio.c
+++ b/block/blkio.c
@@ -312,10 +312,10 @@ static void blkio_detach_aio_context(BlockDriverState *bs)
}
/*
- * Called by blk_io_unplug() or immediately if not plugged. Called without
- * blkio_lock.
+ * Called by defer_call_end() or immediately if not in a deferred section.
+ * Called without blkio_lock.
*/
-static void blkio_unplug_fn(void *opaque)
+static void blkio_deferred_fn(void *opaque)
{
BDRVBlkioState *s = opaque;
@@ -332,7 +332,7 @@ static void blkio_submit_io(BlockDriverState *bs)
{
BDRVBlkioState *s = bs->opaque;
- blk_io_plug_call(blkio_unplug_fn, s);
+ defer_call(blkio_deferred_fn, s);
}
static int coroutine_fn
diff --git a/block/io_uring.c b/block/io_uring.c
index 69d9820928..8429f341be 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -306,7 +306,7 @@ static void ioq_init(LuringQueue *io_q)
io_q->blocked = false;
}
-static void luring_unplug_fn(void *opaque)
+static void luring_deferred_fn(void *opaque)
{
LuringState *s = opaque;
trace_luring_unplug_fn(s, s->io_q.blocked, s->io_q.in_queue,
@@ -367,7 +367,7 @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
return ret;
}
- blk_io_plug_call(luring_unplug_fn, s);
+ defer_call(luring_deferred_fn, s);
}
return 0;
}
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 561c71a9ae..9a08219db0 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -353,7 +353,7 @@ static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
return max_batch;
}
-static void laio_unplug_fn(void *opaque)
+static void laio_deferred_fn(void *opaque)
{
LinuxAioState *s = opaque;
@@ -393,7 +393,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) {
ioq_submit(s);
} else {
- blk_io_plug_call(laio_unplug_fn, s);
+ defer_call(laio_deferred_fn, s);
}
}
diff --git a/block/nvme.c b/block/nvme.c
index b6e95f0b7e..dfbd1085fd 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -476,7 +476,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
}
}
-static void nvme_unplug_fn(void *opaque)
+static void nvme_deferred_fn(void *opaque)
{
NVMeQueuePair *q = opaque;
@@ -503,7 +503,7 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
q->need_kick++;
qemu_mutex_unlock(&q->lock);
- blk_io_plug_call(nvme_unplug_fn, q);
+ defer_call(nvme_deferred_fn, q);
}
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
diff --git a/block/plug.c b/block/plug.c
index 98a155d2f4..f26173559c 100644
--- a/block/plug.c
+++ b/block/plug.c
@@ -1,24 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Block I/O plugging
+ * Deferred calls
*
* Copyright Red Hat.
*
- * This API defers a function call within a blk_io_plug()/blk_io_unplug()
+ * This API defers a function call within a defer_call_begin()/defer_call_end()
* section, allowing multiple calls to batch up. This is a performance
* optimization that is used in the block layer to submit several I/O requests
* at once instead of individually:
*
- * blk_io_plug(); <-- start of plugged region
+ * defer_call_begin(); <-- start of section
* ...
- * blk_io_plug_call(my_func, my_obj); <-- deferred my_func(my_obj) call
- * blk_io_plug_call(my_func, my_obj); <-- another
- * blk_io_plug_call(my_func, my_obj); <-- another
+ * defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
+ * defer_call(my_func, my_obj); <-- another
+ * defer_call(my_func, my_obj); <-- another
* ...
- * blk_io_unplug(); <-- end of plugged region, my_func(my_obj) is called once
- *
- * This code is actually generic and not tied to the block layer. If another
- * subsystem needs this functionality, it could be renamed.
+ * defer_call_end(); <-- end of section, my_func(my_obj) is called once
*/
#include "qemu/osdep.h"
@@ -27,66 +24,66 @@
#include "qemu/thread.h"
#include "sysemu/block-backend.h"
-/* A function call that has been deferred until unplug() */
+/* A function call that has been deferred until defer_call_end() */
typedef struct {
void (*fn)(void *);
void *opaque;
-} UnplugFn;
+} DeferredCall;
/* Per-thread state */
typedef struct {
- unsigned count; /* how many times has plug() been called? */
- GArray *unplug_fns; /* functions to call at unplug time */
-} Plug;
+ unsigned nesting_level;
+ GArray *deferred_call_array;
+} DeferCallThreadState;
-/* Use get_ptr_plug() to fetch this thread-local value */
-QEMU_DEFINE_STATIC_CO_TLS(Plug, plug);
+/* Use get_ptr_defer_call_thread_state() to fetch this thread-local value */
+QEMU_DEFINE_STATIC_CO_TLS(DeferCallThreadState, defer_call_thread_state);
/* Called at thread cleanup time */
-static void blk_io_plug_atexit(Notifier *n, void *value)
+static void defer_call_atexit(Notifier *n, void *value)
{
- Plug *plug = get_ptr_plug();
- g_array_free(plug->unplug_fns, TRUE);
+ DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
+ g_array_free(thread_state->deferred_call_array, TRUE);
}
/* This won't involve coroutines, so use __thread */
-static __thread Notifier blk_io_plug_atexit_notifier;
+static __thread Notifier defer_call_atexit_notifier;
/**
- * blk_io_plug_call:
+ * defer_call:
* @fn: a function pointer to be invoked
* @opaque: a user-defined argument to @fn()
*
- * Call @fn(@opaque) immediately if not within a blk_io_plug()/blk_io_unplug()
- * section.
+ * Call @fn(@opaque) immediately if not within a
+ * defer_call_begin()/defer_call_end() section.
*
* Otherwise defer the call until the end of the outermost
- * blk_io_plug()/blk_io_unplug() section in this thread. If the same
+ * defer_call_begin()/defer_call_end() section in this thread. If the same
* @fn/@opaque pair has already been deferred, it will only be called once upon
- * blk_io_unplug() so that accumulated calls are batched into a single call.
+ * defer_call_end() so that accumulated calls are batched into a single call.
*
* The caller must ensure that @opaque is not freed before @fn() is invoked.
*/
-void blk_io_plug_call(void (*fn)(void *), void *opaque)
+void defer_call(void (*fn)(void *), void *opaque)
{
- Plug *plug = get_ptr_plug();
+ DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
- /* Call immediately if we're not plugged */
- if (plug->count == 0) {
+ /* Call immediately if we're not deferring calls */
+ if (thread_state->nesting_level == 0) {
fn(opaque);
return;
}
- GArray *array = plug->unplug_fns;
+ GArray *array = thread_state->deferred_call_array;
if (!array) {
- array = g_array_new(FALSE, FALSE, sizeof(UnplugFn));
- plug->unplug_fns = array;
- blk_io_plug_atexit_notifier.notify = blk_io_plug_atexit;
- qemu_thread_atexit_add(&blk_io_plug_atexit_notifier);
+ array = g_array_new(FALSE, FALSE, sizeof(DeferredCall));
+ thread_state->deferred_call_array = array;
+ defer_call_atexit_notifier.notify = defer_call_atexit;
+ qemu_thread_atexit_add(&defer_call_atexit_notifier);
}
- UnplugFn *fns = (UnplugFn *)array->data;
- UnplugFn new_fn = {
+ DeferredCall *fns = (DeferredCall *)array->data;
+ DeferredCall new_fn = {
.fn = fn,
.opaque = opaque,
};
@@ -106,46 +103,46 @@ void blk_io_plug_call(void (*fn)(void *), void *opaque)
}
/**
- * blk_io_plug: Defer blk_io_plug_call() functions until blk_io_unplug()
+ * defer_call_begin: Defer defer_call() functions until defer_call_end()
*
- * blk_io_plug/unplug are thread-local operations. This means that multiple
- * threads can simultaneously call plug/unplug, but the caller must ensure that
- * each unplug() is called in the same thread of the matching plug().
+ * defer_call_begin() and defer_call_end() are thread-local operations. The
+ * caller must ensure that each defer_call_begin() has a matching
+ * defer_call_end() in the same thread.
*
- * Nesting is supported. blk_io_plug_call() functions are only called at the
- * outermost blk_io_unplug().
+ * Nesting is supported. defer_call() functions are only called at the
+ * outermost defer_call_end().
*/
-void blk_io_plug(void)
+void defer_call_begin(void)
{
- Plug *plug = get_ptr_plug();
+ DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
- assert(plug->count < UINT32_MAX);
+ assert(thread_state->nesting_level < UINT32_MAX);
- plug->count++;
+ thread_state->nesting_level++;
}
/**
- * blk_io_unplug: Run any pending blk_io_plug_call() functions
+ * defer_call_end: Run any pending defer_call() functions
*
- * There must have been a matching blk_io_plug() call in the same thread prior
- * to this blk_io_unplug() call.
+ * There must have been a matching defer_call_begin() call in the same thread
+ * prior to this defer_call_end() call.
*/
-void blk_io_unplug(void)
+void defer_call_end(void)
{
- Plug *plug = get_ptr_plug();
+ DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
- assert(plug->count > 0);
+ assert(thread_state->nesting_level > 0);
- if (--plug->count > 0) {
+ if (--thread_state->nesting_level > 0) {
return;
}
- GArray *array = plug->unplug_fns;
+ GArray *array = thread_state->deferred_call_array;
if (!array) {
return;
}
- UnplugFn *fns = (UnplugFn *)array->data;
+ DeferredCall *fns = (DeferredCall *)array->data;
for (guint i = 0; i < array->len; i++) {
fns[i].fn(fns[i].opaque);
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index 3b6f2b0aa2..e9dd8f8a99 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -509,7 +509,7 @@ static int xen_block_get_request(XenBlockDataPlane *dataplane,
/*
* Threshold of in-flight requests above which we will start using
- * blk_io_plug()/blk_io_unplug() to batch requests.
+ * defer_call_begin()/defer_call_end() to batch requests.
*/
#define IO_PLUG_THRESHOLD 1
@@ -537,7 +537,7 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
* is below us.
*/
if (inflight_atstart > IO_PLUG_THRESHOLD) {
- blk_io_plug();
+ defer_call_begin();
}
while (rc != rp) {
/* pull request from ring */
@@ -577,12 +577,12 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
if (inflight_atstart > IO_PLUG_THRESHOLD &&
batched >= inflight_atstart) {
- blk_io_unplug();
+ defer_call_end();
}
xen_block_do_aio(request);
if (inflight_atstart > IO_PLUG_THRESHOLD) {
if (batched >= inflight_atstart) {
- blk_io_plug();
+ defer_call_begin();
batched = 0;
} else {
batched++;
@@ -590,7 +590,7 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
}
}
if (inflight_atstart > IO_PLUG_THRESHOLD) {
- blk_io_unplug();
+ defer_call_end();
}
return done_something;
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 39e7f23fab..6a45033d15 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -1134,7 +1134,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
bool suppress_notifications = virtio_queue_get_notification(vq);
aio_context_acquire(blk_get_aio_context(s->blk));
- blk_io_plug();
+ defer_call_begin();
do {
if (suppress_notifications) {
@@ -1158,7 +1158,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
virtio_blk_submit_multireq(s, &mrb);
}
- blk_io_unplug();
+ defer_call_end();
aio_context_release(blk_get_aio_context(s->blk));
}
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 45b95ea070..c2465e3e88 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -799,7 +799,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
return -ENOBUFS;
}
scsi_req_ref(req->sreq);
- blk_io_plug();
+ defer_call_begin();
object_unref(OBJECT(d));
return 0;
}
@@ -810,7 +810,7 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (scsi_req_enqueue(sreq)) {
scsi_req_continue(sreq);
}
- blk_io_unplug();
+ defer_call_end();
scsi_req_unref(sreq);
}
@@ -836,7 +836,7 @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
while (!QTAILQ_EMPTY(&reqs)) {
req = QTAILQ_FIRST(&reqs);
QTAILQ_REMOVE(&reqs, req, next);
- blk_io_unplug();
+ defer_call_end();
scsi_req_unref(req->sreq);
virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_scsi_free_req(req);
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/4] block: rename blk_io_plug_call() API to defer_call()
2023-08-17 15:58 ` [PATCH v2 1/4] block: rename blk_io_plug_call() API to defer_call() Stefan Hajnoczi
@ 2023-08-18 8:33 ` Philippe Mathieu-Daudé
2023-08-18 11:06 ` Paul Durrant
1 sibling, 0 replies; 12+ messages in thread
From: Philippe Mathieu-Daudé @ 2023-08-18 8:33 UTC (permalink / raw)
To: Stefan Hajnoczi, qemu-devel
Cc: qemu-block, Fam Zheng, xen-devel, Anthony Perard, Hanna Reitz,
Kevin Wolf, Julia Suvorova, Stefano Garzarella, Paolo Bonzini,
Aarushi Mehta, Michael S. Tsirkin, Stefano Stabellini,
Paul Durrant, Ilya Maximets
On 17/8/23 17:58, Stefan Hajnoczi wrote:
> Prepare to move the blk_io_plug_call() API out of the block layer so
> that other subsystems can use this deferred call mechanism. Rename it
> to defer_call() but leave the code in block/plug.c.
>
> The next commit will move the code out of the block layer.
>
> Suggested-by: Ilya Maximets <i.maximets@ovn.org>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> include/sysemu/block-backend-io.h | 6 +-
> block/blkio.c | 8 +--
> block/io_uring.c | 4 +-
> block/linux-aio.c | 4 +-
> block/nvme.c | 4 +-
> block/plug.c | 109 +++++++++++++++---------------
> hw/block/dataplane/xen-block.c | 10 +--
> hw/block/virtio-blk.c | 4 +-
> hw/scsi/virtio-scsi.c | 6 +-
> 9 files changed, 76 insertions(+), 79 deletions(-)
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/4] block: rename blk_io_plug_call() API to defer_call()
2023-08-17 15:58 ` [PATCH v2 1/4] block: rename blk_io_plug_call() API to defer_call() Stefan Hajnoczi
2023-08-18 8:33 ` Philippe Mathieu-Daudé
@ 2023-08-18 11:06 ` Paul Durrant
1 sibling, 0 replies; 12+ messages in thread
From: Paul Durrant @ 2023-08-18 11:06 UTC (permalink / raw)
To: Stefan Hajnoczi, qemu-devel
Cc: qemu-block, Fam Zheng, xen-devel, Anthony Perard, Hanna Reitz,
Kevin Wolf, Julia Suvorova, Stefano Garzarella, Paolo Bonzini,
Aarushi Mehta, Philippe Mathieu-Daudé, Michael S. Tsirkin,
Stefano Stabellini, Ilya Maximets
On 17/08/2023 16:58, Stefan Hajnoczi wrote:
> Prepare to move the blk_io_plug_call() API out of the block layer so
> that other subsystems can use this deferred call mechanism. Rename it
> to defer_call() but leave the code in block/plug.c.
>
> The next commit will move the code out of the block layer.
>
> Suggested-by: Ilya Maximets <i.maximets@ovn.org>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> include/sysemu/block-backend-io.h | 6 +-
> block/blkio.c | 8 +--
> block/io_uring.c | 4 +-
> block/linux-aio.c | 4 +-
> block/nvme.c | 4 +-
> block/plug.c | 109 +++++++++++++++---------------
> hw/block/dataplane/xen-block.c | 10 +--
> hw/block/virtio-blk.c | 4 +-
> hw/scsi/virtio-scsi.c | 6 +-
> 9 files changed, 76 insertions(+), 79 deletions(-)
>
Reviewed-by: Paul Durrant <paul@xen.org>
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v2 2/4] util/defer-call: move defer_call() to util/
2023-08-17 15:58 [PATCH v2 0/4] virtio-blk: use blk_io_plug_call() instead of notification BH Stefan Hajnoczi
2023-08-17 15:58 ` [PATCH v2 1/4] block: rename blk_io_plug_call() API to defer_call() Stefan Hajnoczi
@ 2023-08-17 15:58 ` Stefan Hajnoczi
2023-08-18 8:31 ` Philippe Mathieu-Daudé
2023-08-17 15:58 ` [PATCH v2 3/4] virtio: use defer_call() in virtio_irqfd_notify() Stefan Hajnoczi
2023-08-17 15:58 ` [PATCH v2 4/4] virtio-blk: remove batch notification BH Stefan Hajnoczi
3 siblings, 1 reply; 12+ messages in thread
From: Stefan Hajnoczi @ 2023-08-17 15:58 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-block, Stefan Hajnoczi, Fam Zheng, xen-devel, Anthony Perard,
Hanna Reitz, Kevin Wolf, Julia Suvorova, Stefano Garzarella,
Paolo Bonzini, Aarushi Mehta, Philippe Mathieu-Daudé,
Michael S. Tsirkin, Stefano Stabellini, Paul Durrant,
Ilya Maximets
The networking subsystem may wish to use defer_call(), so move the code
to util/ where it can be reused.
As a reminder of what defer_call() does:
This API defers a function call within a defer_call_begin()/defer_call_end()
section, allowing multiple calls to batch up. This is a performance
optimization that is used in the block layer to submit several I/O requests
at once instead of individually:
defer_call_begin(); <-- start of section
...
defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
defer_call(my_func, my_obj); <-- another
defer_call(my_func, my_obj); <-- another
...
defer_call_end(); <-- end of section, my_func(my_obj) is called once
Suggested-by: Ilya Maximets <i.maximets@ovn.org>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
MAINTAINERS | 3 ++-
include/qemu/defer-call.h | 15 +++++++++++++++
include/sysemu/block-backend-io.h | 4 ----
block/blkio.c | 1 +
block/io_uring.c | 1 +
block/linux-aio.c | 1 +
block/nvme.c | 1 +
hw/block/dataplane/xen-block.c | 1 +
hw/block/virtio-blk.c | 1 +
hw/scsi/virtio-scsi.c | 1 +
block/plug.c => util/defer-call.c | 2 +-
block/meson.build | 1 -
util/meson.build | 1 +
13 files changed, 26 insertions(+), 7 deletions(-)
create mode 100644 include/qemu/defer-call.h
rename block/plug.c => util/defer-call.c (99%)
diff --git a/MAINTAINERS b/MAINTAINERS
index 6111b6b4d9..7cd7132ffc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2676,12 +2676,13 @@ S: Supported
F: util/async.c
F: util/aio-*.c
F: util/aio-*.h
+F: util/defer-call.c
F: util/fdmon-*.c
F: block/io.c
-F: block/plug.c
F: migration/block*
F: include/block/aio.h
F: include/block/aio-wait.h
+F: include/qemu/defer-call.h
F: scripts/qemugdb/aio.py
F: tests/unit/test-fdmon-epoll.c
T: git https://github.com/stefanha/qemu.git block
diff --git a/include/qemu/defer-call.h b/include/qemu/defer-call.h
new file mode 100644
index 0000000000..291f86c987
--- /dev/null
+++ b/include/qemu/defer-call.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Deferred calls
+ *
+ * Copyright Red Hat.
+ */
+
+#ifndef QEMU_DEFER_CALL_H
+#define QEMU_DEFER_CALL_H
+
+void defer_call_begin(void);
+void defer_call_end(void);
+void defer_call(void (*fn)(void *), void *opaque);
+
+#endif /* QEMU_DEFER_CALL_H */
diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h
index cfcfd85c1d..d174275a5c 100644
--- a/include/sysemu/block-backend-io.h
+++ b/include/sysemu/block-backend-io.h
@@ -100,10 +100,6 @@ void blk_iostatus_set_err(BlockBackend *blk, int error);
int blk_get_max_iov(BlockBackend *blk);
int blk_get_max_hw_iov(BlockBackend *blk);
-void defer_call_begin(void);
-void defer_call_end(void);
-void defer_call(void (*fn)(void *), void *opaque);
-
AioContext *blk_get_aio_context(BlockBackend *blk);
BlockAcctStats *blk_get_stats(BlockBackend *blk);
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
diff --git a/block/blkio.c b/block/blkio.c
index 7cf6d61f47..0a0a6c0f5f 100644
--- a/block/blkio.c
+++ b/block/blkio.c
@@ -13,6 +13,7 @@
#include "block/block_int.h"
#include "exec/memory.h"
#include "exec/cpu-common.h" /* for qemu_ram_get_fd() */
+#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qapi/qmp/qdict.h"
diff --git a/block/io_uring.c b/block/io_uring.c
index 8429f341be..3a1e1f45b3 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -15,6 +15,7 @@
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/coroutine.h"
+#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"
#include "trace.h"
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 9a08219db0..62380593c8 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -14,6 +14,7 @@
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"
+#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"
diff --git a/block/nvme.c b/block/nvme.c
index dfbd1085fd..96b3f8f2fa 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -16,6 +16,7 @@
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
+#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index e9dd8f8a99..c4bb28c66f 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -19,6 +19,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/memalign.h"
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 6a45033d15..a1f8e15522 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/module.h"
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index c2465e3e88..83c154e74e 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -18,6 +18,7 @@
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
+#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
diff --git a/block/plug.c b/util/defer-call.c
similarity index 99%
rename from block/plug.c
rename to util/defer-call.c
index f26173559c..037dc0abf0 100644
--- a/block/plug.c
+++ b/util/defer-call.c
@@ -22,7 +22,7 @@
#include "qemu/coroutine-tls.h"
#include "qemu/notify.h"
#include "qemu/thread.h"
-#include "sysemu/block-backend.h"
+#include "qemu/defer-call.h"
/* A function call that has been deferred until defer_call_end() */
typedef struct {
diff --git a/block/meson.build b/block/meson.build
index 529fc172c6..ba20e17acf 100644
--- a/block/meson.build
+++ b/block/meson.build
@@ -23,7 +23,6 @@ block_ss.add(files(
'mirror.c',
'nbd.c',
'null.c',
- 'plug.c',
'qapi.c',
'qcow2-bitmap.c',
'qcow2-cache.c',
diff --git a/util/meson.build b/util/meson.build
index a375160286..b9880357aa 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -29,6 +29,7 @@ util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch)
if glib_has_gslice
util_ss.add(files('qtree.c'))
endif
+util_ss.add(files('defer-call.c'))
util_ss.add(files('envlist.c', 'path.c', 'module.c'))
util_ss.add(files('host-utils.c'))
util_ss.add(files('bitmap.c', 'bitops.c'))
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH v2 2/4] util/defer-call: move defer_call() to util/
2023-08-17 15:58 ` [PATCH v2 2/4] util/defer-call: move defer_call() to util/ Stefan Hajnoczi
@ 2023-08-18 8:31 ` Philippe Mathieu-Daudé
2023-09-13 19:39 ` Stefan Hajnoczi
0 siblings, 1 reply; 12+ messages in thread
From: Philippe Mathieu-Daudé @ 2023-08-18 8:31 UTC (permalink / raw)
To: Stefan Hajnoczi, qemu-devel
Cc: qemu-block, Fam Zheng, xen-devel, Anthony Perard, Hanna Reitz,
Kevin Wolf, Julia Suvorova, Stefano Garzarella, Paolo Bonzini,
Aarushi Mehta, Michael S. Tsirkin, Stefano Stabellini,
Paul Durrant, Ilya Maximets
Hi Stefan,
On 17/8/23 17:58, Stefan Hajnoczi wrote:
> The networking subsystem may wish to use defer_call(), so move the code
> to util/ where it can be reused.
>
> As a reminder of what defer_call() does:
>
> This API defers a function call within a defer_call_begin()/defer_call_end()
> section, allowing multiple calls to batch up. This is a performance
> optimization that is used in the block layer to submit several I/O requests
> at once instead of individually:
>
> defer_call_begin(); <-- start of section
> ...
> defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
> defer_call(my_func, my_obj); <-- another
> defer_call(my_func, my_obj); <-- another
> ...
> defer_call_end(); <-- end of section, my_func(my_obj) is called once
>
> Suggested-by: Ilya Maximets <i.maximets@ovn.org>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> MAINTAINERS | 3 ++-
> include/qemu/defer-call.h | 15 +++++++++++++++
> include/sysemu/block-backend-io.h | 4 ----
> block/blkio.c | 1 +
> block/io_uring.c | 1 +
> block/linux-aio.c | 1 +
> block/nvme.c | 1 +
> hw/block/dataplane/xen-block.c | 1 +
> hw/block/virtio-blk.c | 1 +
> hw/scsi/virtio-scsi.c | 1 +
> block/plug.c => util/defer-call.c | 2 +-
> block/meson.build | 1 -
> util/meson.build | 1 +
> 13 files changed, 26 insertions(+), 7 deletions(-)
> create mode 100644 include/qemu/defer-call.h
> rename block/plug.c => util/defer-call.c (99%)
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 6111b6b4d9..7cd7132ffc 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -2676,12 +2676,13 @@ S: Supported
> F: util/async.c
> F: util/aio-*.c
> F: util/aio-*.h
> +F: util/defer-call.c
If used by network/other backends, maybe worth adding a
brand new section instead, rather than "Block I/O path".
> F: util/fdmon-*.c
> F: block/io.c
> -F: block/plug.c
> F: migration/block*
> F: include/block/aio.h
> F: include/block/aio-wait.h
> +F: include/qemu/defer-call.h
> F: scripts/qemugdb/aio.py
> F: tests/unit/test-fdmon-epoll.c
> T: git https://github.com/stefanha/qemu.git block
> diff --git a/include/qemu/defer-call.h b/include/qemu/defer-call.h
> new file mode 100644
> index 0000000000..291f86c987
> --- /dev/null
> +++ b/include/qemu/defer-call.h
> @@ -0,0 +1,15 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/*
> + * Deferred calls
> + *
> + * Copyright Red Hat.
> + */
> +
> +#ifndef QEMU_DEFER_CALL_H
> +#define QEMU_DEFER_CALL_H
> +
Please add smth like:
/* See documentation in util/defer-call.c */
> +void defer_call_begin(void);
> +void defer_call_end(void);
> +void defer_call(void (*fn)(void *), void *opaque);
> +
> +#endif /* QEMU_DEFER_CALL_H */
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 2/4] util/defer-call: move defer_call() to util/
2023-08-18 8:31 ` Philippe Mathieu-Daudé
@ 2023-09-13 19:39 ` Stefan Hajnoczi
0 siblings, 0 replies; 12+ messages in thread
From: Stefan Hajnoczi @ 2023-09-13 19:39 UTC (permalink / raw)
To: Philippe Mathieu-Daudé
Cc: qemu-devel, qemu-block, Fam Zheng, xen-devel, Anthony Perard,
Hanna Reitz, Kevin Wolf, Julia Suvorova, Stefano Garzarella,
Paolo Bonzini, Aarushi Mehta, Michael S. Tsirkin,
Stefano Stabellini, Paul Durrant, Ilya Maximets
[-- Attachment #1: Type: text/plain, Size: 3705 bytes --]
On Fri, Aug 18, 2023 at 10:31:40AM +0200, Philippe Mathieu-Daudé wrote:
> Hi Stefan,
>
> On 17/8/23 17:58, Stefan Hajnoczi wrote:
> > The networking subsystem may wish to use defer_call(), so move the code
> > to util/ where it can be reused.
> >
> > As a reminder of what defer_call() does:
> >
> > This API defers a function call within a defer_call_begin()/defer_call_end()
> > section, allowing multiple calls to batch up. This is a performance
> > optimization that is used in the block layer to submit several I/O requests
> > at once instead of individually:
> >
> > defer_call_begin(); <-- start of section
> > ...
> > defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
> > defer_call(my_func, my_obj); <-- another
> > defer_call(my_func, my_obj); <-- another
> > ...
> > defer_call_end(); <-- end of section, my_func(my_obj) is called once
> >
> > Suggested-by: Ilya Maximets <i.maximets@ovn.org>
> > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> > ---
> > MAINTAINERS | 3 ++-
> > include/qemu/defer-call.h | 15 +++++++++++++++
> > include/sysemu/block-backend-io.h | 4 ----
> > block/blkio.c | 1 +
> > block/io_uring.c | 1 +
> > block/linux-aio.c | 1 +
> > block/nvme.c | 1 +
> > hw/block/dataplane/xen-block.c | 1 +
> > hw/block/virtio-blk.c | 1 +
> > hw/scsi/virtio-scsi.c | 1 +
> > block/plug.c => util/defer-call.c | 2 +-
> > block/meson.build | 1 -
> > util/meson.build | 1 +
> > 13 files changed, 26 insertions(+), 7 deletions(-)
> > create mode 100644 include/qemu/defer-call.h
> > rename block/plug.c => util/defer-call.c (99%)
> >
> > diff --git a/MAINTAINERS b/MAINTAINERS
> > index 6111b6b4d9..7cd7132ffc 100644
> > --- a/MAINTAINERS
> > +++ b/MAINTAINERS
> > @@ -2676,12 +2676,13 @@ S: Supported
> > F: util/async.c
> > F: util/aio-*.c
> > F: util/aio-*.h
> > +F: util/defer-call.c
>
> If used by network/other backends, maybe worth adding a
> brand new section instead, rather than "Block I/O path".
Changes to defer-call.c will go through my block tree. We don't split
out the event loop (async.c, aio-*.c, etc) either even though it's
shared by other subsystems. The important thing is that
scripts/get_maintainer.pl identifies the maintainers.
I'd rather not create lots of micro-subsystems in MAINTAINERS that
duplicate my email and block git repo URL.
>
> > F: util/fdmon-*.c
> > F: block/io.c
> > -F: block/plug.c
> > F: migration/block*
> > F: include/block/aio.h
> > F: include/block/aio-wait.h
> > +F: include/qemu/defer-call.h
> > F: scripts/qemugdb/aio.py
> > F: tests/unit/test-fdmon-epoll.c
> > T: git https://github.com/stefanha/qemu.git block
> > diff --git a/include/qemu/defer-call.h b/include/qemu/defer-call.h
> > new file mode 100644
> > index 0000000000..291f86c987
> > --- /dev/null
> > +++ b/include/qemu/defer-call.h
> > @@ -0,0 +1,15 @@
> > +/* SPDX-License-Identifier: GPL-2.0-or-later */
> > +/*
> > + * Deferred calls
> > + *
> > + * Copyright Red Hat.
> > + */
> > +
> > +#ifndef QEMU_DEFER_CALL_H
> > +#define QEMU_DEFER_CALL_H
> > +
>
> Please add smth like:
>
> /* See documentation in util/defer-call.c */
Sure, will fix.
>
> > +void defer_call_begin(void);
> > +void defer_call_end(void);
> > +void defer_call(void (*fn)(void *), void *opaque);
> > +
> > +#endif /* QEMU_DEFER_CALL_H */
>
> Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
>
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v2 3/4] virtio: use defer_call() in virtio_irqfd_notify()
2023-08-17 15:58 [PATCH v2 0/4] virtio-blk: use blk_io_plug_call() instead of notification BH Stefan Hajnoczi
2023-08-17 15:58 ` [PATCH v2 1/4] block: rename blk_io_plug_call() API to defer_call() Stefan Hajnoczi
2023-08-17 15:58 ` [PATCH v2 2/4] util/defer-call: move defer_call() to util/ Stefan Hajnoczi
@ 2023-08-17 15:58 ` Stefan Hajnoczi
2023-08-18 15:17 ` Eric Blake
2023-08-21 11:02 ` Ilya Maximets
2023-08-17 15:58 ` [PATCH v2 4/4] virtio-blk: remove batch notification BH Stefan Hajnoczi
3 siblings, 2 replies; 12+ messages in thread
From: Stefan Hajnoczi @ 2023-08-17 15:58 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-block, Stefan Hajnoczi, Fam Zheng, xen-devel, Anthony Perard,
Hanna Reitz, Kevin Wolf, Julia Suvorova, Stefano Garzarella,
Paolo Bonzini, Aarushi Mehta, Philippe Mathieu-Daudé,
Michael S. Tsirkin, Stefano Stabellini, Paul Durrant,
Ilya Maximets
virtio-blk and virtio-scsi invoke virtio_irqfd_notify() to send Used
Buffer Notifications from an IOThread. This involves an eventfd
write(2) syscall. Calling this repeatedly when completing multiple I/O
requests in a row is wasteful.
Use the defer_call() API to batch together virtio_irqfd_notify() calls
made during thread pool (aio=threads), Linux AIO (aio=native), and
io_uring (aio=io_uring) completion processing.
Behavior is unchanged for emulated devices that do not use
defer_call_begin()/defer_call_end() since defer_call() immediately
invokes the callback when called outside a
defer_call_begin()/defer_call_end() region.
fio rw=randread bs=4k iodepth=64 numjobs=8 IOPS increases by ~9% with a
single IOThread and 8 vCPUs. iodepth=1 decreases by ~1% but this could
be noise. Detailed performance data and configuration specifics are
available here:
https://gitlab.com/stefanha/virt-playbooks/-/tree/blk_io_plug-irqfd
This duplicates the BH that virtio-blk uses for batching. The next
commit will remove it.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/io_uring.c | 6 ++++++
block/linux-aio.c | 4 ++++
hw/virtio/virtio.c | 11 ++++++++++-
util/thread-pool.c | 5 +++++
4 files changed, 25 insertions(+), 1 deletion(-)
diff --git a/block/io_uring.c b/block/io_uring.c
index 3a1e1f45b3..7cdd00e9f1 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -125,6 +125,9 @@ static void luring_process_completions(LuringState *s)
{
struct io_uring_cqe *cqes;
int total_bytes;
+
+ defer_call_begin();
+
/*
* Request completion callbacks can run the nested event loop.
* Schedule ourselves so the nested event loop will "see" remaining
@@ -217,7 +220,10 @@ end:
aio_co_wake(luringcb->co);
}
}
+
qemu_bh_cancel(s->completion_bh);
+
+ defer_call_end();
}
static int ioq_submit(LuringState *s)
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 62380593c8..ab607ade6a 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -205,6 +205,8 @@ static void qemu_laio_process_completions(LinuxAioState *s)
{
struct io_event *events;
+ defer_call_begin();
+
/* Reschedule so nested event loops see currently pending completions */
qemu_bh_schedule(s->completion_bh);
@@ -231,6 +233,8 @@ static void qemu_laio_process_completions(LinuxAioState *s)
* own `for` loop. If we are the last all counters droped to zero. */
s->event_max = 0;
s->event_idx = 0;
+
+ defer_call_end();
}
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 309038fd46..5eb1f91b41 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -15,6 +15,7 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-virtio.h"
#include "trace.h"
+#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
@@ -28,6 +29,7 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
+#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "virtio-qmp.h"
@@ -2426,6 +2428,13 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
}
}
+/* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
+static void virtio_notify_irqfd_deferred_fn(void *opaque)
+{
+ EventNotifier *notifier = opaque;
+ event_notifier_set(notifier);
+}
+
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
WITH_RCU_READ_LOCK_GUARD() {
@@ -2452,7 +2461,7 @@ void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
* to an atomic operation.
*/
virtio_set_isr(vq->vdev, 0x1);
- event_notifier_set(&vq->guest_notifier);
+ defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
}
static void virtio_irq(VirtQueue *vq)
diff --git a/util/thread-pool.c b/util/thread-pool.c
index e3d8292d14..d84961779a 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -15,6 +15,7 @@
* GNU GPL, version 2 or (at your option) any later version.
*/
#include "qemu/osdep.h"
+#include "qemu/defer-call.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
@@ -175,6 +176,8 @@ static void thread_pool_completion_bh(void *opaque)
ThreadPool *pool = opaque;
ThreadPoolElement *elem, *next;
+ defer_call_begin(); /* cb() may use defer_call() to coalesce work */
+
restart:
QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
if (elem->state != THREAD_DONE) {
@@ -208,6 +211,8 @@ restart:
qemu_aio_unref(elem);
}
}
+
+ defer_call_end();
}
static void thread_pool_cancel(BlockAIOCB *acb)
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH v2 3/4] virtio: use defer_call() in virtio_irqfd_notify()
2023-08-17 15:58 ` [PATCH v2 3/4] virtio: use defer_call() in virtio_irqfd_notify() Stefan Hajnoczi
@ 2023-08-18 15:17 ` Eric Blake
2023-08-21 11:02 ` Ilya Maximets
1 sibling, 0 replies; 12+ messages in thread
From: Eric Blake @ 2023-08-18 15:17 UTC (permalink / raw)
To: Stefan Hajnoczi
Cc: qemu-devel, qemu-block, Fam Zheng, xen-devel, Anthony Perard,
Hanna Reitz, Kevin Wolf, Julia Suvorova, Stefano Garzarella,
Paolo Bonzini, Aarushi Mehta, Philippe Mathieu-Daudé,
Michael S. Tsirkin, Stefano Stabellini, Paul Durrant,
Ilya Maximets
On Thu, Aug 17, 2023 at 11:58:46AM -0400, Stefan Hajnoczi wrote:
> virtio-blk and virtio-scsi invoke virtio_irqfd_notify() to send Used
> Buffer Notifications from an IOThread. This involves an eventfd
> write(2) syscall. Calling this repeatedly when completing multiple I/O
> requests in a row is wasteful.
>
> Use the defer_call() API to batch together virtio_irqfd_notify() calls
> made during thread pool (aio=threads), Linux AIO (aio=native), and
> io_uring (aio=io_uring) completion processing.
>
> Behavior is unchanged for emulated devices that do not use
> defer_call_begin()/defer_call_end() since defer_call() immediately
> invokes the callback when called outside a
> defer_call_begin()/defer_call_end() region.
>
> fio rw=randread bs=4k iodepth=64 numjobs=8 IOPS increases by ~9% with a
> single IOThread and 8 vCPUs. iodepth=1 decreases by ~1% but this could
> be noise. Detailed performance data and configuration specifics are
> available here:
> https://gitlab.com/stefanha/virt-playbooks/-/tree/blk_io_plug-irqfd
>
> This duplicates the BH that virtio-blk uses for batching. The next
> commit will remove it.
>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> block/io_uring.c | 6 ++++++
> block/linux-aio.c | 4 ++++
> hw/virtio/virtio.c | 11 ++++++++++-
> util/thread-pool.c | 5 +++++
> 4 files changed, 25 insertions(+), 1 deletion(-)
Reviewed-by: Eric Blake <eblake@redhat.com>
--
Eric Blake, Principal Software Engineer
Red Hat, Inc.
Virtualization: qemu.org | libguestfs.org
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 3/4] virtio: use defer_call() in virtio_irqfd_notify()
2023-08-17 15:58 ` [PATCH v2 3/4] virtio: use defer_call() in virtio_irqfd_notify() Stefan Hajnoczi
2023-08-18 15:17 ` Eric Blake
@ 2023-08-21 11:02 ` Ilya Maximets
1 sibling, 0 replies; 12+ messages in thread
From: Ilya Maximets @ 2023-08-21 11:02 UTC (permalink / raw)
To: Stefan Hajnoczi, qemu-devel
Cc: i.maximets, qemu-block, Fam Zheng, xen-devel, Anthony Perard,
Hanna Reitz, Kevin Wolf, Julia Suvorova, Stefano Garzarella,
Paolo Bonzini, Aarushi Mehta, Philippe Mathieu-Daudé,
Michael S. Tsirkin, Stefano Stabellini, Paul Durrant
On 8/17/23 17:58, Stefan Hajnoczi wrote:
> virtio-blk and virtio-scsi invoke virtio_irqfd_notify() to send Used
> Buffer Notifications from an IOThread. This involves an eventfd
> write(2) syscall. Calling this repeatedly when completing multiple I/O
> requests in a row is wasteful.
>
> Use the defer_call() API to batch together virtio_irqfd_notify() calls
> made during thread pool (aio=threads), Linux AIO (aio=native), and
> io_uring (aio=io_uring) completion processing.
>
> Behavior is unchanged for emulated devices that do not use
> defer_call_begin()/defer_call_end() since defer_call() immediately
> invokes the callback when called outside a
> defer_call_begin()/defer_call_end() region.
>
> fio rw=randread bs=4k iodepth=64 numjobs=8 IOPS increases by ~9% with a
> single IOThread and 8 vCPUs. iodepth=1 decreases by ~1% but this could
> be noise. Detailed performance data and configuration specifics are
> available here:
> https://gitlab.com/stefanha/virt-playbooks/-/tree/blk_io_plug-irqfd
>
> This duplicates the BH that virtio-blk uses for batching. The next
> commit will remove it.
>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> block/io_uring.c | 6 ++++++
> block/linux-aio.c | 4 ++++
> hw/virtio/virtio.c | 11 ++++++++++-
> util/thread-pool.c | 5 +++++
> 4 files changed, 25 insertions(+), 1 deletion(-)
>
> diff --git a/block/io_uring.c b/block/io_uring.c
> index 3a1e1f45b3..7cdd00e9f1 100644
> --- a/block/io_uring.c
> +++ b/block/io_uring.c
> @@ -125,6 +125,9 @@ static void luring_process_completions(LuringState *s)
> {
> struct io_uring_cqe *cqes;
> int total_bytes;
> +
> + defer_call_begin();
> +
> /*
> * Request completion callbacks can run the nested event loop.
> * Schedule ourselves so the nested event loop will "see" remaining
> @@ -217,7 +220,10 @@ end:
> aio_co_wake(luringcb->co);
> }
> }
> +
> qemu_bh_cancel(s->completion_bh);
> +
> + defer_call_end();
> }
>
> static int ioq_submit(LuringState *s)
> diff --git a/block/linux-aio.c b/block/linux-aio.c
> index 62380593c8..ab607ade6a 100644
> --- a/block/linux-aio.c
> +++ b/block/linux-aio.c
> @@ -205,6 +205,8 @@ static void qemu_laio_process_completions(LinuxAioState *s)
> {
> struct io_event *events;
>
> + defer_call_begin();
> +
> /* Reschedule so nested event loops see currently pending completions */
> qemu_bh_schedule(s->completion_bh);
>
> @@ -231,6 +233,8 @@ static void qemu_laio_process_completions(LinuxAioState *s)
> * own `for` loop. If we are the last all counters droped to zero. */
> s->event_max = 0;
> s->event_idx = 0;
> +
> + defer_call_end();
> }
>
> static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 309038fd46..5eb1f91b41 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -15,6 +15,7 @@
> #include "qapi/error.h"
> #include "qapi/qapi-commands-virtio.h"
> #include "trace.h"
> +#include "qemu/defer-call.h"
> #include "qemu/error-report.h"
> #include "qemu/log.h"
> #include "qemu/main-loop.h"
> @@ -28,6 +29,7 @@
> #include "hw/virtio/virtio-bus.h"
> #include "hw/qdev-properties.h"
> #include "hw/virtio/virtio-access.h"
> +#include "sysemu/block-backend.h"
An artifact from the previous version.
> #include "sysemu/dma.h"
> #include "sysemu/runstate.h"
> #include "virtio-qmp.h"
> @@ -2426,6 +2428,13 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> }
> }
>
> +/* Batch irqs while inside a defer_call_begin()/defer_call_end() section */
> +static void virtio_notify_irqfd_deferred_fn(void *opaque)
> +{
> + EventNotifier *notifier = opaque;
> + event_notifier_set(notifier);
> +}
> +
> void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
> {
> WITH_RCU_READ_LOCK_GUARD() {
> @@ -2452,7 +2461,7 @@ void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
> * to an atomic operation.
> */
> virtio_set_isr(vq->vdev, 0x1);
> - event_notifier_set(&vq->guest_notifier);
> + defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier);
Should we move the trace from this function to deferred one?
Or maybe add a new trace?
> }
>
> static void virtio_irq(VirtQueue *vq)
> diff --git a/util/thread-pool.c b/util/thread-pool.c
> index e3d8292d14..d84961779a 100644
> --- a/util/thread-pool.c
> +++ b/util/thread-pool.c
> @@ -15,6 +15,7 @@
> * GNU GPL, version 2 or (at your option) any later version.
> */
> #include "qemu/osdep.h"
> +#include "qemu/defer-call.h"
> #include "qemu/queue.h"
> #include "qemu/thread.h"
> #include "qemu/coroutine.h"
> @@ -175,6 +176,8 @@ static void thread_pool_completion_bh(void *opaque)
> ThreadPool *pool = opaque;
> ThreadPoolElement *elem, *next;
>
> + defer_call_begin(); /* cb() may use defer_call() to coalesce work */
> +
> restart:
> QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
> if (elem->state != THREAD_DONE) {
> @@ -208,6 +211,8 @@ restart:
> qemu_aio_unref(elem);
> }
> }
> +
> + defer_call_end();
> }
>
> static void thread_pool_cancel(BlockAIOCB *acb)
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v2 4/4] virtio-blk: remove batch notification BH
2023-08-17 15:58 [PATCH v2 0/4] virtio-blk: use blk_io_plug_call() instead of notification BH Stefan Hajnoczi
` (2 preceding siblings ...)
2023-08-17 15:58 ` [PATCH v2 3/4] virtio: use defer_call() in virtio_irqfd_notify() Stefan Hajnoczi
@ 2023-08-17 15:58 ` Stefan Hajnoczi
2023-08-18 15:19 ` Eric Blake
3 siblings, 1 reply; 12+ messages in thread
From: Stefan Hajnoczi @ 2023-08-17 15:58 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-block, Stefan Hajnoczi, Fam Zheng, xen-devel, Anthony Perard,
Hanna Reitz, Kevin Wolf, Julia Suvorova, Stefano Garzarella,
Paolo Bonzini, Aarushi Mehta, Philippe Mathieu-Daudé,
Michael S. Tsirkin, Stefano Stabellini, Paul Durrant,
Ilya Maximets
There is a batching mechanism for virtio-blk Used Buffer Notifications
that is no longer needed because the previous commit added batching to
virtio_notify_irqfd().
Note that this mechanism was rarely used in practice because it is only
enabled when EVENT_IDX is not negotiated by the driver. Modern drivers
enable EVENT_IDX.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
hw/block/dataplane/virtio-blk.c | 48 +--------------------------------
1 file changed, 1 insertion(+), 47 deletions(-)
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index da36fcfd0b..f83bb0f116 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -31,9 +31,6 @@ struct VirtIOBlockDataPlane {
VirtIOBlkConf *conf;
VirtIODevice *vdev;
- QEMUBH *bh; /* bh for guest notification */
- unsigned long *batch_notify_vqs;
- bool batch_notifications;
/* Note that these EventNotifiers are assigned by value. This is
* fine as long as you do not call event_notifier_cleanup on them
@@ -47,36 +44,7 @@ struct VirtIOBlockDataPlane {
/* Raise an interrupt to signal guest, if necessary */
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
{
- if (s->batch_notifications) {
- set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
- qemu_bh_schedule(s->bh);
- } else {
- virtio_notify_irqfd(s->vdev, vq);
- }
-}
-
-static void notify_guest_bh(void *opaque)
-{
- VirtIOBlockDataPlane *s = opaque;
- unsigned nvqs = s->conf->num_queues;
- unsigned long bitmap[BITS_TO_LONGS(nvqs)];
- unsigned j;
-
- memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap));
- memset(s->batch_notify_vqs, 0, sizeof(bitmap));
-
- for (j = 0; j < nvqs; j += BITS_PER_LONG) {
- unsigned long bits = bitmap[j / BITS_PER_LONG];
-
- while (bits != 0) {
- unsigned i = j + ctzl(bits);
- VirtQueue *vq = virtio_get_queue(s->vdev, i);
-
- virtio_notify_irqfd(s->vdev, vq);
-
- bits &= bits - 1; /* clear right-most bit */
- }
- }
+ virtio_notify_irqfd(s->vdev, vq);
}
/* Context: QEMU global mutex held */
@@ -126,9 +94,6 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
} else {
s->ctx = qemu_get_aio_context();
}
- s->bh = aio_bh_new_guarded(s->ctx, notify_guest_bh, s,
- &DEVICE(vdev)->mem_reentrancy_guard);
- s->batch_notify_vqs = bitmap_new(conf->num_queues);
*dataplane = s;
@@ -146,8 +111,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
vblk = VIRTIO_BLK(s->vdev);
assert(!vblk->dataplane_started);
- g_free(s->batch_notify_vqs);
- qemu_bh_delete(s->bh);
if (s->iothread) {
object_unref(OBJECT(s->iothread));
}
@@ -173,12 +136,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
s->starting = true;
- if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
- s->batch_notifications = true;
- } else {
- s->batch_notifications = false;
- }
-
/* Set up guest notifier (irq) */
r = k->set_guest_notifiers(qbus->parent, nvqs, true);
if (r != 0) {
@@ -370,9 +327,6 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
aio_context_release(s->ctx);
- qemu_bh_cancel(s->bh);
- notify_guest_bh(s); /* final chance to notify guest */
-
/* Clean up guest notifier (irq) */
k->set_guest_notifiers(qbus->parent, nvqs, false);
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH v2 4/4] virtio-blk: remove batch notification BH
2023-08-17 15:58 ` [PATCH v2 4/4] virtio-blk: remove batch notification BH Stefan Hajnoczi
@ 2023-08-18 15:19 ` Eric Blake
0 siblings, 0 replies; 12+ messages in thread
From: Eric Blake @ 2023-08-18 15:19 UTC (permalink / raw)
To: Stefan Hajnoczi
Cc: qemu-devel, qemu-block, Fam Zheng, xen-devel, Anthony Perard,
Hanna Reitz, Kevin Wolf, Julia Suvorova, Stefano Garzarella,
Paolo Bonzini, Aarushi Mehta, Philippe Mathieu-Daudé,
Michael S. Tsirkin, Stefano Stabellini, Paul Durrant,
Ilya Maximets
On Thu, Aug 17, 2023 at 11:58:47AM -0400, Stefan Hajnoczi wrote:
> There is a batching mechanism for virtio-blk Used Buffer Notifications
> that is no longer needed because the previous commit added batching to
> virtio_notify_irqfd().
>
> Note that this mechanism was rarely used in practice because it is only
> enabled when EVENT_IDX is not negotiated by the driver. Modern drivers
> enable EVENT_IDX.
>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> hw/block/dataplane/virtio-blk.c | 48 +--------------------------------
> 1 file changed, 1 insertion(+), 47 deletions(-)
>
Reviewed-by: Eric Blake <eblake@redhat.com>
--
Eric Blake, Principal Software Engineer
Red Hat, Inc.
Virtualization: qemu.org | libguestfs.org
^ permalink raw reply [flat|nested] 12+ messages in thread