* [PATCH 1/3] util & iothread: Introduce event-loop abstract class
2022-02-21 17:08 [PATCH 0/3] util/thread-pool: Expose minimum and maximum size Nicolas Saenz Julienne
@ 2022-02-21 17:08 ` Nicolas Saenz Julienne
2022-02-24 9:48 ` Stefan Hajnoczi
2022-02-21 17:08 ` [PATCH 2/3] util/main-loop: Introduce the main loop into QOM Nicolas Saenz Julienne
2022-02-21 17:08 ` [PATCH 3/3] util/event-loop: Introduce options to set the thread pool size Nicolas Saenz Julienne
2 siblings, 1 reply; 13+ messages in thread
From: Nicolas Saenz Julienne @ 2022-02-21 17:08 UTC (permalink / raw)
To: kwolf, stefanha, berrange
Cc: fam, eduardo, qemu-block, michael.roth, mtosatti, qemu-devel,
armbru, hreitz, pbonzini, Nicolas Saenz Julienne, eblake
Introduce the 'event-loop-backend' abstract class, it'll hold the
properties common to all event loops and provide the necessary hooks for
their creation. Then have 'IOThread' inherit from it. The class is
defined as user creatable and provides a hook for its children to attach
themselves to the user creatable class 'complete' function.
The new 'event-loop-backend' class will live in the util directory, and
will be packed into the qom static library.
No functional changes intended.
Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
---
MAINTAINERS | 1 +
include/sysemu/iothread.h | 11 +--
iothread.c | 171 ++++----------------------------------
qom/meson.build | 1 +
util/event-loop.c | 142 +++++++++++++++++++++++++++++++
util/event-loop.h | 40 +++++++++
6 files changed, 204 insertions(+), 162 deletions(-)
create mode 100644 util/event-loop.c
create mode 100644 util/event-loop.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 4b3ae2ab08..e5ffbea449 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2670,6 +2670,7 @@ F: include/sysemu/runstate.h
F: include/sysemu/runstate-action.h
F: util/main-loop.c
F: util/qemu-timer.c
+F: util/event-loop*
F: softmmu/vl.c
F: softmmu/main.c
F: softmmu/cpus.c
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index 7f714bd136..aafef546b5 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -17,11 +17,12 @@
#include "block/aio.h"
#include "qemu/thread.h"
#include "qom/object.h"
+#include "util/event-loop.h"
#define TYPE_IOTHREAD "iothread"
struct IOThread {
- Object parent_obj;
+ EventLoopBackend parent_obj;
QemuThread thread;
AioContext *ctx;
@@ -32,14 +33,6 @@ struct IOThread {
bool stopping; /* has iothread_stop() been called? */
bool running; /* should iothread_run() continue? */
int thread_id;
-
- /* AioContext poll parameters */
- int64_t poll_max_ns;
- int64_t poll_grow;
- int64_t poll_shrink;
-
- /* AioContext AIO engine parameters */
- int64_t aio_max_batch;
};
typedef struct IOThread IOThread;
diff --git a/iothread.c b/iothread.c
index 0f98af0f2a..fca5ee8f4c 100644
--- a/iothread.c
+++ b/iothread.c
@@ -23,22 +23,13 @@
#include "qemu/error-report.h"
#include "qemu/rcu.h"
#include "qemu/main-loop.h"
+#include "util/event-loop.h"
typedef ObjectClass IOThreadClass;
DECLARE_CLASS_CHECKERS(IOThreadClass, IOTHREAD,
TYPE_IOTHREAD)
-#ifdef CONFIG_POSIX
-/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
- * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
- * workloads.
- */
-#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL
-#else
-#define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL
-#endif
-
static void *iothread_run(void *opaque)
{
IOThread *iothread = opaque;
@@ -105,7 +96,6 @@ static void iothread_instance_init(Object *obj)
{
IOThread *iothread = IOTHREAD(obj);
- iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
iothread->thread_id = -1;
qemu_sem_init(&iothread->init_done_sem, 0);
/* By default, we don't run gcontext */
@@ -152,28 +142,24 @@ static void iothread_init_gcontext(IOThread *iothread)
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}
-static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
+static void iothread_set_aio_context_params(EventLoopBackend *bc, Error **errp)
{
+ IOThread *iothread = IOTHREAD(bc);
ERRP_GUARD();
- aio_context_set_poll_params(iothread->ctx,
- iothread->poll_max_ns,
- iothread->poll_grow,
- iothread->poll_shrink,
- errp);
+ aio_context_set_poll_params(iothread->ctx, bc->poll_max_ns, bc->poll_grow,
+ bc->poll_shrink, errp);
if (*errp) {
return;
}
- aio_context_set_aio_params(iothread->ctx,
- iothread->aio_max_batch,
- errp);
+ aio_context_set_aio_params(iothread->ctx, bc->aio_max_batch, errp);
}
-static void iothread_complete(UserCreatable *obj, Error **errp)
+static void iothread_init(EventLoopBackend *bc, Error **errp)
{
Error *local_error = NULL;
- IOThread *iothread = IOTHREAD(obj);
+ IOThread *iothread = IOTHREAD(bc);
char *thread_name;
iothread->stopping = false;
@@ -189,7 +175,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
*/
iothread_init_gcontext(iothread);
- iothread_set_aio_context_params(iothread, &local_error);
+ iothread_set_aio_context_params(bc, &local_error);
if (local_error) {
error_propagate(errp, local_error);
aio_context_unref(iothread->ctx);
@@ -201,7 +187,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
* to inherit.
*/
thread_name = g_strdup_printf("IO %s",
- object_get_canonical_path_component(OBJECT(obj)));
+ object_get_canonical_path_component(OBJECT(bc)));
qemu_thread_create(&iothread->thread, thread_name, iothread_run,
iothread, QEMU_THREAD_JOINABLE);
g_free(thread_name);
@@ -212,141 +198,20 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
}
}
-typedef struct {
- const char *name;
- ptrdiff_t offset; /* field's byte offset in IOThread struct */
-} IOThreadParamInfo;
-
-static IOThreadParamInfo poll_max_ns_info = {
- "poll-max-ns", offsetof(IOThread, poll_max_ns),
-};
-static IOThreadParamInfo poll_grow_info = {
- "poll-grow", offsetof(IOThread, poll_grow),
-};
-static IOThreadParamInfo poll_shrink_info = {
- "poll-shrink", offsetof(IOThread, poll_shrink),
-};
-static IOThreadParamInfo aio_max_batch_info = {
- "aio-max-batch", offsetof(IOThread, aio_max_batch),
-};
-
-static void iothread_get_param(Object *obj, Visitor *v,
- const char *name, IOThreadParamInfo *info, Error **errp)
-{
- IOThread *iothread = IOTHREAD(obj);
- int64_t *field = (void *)iothread + info->offset;
-
- visit_type_int64(v, name, field, errp);
-}
-
-static bool iothread_set_param(Object *obj, Visitor *v,
- const char *name, IOThreadParamInfo *info, Error **errp)
-{
- IOThread *iothread = IOTHREAD(obj);
- int64_t *field = (void *)iothread + info->offset;
- int64_t value;
-
- if (!visit_type_int64(v, name, &value, errp)) {
- return false;
- }
-
- if (value < 0) {
- error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
- info->name, INT64_MAX);
- return false;
- }
-
- *field = value;
-
- return true;
-}
-
-static void iothread_get_poll_param(Object *obj, Visitor *v,
- const char *name, void *opaque, Error **errp)
+static void iothread_class_init(ObjectClass *oc, void *class_data)
{
- IOThreadParamInfo *info = opaque;
-
- iothread_get_param(obj, v, name, info, errp);
-}
+ EventLoopBackendClass *bc = EVENT_LOOP_BACKEND_CLASS(oc);
-static void iothread_set_poll_param(Object *obj, Visitor *v,
- const char *name, void *opaque, Error **errp)
-{
- IOThread *iothread = IOTHREAD(obj);
- IOThreadParamInfo *info = opaque;
-
- if (!iothread_set_param(obj, v, name, info, errp)) {
- return;
- }
-
- if (iothread->ctx) {
- aio_context_set_poll_params(iothread->ctx,
- iothread->poll_max_ns,
- iothread->poll_grow,
- iothread->poll_shrink,
- errp);
- }
-}
-
-static void iothread_get_aio_param(Object *obj, Visitor *v,
- const char *name, void *opaque, Error **errp)
-{
- IOThreadParamInfo *info = opaque;
-
- iothread_get_param(obj, v, name, info, errp);
-}
-
-static void iothread_set_aio_param(Object *obj, Visitor *v,
- const char *name, void *opaque, Error **errp)
-{
- IOThread *iothread = IOTHREAD(obj);
- IOThreadParamInfo *info = opaque;
-
- if (!iothread_set_param(obj, v, name, info, errp)) {
- return;
- }
-
- if (iothread->ctx) {
- aio_context_set_aio_params(iothread->ctx,
- iothread->aio_max_batch,
- errp);
- }
-}
-
-static void iothread_class_init(ObjectClass *klass, void *class_data)
-{
- UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
- ucc->complete = iothread_complete;
-
- object_class_property_add(klass, "poll-max-ns", "int",
- iothread_get_poll_param,
- iothread_set_poll_param,
- NULL, &poll_max_ns_info);
- object_class_property_add(klass, "poll-grow", "int",
- iothread_get_poll_param,
- iothread_set_poll_param,
- NULL, &poll_grow_info);
- object_class_property_add(klass, "poll-shrink", "int",
- iothread_get_poll_param,
- iothread_set_poll_param,
- NULL, &poll_shrink_info);
- object_class_property_add(klass, "aio-max-batch", "int",
- iothread_get_aio_param,
- iothread_set_aio_param,
- NULL, &aio_max_batch_info);
+ bc->init = iothread_init;
}
static const TypeInfo iothread_info = {
.name = TYPE_IOTHREAD,
- .parent = TYPE_OBJECT,
+ .parent = TYPE_EVENT_LOOP_BACKEND,
.class_init = iothread_class_init,
.instance_size = sizeof(IOThread),
.instance_init = iothread_instance_init,
.instance_finalize = iothread_instance_finalize,
- .interfaces = (InterfaceInfo[]) {
- {TYPE_USER_CREATABLE},
- {}
- },
};
static void iothread_register_types(void)
@@ -380,10 +245,10 @@ static int query_one_iothread(Object *object, void *opaque)
info = g_new0(IOThreadInfo, 1);
info->id = iothread_get_id(iothread);
info->thread_id = iothread->thread_id;
- info->poll_max_ns = iothread->poll_max_ns;
- info->poll_grow = iothread->poll_grow;
- info->poll_shrink = iothread->poll_shrink;
- info->aio_max_batch = iothread->aio_max_batch;
+ info->poll_max_ns = iothread->parent_obj.poll_max_ns;
+ info->poll_grow = iothread->parent_obj.poll_grow;
+ info->poll_shrink = iothread->parent_obj.poll_shrink;
+ info->aio_max_batch = iothread->parent_obj.aio_max_batch;
QAPI_LIST_APPEND(*tail, info);
return 0;
diff --git a/qom/meson.build b/qom/meson.build
index 062a3789d8..c20e5dd1cb 100644
--- a/qom/meson.build
+++ b/qom/meson.build
@@ -4,6 +4,7 @@ qom_ss.add(files(
'object.c',
'object_interfaces.c',
'qom-qobject.c',
+ '../util/event-loop.c',
))
qmp_ss.add(files('qom-qmp-cmds.c'))
diff --git a/util/event-loop.c b/util/event-loop.c
new file mode 100644
index 0000000000..f3e50909a0
--- /dev/null
+++ b/util/event-loop.c
@@ -0,0 +1,142 @@
+/*
+ * QEMU event-loop backend
+ *
+ * Copyright (C) 2022 Red Hat Inc
+ *
+ * Authors:
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "util/event-loop.h"
+#include "qom/object_interfaces.h"
+#include "qapi/error.h"
+#include "qemu/main-loop.h"
+
+#ifdef CONFIG_POSIX
+/*
+ * Benchmark results from 2016 on NVMe SSD drives show max polling times around
+ * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
+ * workloads.
+ */
+#define EVENT_LOOP_BACKEND_POLL_MAX_NS_DEFAULT 32768ULL
+#else
+#define EVENT_LOOP_BACKEND_POLL_MAX_NS_DEFAULT 0ULL
+#endif
+
+static void event_loop_backend_instance_init(Object *obj)
+{
+ EventLoopBackend *event_loop_backend = EVENT_LOOP_BACKEND(obj);
+
+ event_loop_backend->poll_max_ns = EVENT_LOOP_BACKEND_POLL_MAX_NS_DEFAULT;
+}
+
+typedef struct {
+ const char *name;
+ ptrdiff_t offset; /* field's byte offset in EventLoopBackend struct */
+} EventLoopBackendParamInfo;
+
+static EventLoopBackendParamInfo poll_max_ns_info = {
+ "poll-max-ns", offsetof(EventLoopBackend, poll_max_ns),
+};
+static EventLoopBackendParamInfo poll_grow_info = {
+ "poll-grow", offsetof(EventLoopBackend, poll_grow),
+};
+static EventLoopBackendParamInfo poll_shrink_info = {
+ "poll-shrink", offsetof(EventLoopBackend, poll_shrink),
+};
+static EventLoopBackendParamInfo aio_max_batch_info = {
+ "aio-max-batch", offsetof(EventLoopBackend, aio_max_batch),
+};
+
+static void event_loop_backend_get_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ EventLoopBackendParamInfo *info = opaque;
+ EventLoopBackend *event_loop_backend = EVENT_LOOP_BACKEND(obj);
+ int64_t *field = (void *)event_loop_backend + info->offset;
+
+ visit_type_int64(v, name, field, errp);
+}
+
+static void event_loop_backend_set_param(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ EventLoopBackend *event_loop_backend = EVENT_LOOP_BACKEND(obj);
+ EventLoopBackendParamInfo *info = opaque;
+ int64_t *field = (void *)event_loop_backend + info->offset;
+ int64_t value;
+
+ if (!visit_type_int64(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value < 0) {
+ error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
+ info->name, INT64_MAX);
+ return;
+ }
+
+ *field = value;
+
+ return;
+
+}
+
+static void
+event_loop_backend_complete(UserCreatable *uc, Error **errp)
+{
+ EventLoopBackend *backend = EVENT_LOOP_BACKEND(uc);
+ EventLoopBackendClass *bc = EVENT_LOOP_BACKEND_GET_CLASS(uc);
+
+ if (bc->init) {
+ bc->init(backend, errp);
+ }
+}
+
+static void event_loop_backend_class_init(ObjectClass *klass, void *class_data)
+{
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
+ ucc->complete = event_loop_backend_complete;
+
+ object_class_property_add(klass, "poll-max-ns", "int",
+ event_loop_backend_get_param,
+ event_loop_backend_set_param,
+ NULL, &poll_max_ns_info);
+ object_class_property_add(klass, "poll-grow", "int",
+ event_loop_backend_get_param,
+ event_loop_backend_set_param,
+ NULL, &poll_grow_info);
+ object_class_property_add(klass, "poll-shrink", "int",
+ event_loop_backend_get_param,
+ event_loop_backend_set_param,
+ NULL, &poll_shrink_info);
+ object_class_property_add(klass, "aio-max-batch", "int",
+ event_loop_backend_get_param,
+ event_loop_backend_set_param,
+ NULL, &aio_max_batch_info);
+}
+
+static const TypeInfo event_loop_backend_info = {
+ .name = TYPE_EVENT_LOOP_BACKEND,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(EventLoopBackend),
+ .instance_init = event_loop_backend_instance_init,
+ .class_size = sizeof(EventLoopBackendClass),
+ .class_init = event_loop_backend_class_init,
+ .abstract = true,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_USER_CREATABLE },
+ { }
+ }
+};
+
+static void register_types(void)
+{
+ type_register_static(&event_loop_backend_info);
+}
+type_init(register_types);
diff --git a/util/event-loop.h b/util/event-loop.h
new file mode 100644
index 0000000000..8883a0d086
--- /dev/null
+++ b/util/event-loop.h
@@ -0,0 +1,40 @@
+/*
+ * QEMU event-loop backend
+ *
+ * Copyright (C) 2022 Red Hat Inc
+ *
+ * Authors:
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_EVENT_LOOP_H
+#define QEMU_EVENT_LOOP_H
+
+#include "qom/object.h"
+#include "block/aio.h"
+#include "qemu/typedefs.h"
+
+#define TYPE_EVENT_LOOP_BACKEND "event-loop-backend"
+OBJECT_DECLARE_TYPE(EventLoopBackend, EventLoopBackendClass,
+ EVENT_LOOP_BACKEND)
+
+struct EventLoopBackendClass {
+ ObjectClass parent_class;
+
+ void (*init)(EventLoopBackend *backend, Error **errp);
+};
+
+struct EventLoopBackend {
+ Object parent;
+
+ /* AioContext poll parameters */
+ int64_t poll_max_ns;
+ int64_t poll_grow;
+ int64_t poll_shrink;
+
+ /* AioContext AIO engine parameters */
+ int64_t aio_max_batch;
+};
+#endif
--
2.35.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 2/3] util/main-loop: Introduce the main loop into QOM
2022-02-21 17:08 [PATCH 0/3] util/thread-pool: Expose minimum and maximum size Nicolas Saenz Julienne
2022-02-21 17:08 ` [PATCH 1/3] util & iothread: Introduce event-loop abstract class Nicolas Saenz Julienne
@ 2022-02-21 17:08 ` Nicolas Saenz Julienne
2022-02-22 6:07 ` Markus Armbruster
2022-02-24 10:01 ` Stefan Hajnoczi
2022-02-21 17:08 ` [PATCH 3/3] util/event-loop: Introduce options to set the thread pool size Nicolas Saenz Julienne
2 siblings, 2 replies; 13+ messages in thread
From: Nicolas Saenz Julienne @ 2022-02-21 17:08 UTC (permalink / raw)
To: kwolf, stefanha, berrange
Cc: fam, eduardo, qemu-block, michael.roth, mtosatti, qemu-devel,
armbru, hreitz, pbonzini, Nicolas Saenz Julienne, eblake
'event-loop-backend' provides basic property handling for all
'AioContext' based event loops. So let's define a new 'MainLoopClass'
that inherits from it. This will permit tweaking the main loop's
properties through qapi as well as through the command line using the
'-object' keyword[1]. Only one instance of 'MainLoopClass' may be
created at any time.
'EventLoopBackendClass' learns a new callback, 'can_be_deleted()', so as
to mark 'MainLoop' as non-deletable.
Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
[1] For example:
-object main-loop,id=main-loop,poll-max-ns=<value>
---
include/qemu/main-loop.h | 11 ++++++++++
qapi/qom.json | 10 ++++++----
qga/meson.build | 2 +-
tests/unit/meson.build | 10 +++++-----
util/event-loop.c | 13 ++++++++++++
util/event-loop.h | 1 +
util/main-loop.c | 43 ++++++++++++++++++++++++++++++++++++++++
7 files changed, 80 insertions(+), 10 deletions(-)
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index 8dbc6fcb89..fea5a3e9d4 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -26,9 +26,20 @@
#define QEMU_MAIN_LOOP_H
#include "block/aio.h"
+#include "qom/object.h"
+#include "util/event-loop.h"
#define SIG_IPI SIGUSR1
+#define TYPE_MAIN_LOOP "main-loop"
+
+struct MainLoop {
+ EventLoopBackend parent_obj;
+};
+typedef struct MainLoop MainLoop;
+
+DECLARE_INSTANCE_CHECKER(MainLoop, MAIN_LOOP, TYPE_MAIN_LOOP)
+
/**
* qemu_init_main_loop: Set up the process so that it can run the main loop.
*
diff --git a/qapi/qom.json b/qapi/qom.json
index eeb5395ff3..e7730ef62f 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -500,9 +500,9 @@
'*grab-toggle': 'GrabToggleKeys' } }
##
-# @IothreadProperties:
+# @EventLoopBackendProperties:
#
-# Properties for iothread objects.
+# Properties for iothread and main-loop objects.
#
# @poll-max-ns: the maximum number of nanoseconds to busy wait for events.
# 0 means polling is disabled (default: 32768 on POSIX hosts,
@@ -522,7 +522,7 @@
#
# Since: 2.0
##
-{ 'struct': 'IothreadProperties',
+{ 'struct': 'EventLoopBackendProperties',
'data': { '*poll-max-ns': 'int',
'*poll-grow': 'int',
'*poll-shrink': 'int',
@@ -818,6 +818,7 @@
{ 'name': 'input-linux',
'if': 'CONFIG_LINUX' },
'iothread',
+ 'main-loop',
{ 'name': 'memory-backend-epc',
'if': 'CONFIG_LINUX' },
'memory-backend-file',
@@ -882,7 +883,8 @@
'input-barrier': 'InputBarrierProperties',
'input-linux': { 'type': 'InputLinuxProperties',
'if': 'CONFIG_LINUX' },
- 'iothread': 'IothreadProperties',
+ 'iothread': 'EventLoopBackendProperties',
+ 'main-loop': 'EventLoopBackendProperties',
'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties',
'if': 'CONFIG_LINUX' },
'memory-backend-file': 'MemoryBackendFileProperties',
diff --git a/qga/meson.build b/qga/meson.build
index 1ee9dca60b..3051473e04 100644
--- a/qga/meson.build
+++ b/qga/meson.build
@@ -52,7 +52,7 @@ qga_ss = qga_ss.apply(config_host, strict: false)
qga = executable('qemu-ga', qga_ss.sources(),
link_args: config_host['LIBS_QGA'].split(),
- dependencies: [qemuutil, libudev],
+ dependencies: [qemuutil, libudev, qom],
install: true)
all_qga = [qga]
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index 64a5e7bfde..7a1af584dd 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -51,7 +51,7 @@ tests = {
if have_system or have_tools
tests += {
- 'test-qmp-event': [testqapi],
+ 'test-qmp-event': [testqapi, qom],
}
endif
@@ -120,17 +120,17 @@ endif
if have_system
tests += {
'test-iov': [],
- 'test-qmp-cmds': [testqapi],
+ 'test-qmp-cmds': [testqapi, qom],
'test-xbzrle': [migration],
- 'test-timed-average': [],
- 'test-util-sockets': ['socket-helpers.c'],
+ 'test-timed-average': [qom],
+ 'test-util-sockets': ['socket-helpers.c', qom],
'test-base64': [],
'test-bufferiszero': [],
'test-vmstate': [migration, io],
'test-yank': ['socket-helpers.c', qom, io, chardev]
}
if config_host_data.get('CONFIG_INOTIFY1')
- tests += {'test-util-filemonitor': []}
+ tests += {'test-util-filemonitor': [qom]}
endif
# Some tests: test-char, test-qdev-global-props, and test-qga,
diff --git a/util/event-loop.c b/util/event-loop.c
index f3e50909a0..c0ddd61f20 100644
--- a/util/event-loop.c
+++ b/util/event-loop.c
@@ -98,10 +98,23 @@ event_loop_backend_complete(UserCreatable *uc, Error **errp)
}
}
+static bool event_loop_backend_can_be_deleted(UserCreatable *uc)
+{
+ EventLoopBackendClass *bc = EVENT_LOOP_BACKEND_GET_CLASS(uc);
+ EventLoopBackend *backend = EVENT_LOOP_BACKEND(uc);
+
+ if (bc->can_be_deleted) {
+ return bc->can_be_deleted(backend);
+ }
+
+ return true;
+}
+
static void event_loop_backend_class_init(ObjectClass *klass, void *class_data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
ucc->complete = event_loop_backend_complete;
+ ucc->can_be_deleted = event_loop_backend_can_be_deleted;
object_class_property_add(klass, "poll-max-ns", "int",
event_loop_backend_get_param,
diff --git a/util/event-loop.h b/util/event-loop.h
index 8883a0d086..34cf9309af 100644
--- a/util/event-loop.h
+++ b/util/event-loop.h
@@ -24,6 +24,7 @@ struct EventLoopBackendClass {
ObjectClass parent_class;
void (*init)(EventLoopBackend *backend, Error **errp);
+ bool (*can_be_deleted)(EventLoopBackend *backend);
};
struct EventLoopBackend {
diff --git a/util/main-loop.c b/util/main-loop.c
index 4d5a5b9943..395fd9bd3e 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -33,6 +33,7 @@
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qemu/compiler.h"
+#include "qom/object.h"
#ifndef _WIN32
#include <sys/wait.h>
@@ -184,6 +185,48 @@ int qemu_init_main_loop(Error **errp)
return 0;
}
+MainLoop *mloop;
+
+static void main_loop_init(EventLoopBackend *bc, Error **errp)
+{
+ MainLoop *m = MAIN_LOOP(bc);
+
+ if (mloop) {
+ error_setg(errp, "only one main-loop instance allowed");
+ return;
+ }
+
+ mloop = m;
+ return;
+}
+
+static bool main_loop_can_be_deleted(EventLoopBackend *bc)
+{
+ return false;
+}
+
+static void main_loop_class_init(ObjectClass *oc, void *class_data)
+{
+ EventLoopBackendClass *bc = EVENT_LOOP_BACKEND_CLASS(oc);
+
+ bc->init = main_loop_init;
+ bc->can_be_deleted = main_loop_can_be_deleted;
+}
+
+static const TypeInfo main_loop_info = {
+ .name = TYPE_MAIN_LOOP,
+ .parent = TYPE_EVENT_LOOP_BACKEND,
+ .class_init = main_loop_class_init,
+ .instance_size = sizeof(MainLoop),
+};
+
+static void main_loop_register_types(void)
+{
+ type_register_static(&main_loop_info);
+}
+
+type_init(main_loop_register_types)
+
static int max_priority;
#ifndef _WIN32
--
2.35.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 3/3] util/event-loop: Introduce options to set the thread pool size
2022-02-21 17:08 [PATCH 0/3] util/thread-pool: Expose minimum and maximum size Nicolas Saenz Julienne
2022-02-21 17:08 ` [PATCH 1/3] util & iothread: Introduce event-loop abstract class Nicolas Saenz Julienne
2022-02-21 17:08 ` [PATCH 2/3] util/main-loop: Introduce the main loop into QOM Nicolas Saenz Julienne
@ 2022-02-21 17:08 ` Nicolas Saenz Julienne
2022-02-24 10:40 ` Stefan Hajnoczi
2 siblings, 1 reply; 13+ messages in thread
From: Nicolas Saenz Julienne @ 2022-02-21 17:08 UTC (permalink / raw)
To: kwolf, stefanha, berrange
Cc: fam, eduardo, qemu-block, michael.roth, mtosatti, qemu-devel,
armbru, hreitz, pbonzini, Nicolas Saenz Julienne, eblake
The thread pool regulates itself: when idle, it kills threads until
empty; when in demand, it creates new threads until full. This behaviour
doesn't play well with latency sensitive workloads where the price of
creating a new thread is too high. For example, when paired with qemu's
'-mlock', or using safety features like SafeStack, creating a new thread
has been measured to take multiple milliseconds.
In order to mitigate this let's introduce a new 'EventLoopBackend'
property to set the thread pool size. The threads will be created during
the pool's initialization, remain available during its lifetime
regardless of demand, and destroyed upon freeing it. A properly
characterized workload will then be able to configure the pool to avoid
any latency spike.
Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
---
include/block/aio.h | 11 +++++++++++
qapi/qom.json | 4 +++-
util/async.c | 3 +++
util/event-loop.c | 15 ++++++++++++++-
util/event-loop.h | 4 ++++
util/main-loop.c | 13 +++++++++++++
util/thread-pool.c | 41 +++++++++++++++++++++++++++++++++++++----
7 files changed, 85 insertions(+), 6 deletions(-)
diff --git a/include/block/aio.h b/include/block/aio.h
index 5634173b12..331483d1d1 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -192,6 +192,8 @@ struct AioContext {
QSLIST_HEAD(, Coroutine) scheduled_coroutines;
QEMUBH *co_schedule_bh;
+ int pool_min;
+ int pool_max;
/* Thread pool for performing work and receiving completion callbacks.
* Has its own locking.
*/
@@ -769,4 +771,13 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
Error **errp);
+/**
+ * aio_context_set_thread_pool_params:
+ * @ctx: the aio context
+ * @min: Thread pool's min size, 0 by default
+ * @max: Thread pool's max size, 64 by default
+ */
+void aio_context_set_thread_pool_params(AioContext *ctx, uint64_t min,
+ uint64_t max, Error **errp);
+
#endif
diff --git a/qapi/qom.json b/qapi/qom.json
index e7730ef62f..79c141b6bf 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -526,7 +526,9 @@
'data': { '*poll-max-ns': 'int',
'*poll-grow': 'int',
'*poll-shrink': 'int',
- '*aio-max-batch': 'int' } }
+ '*aio-max-batch': 'int',
+ '*thread-pool-min': 'int',
+ '*thread-pool-max': 'int' } }
##
# @MemoryBackendProperties:
diff --git a/util/async.c b/util/async.c
index 08d25feef5..58ef2e2ee5 100644
--- a/util/async.c
+++ b/util/async.c
@@ -562,6 +562,9 @@ AioContext *aio_context_new(Error **errp)
ctx->aio_max_batch = 0;
+ ctx->pool_min = 0;
+ ctx->pool_max = 64;
+
return ctx;
fail:
g_source_destroy(&ctx->source);
diff --git a/util/event-loop.c b/util/event-loop.c
index c0ddd61f20..f2532e7d31 100644
--- a/util/event-loop.c
+++ b/util/event-loop.c
@@ -51,6 +51,12 @@ static EventLoopBackendParamInfo poll_shrink_info = {
static EventLoopBackendParamInfo aio_max_batch_info = {
"aio-max-batch", offsetof(EventLoopBackend, aio_max_batch),
};
+static EventLoopBackendParamInfo thread_pool_min_info = {
+ "thread-pool-min", offsetof(EventLoopBackend, thread_pool_min),
+};
+static EventLoopBackendParamInfo thread_pool_max_info = {
+ "thread-pool-max", offsetof(EventLoopBackend, thread_pool_max),
+};
static void event_loop_backend_get_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
@@ -84,7 +90,6 @@ static void event_loop_backend_set_param(Object *obj, Visitor *v,
*field = value;
return;
-
}
static void
@@ -132,6 +137,14 @@ static void event_loop_backend_class_init(ObjectClass *klass, void *class_data)
event_loop_backend_get_param,
event_loop_backend_set_param,
NULL, &aio_max_batch_info);
+ object_class_property_add(klass, "thread-pool-min", "int",
+ event_loop_backend_get_param,
+ event_loop_backend_set_param,
+ NULL, &thread_pool_min_info);
+ object_class_property_add(klass, "thread-pool-max", "int",
+ event_loop_backend_get_param,
+ event_loop_backend_set_param,
+ NULL, &thread_pool_max_info);
}
static const TypeInfo event_loop_backend_info = {
diff --git a/util/event-loop.h b/util/event-loop.h
index 34cf9309af..0f4255ee7b 100644
--- a/util/event-loop.h
+++ b/util/event-loop.h
@@ -37,5 +37,9 @@ struct EventLoopBackend {
/* AioContext AIO engine parameters */
int64_t aio_max_batch;
+
+ /* AioContext thread pool parameters */
+ int64_t thread_pool_min;
+ int64_t thread_pool_max;
};
#endif
diff --git a/util/main-loop.c b/util/main-loop.c
index 395fd9bd3e..266a9c72d8 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -190,12 +190,25 @@ MainLoop *mloop;
static void main_loop_init(EventLoopBackend *bc, Error **errp)
{
MainLoop *m = MAIN_LOOP(bc);
+ Error *local_error = NULL;
+
+ if (!qemu_aio_context) {
+ error_setg(errp, "qemu aio context not ready");
+ return;
+ }
if (mloop) {
error_setg(errp, "only one main-loop instance allowed");
return;
}
+ aio_context_set_thread_pool_params(qemu_aio_context, bc->thread_pool_min,
+ bc->thread_pool_max, &local_error);
+ if (local_error) {
+ error_propagate(errp, local_error);
+ return;
+ }
+
mloop = m;
return;
}
diff --git a/util/thread-pool.c b/util/thread-pool.c
index d763cea505..95c339cb00 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -21,6 +21,7 @@
#include "trace.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
+#include "qapi/error.h"
static void do_spawn_thread(ThreadPool *pool);
@@ -58,7 +59,6 @@ struct ThreadPool {
QemuMutex lock;
QemuCond worker_stopped;
QemuSemaphore sem;
- int max_threads;
QEMUBH *new_thread_bh;
/* The following variables are only accessed from one AioContext. */
@@ -76,6 +76,7 @@ struct ThreadPool {
static void *worker_thread(void *opaque)
{
ThreadPool *pool = opaque;
+ AioContext *ctx = pool->ctx;
qemu_mutex_lock(&pool->lock);
pool->pending_threads--;
@@ -91,7 +92,8 @@ static void *worker_thread(void *opaque)
ret = qemu_sem_timedwait(&pool->sem, 10000);
qemu_mutex_lock(&pool->lock);
pool->idle_threads--;
- } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
+ } while (ret == -1 && (!QTAILQ_EMPTY(&pool->request_list) ||
+ pool->cur_threads <= ctx->pool_min));
if (ret == -1 || pool->stopping) {
break;
}
@@ -244,6 +246,7 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
ThreadPoolFunc *func, void *arg,
BlockCompletionFunc *cb, void *opaque)
{
+ AioContext *ctx = pool->ctx;
ThreadPoolElement *req;
req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
@@ -257,7 +260,7 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
trace_thread_pool_submit(pool, req, arg);
qemu_mutex_lock(&pool->lock);
- if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {
+ if (pool->idle_threads == 0 && pool->cur_threads < ctx->pool_max) {
spawn_thread(pool);
}
QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs);
@@ -306,11 +309,16 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
qemu_mutex_init(&pool->lock);
qemu_cond_init(&pool->worker_stopped);
qemu_sem_init(&pool->sem, 0);
- pool->max_threads = 64;
pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);
QLIST_INIT(&pool->head);
QTAILQ_INIT(&pool->request_list);
+
+ qemu_mutex_lock(&pool->lock);
+ for (int i = pool->cur_threads; i < ctx->pool_min; i++) {
+ spawn_thread(pool);
+ }
+ qemu_mutex_unlock(&pool->lock);
}
ThreadPool *thread_pool_new(AioContext *ctx)
@@ -350,3 +358,28 @@ void thread_pool_free(ThreadPool *pool)
qemu_mutex_destroy(&pool->lock);
g_free(pool);
}
+
+void aio_context_set_thread_pool_params(AioContext *ctx, uint64_t min,
+ uint64_t max, Error **errp)
+{
+ ThreadPool *pool = ctx->thread_pool;
+
+ if (min > max || !max) {
+ error_setg(errp, "bad thread-pool-min/thread-pool-max values");
+ return;
+ }
+
+ if (pool) {
+ qemu_mutex_lock(&pool->lock);
+ }
+
+ ctx->pool_min = min;
+ ctx->pool_max = max;
+
+ if (pool) {
+ for (int i = pool->cur_threads; i < ctx->pool_min; i++) {
+ spawn_thread(pool);
+ }
+ qemu_mutex_unlock(&pool->lock);
+ }
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 13+ messages in thread