* [Qemu-devel] [PATCH 1.3? 0/2] AioContext and thread pool unit tests
@ 2012-11-23 15:13 Paolo Bonzini
2012-11-23 15:13 ` [Qemu-devel] [PATCH 1.3? 1/2] tests: add AioContext " Paolo Bonzini
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Paolo Bonzini @ 2012-11-23 15:13 UTC (permalink / raw)
To: qemu-devel; +Cc: aliguori
As requested. :) The tests pass on both Linux and Wine.
I'm not sure if it makes sense to include these in 1.3, but the idea
has some worth.
Paolo Bonzini (2):
tests: add AioContext unit tests
tests: add thread pool unit tests
tests/Makefile | 4 +
tests/test-aio.c | 667 +++++++++++++++++++++++++++++++++++++++++++++++
tests/test-thread-pool.c | 213 +++++++++++++++
3 files changed, 884 insertions(+)
create mode 100644 tests/test-aio.c
create mode 100644 tests/test-thread-pool.c
--
1.8.0
^ permalink raw reply [flat|nested] 4+ messages in thread
* [Qemu-devel] [PATCH 1.3? 1/2] tests: add AioContext unit tests
2012-11-23 15:13 [Qemu-devel] [PATCH 1.3? 0/2] AioContext and thread pool unit tests Paolo Bonzini
@ 2012-11-23 15:13 ` Paolo Bonzini
2012-11-23 15:13 ` [Qemu-devel] [PATCH 1.3? 2/2] tests: add thread pool " Paolo Bonzini
2012-11-26 21:48 ` [Qemu-devel] [PATCH 1.3? 0/2] AioContext and " Anthony Liguori
2 siblings, 0 replies; 4+ messages in thread
From: Paolo Bonzini @ 2012-11-23 15:13 UTC (permalink / raw)
To: qemu-devel; +Cc: aliguori
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
tests/Makefile | 2 +
tests/test-aio.c | 667 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 669 insertions(+)
create mode 100644 tests/test-aio.c
diff --git a/tests/Makefile b/tests/Makefile
index ca680e5..61cbe3b 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -15,6 +15,7 @@ check-unit-y += tests/test-string-output-visitor$(EXESUF)
check-unit-y += tests/test-coroutine$(EXESUF)
check-unit-y += tests/test-visitor-serialization$(EXESUF)
check-unit-y += tests/test-iov$(EXESUF)
+check-unit-y += tests/test-aio$(EXESUF)
check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh
@@ -49,6 +50,7 @@ tests/check-qlist$(EXESUF): tests/check-qlist.o qlist.o qint.o
tests/check-qfloat$(EXESUF): tests/check-qfloat.o qfloat.o
tests/check-qjson$(EXESUF): tests/check-qjson.o $(qobject-obj-y) qemu-tool.o
tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(coroutine-obj-y) $(tools-obj-y) $(block-obj-y) iov.o libqemustub.a
+tests/test-aio$(EXESUF): tests/test-aio.o $(coroutine-obj-y) $(tools-obj-y) $(block-obj-y) libqemustub.a
tests/test-iov$(EXESUF): tests/test-iov.o iov.o
tests/test-qapi-types.c tests/test-qapi-types.h :\
diff --git a/tests/test-aio.c b/tests/test-aio.c
new file mode 100644
index 0000000..f53c908
--- /dev/null
+++ b/tests/test-aio.c
@@ -0,0 +1,667 @@
+/*
+ * AioContext tests
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#include <glib.h>
+#include "qemu-aio.h"
+
+AioContext *ctx;
+
+/* Simple callbacks for testing. */
+
+typedef struct {
+ QEMUBH *bh;
+ int n;
+ int max;
+} BHTestData;
+
+static void bh_test_cb(void *opaque)
+{
+ BHTestData *data = opaque;
+ if (++data->n < data->max) {
+ qemu_bh_schedule(data->bh);
+ }
+}
+
+static void bh_delete_cb(void *opaque)
+{
+ BHTestData *data = opaque;
+ if (++data->n < data->max) {
+ qemu_bh_schedule(data->bh);
+ } else {
+ qemu_bh_delete(data->bh);
+ data->bh = NULL;
+ }
+}
+
+typedef struct {
+ EventNotifier e;
+ int n;
+ int active;
+ bool auto_set;
+} EventNotifierTestData;
+
+static int event_active_cb(EventNotifier *e)
+{
+ EventNotifierTestData *data = container_of(e, EventNotifierTestData, e);
+ return data->active > 0;
+}
+
+static void event_ready_cb(EventNotifier *e)
+{
+ EventNotifierTestData *data = container_of(e, EventNotifierTestData, e);
+ g_assert(event_notifier_test_and_clear(e));
+ data->n++;
+ if (data->active > 0) {
+ data->active--;
+ }
+ if (data->auto_set && data->active) {
+ event_notifier_set(e);
+ }
+}
+
+/* Tests using aio_*. */
+
+static void test_notify(void)
+{
+ g_assert(!aio_poll(ctx, false));
+ aio_notify(ctx);
+ g_assert(!aio_poll(ctx, true));
+ g_assert(!aio_poll(ctx, false));
+}
+
+static void test_flush(void)
+{
+ g_assert(!aio_poll(ctx, false));
+ aio_notify(ctx);
+ aio_flush(ctx);
+ g_assert(!aio_poll(ctx, false));
+}
+
+static void test_bh_schedule(void)
+{
+ BHTestData data = { .n = 0 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(aio_poll(ctx, true));
+ g_assert_cmpint(data.n, ==, 1);
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+ qemu_bh_delete(data.bh);
+}
+
+static void test_bh_schedule10(void)
+{
+ BHTestData data = { .n = 0, .max = 10 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ g_assert(aio_poll(ctx, true));
+ g_assert_cmpint(data.n, ==, 2);
+
+ aio_flush(ctx);
+ g_assert_cmpint(data.n, ==, 10);
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 10);
+ qemu_bh_delete(data.bh);
+}
+
+static void test_bh_cancel(void)
+{
+ BHTestData data = { .n = 0 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ qemu_bh_cancel(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+ qemu_bh_delete(data.bh);
+}
+
+static void test_bh_delete(void)
+{
+ BHTestData data = { .n = 0 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ qemu_bh_delete(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+}
+
+static void test_bh_delete_from_cb(void)
+{
+ BHTestData data1 = { .n = 0, .max = 1 };
+
+ data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
+
+ qemu_bh_schedule(data1.bh);
+ g_assert_cmpint(data1.n, ==, 0);
+
+ aio_flush(ctx);
+ g_assert_cmpint(data1.n, ==, data1.max);
+ g_assert(data1.bh == NULL);
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert(!aio_poll(ctx, true));
+}
+
+static void test_bh_delete_from_cb_many(void)
+{
+ BHTestData data1 = { .n = 0, .max = 1 };
+ BHTestData data2 = { .n = 0, .max = 3 };
+ BHTestData data3 = { .n = 0, .max = 2 };
+ BHTestData data4 = { .n = 0, .max = 4 };
+
+ data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
+ data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
+ data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
+ data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);
+
+ qemu_bh_schedule(data1.bh);
+ qemu_bh_schedule(data2.bh);
+ qemu_bh_schedule(data3.bh);
+ qemu_bh_schedule(data4.bh);
+ g_assert_cmpint(data1.n, ==, 0);
+ g_assert_cmpint(data2.n, ==, 0);
+ g_assert_cmpint(data3.n, ==, 0);
+ g_assert_cmpint(data4.n, ==, 0);
+
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data1.n, ==, 1);
+ g_assert_cmpint(data2.n, ==, 1);
+ g_assert_cmpint(data3.n, ==, 1);
+ g_assert_cmpint(data4.n, ==, 1);
+ g_assert(data1.bh == NULL);
+
+ aio_flush(ctx);
+ g_assert_cmpint(data1.n, ==, data1.max);
+ g_assert_cmpint(data2.n, ==, data2.max);
+ g_assert_cmpint(data3.n, ==, data3.max);
+ g_assert_cmpint(data4.n, ==, data4.max);
+ g_assert(data1.bh == NULL);
+ g_assert(data2.bh == NULL);
+ g_assert(data3.bh == NULL);
+ g_assert(data4.bh == NULL);
+}
+
+static void test_bh_flush(void)
+{
+ BHTestData data = { .n = 0 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ aio_flush(ctx);
+ g_assert_cmpint(data.n, ==, 1);
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+ qemu_bh_delete(data.bh);
+}
+
+static void test_set_event_notifier(void)
+{
+ EventNotifierTestData data = { .n = 0, .active = 0 };
+ event_notifier_init(&data.e, false);
+ aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+
+ aio_set_event_notifier(ctx, &data.e, NULL, NULL);
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+ event_notifier_cleanup(&data.e);
+}
+
+static void test_wait_event_notifier(void)
+{
+ EventNotifierTestData data = { .n = 0, .active = 1 };
+ event_notifier_init(&data.e, false);
+ aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+ g_assert_cmpint(data.active, ==, 1);
+
+ event_notifier_set(&data.e);
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert_cmpint(data.active, ==, 0);
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert_cmpint(data.active, ==, 0);
+
+ aio_set_event_notifier(ctx, &data.e, NULL, NULL);
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ event_notifier_cleanup(&data.e);
+}
+
+static void test_flush_event_notifier(void)
+{
+ EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
+ event_notifier_init(&data.e, false);
+ aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+ g_assert_cmpint(data.active, ==, 10);
+
+ event_notifier_set(&data.e);
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert_cmpint(data.active, ==, 9);
+ g_assert(aio_poll(ctx, false));
+
+ aio_flush(ctx);
+ g_assert_cmpint(data.n, ==, 10);
+ g_assert_cmpint(data.active, ==, 0);
+ g_assert(!aio_poll(ctx, false));
+
+ aio_set_event_notifier(ctx, &data.e, NULL, NULL);
+ g_assert(!aio_poll(ctx, false));
+ event_notifier_cleanup(&data.e);
+}
+
+static void test_wait_event_notifier_noflush(void)
+{
+ EventNotifierTestData data = { .n = 0 };
+ EventNotifierTestData dummy = { .n = 0, .active = 1 };
+
+ event_notifier_init(&data.e, false);
+ aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+
+ /* Until there is an active descriptor, aio_poll may or may not call
+ * event_ready_cb. Still, it must not block. */
+ event_notifier_set(&data.e);
+ g_assert(!aio_poll(ctx, true));
+ data.n = 0;
+
+ /* An active event notifier forces aio_poll to look at EventNotifiers. */
+ event_notifier_init(&dummy.e, false);
+ aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, event_active_cb);
+
+ event_notifier_set(&data.e);
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ event_notifier_set(&data.e);
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 2);
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 2);
+
+ event_notifier_set(&dummy.e);
+ aio_flush(ctx);
+ g_assert_cmpint(data.n, ==, 2);
+ g_assert_cmpint(dummy.n, ==, 1);
+ g_assert_cmpint(dummy.active, ==, 0);
+
+ aio_set_event_notifier(ctx, &dummy.e, NULL, NULL);
+ event_notifier_cleanup(&dummy.e);
+
+ aio_set_event_notifier(ctx, &data.e, NULL, NULL);
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 2);
+
+ event_notifier_cleanup(&data.e);
+}
+
+/* Now the same tests, using the context as a GSource. They are
+ * very similar to the ones above, with g_main_context_iteration
+ * replacing aio_poll. However:
+ * - sometimes both the AioContext and the glib main loop wake
+ * themselves up. Hence, some "g_assert(!aio_poll(ctx, false));"
+ * are replaced by "while (g_main_context_iteration(NULL, false));".
+ * - there is no exact replacement for aio_flush's blocking wait.
+ * "while (g_main_context_iteration(NULL, true)" seems to work,
+ * but it is not documented _why_ it works. For these tests a
+ * non-blocking loop like "while (g_main_context_iteration(NULL, false)"
+ * works well, and that's what I am using.
+ */
+
+static void test_source_notify(void)
+{
+ while (g_main_context_iteration(NULL, false));
+ aio_notify(ctx);
+ g_assert(g_main_context_iteration(NULL, true));
+ g_assert(!g_main_context_iteration(NULL, false));
+}
+
+static void test_source_flush(void)
+{
+ g_assert(!g_main_context_iteration(NULL, false));
+ aio_notify(ctx);
+ while (g_main_context_iteration(NULL, false));
+ g_assert(!g_main_context_iteration(NULL, false));
+}
+
+static void test_source_bh_schedule(void)
+{
+ BHTestData data = { .n = 0 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(g_main_context_iteration(NULL, true));
+ g_assert_cmpint(data.n, ==, 1);
+
+ g_assert(!g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+ qemu_bh_delete(data.bh);
+}
+
+static void test_source_bh_schedule10(void)
+{
+ BHTestData data = { .n = 0, .max = 10 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ g_assert(g_main_context_iteration(NULL, true));
+ g_assert_cmpint(data.n, ==, 2);
+
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 10);
+
+ g_assert(!g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 10);
+ qemu_bh_delete(data.bh);
+}
+
+static void test_source_bh_cancel(void)
+{
+ BHTestData data = { .n = 0 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ qemu_bh_cancel(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 0);
+ qemu_bh_delete(data.bh);
+}
+
+static void test_source_bh_delete(void)
+{
+ BHTestData data = { .n = 0 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ qemu_bh_delete(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 0);
+}
+
+static void test_source_bh_delete_from_cb(void)
+{
+ BHTestData data1 = { .n = 0, .max = 1 };
+
+ data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
+
+ qemu_bh_schedule(data1.bh);
+ g_assert_cmpint(data1.n, ==, 0);
+
+ g_main_context_iteration(NULL, true);
+ g_assert_cmpint(data1.n, ==, data1.max);
+ g_assert(data1.bh == NULL);
+
+ g_assert(!g_main_context_iteration(NULL, false));
+}
+
+static void test_source_bh_delete_from_cb_many(void)
+{
+ BHTestData data1 = { .n = 0, .max = 1 };
+ BHTestData data2 = { .n = 0, .max = 3 };
+ BHTestData data3 = { .n = 0, .max = 2 };
+ BHTestData data4 = { .n = 0, .max = 4 };
+
+ data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
+ data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
+ data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
+ data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);
+
+ qemu_bh_schedule(data1.bh);
+ qemu_bh_schedule(data2.bh);
+ qemu_bh_schedule(data3.bh);
+ qemu_bh_schedule(data4.bh);
+ g_assert_cmpint(data1.n, ==, 0);
+ g_assert_cmpint(data2.n, ==, 0);
+ g_assert_cmpint(data3.n, ==, 0);
+ g_assert_cmpint(data4.n, ==, 0);
+
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data1.n, ==, 1);
+ g_assert_cmpint(data2.n, ==, 1);
+ g_assert_cmpint(data3.n, ==, 1);
+ g_assert_cmpint(data4.n, ==, 1);
+ g_assert(data1.bh == NULL);
+
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data1.n, ==, data1.max);
+ g_assert_cmpint(data2.n, ==, data2.max);
+ g_assert_cmpint(data3.n, ==, data3.max);
+ g_assert_cmpint(data4.n, ==, data4.max);
+ g_assert(data1.bh == NULL);
+ g_assert(data2.bh == NULL);
+ g_assert(data3.bh == NULL);
+ g_assert(data4.bh == NULL);
+}
+
+static void test_source_bh_flush(void)
+{
+ BHTestData data = { .n = 0 };
+ data.bh = aio_bh_new(ctx, bh_test_cb, &data);
+
+ qemu_bh_schedule(data.bh);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(g_main_context_iteration(NULL, true));
+ g_assert_cmpint(data.n, ==, 1);
+
+ g_assert(!g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+ qemu_bh_delete(data.bh);
+}
+
+static void test_source_set_event_notifier(void)
+{
+ EventNotifierTestData data = { .n = 0, .active = 0 };
+ event_notifier_init(&data.e, false);
+ aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 0);
+
+ aio_set_event_notifier(ctx, &data.e, NULL, NULL);
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 0);
+ event_notifier_cleanup(&data.e);
+}
+
+static void test_source_wait_event_notifier(void)
+{
+ EventNotifierTestData data = { .n = 0, .active = 1 };
+ event_notifier_init(&data.e, false);
+ aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 0);
+ g_assert_cmpint(data.active, ==, 1);
+
+ event_notifier_set(&data.e);
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert_cmpint(data.active, ==, 0);
+
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert_cmpint(data.active, ==, 0);
+
+ aio_set_event_notifier(ctx, &data.e, NULL, NULL);
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ event_notifier_cleanup(&data.e);
+}
+
+static void test_source_flush_event_notifier(void)
+{
+ EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
+ event_notifier_init(&data.e, false);
+ aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 0);
+ g_assert_cmpint(data.active, ==, 10);
+
+ event_notifier_set(&data.e);
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert_cmpint(data.active, ==, 9);
+ g_assert(g_main_context_iteration(NULL, false));
+
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 10);
+ g_assert_cmpint(data.active, ==, 0);
+ g_assert(!g_main_context_iteration(NULL, false));
+
+ aio_set_event_notifier(ctx, &data.e, NULL, NULL);
+ while (g_main_context_iteration(NULL, false));
+ event_notifier_cleanup(&data.e);
+}
+
+static void test_source_wait_event_notifier_noflush(void)
+{
+ EventNotifierTestData data = { .n = 0 };
+ EventNotifierTestData dummy = { .n = 0, .active = 1 };
+
+ event_notifier_init(&data.e, false);
+ aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL);
+
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 0);
+
+ /* Until there is an active descriptor, glib may or may not call
+ * event_ready_cb. Still, it must not block. */
+ event_notifier_set(&data.e);
+ g_main_context_iteration(NULL, true);
+ data.n = 0;
+
+ /* An active event notifier forces aio_poll to look at EventNotifiers. */
+ event_notifier_init(&dummy.e, false);
+ aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, event_active_cb);
+
+ event_notifier_set(&data.e);
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert(!g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ event_notifier_set(&data.e);
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 2);
+ g_assert(!g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 2);
+
+ event_notifier_set(&dummy.e);
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 2);
+ g_assert_cmpint(dummy.n, ==, 1);
+ g_assert_cmpint(dummy.active, ==, 0);
+
+ aio_set_event_notifier(ctx, &dummy.e, NULL, NULL);
+ event_notifier_cleanup(&dummy.e);
+
+ aio_set_event_notifier(ctx, &data.e, NULL, NULL);
+ while (g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 2);
+
+ event_notifier_cleanup(&data.e);
+}
+
+/* End of tests. */
+
+int main(int argc, char **argv)
+{
+ GSource *src;
+
+ ctx = aio_context_new();
+ src = aio_get_g_source(ctx);
+ g_source_attach(src, NULL);
+ g_source_unref(src);
+
+ while (g_main_context_iteration(NULL, false));
+
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/aio/notify", test_notify);
+ g_test_add_func("/aio/flush", test_flush);
+ g_test_add_func("/aio/bh/schedule", test_bh_schedule);
+ g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
+ g_test_add_func("/aio/bh/cancel", test_bh_cancel);
+ g_test_add_func("/aio/bh/delete", test_bh_delete);
+ g_test_add_func("/aio/bh/callback-delete/one", test_bh_delete_from_cb);
+ g_test_add_func("/aio/bh/callback-delete/many", test_bh_delete_from_cb_many);
+ g_test_add_func("/aio/bh/flush", test_bh_flush);
+ g_test_add_func("/aio/event/add-remove", test_set_event_notifier);
+ g_test_add_func("/aio/event/wait", test_wait_event_notifier);
+ g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
+ g_test_add_func("/aio/event/flush", test_flush_event_notifier);
+
+ g_test_add_func("/aio-gsource/notify", test_source_notify);
+ g_test_add_func("/aio-gsource/flush", test_source_flush);
+ g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
+ g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
+ g_test_add_func("/aio-gsource/bh/cancel", test_source_bh_cancel);
+ g_test_add_func("/aio-gsource/bh/delete", test_source_bh_delete);
+ g_test_add_func("/aio-gsource/bh/callback-delete/one", test_source_bh_delete_from_cb);
+ g_test_add_func("/aio-gsource/bh/callback-delete/many", test_source_bh_delete_from_cb_many);
+ g_test_add_func("/aio-gsource/bh/flush", test_source_bh_flush);
+ g_test_add_func("/aio-gsource/event/add-remove", test_source_set_event_notifier);
+ g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
+ g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
+ g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
+ return g_test_run();
+}
--
1.8.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [Qemu-devel] [PATCH 1.3? 2/2] tests: add thread pool unit tests
2012-11-23 15:13 [Qemu-devel] [PATCH 1.3? 0/2] AioContext and thread pool unit tests Paolo Bonzini
2012-11-23 15:13 ` [Qemu-devel] [PATCH 1.3? 1/2] tests: add AioContext " Paolo Bonzini
@ 2012-11-23 15:13 ` Paolo Bonzini
2012-11-26 21:48 ` [Qemu-devel] [PATCH 1.3? 0/2] AioContext and " Anthony Liguori
2 siblings, 0 replies; 4+ messages in thread
From: Paolo Bonzini @ 2012-11-23 15:13 UTC (permalink / raw)
To: qemu-devel; +Cc: aliguori
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
tests/Makefile | 2 +
tests/test-thread-pool.c | 213 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 215 insertions(+)
create mode 100644 tests/test-thread-pool.c
diff --git a/tests/Makefile b/tests/Makefile
index 61cbe3b..b60f0fb 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -16,6 +16,7 @@ check-unit-y += tests/test-coroutine$(EXESUF)
check-unit-y += tests/test-visitor-serialization$(EXESUF)
check-unit-y += tests/test-iov$(EXESUF)
check-unit-y += tests/test-aio$(EXESUF)
+check-unit-y += tests/test-thread-pool$(EXESUF)
check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh
@@ -51,6 +52,7 @@ tests/check-qfloat$(EXESUF): tests/check-qfloat.o qfloat.o
tests/check-qjson$(EXESUF): tests/check-qjson.o $(qobject-obj-y) qemu-tool.o
tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(coroutine-obj-y) $(tools-obj-y) $(block-obj-y) iov.o libqemustub.a
tests/test-aio$(EXESUF): tests/test-aio.o $(coroutine-obj-y) $(tools-obj-y) $(block-obj-y) libqemustub.a
+tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(coroutine-obj-y) $(tools-obj-y) $(block-obj-y) libqemustub.a
tests/test-iov$(EXESUF): tests/test-iov.o iov.o
tests/test-qapi-types.c tests/test-qapi-types.h :\
diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c
new file mode 100644
index 0000000..fd02f38
--- /dev/null
+++ b/tests/test-thread-pool.c
@@ -0,0 +1,213 @@
+#include <glib.h>
+#include "qemu-common.h"
+#include "qemu-aio.h"
+#include "thread-pool.h"
+#include "block.h"
+
+static int active;
+
+typedef struct {
+ BlockDriverAIOCB *aiocb;
+ int n;
+ int ret;
+} WorkerTestData;
+
+static int worker_cb(void *opaque)
+{
+ WorkerTestData *data = opaque;
+ return __sync_fetch_and_add(&data->n, 1);
+}
+
+static int long_cb(void *opaque)
+{
+ WorkerTestData *data = opaque;
+ __sync_fetch_and_add(&data->n, 1);
+ g_usleep(2000000);
+ __sync_fetch_and_add(&data->n, 1);
+ return 0;
+}
+
+static void done_cb(void *opaque, int ret)
+{
+ WorkerTestData *data = opaque;
+ g_assert_cmpint(data->ret, ==, -EINPROGRESS);
+ data->ret = ret;
+ data->aiocb = NULL;
+
+ /* Callbacks are serialized, so no need to use atomic ops. */
+ active--;
+}
+
+/* A non-blocking poll of the main AIO context (we cannot use aio_poll
+ * because we do not know the AioContext).
+ */
+static void qemu_aio_wait_nonblocking(void)
+{
+ qemu_notify_event();
+ qemu_aio_wait();
+}
+
+static void test_submit(void)
+{
+ WorkerTestData data = { .n = 0 };
+ thread_pool_submit(worker_cb, &data);
+ qemu_aio_flush();
+ g_assert_cmpint(data.n, ==, 1);
+}
+
+static void test_submit_aio(void)
+{
+ WorkerTestData data = { .n = 0, .ret = -EINPROGRESS };
+ data.aiocb = thread_pool_submit_aio(worker_cb, &data, done_cb, &data);
+
+ /* The callbacks are not called until after the first wait. */
+ active = 1;
+ g_assert_cmpint(data.ret, ==, -EINPROGRESS);
+ qemu_aio_flush();
+ g_assert_cmpint(active, ==, 0);
+ g_assert_cmpint(data.n, ==, 1);
+ g_assert_cmpint(data.ret, ==, 0);
+}
+
+static void co_test_cb(void *opaque)
+{
+ WorkerTestData *data = opaque;
+
+ active = 1;
+ data->n = 0;
+ data->ret = -EINPROGRESS;
+ thread_pool_submit_co(worker_cb, data);
+
+ /* The test continues in test_submit_co, after qemu_coroutine_enter... */
+
+ g_assert_cmpint(data->n, ==, 1);
+ data->ret = 0;
+ active--;
+
+ /* The test continues in test_submit_co, after qemu_aio_flush... */
+}
+
+static void test_submit_co(void)
+{
+ WorkerTestData data;
+ Coroutine *co = qemu_coroutine_create(co_test_cb);
+
+ qemu_coroutine_enter(co, &data);
+
+ /* Back here once the worker has started. */
+
+ g_assert_cmpint(active, ==, 1);
+ g_assert_cmpint(data.ret, ==, -EINPROGRESS);
+
+ /* qemu_aio_flush will execute the rest of the coroutine. */
+
+ qemu_aio_flush();
+
+ /* Back here after the coroutine has finished. */
+
+ g_assert_cmpint(active, ==, 0);
+ g_assert_cmpint(data.ret, ==, 0);
+}
+
+static void test_submit_many(void)
+{
+ WorkerTestData data[100];
+ int i;
+
+ /* Start more work items than there will be threads. */
+ for (i = 0; i < 100; i++) {
+ data[i].n = 0;
+ data[i].ret = -EINPROGRESS;
+ thread_pool_submit_aio(worker_cb, &data[i], done_cb, &data[i]);
+ }
+
+ active = 100;
+ while (active > 0) {
+ qemu_aio_wait();
+ }
+ for (i = 0; i < 100; i++) {
+ g_assert_cmpint(data[i].n, ==, 1);
+ g_assert_cmpint(data[i].ret, ==, 0);
+ }
+}
+
+static void test_cancel(void)
+{
+ WorkerTestData data[100];
+ int i;
+
+ /* Start more work items than there will be threads, to ensure
+ * the pool is full.
+ */
+ test_submit_many();
+
+ /* Start long running jobs, to ensure we can cancel some. */
+ for (i = 0; i < 100; i++) {
+ data[i].n = 0;
+ data[i].ret = -EINPROGRESS;
+ data[i].aiocb = thread_pool_submit_aio(long_cb, &data[i],
+ done_cb, &data[i]);
+ }
+
+ /* Starting the threads may be left to a bottom half. Let it
+ * run, but do not waste too much time...
+ */
+ active = 100;
+ qemu_aio_wait_nonblocking();
+
+ /* Wait some time for the threads to start, with some sanity
+ * testing on the behavior of the scheduler...
+ */
+ g_assert_cmpint(active, ==, 100);
+ g_usleep(1000000);
+ g_assert_cmpint(active, >, 50);
+
+ /* Cancel the jobs that haven't been started yet. */
+ for (i = 0; i < 100; i++) {
+ if (__sync_val_compare_and_swap(&data[i].n, 0, 3) == 0) {
+ data[i].ret = -ECANCELED;
+ bdrv_aio_cancel(data[i].aiocb);
+ active--;
+ }
+ }
+ g_assert_cmpint(active, >, 5);
+ g_assert_cmpint(active, <, 95);
+
+ /* Canceling the others will be a blocking operation. */
+ for (i = 0; i < 100; i++) {
+ if (data[i].n != 3) {
+ bdrv_aio_cancel(data[i].aiocb);
+ }
+ }
+
+ /* Finish execution and execute any remaining callbacks. */
+ qemu_aio_flush();
+ g_assert_cmpint(active, ==, 0);
+ for (i = 0; i < 100; i++) {
+ if (data[i].n == 3) {
+ g_assert_cmpint(data[i].ret, ==, -ECANCELED);
+ g_assert(data[i].aiocb != NULL);
+ } else {
+ g_assert_cmpint(data[i].n, ==, 2);
+ g_assert_cmpint(data[i].ret, ==, 0);
+ g_assert(data[i].aiocb == NULL);
+ }
+ }
+}
+
+int main(int argc, char **argv)
+{
+ /* These should be removed once each AioContext has its thread pool.
+ * The test should create its own AioContext.
+ */
+ qemu_init_main_loop();
+ bdrv_init();
+
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/thread-pool/submit", test_submit);
+ g_test_add_func("/thread-pool/submit-aio", test_submit_aio);
+ g_test_add_func("/thread-pool/submit-co", test_submit_co);
+ g_test_add_func("/thread-pool/submit-many", test_submit_many);
+ g_test_add_func("/thread-pool/cancel", test_cancel);
+ return g_test_run();
+}
--
1.8.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [Qemu-devel] [PATCH 1.3? 0/2] AioContext and thread pool unit tests
2012-11-23 15:13 [Qemu-devel] [PATCH 1.3? 0/2] AioContext and thread pool unit tests Paolo Bonzini
2012-11-23 15:13 ` [Qemu-devel] [PATCH 1.3? 1/2] tests: add AioContext " Paolo Bonzini
2012-11-23 15:13 ` [Qemu-devel] [PATCH 1.3? 2/2] tests: add thread pool " Paolo Bonzini
@ 2012-11-26 21:48 ` Anthony Liguori
2 siblings, 0 replies; 4+ messages in thread
From: Anthony Liguori @ 2012-11-26 21:48 UTC (permalink / raw)
To: Paolo Bonzini, qemu-devel
Paolo Bonzini <pbonzini@redhat.com> writes:
> As requested. :) The tests pass on both Linux and Wine.
>
> I'm not sure if it makes sense to include these in 1.3, but the idea
> has some worth.
>
Applied. Thanks.
Regards,
Anthony Liguori
> Paolo Bonzini (2):
> tests: add AioContext unit tests
> tests: add thread pool unit tests
>
> tests/Makefile | 4 +
> tests/test-aio.c | 667 +++++++++++++++++++++++++++++++++++++++++++++++
> tests/test-thread-pool.c | 213 +++++++++++++++
> 3 files changed, 884 insertions(+)
> create mode 100644 tests/test-aio.c
> create mode 100644 tests/test-thread-pool.c
>
> --
> 1.8.0
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2012-11-26 21:49 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-11-23 15:13 [Qemu-devel] [PATCH 1.3? 0/2] AioContext and thread pool unit tests Paolo Bonzini
2012-11-23 15:13 ` [Qemu-devel] [PATCH 1.3? 1/2] tests: add AioContext " Paolo Bonzini
2012-11-23 15:13 ` [Qemu-devel] [PATCH 1.3? 2/2] tests: add thread pool " Paolo Bonzini
2012-11-26 21:48 ` [Qemu-devel] [PATCH 1.3? 0/2] AioContext and " Anthony Liguori
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).