From: Alex Bligh <alex@alex.org.uk>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
Anthony Liguori <aliguori@us.ibm.com>,
Alex Bligh <alex@alex.org.uk>, liu ping fan <qemulist@gmail.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
MORITA Kazutaka <morita.kazutaka@lab.ntt.co.jp>,
rth@twiddle.net
Subject: [Qemu-devel] [RFC] [PATCHv7 21/22] aio / timers: Add test harness for AioContext timers
Date: Wed, 7 Aug 2013 00:49:15 +0100
Message-ID: <1375832956-7588-22-git-send-email-alex@alex.org.uk>
In-Reply-To: <1375832956-7588-1-git-send-email-alex@alex.org.uk>

Add a test harness for AioContext timers. The GSource equivalent is
unsatisfactory, as it suffers from false wakeups.

Signed-off-by: Alex Bligh <alex@alex.org.uk>
---
tests/test-aio.c | 137 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 137 insertions(+)
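
Note for reviewers (not part of the commit message): both tests arm a
single timer 750ms ahead; its callback re-arms it once (.max = 2), and
the assertions check that no callback runs before each deadline and
that exactly one runs after it. A minimal sketch of the pattern under
test, using only calls that appear in the diff ("tl", "cb" and
"delay_ns" are invented names), and assuming, as the tests do, that the
context has at least one fd handler so that aio_poll() can block:

    QEMUTimer *t = timer_new(tl, SCALE_NS, cb, &data);
    qemu_mod_timer(t, qemu_get_clock_ns(timerlist_get_clock(tl)) +
                   delay_ns);
    aio_poll(ctx, true);    /* blocks until the deadline, then runs cb */
    qemu_del_timer(t);

To run only the new cases, the GLib test framework's -p switch selects
a test path, e.g. "tests/test-aio -p /aio/timer/schedule".
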
diff --git a/tests/test-aio.c b/tests/test-aio.c
index eedf7f8..1d16046 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -32,6 +32,15 @@ typedef struct {
     int max;
 } BHTestData;
 
+typedef struct {
+    QEMUTimer *timer;
+    QEMUTimerList *timer_list;
+    int n;
+    int max;
+    int64_t ns;
+    AioContext *ctx;
+} TimerTestData;
+
 static void bh_test_cb(void *opaque)
 {
     BHTestData *data = opaque;
@@ -40,6 +49,26 @@ static void bh_test_cb(void *opaque)
     }
 }
 
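+/* Timer callback for the tests below: re-arms the timer data->ns
+ * nanoseconds ahead until it has fired data->max times.  */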
+static void timer_test_cb(void *opaque)
+{
+    TimerTestData *data = opaque;
+    if (++data->n < data->max) {
+        qemu_mod_timer(data->timer,
+                       qemu_get_clock_ns(
+                           timerlist_get_clock(data->timer_list)) +
+                       data->ns);
+    }
+}
+
+static void dummy_io_handler_read(void *opaque)
+{
+}
+
+static int dummy_io_handler_flush(void *opaque)
+{
+    return 1;
+}
+
 static void bh_delete_cb(void *opaque)
 {
     BHTestData *data = opaque;
@@ -341,6 +370,64 @@ static void test_wait_event_notifier_noflush(void)
     event_notifier_cleanup(&data.e);
 }
 
+static void test_timer_schedule(void)
+{
+    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
+                           .max = 2,
+                           .timer_list = ctx->tlg[QEMU_CLOCK_VIRTUAL] };
+    int pipefd[2];
+
+    /* aio_poll will not block to wait for timers to complete unless it has
+     * an fd to wait on.  Fixing this breaks other tests.  So create a dummy
+     * one. */
+    g_assert(!pipe2(pipefd, O_NONBLOCK));
+    aio_set_fd_handler(ctx, pipefd[0],
+                       dummy_io_handler_read, NULL, dummy_io_handler_flush,
+                       NULL);
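+    /* A single non-blocking poll, presumably to consume the
+     * aio_notify() raised by aio_set_fd_handler() above.  */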
+    aio_poll(ctx, false);
+
+    data.timer = timer_new(data.timer_list, SCALE_NS, timer_test_cb, &data);
+    qemu_mod_timer(data.timer,
+                   qemu_get_clock_ns(timerlist_get_clock(data.timer_list)) +
+                   data.ns);
+
+    g_assert_cmpint(data.n, ==, 0);
+
+    /* qemu_mod_timer may well cause an event notifier to have gone off,
+     * so clear that.
+     */
+    do {} while (aio_poll(ctx, false));
+
+    g_assert(!aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 0);
+
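+    /* Sleep past the 750ms expiry: the timer is then pending, but its
+     * callback must not run until a poll dispatches it.  */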
+    sleep(1);
+    g_assert_cmpint(data.n, ==, 0);
+
+    g_assert(aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 1);
+
+    /* qemu_mod_timer called by our callback */
+    do {} while (aio_poll(ctx, false));
+
+    g_assert(!aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 1);
+
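+    /* Block in aio_poll(): with this series the poll timeout comes from
+     * the timer deadline, so this should return once the second expiry
+     * fires rather than hanging.  */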
+    g_assert(aio_poll(ctx, true));
+    g_assert_cmpint(data.n, ==, 2);
+
+    /* As max is now 2, an event notifier should not have gone off */
+
+    g_assert(!aio_poll(ctx, false));
+    g_assert_cmpint(data.n, ==, 2);
+
+    aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL, NULL);
+    close(pipefd[0]);
+    close(pipefd[1]);
+
+    qemu_del_timer(data.timer);
+}
+
 /* Now the same tests, using the context as a GSource.  They are
  * very similar to the ones above, with g_main_context_iteration
  * replacing aio_poll.  However:
@@ -623,6 +710,54 @@ static void test_source_wait_event_notifier_noflush(void)
     event_notifier_cleanup(&data.e);
 }
 
+static void test_source_timer_schedule(void)
+{
+    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
+                           .max = 2,
+                           .timer_list = ctx->tlg[QEMU_CLOCK_VIRTUAL] };
+    int pipefd[2];
+    int64_t expiry;
+
+    /* aio_poll will not block to wait for timers to complete unless it has
+     * an fd to wait on.  Fixing this breaks other tests.  So create a dummy
+     * one. */
+    g_assert(!pipe2(pipefd, O_NONBLOCK));
+    aio_set_fd_handler(ctx, pipefd[0],
+                       dummy_io_handler_read, NULL, dummy_io_handler_flush,
+                       NULL);
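+    /* Drain the GLib main loop until it is quiescent; as the commit
+     * message notes, the GSource path suffers from false wakeups.  */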
+    do {} while (g_main_context_iteration(NULL, false));
+
+    data.timer = timer_new(data.timer_list, SCALE_NS, timer_test_cb, &data);
+    expiry = qemu_get_clock_ns(timerlist_get_clock(data.timer_list)) +
+             data.ns;
+    qemu_mod_timer(data.timer, expiry);
+
+    g_assert_cmpint(data.n, ==, 0);
+
+    sleep(1);
+    g_assert_cmpint(data.n, ==, 0);
+
+    g_assert(g_main_context_iteration(NULL, false));
+    g_assert_cmpint(data.n, ==, 1);
+
+    /* The comment above was not kidding when it said this wakes up itself */
+    do {
+        g_assert(g_main_context_iteration(NULL, true));
+    } while (qemu_get_clock_ns(
+                 timerlist_get_clock(data.timer_list)) <= expiry);
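+    /* The callback has re-armed the timer, so sleep past the second
+     * expiry and dispatch it with one final iteration.  */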
+    sleep(1);
+    g_main_context_iteration(NULL, false);
+
+    g_assert_cmpint(data.n, ==, 2);
+
+    aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL, NULL);
+    close(pipefd[0]);
+    close(pipefd[1]);
+
+    qemu_del_timer(data.timer);
+}
+
+
 /* End of tests.  */
 
 int main(int argc, char **argv)
@@ -651,6 +786,7 @@ int main(int argc, char **argv)
g_test_add_func("/aio/event/wait", test_wait_event_notifier);
g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
g_test_add_func("/aio/event/flush", test_flush_event_notifier);
+ g_test_add_func("/aio/timer/schedule", test_timer_schedule);
g_test_add_func("/aio-gsource/notify", test_source_notify);
g_test_add_func("/aio-gsource/flush", test_source_flush);
@@ -665,5 +801,6 @@ int main(int argc, char **argv)
g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
+ g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
     return g_test_run();
 }
--
1.7.9.5