From: Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, peter.crosthwaite@xilinx.com,
alex.bennee@linaro.org, mark.burton@greensocs.com,
real@ispras.ru, batuzovk@ispras.ru,
maria.klimushenkova@ispras.ru, pavel.dovgaluk@ispras.ru,
pbonzini@redhat.com, afaerber@suse.de, fred.konrad@greensocs.com
Subject: [Qemu-devel] [RFC PATCH v8 14/21] replay: checkpoints
Date: Thu, 22 Jan 2015 11:52:49 +0300
Message-ID: <20150122085249.5276.26806.stgit@PASHA-ISP.def.inno>
In-Reply-To: <20150122085127.5276.53895.stgit@PASHA-ISP.def.inno>
This patch introduces checkpoints that synchronize the CPU thread and the
iothread. When a checkpoint is reached in the code, all asynchronous events
from the queue are executed.
Signed-off-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
---
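Editorial illustration (not part of the patch): a minimal sketch of the intended
call pattern for a checkpoint inside a wait loop, mirroring the bdrv_drain_all()
change below; device_has_pending_work() and process_one_request() are
hypothetical helpers used only for this sketch.

#include <stdbool.h>
#include "replay/replay.h"   /* replay_checkpoint(), CHECKPOINT_BDRV_DRAIN */

/* Hypothetical helpers, declared only to keep the sketch self-contained. */
static bool device_has_pending_work(void);
static void process_one_request(void);

static void example_drain_loop(void)
{
    while (device_has_pending_work()) {
        /* Flush queued asynchronous events at this checkpoint.  During
         * replay the call also reports whether this checkpoint exists at
         * the current position of the log. */
        if (!replay_checkpoint(CHECKPOINT_BDRV_DRAIN)) {
            /* Replaying, and the recorded execution stopped elsewhere:
             * give up waiting instead of blocking forever. */
            return;
        }
        process_one_request();
    }
}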
block.c | 11 ++++++++++
cpus.c | 7 ++++++-
include/qemu/timer.h | 6 ++++--
main-loop.c | 5 +++++
qemu-timer.c | 49 ++++++++++++++++++++++++++++++++++++++--------
replay/replay-internal.h | 5 ++++-
replay/replay.c | 36 ++++++++++++++++++++++++++++++++++
replay/replay.h | 21 ++++++++++++++++++++
stubs/replay.c | 11 ++++++++++
vl.c | 4 +++-
10 files changed, 142 insertions(+), 13 deletions(-)
diff --git a/block.c b/block.c
index cbe4a32..a4f45c3 100644
--- a/block.c
+++ b/block.c
@@ -1994,6 +1994,11 @@ void bdrv_drain_all(void)
BlockDriverState *bs;
while (busy) {
+ if (!replay_checkpoint(CHECKPOINT_BDRV_DRAIN)) {
+ /* Do not wait anymore; during replay we stopped at some
+ place in the middle of execution */
+ return;
+ }
busy = false;
QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
@@ -2004,6 +2009,12 @@ void bdrv_drain_all(void)
aio_context_release(aio_context);
}
}
+ if (replay_mode == REPLAY_MODE_PLAY) {
+ /* Skip checkpoints from the log */
+ while (replay_checkpoint(CHECKPOINT_BDRV_DRAIN)) {
+ /* Nothing */
+ }
+ }
}
/* make a BlockDriverState anonymous by removing from bdrv_state and
diff --git a/cpus.c b/cpus.c
index 01d89aa..9c32491 100644
--- a/cpus.c
+++ b/cpus.c
@@ -388,7 +388,7 @@ void qtest_clock_warp(int64_t dest)
timers_state.qemu_icount_bias += warp;
seqlock_write_unlock(&timers_state.vm_clock_seqlock);
- qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
+ qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL, false);
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -408,6 +408,11 @@ void qemu_clock_warp(QEMUClockType type)
return;
}
+ /* warp clock deterministically in record/replay mode */
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP)) {
+ return;
+ }
+
/*
* If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
* This ensures that the deadline for the timer is computed correctly below.
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 0c2472c..26927b0 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -240,13 +240,14 @@ void qemu_clock_unregister_reset_notifier(QEMUClockType type,
/**
* qemu_clock_run_timers:
* @type: clock on which to operate
+ * @run_all: true when called from qemu_clock_run_all_timers
*
* Run all the timers associated with the default timer list
* of a clock.
*
* Returns: true if any timer ran.
*/
-bool qemu_clock_run_timers(QEMUClockType type);
+bool qemu_clock_run_timers(QEMUClockType type, bool run_all);
/**
* qemu_clock_run_all_timers:
@@ -337,12 +338,13 @@ QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list);
/**
* timerlist_run_timers:
* @timer_list: the timer list to use
+ * @run_all: true when called from qemu_clock_run_all_timers
*
* Call all expired timers associated with the timer list.
*
* Returns: true if any timer expired
*/
-bool timerlist_run_timers(QEMUTimerList *timer_list);
+bool timerlist_run_timers(QEMUTimerList *timer_list, bool run_all);
/**
* timerlist_notify:
diff --git a/main-loop.c b/main-loop.c
index 981bcb5..d6e93c3 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -497,6 +497,11 @@ int main_loop_wait(int nonblocking)
slirp_pollfds_poll(gpollfds, (ret < 0));
#endif
+ /* The CPU thread can wait forever for an event after
+ missing the warp */
+ if (replay_mode == REPLAY_MODE_PLAY) {
+ qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+ }
qemu_clock_run_all_timers();
return ret;
diff --git a/qemu-timer.c b/qemu-timer.c
index bc981a2..b6eb7b3 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -465,7 +465,7 @@ bool timer_expired(QEMUTimer *timer_head, int64_t current_time)
return timer_expired_ns(timer_head, current_time * timer_head->scale);
}
-bool timerlist_run_timers(QEMUTimerList *timer_list)
+bool timerlist_run_timers(QEMUTimerList *timer_list, bool run_all)
{
QEMUTimer *ts;
int64_t current_time;
@@ -473,6 +473,32 @@ bool timerlist_run_timers(QEMUTimerList *timer_list)
QEMUTimerCB *cb;
void *opaque;
+ switch (timer_list->clock->type) {
+ case QEMU_CLOCK_REALTIME:
+ break;
+ default:
+ case QEMU_CLOCK_VIRTUAL:
+ if ((replay_mode != REPLAY_MODE_NONE && !runstate_is_running())
+ || !replay_checkpoint(run_all ? CHECKPOINT_CLOCK_VIRTUAL_ALL
+ : CHECKPOINT_CLOCK_VIRTUAL)) {
+ return false;
+ }
+ break;
+ case QEMU_CLOCK_HOST:
+ if ((replay_mode != REPLAY_MODE_NONE && !runstate_is_running())
+ || !replay_checkpoint(run_all ? CHECKPOINT_CLOCK_HOST_ALL
+ : CHECKPOINT_CLOCK_HOST)) {
+ return false;
+ }
+ break;
+ case QEMU_CLOCK_VIRTUAL_RT:
+ if ((replay_mode != REPLAY_MODE_NONE && !runstate_is_running())
+ || !replay_checkpoint(run_all ? CHECKPOINT_CLOCK_VIRTUAL_RT_ALL
+ : CHECKPOINT_CLOCK_VIRTUAL_RT)) {
+ return false;
+ }
+ break;
+ }
+
qemu_event_reset(&timer_list->timers_done_ev);
if (!timer_list->clock->enabled) {
goto out;
@@ -505,9 +531,9 @@ out:
return progress;
}
-bool qemu_clock_run_timers(QEMUClockType type)
+bool qemu_clock_run_timers(QEMUClockType type, bool run_all)
{
- return timerlist_run_timers(main_loop_tlg.tl[type]);
+ return timerlist_run_timers(main_loop_tlg.tl[type], run_all);
}
void timerlistgroup_init(QEMUTimerListGroup *tlg,
@@ -532,7 +558,7 @@ bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg)
QEMUClockType type;
bool progress = false;
for (type = 0; type < QEMU_CLOCK_MAX; type++) {
- progress |= timerlist_run_timers(tlg->tl[type]);
+ progress |= timerlist_run_timers(tlg->tl[type], false);
}
return progress;
}
@@ -541,11 +567,18 @@ int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg)
{
int64_t deadline = -1;
QEMUClockType type;
+ bool play = replay_mode == REPLAY_MODE_PLAY;
for (type = 0; type < QEMU_CLOCK_MAX; type++) {
if (qemu_clock_use_for_deadline(tlg->tl[type]->clock->type)) {
- deadline = qemu_soonest_timeout(deadline,
- timerlist_deadline_ns(
- tlg->tl[type]));
+ if (!play || tlg->tl[type]->clock->type == QEMU_CLOCK_REALTIME) {
+ deadline = qemu_soonest_timeout(deadline,
+ timerlist_deadline_ns(
+ tlg->tl[type]));
+ } else {
+ /* Read the clock from the replay file and do not calculate
+ the deadline based on the virtual clock. */
+ qemu_clock_get_ns(tlg->tl[type]->clock->type);
+ }
}
}
return deadline;
@@ -615,7 +648,7 @@ bool qemu_clock_run_all_timers(void)
QEMUClockType type;
for (type = 0; type < QEMU_CLOCK_MAX; type++) {
- progress |= qemu_clock_run_timers(type);
+ progress |= qemu_clock_run_timers(type, true);
}
return progress;
diff --git a/replay/replay-internal.h b/replay/replay-internal.h
index 68b2d45..c0a5800 100755
--- a/replay/replay-internal.h
+++ b/replay/replay-internal.h
@@ -31,7 +31,10 @@ enum ReplayEvents {
EVENT_SHUTDOWN,
/* for clock read/writes */
/* some of grteater codes are reserved for clocks */
- EVENT_CLOCK
+ EVENT_CLOCK,
+ /* for checkpoint event */
+ /* some of the greater codes are reserved for checkpoints */
+ EVENT_CHECKPOINT = EVENT_CLOCK + REPLAY_CLOCK_COUNT
};
/* Asynchronous events IDs */
diff --git a/replay/replay.c b/replay/replay.c
index cfa69fa..7c4a801 100755
--- a/replay/replay.c
+++ b/replay/replay.c
@@ -163,3 +163,39 @@ void replay_shutdown_request(void)
replay_put_event(EVENT_SHUTDOWN);
}
}
+
+bool replay_checkpoint(ReplayCheckpoint checkpoint)
+{
+ bool res = false;
+ replay_save_instructions();
+
+ if (replay_file) {
+ if (replay_mode == REPLAY_MODE_PLAY) {
+ replay_mutex_lock();
+ if (!skip_async_events(EVENT_CHECKPOINT + checkpoint)) {
+ if (replay_data_kind == EVENT_ASYNC) {
+ replay_read_events(checkpoint);
+ replay_fetch_data_kind();
+ res = replay_data_kind != EVENT_ASYNC;
+ replay_mutex_unlock();
+ return res;
+ }
+ replay_mutex_unlock();
+ return res;
+ }
+ replay_has_unread_data = 0;
+ replay_read_events(checkpoint);
+ replay_fetch_data_kind();
+ res = replay_data_kind != EVENT_ASYNC;
+ replay_mutex_unlock();
+ return res;
+ } else if (replay_mode == REPLAY_MODE_RECORD) {
+ replay_mutex_lock();
+ replay_put_event(EVENT_CHECKPOINT + checkpoint);
+ replay_save_events(checkpoint);
+ replay_mutex_unlock();
+ }
+ }
+
+ return true;
+}
diff --git a/replay/replay.h b/replay/replay.h
index e1c5fcf..39822b4 100755
--- a/replay/replay.h
+++ b/replay/replay.h
@@ -29,6 +29,21 @@ enum ReplayClockKind {
};
typedef enum ReplayClockKind ReplayClockKind;
+/* IDs of the checkpoints */
+enum ReplayCheckpoint {
+ CHECKPOINT_BDRV_DRAIN,
+ CHECKPOINT_CLOCK_WARP,
+ CHECKPOINT_RESET_REQUESTED,
+ CHECKPOINT_CLOCK_VIRTUAL,
+ CHECKPOINT_CLOCK_VIRTUAL_ALL,
+ CHECKPOINT_CLOCK_HOST,
+ CHECKPOINT_CLOCK_HOST_ALL,
+ CHECKPOINT_CLOCK_VIRTUAL_RT,
+ CHECKPOINT_CLOCK_VIRTUAL_RT_ALL,
+ CHECKPOINT_COUNT
+};
+typedef enum ReplayCheckpoint ReplayCheckpoint;
+
extern ReplayMode replay_mode;
/* Processing the instructions */
@@ -80,6 +95,12 @@ void replay_get_timedate(struct tm *tm);
/*! Called when qemu shutdown is requested. */
void replay_shutdown_request(void);
+/*! Should be called at checkpoints in the execution.
+ These checkpoints are skipped during replay if they were not met.
+ Saves the checkpoint in RECORD mode and validates it in PLAY mode.
+ Returns false in PLAY mode if the checkpoint was not found in the log.
+ Returns true in all other cases. */
+bool replay_checkpoint(ReplayCheckpoint checkpoint);
/* Asynchronous events queue */
diff --git a/stubs/replay.c b/stubs/replay.c
index 121bca6..1be3575 100755
--- a/stubs/replay.c
+++ b/stubs/replay.c
@@ -1,4 +1,5 @@
#include "replay/replay.h"
+#include "sysemu/sysemu.h"
ReplayMode replay_mode;
@@ -10,3 +11,13 @@ int64_t replay_read_clock(unsigned int kind)
{
return 0;
}
+
+bool replay_checkpoint(ReplayCheckpoint checkpoint)
+{
+ return true;
+}
+
+int runstate_is_running(void)
+{
+ return 0;
+}
diff --git a/vl.c b/vl.c
index 905ea8a..86ba385 100644
--- a/vl.c
+++ b/vl.c
@@ -1767,7 +1767,9 @@ static bool main_loop_should_exit(void)
return true;
}
}
- if (qemu_reset_requested()) {
+ if (qemu_reset_requested_get()
+ && replay_checkpoint(CHECKPOINT_RESET_REQUESTED)) {
+ qemu_reset_requested();
pause_all_vcpus();
cpu_synchronize_all_states();
qemu_system_reset(VMRESET_REPORT);
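Editorial note (not part of the patch): a minimal sketch of the replay_checkpoint()
contract documented in replay.h above, using the clock-warp checkpoint from the
cpus.c hunk; perform_clock_warp() is a hypothetical stand-in for the real warp code.

#include <stdbool.h>
#include "replay/replay.h"   /* replay_checkpoint(), CHECKPOINT_CLOCK_WARP */

/* Hypothetical stand-in for the code that actually warps the clock. */
static void perform_clock_warp(void);

static void warp_clock_deterministically(void)
{
    /* RECORD mode: the checkpoint and any queued asynchronous events are
     * written to the log, and the call returns true.
     * PLAY mode: the call returns true only if the same checkpoint is the
     * next one found in the log; otherwise it returns false and the warp
     * is skipped so that execution stays in sync with the recording. */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP)) {
        return;
    }
    perform_clock_warp();
}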
Thread overview: 82+ messages
2015-01-22 8:51 [Qemu-devel] [RFC PATCH v8 00/21] Deterministic replay core Pavel Dovgalyuk
2015-01-22 8:51 ` [Qemu-devel] [RFC PATCH v8 01/21] i386: partial revert of interrupt poll fix Pavel Dovgalyuk
2015-01-22 8:51 ` [Qemu-devel] [RFC PATCH v8 02/21] replay: global variables and function stubs Pavel Dovgalyuk
2015-01-29 9:02 ` Paolo Bonzini
2015-01-29 23:23 ` Eric Blake
2015-01-22 8:51 ` [Qemu-devel] [RFC PATCH v8 03/21] sysemu: system functions for replay Pavel Dovgalyuk
2015-01-29 9:03 ` Paolo Bonzini
2015-01-22 8:51 ` [Qemu-devel] [RFC PATCH v8 04/21] replay: internal functions for replay log Pavel Dovgalyuk
2015-01-29 9:11 ` Paolo Bonzini
2015-01-30 12:56 ` Pavel Dovgaluk
2015-01-30 13:06 ` Paolo Bonzini
2015-01-30 13:11 ` Mark Burton
2015-01-22 8:51 ` [Qemu-devel] [RFC PATCH v8 05/21] replay: introduce mutex to protect the " Pavel Dovgalyuk
2015-01-29 9:12 ` Paolo Bonzini
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 06/21] replay: introduce icount event Pavel Dovgalyuk
2015-01-29 9:14 ` Paolo Bonzini
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 07/21] cpu-exec: allow temporary disabling icount Pavel Dovgalyuk
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 08/21] cpu: replay instructions sequence Pavel Dovgalyuk
2015-01-29 9:32 ` Paolo Bonzini
2015-02-02 12:28 ` Pavel Dovgaluk
2015-02-02 12:38 ` Paolo Bonzini
2015-02-02 12:42 ` Pavel Dovgaluk
[not found] ` <28583.7738695138$1422880978@news.gmane.org>
2015-02-02 13:18 ` Paolo Bonzini
2015-02-16 12:26 ` Pavel Dovgaluk
[not found] ` <6071.25815372473$1424089600@news.gmane.org>
2015-02-16 12:59 ` Paolo Bonzini
2015-02-16 13:27 ` Pavel Dovgaluk
[not found] ` <8198.56250095672$1424093273@news.gmane.org>
2015-02-16 13:31 ` Paolo Bonzini
2015-02-16 13:37 ` Pavel Dovgaluk
[not found] ` <39577.5216319182$1424093895@news.gmane.org>
2015-02-16 13:53 ` Paolo Bonzini
2015-02-17 8:43 ` Pavel Dovgaluk
2015-02-17 10:58 ` Paolo Bonzini
2015-02-17 11:35 ` Pavel Dovgaluk
2015-02-17 12:21 ` Paolo Bonzini
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 09/21] replay: interrupts and exceptions Pavel Dovgalyuk
2015-01-29 9:44 ` Paolo Bonzini
2015-02-02 13:50 ` Pavel Dovgaluk
[not found] ` <23862.806443549$1422885088@news.gmane.org>
2015-02-02 14:18 ` Paolo Bonzini
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 10/21] replay: asynchronous events infrastructure Pavel Dovgalyuk
2015-01-29 10:06 ` Paolo Bonzini
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 11/21] replay: recording and replaying clock ticks Pavel Dovgalyuk
2015-01-29 10:16 ` Paolo Bonzini
2015-02-03 10:51 ` Pavel Dovgaluk
2015-02-03 11:04 ` Paolo Bonzini
2015-02-03 11:23 ` Pavel Dovgaluk
2015-02-03 11:59 ` Paolo Bonzini
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 12/21] replay: recording and replaying different timers Pavel Dovgalyuk
2015-01-29 10:20 ` Paolo Bonzini
2015-02-03 14:05 ` Pavel Dovgaluk
2015-02-04 15:20 ` Paolo Bonzini
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 13/21] replay: shutdown event Pavel Dovgalyuk
2015-01-29 10:20 ` Paolo Bonzini
2015-01-22 8:52 ` Pavel Dovgalyuk [this message]
2015-01-30 11:05 ` [Qemu-devel] [RFC PATCH v8 14/21] replay: checkpoints Paolo Bonzini
2015-01-22 8:52 ` [Qemu-devel] [RFC PATCH v8 15/21] aio: replace stack of bottom halves with queue Pavel Dovgalyuk
2015-01-30 10:43 ` Paolo Bonzini
2015-01-22 8:53 ` [Qemu-devel] [RFC PATCH v8 16/21] replay: bottom halves Pavel Dovgalyuk
2015-01-30 10:49 ` Paolo Bonzini
2015-02-11 13:03 ` Pavel Dovgaluk
2015-01-22 8:53 ` [Qemu-devel] [RFC PATCH v8 17/21] replay: replay aio requests Pavel Dovgalyuk
2015-01-30 11:07 ` Paolo Bonzini
2015-01-22 8:53 ` [Qemu-devel] [RFC PATCH v8 18/21] replay: thread pool Pavel Dovgalyuk
2015-01-30 11:13 ` Paolo Bonzini
2015-01-22 8:53 ` [Qemu-devel] [RFC PATCH v8 19/21] replay: initialization and deinitialization Pavel Dovgalyuk
2015-01-30 11:02 ` Paolo Bonzini
2015-02-09 12:59 ` Pavel Dovgaluk
2015-02-09 13:01 ` Paolo Bonzini
2015-01-22 8:53 ` [Qemu-devel] [RFC PATCH v8 20/21] replay: command line options Pavel Dovgalyuk
2015-01-30 10:54 ` Paolo Bonzini
2015-02-09 12:15 ` Pavel Dovgaluk
2015-02-09 12:26 ` Paolo Bonzini
2015-02-12 9:12 ` Pavel Dovgaluk
2015-02-12 14:12 ` Paolo Bonzini
2015-01-22 8:53 ` [Qemu-devel] [RFC PATCH v8 21/21] replay: recording of the user input Pavel Dovgalyuk
2015-01-30 11:23 ` Paolo Bonzini
2015-02-12 7:43 ` Pavel Dovgaluk
2015-02-12 8:08 ` Pavel Dovgaluk
2015-02-12 14:41 ` Paolo Bonzini
2015-01-28 11:45 ` [Qemu-devel] [RFC PATCH v8 00/21] Deterministic replay core Pavel Dovgaluk
[not found] ` <28048.5671981753$1422445570@news.gmane.org>
2015-01-29 10:21 ` Paolo Bonzini
2015-01-30 11:25 ` Paolo Bonzini
2015-02-02 14:30 ` Paolo Bonzini
2015-02-03 6:47 ` Pavel Dovgaluk