From: Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, peter.maydell@linaro.org,
boost.lists@gmail.com, quintela@redhat.com, jasowang@redhat.com,
mst@redhat.com, zuban32s@gmail.com,
maria.klimushenkova@ispras.ru, dovgaluk@ispras.ru,
kraxel@redhat.com, pavel.dovgaluk@ispras.ru, pbonzini@redhat.com,
alex.bennee@linaro.org
Subject: [Qemu-devel] [RFC PATCH v3 13/30] icount: fixed saving/restoring of icount warp timers
Date: Thu, 11 Jan 2018 11:26:10 +0300 [thread overview]
Message-ID: <20180111082610.27295.51298.stgit@pasha-VirtualBox> (raw)
In-Reply-To: <20180111082452.27295.85707.stgit@pasha-VirtualBox>
This patch adds saving and restoring of the icount warp
timers in the vmstate.
It is needed because these timers affect the virtual clock value.
Therefore determinism of the execution in icount record/replay mode
depends on determinism of the timers.
Signed-off-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
---
cpus.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 66 insertions(+), 19 deletions(-)
diff --git a/cpus.c b/cpus.c
index f2ac0f2..b4146a8 100644
--- a/cpus.c
+++ b/cpus.c
@@ -120,16 +120,11 @@ static bool all_cpu_threads_idle(void)
/* Protected by TimersState seqlock */
static bool icount_sleep = true;
-static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
-static QEMUTimer *icount_rt_timer;
-static QEMUTimer *icount_vm_timer;
-static QEMUTimer *icount_warp_timer;
-
typedef struct TimersState {
/* Protected by BQL. */
int64_t cpu_ticks_prev;
@@ -147,6 +142,11 @@ typedef struct TimersState {
int64_t qemu_icount_bias;
/* Only written by TCG thread */
int64_t qemu_icount;
+ /* for adjusting icount */
+ int64_t vm_clock_warp_start;
+ QEMUTimer *icount_rt_timer;
+ QEMUTimer *icount_vm_timer;
+ QEMUTimer *icount_warp_timer;
} TimersState;
static TimersState timers_state;
@@ -432,14 +432,14 @@ static void icount_adjust(void)
static void icount_adjust_rt(void *opaque)
{
- timer_mod(icount_rt_timer,
+ timer_mod(timers_state.icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
icount_adjust();
}
static void icount_adjust_vm(void *opaque)
{
- timer_mod(icount_vm_timer,
+ timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
icount_adjust();
@@ -460,7 +460,7 @@ static void icount_warp_rt(void)
*/
do {
seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
- warp_start = vm_clock_warp_start;
+ warp_start = timers_state.vm_clock_warp_start;
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
if (warp_start == -1) {
@@ -473,7 +473,7 @@ static void icount_warp_rt(void)
cpu_get_clock_locked());
int64_t warp_delta;
- warp_delta = clock - vm_clock_warp_start;
+ warp_delta = clock - timers_state.vm_clock_warp_start;
if (use_icount == 2) {
/*
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
@@ -485,7 +485,7 @@ static void icount_warp_rt(void)
}
timers_state.qemu_icount_bias += warp_delta;
}
- vm_clock_warp_start = -1;
+ timers_state.vm_clock_warp_start = -1;
seqlock_write_end(&timers_state.vm_clock_seqlock);
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
@@ -594,11 +594,13 @@ void qemu_start_warp_timer(void)
* every 100ms.
*/
seqlock_write_begin(&timers_state.vm_clock_seqlock);
- if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
- vm_clock_warp_start = clock;
+ if (timers_state.vm_clock_warp_start == -1
+ || timers_state.vm_clock_warp_start > clock) {
+ timers_state.vm_clock_warp_start = clock;
}
seqlock_write_end(&timers_state.vm_clock_seqlock);
- timer_mod_anticipate(icount_warp_timer, clock + deadline);
+ timer_mod_anticipate(timers_state.icount_warp_timer,
+ clock + deadline);
}
} else if (deadline == 0) {
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -623,7 +625,7 @@ static void qemu_account_warp_timer(void)
return;
}
- timer_del(icount_warp_timer);
+ timer_del(timers_state.icount_warp_timer);
icount_warp_rt();
}
@@ -632,6 +634,45 @@ static bool icount_state_needed(void *opaque)
return use_icount;
}
+static bool warp_timer_state_needed(void *opaque)
+{
+ TimersState *s = opaque;
+ return s->icount_warp_timer != NULL;
+}
+
+static bool adjust_timers_state_needed(void *opaque)
+{
+ TimersState *s = opaque;
+ return s->icount_rt_timer != NULL;
+}
+
+/*
+ * Subsection for warp timer migration is optional, because it may not be created
+ */
+static const VMStateDescription icount_vmstate_warp_timer = {
+ .name = "timer/icount/warp_timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = warp_timer_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT64(vm_clock_warp_start, TimersState),
+ VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription icount_vmstate_adjust_timers = {
+ .name = "timer/icount/timers",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = adjust_timers_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
+ VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
/*
* This is a subsection for icount migration.
*/
@@ -644,6 +685,11 @@ static const VMStateDescription icount_vmstate_timers = {
VMSTATE_INT64(qemu_icount_bias, TimersState),
VMSTATE_INT64(qemu_icount, TimersState),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &icount_vmstate_warp_timer,
+ &icount_vmstate_adjust_timers,
+ NULL
}
};
@@ -754,7 +800,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
if (icount_sleep) {
- icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
+ timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
icount_timer_cb, NULL);
}
@@ -788,13 +834,14 @@ void configure_icount(QemuOpts *opts, Error **errp)
the virtual time trigger catches emulated time passing too fast.
Realtime triggers occur even when idle, so use them less frequently
than VM triggers. */
- icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+ timers_state.vm_clock_warp_start = -1;
+ timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
icount_adjust_rt, NULL);
- timer_mod(icount_rt_timer,
+ timer_mod(timers_state.icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
- icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
icount_adjust_vm, NULL);
- timer_mod(icount_vm_timer,
+ timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
}
next prev parent reply other threads:[~2018-01-11 8:26 UTC|newest]
Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-01-11 8:24 [Qemu-devel] [RFC PATCH v3 00/30] replay additions Pavel Dovgalyuk
2018-01-11 8:24 ` [Qemu-devel] [RFC PATCH v3 01/30] hpet: recover timer offset correctly Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 02/30] cpu: flush TB cache when loading VMState Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 03/30] This patch adds a condition before overwriting exception_index fields Pavel Dovgalyuk
2018-01-11 12:29 ` Paolo Bonzini
2018-01-12 6:12 ` Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 04/30] block: implement bdrv_snapshot_goto for blkreplay Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 05/30] blkreplay: create temporary overlay for underlaying devices Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 06/30] replay: disable default snapshot for record/replay Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 07/30] replay: fix processing async events Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 08/30] replay: fixed replay_enable_events Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 09/30] replay: fix save/load vm for non-empty queue Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 10/30] replay: added replay log format description Pavel Dovgalyuk
2018-01-11 8:25 ` [Qemu-devel] [RFC PATCH v3 11/30] replay: make safe vmstop at record/replay Pavel Dovgalyuk
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 12/30] replay: save prior value of the host clock Pavel Dovgalyuk
2018-01-11 8:26 ` Pavel Dovgalyuk [this message]
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 14/30] target/arm/arm-powertctl: drop BQL assertions Pavel Dovgalyuk
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 15/30] cpus: push BQL lock to qemu_*_wait_io_event Pavel Dovgalyuk
2018-01-11 12:50 ` Paolo Bonzini
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 16/30] cpus: only take BQL for sleeping threads Pavel Dovgalyuk
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 17/30] replay/replay.c: bump REPLAY_VERSION again Pavel Dovgalyuk
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 18/30] replay/replay-internal.c: track holding of replay_lock Pavel Dovgalyuk
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 19/30] replay: make locking visible outside replay code Pavel Dovgalyuk
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 20/30] replay: push replay_mutex_lock up the call tree Pavel Dovgalyuk
2018-01-11 8:26 ` [Qemu-devel] [RFC PATCH v3 21/30] replay: don't destroy mutex at exit Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 22/30] replay: check return values of fwrite Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 23/30] replay: avoid recursive call of checkpoints Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 24/30] scripts/qemu-gdb: add simple tcg lock status helper Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 25/30] util/qemu-thread-*: add qemu_lock, locked and unlock trace events Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 26/30] scripts/analyse-locks-simpletrace.py: script to analyse lock times Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 27/30] scripts/replay-dump.py: replay log dumper Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 28/30] scripts/qemu-gdb/timers.py: new helper to dump timer state Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 29/30] replay: improve replay performance Pavel Dovgalyuk
2018-01-11 12:55 ` Paolo Bonzini
2018-01-11 13:12 ` Pavel Dovgalyuk
2018-01-11 13:22 ` Paolo Bonzini
2018-01-12 6:13 ` Pavel Dovgalyuk
2018-01-12 12:10 ` Paolo Bonzini
2018-01-12 12:41 ` Pavel Dovgalyuk
2018-01-11 8:27 ` [Qemu-devel] [RFC PATCH v3 30/30] replay: don't process async events when warping the clock Pavel Dovgalyuk
2018-01-11 12:37 ` Paolo Bonzini
2018-01-12 7:20 ` Pavel Dovgalyuk
2018-01-11 8:54 ` [Qemu-devel] [RFC PATCH v3 00/30] replay additions no-reply
2018-01-11 8:55 ` no-reply
2018-01-11 12:56 ` Paolo Bonzini
2018-01-12 9:32 ` Paolo Bonzini
2018-01-12 12:07 ` Pavel Dovgalyuk
2018-01-12 12:11 ` Paolo Bonzini
2018-01-19 6:35 ` Pavel Dovgalyuk
2018-01-19 7:41 ` Paolo Bonzini
2018-01-19 7:45 ` Pavel Dovgalyuk
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180111082610.27295.51298.stgit@pasha-VirtualBox \
--to=pavel.dovgaluk@ispras.ru \
--cc=alex.bennee@linaro.org \
--cc=boost.lists@gmail.com \
--cc=dovgaluk@ispras.ru \
--cc=jasowang@redhat.com \
--cc=kraxel@redhat.com \
--cc=kwolf@redhat.com \
--cc=maria.klimushenkova@ispras.ru \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=zuban32s@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).