* KVM: x86: switch KVMCLOCK base to monotonic raw clock
@ 2019-10-28 14:36 Marcelo Tosatti
2019-11-01 0:09 ` Paolo Bonzini
0 siblings, 1 reply; 3+ messages in thread
From: Marcelo Tosatti @ 2019-10-28 14:36 UTC (permalink / raw)
To: kvm; +Cc: Paolo Bonzini
Commit 0bc48bea36d1 ("KVM: x86: update master clock before computing
kvmclock_offset")
switches the order of operations to avoid the conversion
TSC (without frequency correction) ->
system_timestamp (with frequency correction),
which might cause a time jump.
However, it leaves any other masterclock update unsafe, which includes,
at the moment:
* HV_X64_MSR_REFERENCE_TSC MSR write.
* TSC writes.
* Host suspend/resume.
Avoid the time jump issue by using frequency uncorrected
CLOCK_MONOTONIC_RAW clock.
It's the guest's timekeeping software's responsibility
to track and correct a reference clock such as UTC.
This fixes forward time jump (which can result in
failure to bring up a vCPU) during vCPU hotplug:
Oct 11 14:48:33 storage kernel: CPU2 has been hot-added
Oct 11 14:48:34 storage kernel: CPU3 has been hot-added
Oct 11 14:49:22 storage kernel: smpboot: Booting Node 0 Processor 2 APIC 0x2 <-- time jump of almost 1 minute
Oct 11 14:49:22 storage kernel: smpboot: do_boot_cpu failed(-1) to wakeup CPU#2
Oct 11 14:49:23 storage kernel: smpboot: Booting Node 0 Processor 3 APIC 0x3
Oct 11 14:49:23 storage kernel: kvm-clock: cpu 3, msr 0:7ff640c1, secondary cpu clock
Which happens because:
/*
* Wait 10s total for a response from AP
*/
boot_error = -1;
timeout = jiffies + 10*HZ;
while (time_before(jiffies, timeout)) {
...
}
Analyzed-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 661e2bf..ff713a1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1521,20 +1521,25 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
}
#ifdef CONFIG_X86_64
+struct pvclock_clock {
+ int vclock_mode;
+ u64 cycle_last;
+ u64 mask;
+ u32 mult;
+ u32 shift;
+};
+
struct pvclock_gtod_data {
seqcount_t seq;
- struct { /* extract of a clocksource struct */
- int vclock_mode;
- u64 cycle_last;
- u64 mask;
- u32 mult;
- u32 shift;
- } clock;
+ struct pvclock_clock clock; /* extract of a clocksource struct */
+ struct pvclock_clock raw_clock; /* extract of a clocksource struct */
+ u64 boot_ns_raw;
u64 boot_ns;
u64 nsec_base;
u64 wall_time_sec;
+ u64 monotonic_raw_nsec;
};
static struct pvclock_gtod_data pvclock_gtod_data;
@@ -1542,10 +1547,20 @@ struct pvclock_gtod_data {
static void update_pvclock_gtod(struct timekeeper *tk)
{
struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
- u64 boot_ns;
+ u64 boot_ns, boot_ns_raw;
boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
+ /*
+ * FIXME: tk->offs_boot should be converted to CLOCK_MONOTONIC_RAW
+ * interval (that is, without frequency adjustment for that interval).
+ *
+ * Lack of this fix can cause system_timestamp to not be equal to
+ * CLOCK_MONOTONIC_RAW (which happens if the host uses
+ * suspend/resume).
+ */
+ boot_ns_raw = ktime_to_ns(ktime_add(tk->tkr_raw.base, tk->offs_boot));
+
write_seqcount_begin(&vdata->seq);
/* copy pvclock gtod data */
@@ -1555,11 +1570,20 @@ static void update_pvclock_gtod(struct timekeeper *tk)
vdata->clock.mult = tk->tkr_mono.mult;
vdata->clock.shift = tk->tkr_mono.shift;
+ vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->archdata.vclock_mode;
+ vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
+ vdata->raw_clock.mask = tk->tkr_raw.mask;
+ vdata->raw_clock.mult = tk->tkr_raw.mult;
+ vdata->raw_clock.shift = tk->tkr_raw.shift;
+
vdata->boot_ns = boot_ns;
vdata->nsec_base = tk->tkr_mono.xtime_nsec;
vdata->wall_time_sec = tk->xtime_sec;
+ vdata->boot_ns_raw = boot_ns_raw;
+ vdata->monotonic_raw_nsec = tk->tkr_raw.xtime_nsec;
+
write_seqcount_end(&vdata->seq);
}
#endif
@@ -1983,21 +2007,21 @@ static u64 read_tsc(void)
return last;
}
-static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
+static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
+ int *mode)
{
long v;
- struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
u64 tsc_pg_val;
- switch (gtod->clock.vclock_mode) {
+ switch (clock->vclock_mode) {
case VCLOCK_HVCLOCK:
tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
tsc_timestamp);
if (tsc_pg_val != U64_MAX) {
/* TSC page valid */
*mode = VCLOCK_HVCLOCK;
- v = (tsc_pg_val - gtod->clock.cycle_last) &
- gtod->clock.mask;
+ v = (tsc_pg_val - clock->cycle_last) &
+ clock->mask;
} else {
/* TSC page invalid */
*mode = VCLOCK_NONE;
@@ -2006,8 +2030,8 @@ static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
case VCLOCK_TSC:
*mode = VCLOCK_TSC;
*tsc_timestamp = read_tsc();
- v = (*tsc_timestamp - gtod->clock.cycle_last) &
- gtod->clock.mask;
+ v = (*tsc_timestamp - clock->cycle_last) &
+ clock->mask;
break;
default:
*mode = VCLOCK_NONE;
@@ -2016,10 +2040,10 @@ static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
if (*mode == VCLOCK_NONE)
*tsc_timestamp = v = 0;
- return v * gtod->clock.mult;
+ return v * clock->mult;
}
-static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
+static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
unsigned long seq;
@@ -2028,10 +2052,10 @@ static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
do {
seq = read_seqcount_begin(&gtod->seq);
- ns = gtod->nsec_base;
- ns += vgettsc(tsc_timestamp, &mode);
+ ns = gtod->monotonic_raw_nsec;
+ ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
ns >>= gtod->clock.shift;
- ns += gtod->boot_ns;
+ ns += gtod->boot_ns_raw;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
*t = ns;
@@ -2049,7 +2073,7 @@ static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
seq = read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_sec;
ns = gtod->nsec_base;
- ns += vgettsc(tsc_timestamp, &mode);
+ ns += vgettsc(>od->clock, tsc_timestamp, &mode);
ns >>= gtod->clock.shift;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -2066,7 +2090,7 @@ static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
return false;
- return gtod_is_based_on_tsc(do_monotonic_boot(kernel_ns,
+ return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
tsc_timestamp));
}
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: KVM: x86: switch KVMCLOCK base to monotonic raw clock
2019-10-28 14:36 KVM: x86: switch KVMCLOCK base to monotonic raw clock Marcelo Tosatti
@ 2019-11-01 0:09 ` Paolo Bonzini
2019-11-01 1:35 ` Marcelo Tosatti
0 siblings, 1 reply; 3+ messages in thread
From: Paolo Bonzini @ 2019-11-01 0:09 UTC (permalink / raw)
To: Marcelo Tosatti, kvm
On 28/10/19 15:36, Marcelo Tosatti wrote:
>
> Commit 0bc48bea36d1 ("KVM: x86: update master clock before computing
> kvmclock_offset")
> switches the order of operations to avoid the conversion
>
> TSC (without frequency correction) ->
> system_timestamp (with frequency correction),
>
> which might cause a time jump.
>
> However, it leaves any other masterclock update unsafe, which includes,
> at the moment:
>
> * HV_X64_MSR_REFERENCE_TSC MSR write.
> * TSC writes.
> * Host suspend/resume.
>
> Avoid the time jump issue by using frequency uncorrected
> CLOCK_MONOTONIC_RAW clock.
>
> Its the guests time keeping software responsability
> to track and correct a reference clock such as UTC.
>
> This fixes forward time jump (which can result in
> failure to bring up a vCPU) during vCPU hotplug:
>
> Oct 11 14:48:33 storage kernel: CPU2 has been hot-added
> Oct 11 14:48:34 storage kernel: CPU3 has been hot-added
> Oct 11 14:49:22 storage kernel: smpboot: Booting Node 0 Processor 2 APIC 0x2 <-- time jump of almost 1 minute
> Oct 11 14:49:22 storage kernel: smpboot: do_boot_cpu failed(-1) to wakeup CPU#2
> Oct 11 14:49:23 storage kernel: smpboot: Booting Node 0 Processor 3 APIC 0x3
> Oct 11 14:49:23 storage kernel: kvm-clock: cpu 3, msr 0:7ff640c1, secondary cpu clock
>
> Which happens because:
>
> /*
> * Wait 10s total for a response from AP
> */
> boot_error = -1;
> timeout = jiffies + 10*HZ;
> while (time_before(jiffies, timeout)) {
> ...
> }
>
> Analyzed-by: Igor Mammedov <imammedo@redhat.com>
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 661e2bf..ff713a1 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1521,20 +1521,25 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
> }
>
> #ifdef CONFIG_X86_64
> +struct pvclock_clock {
> + int vclock_mode;
> + u64 cycle_last;
> + u64 mask;
> + u32 mult;
> + u32 shift;
> +};
> +
> struct pvclock_gtod_data {
> seqcount_t seq;
>
> - struct { /* extract of a clocksource struct */
> - int vclock_mode;
> - u64 cycle_last;
> - u64 mask;
> - u32 mult;
> - u32 shift;
> - } clock;
> + struct pvclock_clock clock; /* extract of a clocksource struct */
> + struct pvclock_clock raw_clock; /* extract of a clocksource struct */
>
> + u64 boot_ns_raw;
> u64 boot_ns;
> u64 nsec_base;
> u64 wall_time_sec;
> + u64 monotonic_raw_nsec;
> };
>
> static struct pvclock_gtod_data pvclock_gtod_data;
> @@ -1542,10 +1547,20 @@ struct pvclock_gtod_data {
> static void update_pvclock_gtod(struct timekeeper *tk)
> {
> struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
> - u64 boot_ns;
> + u64 boot_ns, boot_ns_raw;
>
> boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
>
> + /*
> + * FIXME: tk->offs_boot should be converted to CLOCK_MONOTONIC_RAW
> + * interval (that is, without frequency adjustment for that interval).
> + *
> + * Lack of this fix can cause system_timestamp to not be equal to
> + * CLOCK_MONOTONIC_RAW (which happen if the host uses
> + * suspend/resume).
> + */
This is scary. Essentially you're saying that you'd want a
CLOCK_BOOTTIME_RAW. But is this true? CLOCK_BOOTTIME only differs by
the suspend time, and that is computed directly in nanoseconds so the
different frequency of CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW does not
affect it.
Thanks,
Paolo
Paolo
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: KVM: x86: switch KVMCLOCK base to monotonic raw clock
2019-11-01 0:09 ` Paolo Bonzini
@ 2019-11-01 1:35 ` Marcelo Tosatti
0 siblings, 0 replies; 3+ messages in thread
From: Marcelo Tosatti @ 2019-11-01 1:35 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: kvm
On Fri, Nov 01, 2019 at 01:09:50AM +0100, Paolo Bonzini wrote:
> On 28/10/19 15:36, Marcelo Tosatti wrote:
> >
> > Commit 0bc48bea36d1 ("KVM: x86: update master clock before computing
> > kvmclock_offset")
> > switches the order of operations to avoid the conversion
> >
> > TSC (without frequency correction) ->
> > system_timestamp (with frequency correction),
> >
> > which might cause a time jump.
> >
> > However, it leaves any other masterclock update unsafe, which includes,
> > at the moment:
> >
> > * HV_X64_MSR_REFERENCE_TSC MSR write.
> > * TSC writes.
> > * Host suspend/resume.
> >
> > Avoid the time jump issue by using frequency uncorrected
> > CLOCK_MONOTONIC_RAW clock.
> >
> > Its the guests time keeping software responsability
> > to track and correct a reference clock such as UTC.
> >
> > This fixes forward time jump (which can result in
> > failure to bring up a vCPU) during vCPU hotplug:
> >
> > Oct 11 14:48:33 storage kernel: CPU2 has been hot-added
> > Oct 11 14:48:34 storage kernel: CPU3 has been hot-added
> > Oct 11 14:49:22 storage kernel: smpboot: Booting Node 0 Processor 2 APIC 0x2 <-- time jump of almost 1 minute
> > Oct 11 14:49:22 storage kernel: smpboot: do_boot_cpu failed(-1) to wakeup CPU#2
> > Oct 11 14:49:23 storage kernel: smpboot: Booting Node 0 Processor 3 APIC 0x3
> > Oct 11 14:49:23 storage kernel: kvm-clock: cpu 3, msr 0:7ff640c1, secondary cpu clock
> >
> > Which happens because:
> >
> > /*
> > * Wait 10s total for a response from AP
> > */
> > boot_error = -1;
> > timeout = jiffies + 10*HZ;
> > while (time_before(jiffies, timeout)) {
> > ...
> > }
> >
> > Analyzed-by: Igor Mammedov <imammedo@redhat.com>
> > Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> >
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 661e2bf..ff713a1 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -1521,20 +1521,25 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
> > }
> >
> > #ifdef CONFIG_X86_64
> > +struct pvclock_clock {
> > + int vclock_mode;
> > + u64 cycle_last;
> > + u64 mask;
> > + u32 mult;
> > + u32 shift;
> > +};
> > +
> > struct pvclock_gtod_data {
> > seqcount_t seq;
> >
> > - struct { /* extract of a clocksource struct */
> > - int vclock_mode;
> > - u64 cycle_last;
> > - u64 mask;
> > - u32 mult;
> > - u32 shift;
> > - } clock;
> > + struct pvclock_clock clock; /* extract of a clocksource struct */
> > + struct pvclock_clock raw_clock; /* extract of a clocksource struct */
> >
> > + u64 boot_ns_raw;
> > u64 boot_ns;
> > u64 nsec_base;
> > u64 wall_time_sec;
> > + u64 monotonic_raw_nsec;
> > };
> >
> > static struct pvclock_gtod_data pvclock_gtod_data;
> > @@ -1542,10 +1547,20 @@ struct pvclock_gtod_data {
> > static void update_pvclock_gtod(struct timekeeper *tk)
> > {
> > struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
> > - u64 boot_ns;
> > + u64 boot_ns, boot_ns_raw;
> >
> > boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
> >
> > + /*
> > + * FIXME: tk->offs_boot should be converted to CLOCK_MONOTONIC_RAW
> > + * interval (that is, without frequency adjustment for that interval).
> > + *
> > + * Lack of this fix can cause system_timestamp to not be equal to
> > + * CLOCK_MONOTONIC_RAW (which happen if the host uses
> > + * suspend/resume).
> > + */
>
> This is scary. Essentially you're saying that you'd want a
> CLOCK_BOOTTIME_RAW. But is this true? CLOCK_BOOTTIME only differs by
> the suspend time, and that is computed directly in nanoseconds so the
It's read from the RTC.
> different frequency of CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW does not
> affect it.
Still a different frequency from RTC and TSC, which can cause
system_timestamp to not equal CLOCK_MONOTONIC_RAW (but in fact I don't
see a fix for that, and the host's clock also suffers from the same
issue).
Should I remove the FIXME? Or just add a note about this fact
of suspend/resume?
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2019-11-01 1:36 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2019-10-28 14:36 KVM: x86: switch KVMCLOCK base to monotonic raw clock Marcelo Tosatti
2019-11-01 0:09 ` Paolo Bonzini
2019-11-01 1:35 ` Marcelo Tosatti
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox