From: Roman Kagan <rkagan@virtuozzo.com>
To: <kvm@vger.kernel.org>
Cc: Evgeniy Yakovlev <eyakovlev@virtuozzo.com>,
Roman Kagan <rkagan@virtuozzo.com>,
Paolo Bonzini <pbonzini@redhat.com>
Subject: [PATCH kvm-unit-tests] KVM: x86: add hyperv clock test case
Date: Wed, 31 Aug 2016 17:13:33 +0300 [thread overview]
Message-ID: <1472652813-28826-1-git-send-email-rkagan@virtuozzo.com> (raw)
The test checks the relative precision of the reference TSC page
and the time reference counter.
Reworked from the initial version by Paolo Bonzini.
Signed-off-by: Roman Kagan <rkagan@virtuozzo.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
---
The test is obviously supposed to fail until Hyper-V reference TSC page
implementation lands in KVM.
x86/Makefile.common | 2 +
x86/Makefile.x86_64 | 1 +
x86/hyperv.h | 9 ++
x86/hyperv_clock.c | 230 ++++++++++++++++++++++++++++++++++++++++++++++++++++
x86/unittests.cfg | 5 ++
5 files changed, 247 insertions(+)
create mode 100644 x86/hyperv_clock.c
diff --git a/x86/Makefile.common b/x86/Makefile.common
index 356d879..287c0cf 100644
--- a/x86/Makefile.common
+++ b/x86/Makefile.common
@@ -67,6 +67,8 @@ $(TEST_DIR)/hyperv_synic.elf: $(TEST_DIR)/hyperv.o
$(TEST_DIR)/hyperv_stimer.elf: $(TEST_DIR)/hyperv.o
+$(TEST_DIR)/hyperv_clock.elf: $(TEST_DIR)/hyperv.o
+
arch_clean:
$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat $(TEST_DIR)/*.elf \
$(TEST_DIR)/.*.d lib/x86/.*.d
diff --git a/x86/Makefile.x86_64 b/x86/Makefile.x86_64
index e166911..af99279 100644
--- a/x86/Makefile.x86_64
+++ b/x86/Makefile.x86_64
@@ -14,6 +14,7 @@ tests = $(TEST_DIR)/access.flat $(TEST_DIR)/apic.flat \
tests += $(TEST_DIR)/svm.flat
tests += $(TEST_DIR)/vmx.flat
tests += $(TEST_DIR)/tscdeadline_latency.flat
+tests += $(TEST_DIR)/hyperv_clock.flat
include $(TEST_DIR)/Makefile.common
diff --git a/x86/hyperv.h b/x86/hyperv.h
index 434a933..bef0317 100644
--- a/x86/hyperv.h
+++ b/x86/hyperv.h
@@ -11,6 +11,7 @@
#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3)
#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
/* Define synthetic interrupt controller model specific registers. */
#define HV_X64_MSR_SCONTROL 0x40000080
@@ -179,4 +180,12 @@ void synic_sint_create(int vcpu, int sint, int vec, bool auto_eoi);
void synic_sint_set(int vcpu, int sint);
void synic_sint_destroy(int vcpu, int sint);
+/*
+ * Guest view of the Hyper-V reference TSC page.  Readers compute
+ * time = ((tsc * tsc_scale) >> 64) + tsc_offset, validating the
+ * seqlock-style tsc_sequence around the read (0 => page not usable,
+ * fall back to the time reference counter MSR).
+ */
+struct hv_reference_tsc_page {
+ uint32_t tsc_sequence; /* generation counter; 0 means "do not use" */
+ uint32_t res1; /* reserved */
+ uint64_t tsc_scale; /* 64.64 fixed-point multiplier applied to raw TSC */
+ int64_t tsc_offset; /* signed offset added after scaling */
+};
+
+
#endif
diff --git a/x86/hyperv_clock.c b/x86/hyperv_clock.c
new file mode 100644
index 0000000..3cd6af7
--- /dev/null
+++ b/x86/hyperv_clock.c
@@ -0,0 +1,230 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "atomic.h"
+#include "processor.h"
+#include "hyperv.h"
+#include "vm.h"
+#include "asm/barrier.h"
+
+#define MAX_CPU 64 /* upper bound on CPUs exercised; extra CPUs are ignored */
+#define NSEC_PER_SEC 1000000000ULL
+#define HV_NSEC_PER_TICK 100 /* Hyper-V time refs tick in 100ns units */
+#define TICKS_PER_SEC (NSEC_PER_SEC / HV_NSEC_PER_TICK)
+
+#define DURATION 2 /* testcase duration (s) */
+#define CPI_MAX 1000 /* max cycles per iteration */
+
+/* Guest mapping of the reference TSC page, allocated in main(). */
+struct hv_reference_tsc_page *tsc_ref;
+
+/* Per-CPU results of the monotonicity (warp) test. */
+struct warp_test_info {
+ unsigned long long warps; /* number of backward time steps observed */
+ unsigned long long stalls; /* number of zero-delta reads */
+ long long worst; /* most negative delta seen (<= 0) */
+};
+struct warp_test_info wti[MAX_CPU];
+
+/* Per-CPU results of the read-throughput (perf) test. */
+struct perf_test_info {
+ unsigned long long cycles; /* TSC cycles spent in the loop */
+ unsigned long long loops; /* tsc_ref_read() iterations completed */
+};
+struct perf_test_info pti[MAX_CPU];
+
+/* Countdown used by the coordinators to wait for all async workers. */
+atomic_t cpus_left;
+
+/*
+ * ret = (a * b) >> 64, where ret, a, b are 64-bit.
+ *
+ * x86-64 "mul" yields the full 128-bit product in rdx:rax; returning the
+ * high half (rdx, the "=d" output) gives the ">> 64" for free.  This is
+ * exactly the scaling step the Hyper-V TSC page formula requires.
+ */
+static inline u64 mul64_shift64(u64 a, u64 b)
+{
+ u64 product;
+
+ __asm__ (
+ "mul %[b]"
+ : "+a" (a), "=d" (product)
+ : [b] "rm" (b) );
+
+ return product;
+}
+
+
+/*
+ * Read the TSC behind a full barrier so the read cannot be reordered
+ * with earlier memory accesses (a bare rdtsc is not serializing).
+ * NOTE(review): empty parameter list — "(void)" would be the stricter
+ * C prototype form.
+ */
+static inline u64 rdtsc_ordered()
+{
+ /*
+ * FIXME: on Intel CPUs rmb() aka lfence is sufficient which brings up
+ * to 2x speedup
+ */
+ mb();
+ return rdtsc();
+}
+
+/*
+ * One unguarded sample of TSC-page time:
+ * ((tsc * tsc_scale) >> 64) + tsc_offset.
+ * Callers must bracket this with tsc_sequence checks (see tsc_ref_read).
+ */
+static u64 tsc_ref_read_once(void)
+{
+ return mul64_shift64(rdtsc_ordered(), tsc_ref->tsc_scale) +
+ tsc_ref->tsc_offset;
+}
+
+/* Read the Hyper-V time reference counter MSR (100ns ticks). */
+u64 time_msr_read(void)
+{
+ return rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+}
+
+/*
+ * Seqlock-style reader of the reference TSC page: retry while the
+ * hypervisor is updating it (sequence changed mid-read).  A sequence of
+ * 0 means the page is invalid; fall back to the reference counter MSR.
+ */
+u64 tsc_ref_read(void)
+{
+ u32 seq;
+ u64 ret;
+
+ do {
+ seq = tsc_ref->tsc_sequence;
+ if (!seq)
+ return time_msr_read();
+ smp_rmb(); /* fetch version before data */
+ ret = tsc_ref_read_once();
+ smp_rmb(); /* test version after fetching data */
+ } while (tsc_ref->tsc_sequence != seq);
+
+ return ret;
+}
+
+/*
+ * Per-CPU warp test body (runs for DURATION seconds): interleave TSC-page
+ * reads with occasional TIME_REF_COUNT MSR reads and verify the combined
+ * timeline never goes backwards.  Records backward steps (warps), the most
+ * negative delta (worst), and zero-deltas (stalls) into *ti.
+ *
+ * NOTE(review): msr_time is never advanced inside the loop, so once
+ * t >= msr_time every subsequent iteration takes the MSR branch and the
+ * msr_interval doubling has no effect — confirm whether
+ * "msr_time = now + msr_interval" was intended after the MSR read.
+ */
+static void warp_test_cpu(void *data)
+{
+ struct warp_test_info *ti = data;
+ u64 t = time_msr_read();
+ u64 end = t + DURATION * TICKS_PER_SEC;
+ u16 msr_interval = 1;
+ u64 msr_time = t + msr_interval;
+ ti->warps = 0;
+ ti->stalls = 0;
+ ti->worst = 0;
+
+ do {
+ u64 now;
+ s64 delta;
+
+ if (t >= msr_time) {
+ now = time_msr_read();
+
+ if (msr_interval >= (1U << 15))
+ msr_interval = 1;
+ else
+ msr_interval <<= 1;
+ } else
+ now = tsc_ref_read();
+
+ /* signed so a backward step shows up as delta < 0 */
+ delta = now - t;
+
+ if (delta < 0) {
+ ti->warps++;
+ if (delta < ti->worst)
+ ti->worst = delta;
+ }
+ if (delta == 0)
+ ti->stalls++;
+
+ t = now;
+ } while (t < end);
+
+ atomic_dec(&cpus_left);
+}
+
+/*
+ * Per-CPU throughput test body: count how many tsc_ref_read() calls fit
+ * into DURATION seconds, and how many TSC cycles elapsed, into *ti.
+ * The coordinator derives cycles/iteration from the totals.
+ */
+static void perf_test_cpu(void *data)
+{
+ struct perf_test_info *ti = data;
+ u64 end = tsc_ref_read() + DURATION * TICKS_PER_SEC;
+ ti->loops = 0;
+ ti->cycles = rdtsc(); /* start stamp; converted to a delta below */
+
+ do
+ ti->loops++;
+ while (tsc_ref_read() < end);
+
+ ti->cycles = rdtsc() - ti->cycles;
+
+ atomic_dec(&cpus_left);
+}
+
+/*
+ * Wait up to DURATION seconds for the hypervisor to publish a non-zero
+ * tsc_sequence, i.e. to actually start updating the reference TSC page.
+ * Reports failure if the page is never populated.
+ */
+static void presence_test(void)
+{
+ u32 seq;
+ u64 end = time_msr_read() + DURATION * TICKS_PER_SEC;
+
+ do {
+ seq = tsc_ref->tsc_sequence;
+ if (seq)
+ break;
+ } while (time_msr_read() < end);
+
+ report("TSC reference page being updated", seq);
+}
+
+/*
+ * Run warp_test_cpu on all ncpus CPUs concurrently (busy-waiting on the
+ * cpus_left countdown), then aggregate the per-CPU results.  The test
+ * passes only if no CPU ever observed time going backwards (warps == 0).
+ */
+static void warp_test(int ncpus)
+{
+ int i;
+ unsigned long long warps = 0, stalls = 0;
+ long long worst = 0;
+
+ atomic_set(&cpus_left, ncpus);
+ for (i = ncpus - 1; i >= 0; i--)
+ on_cpu_async(i, warp_test_cpu, &wti[i]);
+ while (atomic_read(&cpus_left));
+
+ for (i = 0; i < ncpus; i++) {
+ warps += wti[i].warps;
+ stalls += wti[i].stalls;
+ if (wti[i].worst < worst)
+ worst = wti[i].worst;
+ }
+
+ report("warps: %llu (worst %lld), stalls: %llu",
+ warps == 0, warps, worst, stalls);
+}
+
+/*
+ * Run perf_test_cpu on all ncpus CPUs concurrently, then report the
+ * average cycles per tsc_ref_read() iteration across all CPUs; the test
+ * passes when that average stays below CPI_MAX.
+ */
+static void perf_test(int ncpus)
+{
+ int i;
+ unsigned long long loops = 0, cycles = 0;
+
+ atomic_set(&cpus_left, ncpus);
+ for (i = ncpus - 1; i >= 0; i--)
+ on_cpu_async(i, perf_test_cpu, &pti[i]);
+ while (atomic_read(&cpus_left));
+
+ for (i = 0; i < ncpus; i++) {
+ loops += pti[i].loops;
+ cycles += pti[i].cycles;
+ }
+
+ /* total cycles / total iterations = mean cycles per iteration */
+ cycles /= loops;
+ report("iterations/s/cpu: %llu, "
+ "cycles/iteration: %llu (expected < %u)",
+ cycles < CPI_MAX,
+ loops / DURATION / ncpus, cycles, CPI_MAX);
+}
+
+/*
+ * Test entry point: enable the Hyper-V reference TSC page by writing its
+ * guest-physical address with bit 0 set into HV_X64_MSR_REFERENCE_TSC,
+ * run the presence/warp/perf subtests on up to MAX_CPU CPUs, then disable
+ * the page again and verify the MSR round-trips in both states.
+ */
+int main(int ac, char **av)
+{
+ int ncpus;
+
+ setup_vm();
+ smp_init();
+
+ ncpus = cpu_count();
+ if (ncpus > MAX_CPU)
+ ncpus = MAX_CPU;
+
+ /* bit 0 of the MSR value is the enable bit */
+ tsc_ref = alloc_page();
+ wrmsr(HV_X64_MSR_REFERENCE_TSC, (u64)(uintptr_t)tsc_ref | 1);
+ report("MSR value after enabling",
+ rdmsr(HV_X64_MSR_REFERENCE_TSC) == ((u64)(uintptr_t)tsc_ref | 1));
+
+ presence_test();
+ warp_test(ncpus);
+ perf_test(ncpus);
+
+ wrmsr(HV_X64_MSR_REFERENCE_TSC, 0LL);
+ report("MSR value after disabling", rdmsr(HV_X64_MSR_REFERENCE_TSC) == 0);
+
+ free_page(tsc_ref);
+
+ return report_summary();
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 4a1f74e..a866613 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -200,3 +200,8 @@ extra_params = -cpu kvm64,hv_synic -device hyperv-testdev
file = hyperv_stimer.flat
smp = 2
extra_params = -cpu kvm64,hv_time,hv_synic,hv_stimer -device hyperv-testdev
+
+[hyperv_clock]
+file = hyperv_clock.flat
+smp = 2
+extra_params = -cpu kvm64,hv_time
--
2.7.4
next reply other threads:[~2016-09-01 2:52 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-08-31 14:13 Roman Kagan [this message]
2016-09-01 16:07 ` [PATCH kvm-unit-tests] KVM: x86: add hyperv clock test case Paolo Bonzini
-- strict thread matches above, loose matches on Subject: below --
2016-09-01 16:07 Paolo Bonzini
2016-02-10 13:57 [PATCH 4/4] KVM: x86: track actual TSC frequency from the timekeeper struct Paolo Bonzini
2016-01-28 14:04 [PATCH kvm-unit-tests] KVM: x86: add hyperv clock test case Paolo Bonzini
2016-01-28 14:04 ` Paolo Bonzini
2016-01-28 14:25 ` Andrey Smetanin
2016-01-28 14:50 ` Paolo Bonzini
2016-01-28 15:53 ` Paolo Bonzini
2016-01-28 18:45 ` Roman Kagan
2016-01-28 18:53 ` Roman Kagan
2016-01-28 21:28 ` Paolo Bonzini
2016-01-28 16:22 ` Roman Kagan
2016-02-03 16:37 ` Paolo Bonzini
2016-02-04 9:33 ` Roman Kagan
2016-02-04 10:13 ` Paolo Bonzini
2016-02-04 11:12 ` Roman Kagan
2016-04-21 17:01 ` Roman Kagan
2016-04-22 13:32 ` Roman Kagan
2016-04-22 18:08 ` Paolo Bonzini
2016-04-25 8:47 ` Roman Kagan
2016-04-26 10:34 ` Roman Kagan
2016-05-25 18:33 ` Roman Kagan
2016-05-26 14:47 ` Roman Kagan
2016-05-29 22:34 ` Marcelo Tosatti
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1472652813-28826-1-git-send-email-rkagan@virtuozzo.com \
--to=rkagan@virtuozzo.com \
--cc=eyakovlev@virtuozzo.com \
--cc=kvm@vger.kernel.org \
--cc=pbonzini@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).