* [PATCH 2/3] test: Add kvmclock driver
2009-07-26 3:22 [PATCH 1/3] test: Drop print.S Jason Wang
@ 2009-07-26 3:22 ` Jason Wang
2009-07-26 3:22 ` [PATCH 3/3] test: Add test for kvmclock Jason Wang
1 sibling, 0 replies; 3+ messages in thread
From: Jason Wang @ 2009-07-26 3:22 UTC (permalink / raw)
To: mtosatti, avi, kvm
This patch adds a kvmclock driver for the unit tests. A special bit,
PVCLOCK_CYCLE_RAW_TEST_BIT, is used to notify the driver to return
just the raw cycle count supplied by the hypervisor.
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
kvm/test/x86/kvmclock.c | 206 +++++++++++++++++++++++++++++++++++++++++++++++
kvm/test/x86/kvmclock.h | 51 ++++++++++++
2 files changed, 257 insertions(+), 0 deletions(-)
create mode 100644 kvm/test/x86/kvmclock.c
create mode 100644 kvm/test/x86/kvmclock.h
diff --git a/kvm/test/x86/kvmclock.c b/kvm/test/x86/kvmclock.c
new file mode 100644
index 0000000..1520ea4
--- /dev/null
+++ b/kvm/test/x86/kvmclock.c
@@ -0,0 +1,206 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "kvmclock.h"
+
/* Compiler-only barrier: stops gcc reordering memory accesses across it. */
#define barrier() asm volatile("":::"memory")
/* Read barrier (lfence): orders the loads in the version-check loops below. */
#define rmb() asm volatile("lfence":::"memory")

/* Per-vcpu time info areas registered with the hypervisor in kvm_clock_init(). */
struct pvclock_vcpu_time_info hv_clock[MAX_CPU];
/* Wall clock area filled by the hypervisor in kvm_get_wallclock(). */
struct pvclock_wall_clock wall_clock;
/* Set via pvclock_set_flags(); gates the monotonicity clamp in the read path. */
static unsigned char valid_flags = 0;
/* Highest cycle value returned so far; 8-byte aligned for cmpxchg8b on i386. */
static u64 __attribute__((aligned(8))) last_value = 0;
+
#ifdef __i386__
/*
 * Atomically read a 64-bit value on i386, where a plain 8-byte load is
 * not single-copy atomic.  cmpxchg8b compares edx:eax against the memory
 * operand and, on mismatch, loads the current value into edx:eax; by
 * copying ebx:ecx into eax:edx first, the "exchange" stores back the
 * same value either way, so the net effect is an atomic 64-bit load.
 */
static inline u64 atomic64_read(const u64 *v)
{
	u64 res;

	asm volatile("mov %%ebx, %%eax;"
		     "mov %%ecx, %%edx;"
		     "lock cmpxchg8b %1;"
		     : "=&A" (res)
		     : "m" (*v)
		     );

	return res;
}

/*
 * Atomic 64-bit compare-and-exchange: if *v == old, store new into *v.
 * Returns the value of *v observed before the operation, so the caller
 * can detect success (return == old) or retry with the returned value.
 */
static u64 atomic64_cmpxchg(u64 *v, u64 old, u64 new)
{
	u32 low = new;
	u32 high = new >> 32;	/* cmpxchg8b takes the new value in ecx:ebx */

	asm volatile("lock cmpxchg8b %1\n"
		     : "+A" (old), "+m" (*v)
		     : "b" (low), "c" (high)
		     );

	return old;
}
#elif defined(__x86_64__)
/* On x86_64 an aligned 64-bit load is naturally atomic. */
static inline u64 atomic64_read(const u64 *v)
{
	return (*(volatile long *)v);
}

/* Atomic compare-and-exchange via cmpxchgq; returns the prior *v. */
static u64 atomic64_cmpxchg(u64 *v, u64 old, u64 new)
{
	u64 ret;
	u64 _old = old;
	u64 _new = new;

	asm volatile("lock cmpxchgq %1,%2"
		     : "=a" (ret)
		     : "r" (_new), "m" (*(volatile long *)v), "0" (_old)
		     : "memory"
		     );
	return ret;
}
#endif
+
/*
 * Write a 64-bit value to the MSR selected by @index.  wrmsr is a
 * privileged instruction; unit-test code runs in ring 0, so this is
 * callable directly.
 */
static void wrmsr(unsigned index, u64 value)
{
	unsigned a = value, d = value >> 32;	/* wrmsr takes the value in edx:eax */

	asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(index));
}
+
+static u64 native_read_tsc(void)
+{
+ unsigned a, d;
+
+ asm volatile("rdtsc" : "=a"(a), "=d"(d));
+ return a | (u64)d << 32;
+}
+
/*
 * Scale a tsc delta by a 32.32 fixed-point multiplier:
 *
 *	return (delta << shift) * mul_frac >> 32
 *
 * i.e. the middle 64 bits of the 96-bit product — this is how pvclock
 * converts tsc cycles to nanoseconds without overflowing 64 bits.
 * A negative shift divides the delta instead of multiplying it.
 */
static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#endif

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	/* Two 32x32->64 multiplies, recombined to yield bits 32..95
	 * of the full product. */
	__asm__ (
		"mul %5 ; "
		"mov %4,%%eax ; "
		"mov %%edx,%4 ; "
		"mul %5 ; "
		"xor %5,%5 ; "
		"add %4,%%eax ; "
		"adc %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
	/* 64x64->128 multiply; shrd extracts bits 32..95. */
	__asm__ (
		"mul %%rdx ; shrd $32,%%rdx,%%rax"
		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
#else
#error implement me!
#endif

	return product;
}
+
/*
 * Snapshot the hypervisor-updated time info from @src into @dst.
 * The version field acts like a seqlock counter: the hypervisor makes
 * it odd while an update is in flight, so retry while it is odd or
 * changed during the copy.  Returns the (even) version of the snapshot.
 */
static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
					struct pvclock_vcpu_time_info *src)
{
	do {
		dst->version = src->version;
		rmb();		/* fetch version before data */
		dst->tsc_timestamp = src->tsc_timestamp;
		dst->system_timestamp = src->system_time;
		dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
		dst->tsc_shift = src->tsc_shift;
		dst->flags = src->flags;
		rmb();		/* test version after fetching data */
	} while ((src->version & 1) || (dst->version != src->version));

	return dst->version;
}
+
+static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
+{
+ u64 delta = native_read_tsc() - shadow->tsc_timestamp;
+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+}
+
/*
 * Read the clock from @src, in nanoseconds since guest boot.  Retries
 * the whole computation if the hypervisor updated the time info while
 * we were using it.  Unless raw-test mode is selected (or the tsc is
 * flagged stable by both sides), the result is clamped against the
 * global last_value so the clock never appears to run backwards when
 * different vcpus read it in turn.
 */
static cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
	struct pvclock_shadow_time shadow;
	unsigned version;
	cycle_t ret, offset;
	u64 last;

	do {
		version = pvclock_get_time_values(&shadow, src);
		barrier();
		offset = pvclock_get_nsec_offset(&shadow);
		ret = shadow.system_timestamp + offset;
		barrier();
	} while (version != src->version);

	/* Raw-test mode, or a tsc stable per both valid_flags and the
	 * hypervisor-provided flags: return the unclamped value. */
	if ((valid_flags & PVCLOCK_CYCLE_RAW_TEST_BIT) ||
	    ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
	     (shadow.flags & PVCLOCK_TSC_STABLE_BIT)))
		return ret;

	/* Monotonicity fallback: advance last_value towards ret with a
	 * cmpxchg loop, returning whichever of the two is newer. */
	last = atomic64_read(&last_value);
	do {
		if (ret < last)
			return last;
		last = atomic64_cmpxchg(&last_value, last, ret);
	} while (last != ret);

	return ret;
}
+
+cycle_t kvm_clock_read()
+{
+ struct pvclock_vcpu_time_info *src;
+ cycle_t ret;
+ int index = smp_id();
+
+ src = &hv_clock[index];
+ ret = pvclock_clocksource_read(src);
+ return ret;
+}
+
+void kvm_clock_init(void *data)
+{
+ int index = smp_id();
+ struct pvclock_vcpu_time_info *hvc = &hv_clock[index];
+
+ printf("kvm-clock: cpu %d, msr 0x:%lx \n", index, hvc);
+ wrmsr(MSR_KVM_SYSTEM_TIME, (unsigned long)hvc | 1);
+}
+
/*
 * Unregister this vcpu's kvmclock area (writing 0 disables it).
 * @data is unused (on_cpu() callback signature).
 */
void kvm_clock_clear(void *data)
{
	wrmsr(MSR_KVM_SYSTEM_TIME, 0LL);
}
+
/*
 * Fetch the wall-clock (boot) time into @ts.  Writing the address of
 * wall_clock to MSR_KVM_WALL_CLOCK asks the hypervisor to fill the
 * structure.  The version field is seqlock-style: an odd value means
 * an update is in flight, and a change across the copy means we raced
 * with one — retry in either case.
 */
void kvm_get_wallclock(struct timespec *ts)
{
	u32 version;
	wrmsr(MSR_KVM_WALL_CLOCK, (u64)&wall_clock);

	do {
		version = wall_clock.version;
		rmb();		/* fetch version before time */
		ts->sec = wall_clock.sec;
		ts->nsec = wall_clock.nsec;
		rmb();		/* fetch time before checking version */
	} while ((wall_clock.version & 1) || (version != wall_clock.version));

}
+
/* Select which PVCLOCK_* flag bits the clock read path will honour. */
void pvclock_set_flags(unsigned char flags)
{
	valid_flags = flags;
}
diff --git a/kvm/test/x86/kvmclock.h b/kvm/test/x86/kvmclock.h
new file mode 100644
index 0000000..6cc545e
--- /dev/null
+++ b/kvm/test/x86/kvmclock.h
@@ -0,0 +1,51 @@
#ifndef KVMCLOCK_H
#define KVMCLOCK_H

/* NOTE(review): this header uses u32/u64 and assumes libcflat.h was
 * included first — confirm against callers. */

/* Legacy kvmclock MSR numbers from the KVM paravirtual clock ABI. */
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* Number of per-vcpu time info slots the driver reserves. */
#define MAX_CPU 4

#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
#define PVCLOCK_CYCLE_RAW_TEST_BIT (1 << 1) /* Get raw cycle */

typedef u64 cycle_t;

/* Hypervisor-shared per-vcpu time info; layout is ABI, hence packed. */
struct pvclock_vcpu_time_info {
	u32 version;		/* seqlock-style counter; odd = update in flight */
	u32 pad0;
	u64 tsc_timestamp;	/* tsc value when system_time was sampled */
	u64 system_time;	/* nanoseconds since guest boot */
	u32 tsc_to_system_mul;	/* 32.32 fixed-point tsc->ns multiplier */
	signed char tsc_shift;	/* applied to the tsc delta before multiplying */
	u8 flags;
	u8 pad[2];
} __attribute__((__packed__)); /* 32 bytes */

/* Guest-local snapshot of the above, taken under a stable version. */
struct pvclock_shadow_time {
	u64 tsc_timestamp;	/* TSC at last update of time vals. */
	u64 system_timestamp;	/* Time, in nanosecs, since boot. */
	u32 tsc_to_nsec_mul;
	int tsc_shift;
	u32 version;
	u8 flags;
};

/* Hypervisor-shared wall clock area; same seqlock-style versioning. */
struct pvclock_wall_clock {
	u32 version;
	u32 sec;
	u32 nsec;
} __attribute__((__packed__));

struct timespec {
	u32 sec;
	u32 nsec;
};

void pvclock_set_flags(unsigned char flags);
cycle_t kvm_clock_read();
void kvm_get_wallclock(struct timespec *ts);
void kvm_clock_init(void *data);
void kvm_clock_clear(void *data);

#endif
^ permalink raw reply related [flat|nested] 3+ messages in thread* [PATCH 3/3] test: Add test for kvmclock
2009-07-26 3:22 [PATCH 1/3] test: Drop print.S Jason Wang
2009-07-26 3:22 ` [PATCH 2/3] test: Add kvmclock driver Jason Wang
@ 2009-07-26 3:22 ` Jason Wang
1 sibling, 0 replies; 3+ messages in thread
From: Jason Wang @ 2009-07-26 3:22 UTC (permalink / raw)
To: mtosatti, avi, kvm
This patch implements the following tests for kvmclock:
- A simple wall clock test to check whether a consistent value was
returned.
- A monotonic cycle test to check whether the kvmclock driver could
return monotonic cycles in SMP guests.
- A raw cycle test to check whether the hypervisor could provide
monotonic cycles.
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
kvm/test/x86/kvmclock_test.c | 128 ++++++++++++++++++++++++++++++++++++++++++
kvm/test/x86/unittests.cfg | 4 +
2 files changed, 132 insertions(+), 0 deletions(-)
create mode 100644 kvm/test/x86/kvmclock_test.c
diff --git a/kvm/test/config-x86-common.mak b/kvm/test/config-x86-common.mak
index a6ee18c..3dfd450 100644
--- a/kvm/test/config-x86-common.mak
+++ b/kvm/test/config-x86-common.mak
@@ -25,7 +25,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
$(TEST_DIR)/smptest.flat $(TEST_DIR)/port80.flat \
$(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
- $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
+ $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
+ $(TEST_DIR)/kvmclock_test.flat
tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
@@ -67,6 +68,9 @@ $(TEST_DIR)/xsave.flat: $(cstart.o) $(TEST_DIR)/idt.o $(TEST_DIR)/xsave.o
$(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
$(TEST_DIR)/vm.o
+$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
+ $(TEST_DIR)/kvmclock_test.o
+
arch_clean:
$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
diff --git a/kvm/test/x86/kvmclock_test.c b/kvm/test/x86/kvmclock_test.c
new file mode 100644
index 0000000..8530a89
--- /dev/null
+++ b/kvm/test/x86/kvmclock_test.c
@@ -0,0 +1,128 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "kvmclock.h"
+
+#define TEST_LOOPS 100000000
+
/*
 * Atomically read the counter pointed to by @v.  The original read
 * "*(volatile int *)&(v)", i.e. it dereferenced the address of the
 * POINTER itself, returning low bits of the pointer value instead of
 * the counter's contents.
 */
static inline int atomic_read(int *v)
{
	return (*(volatile int *)v);
}
+
+static inline void atomic_dec(int *v)
+{
+ asm volatile("lock decl %0": "+m" (*v));
+}
+
/* Shared state for one cycle_test() run; updated by all vcpus under lock. */
struct test_info {
	struct spinlock lock;
	u64 loops;		/* test loops */
	u64 warps;		/* warp count */
	long long worst;	/* worst warp */
	volatile cycle_t last;	/* last cycle seen by test */
	int ncpus;		/* number of cpu in the test*/
};

/* One slot per cycle_test() invocation (monotonic pass, raw pass). */
struct test_info ti[2];
+
+static int wallclock_test()
+{
+ int i;
+ struct timespec ts, ts_last;
+
+ kvm_get_wallclock(&ts_last);
+
+ for (i=0; i < 100; i++){
+ kvm_get_wallclock(&ts);
+ if (ts.nsec != ts_last.nsec || ts.sec != ts_last.sec){
+ printf ("Inconsistent wall clock returned!\n");
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void kvm_clock_test(void *data)
+{
+ struct test_info *hv_test_info = (struct test_info *)data;
+ int index = smp_id();
+ int i;
+
+ for (i = 0; i < hv_test_info->loops; i++){
+ cycle_t t0, t1;
+ long long delta;
+
+ spin_lock(&hv_test_info->lock);
+ t1 = kvm_clock_read();
+ t0 = hv_test_info->last;
+ hv_test_info->last = kvm_clock_read();
+ spin_unlock(&hv_test_info->lock);
+
+ delta = t1 - t0;
+ if (delta < 0){
+ spin_lock(&hv_test_info->lock);
+ ++hv_test_info->warps;
+ if (delta < hv_test_info->worst){
+ hv_test_info->worst = delta;
+ printf("Worst warp %lld %\n", hv_test_info->worst);
+ }
+ spin_unlock(&hv_test_info->lock);
+ }
+
+ if (!((unsigned long)i & 31))
+ asm volatile("rep; nop");
+ }
+
+ atomic_dec(&hv_test_info->ncpus);
+}
+
+static int cycle_test(int ncpus, int loops, struct test_info *ti)
+{
+ int i;
+
+ ti->ncpus = ncpus;
+ ti->loops = loops;
+ for (i = ncpus - 1; i >= 0; --i)
+ on_cpu_async(i, kvm_clock_test, (void *)ti);
+
+ /* Wait for the end of other vcpu */
+ while(!atomic_read(&ti->ncpus))
+ ;
+
+ printf("Total vcpus: %d\n", ncpus);
+ printf("Total loops: %lld\n", ti->loops * ncpus);
+ printf("Total warps: %lld\n", ti->warps);
+ printf("Worst warp: %lld\n", ti->worst);
+
+ return ti->warps ? 1 : 0;
+}
+
/*
 * Test driver: register kvmclock on each vcpu, run the wall-clock
 * consistency test, then the monotonic and raw cycle tests, and exit
 * non-zero if any test reported an error.
 */
int main()
{
	int ncpus = cpu_count();
	int nerr = 0, i;

	smp_init();

	/* The driver only reserves MAX_CPU per-vcpu time info slots. */
	if (ncpus > MAX_CPU)
		ncpus = MAX_CPU;
	for (i = 0; i < ncpus; ++i)
		on_cpu(i, kvm_clock_init, (void *)0);

	printf("Wallclock test:\n");
	nerr += wallclock_test();

	/* Pass 1: driver-enforced monotonic reads (last_value clamp). */
	pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
	printf("Monotonic cycle test:\n");
	nerr += cycle_test(ncpus, TEST_LOOPS, &ti[0]);

	/* Pass 2: raw hypervisor cycles, no monotonicity clamp. */
	pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
			  | PVCLOCK_CYCLE_RAW_TEST_BIT);
	printf("Raw cycle test:\n");
	nerr += cycle_test(ncpus, TEST_LOOPS, &ti[1]);

	/* Unregister the clock areas before exiting. */
	for (i = 0; i < ncpus; ++i)
		on_cpu(i, kvm_clock_clear, (void *)0);

	return nerr > 0 ? 1 : 0;
}
diff --git a/kvm/test/x86/unittests.cfg b/kvm/test/x86/unittests.cfg
index f39c5bd..0d077ac 100644
--- a/kvm/test/x86/unittests.cfg
+++ b/kvm/test/x86/unittests.cfg
@@ -53,3 +53,7 @@ file = xsave.flat
[rmap_chain]
file = rmap_chain.flat
+
+[kvmclock]
+file = kvmclock_test.flat
+smp = 2
^ permalink raw reply related [flat|nested] 3+ messages in thread