diff -up kvm-65/kernel/x86.c.fix kvm-65/kernel/x86.c
--- kvm-65/kernel/x86.c.fix	2008-04-06 21:23:07.000000000 +0200
+++ kvm-65/kernel/x86.c	2008-04-11 15:18:23.000000000 +0200
@@ -548,6 +548,44 @@ static void kvm_write_guest_time(struct
 	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
 }
 
+static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
+{
+	uint32_t quotient, remainder;
+
+	__asm__ ( "divl %4"
+		  : "=a" (quotient), "=d" (remainder)
+		  : "0" (0), "1" (dividend), "r" (divisor) );
+	return quotient;
+}
+
+static void kvm_set_time_scale(uint32_t tsc_khz, struct kvm_vcpu_time_info *hv_clock)
+{
+	uint64_t nsecs = 1000000000LL;
+	int32_t shift = 0;
+	uint64_t tps64;
+	uint32_t tps32;
+
+	tps64 = tsc_khz * 1000LL;
+	while (tps64 > nsecs*2) {
+		tps64 >>= 1;
+		shift--;
+	}
+
+	tps32 = (uint32_t)tps64;
+	while (tps32 <= (uint32_t)nsecs) {
+		tps32 <<= 1;
+		shift++;
+	}
+
+	hv_clock->tsc_shift = shift;
+	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
+
+#if 0
+	printk(KERN_DEBUG "%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
+	       __FUNCTION__, tsc_khz, hv_clock->tsc_shift,
+	       hv_clock->tsc_to_system_mul);
+#endif
+}
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
@@ -596,9 +634,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
 		/* ...but clean it before doing the actual write */
 		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
 
-		vcpu->arch.hv_clock.tsc_to_system_mul =
-			clocksource_khz2mult(kvm_tsc_khz, 22);
-		vcpu->arch.hv_clock.tsc_shift = 22;
+		kvm_set_time_scale(kvm_tsc_khz, &vcpu->arch.hv_clock);
 
 		down_read(&current->mm->mmap_sem);
 		vcpu->arch.time_page =