* kvm/ia64: Fix halt emulation logic.
@ 2008-10-15 13:47 Zhang, Xiantao
2008-10-16 8:53 ` Avi Kivity
2008-10-18 15:27 ` Marcelo Tosatti
0 siblings, 2 replies; 9+ messages in thread
From: Zhang, Xiantao @ 2008-10-15 13:47 UTC (permalink / raw)
To: avi, kvm-ia64; +Cc: kvm
[-- Attachment #1: Type: text/plain, Size: 8651 bytes --]
Hi, Avi
This is the key fix for the 2.6.28 merge. Without this patch, a guest
may hang once configured with more than 2 vcpus. This is because the x86
side changed the common halt-handling logic but missed updating the ia64
side accordingly.
Thanks
Xiantao
>From 82b5626f9e3e422e1cd4352ecdff08c1c0dd394e Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@intel.com>
Date: Wed, 15 Oct 2008 19:49:38 +0800
Subject: [PATCH] kvm/ia64: Fix halt emulation logic.
Common halt handling logic was changed on the x86 side; this patch
fixes it for the ia64 side. Otherwise, a guest may hang once configured
with more than 2 vcpus.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
---
arch/ia64/include/asm/kvm_host.h | 3 +-
arch/ia64/kvm/kvm-ia64.c | 79
++++++++++++++++++++------------------
arch/ia64/kvm/kvm_fw.c | 9 +++-
arch/ia64/kvm/process.c | 2 +-
4 files changed, 52 insertions(+), 41 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h
b/arch/ia64/include/asm/kvm_host.h
index 85db124..b9e3c7f 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
long itc_offset;
unsigned long itc_check;
unsigned long timer_check;
- unsigned long timer_pending;
+ unsigned int timer_pending;
+ unsigned int timer_fired;
unsigned long vrr[8];
unsigned long ibr[8];
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c0699f0..ae0c8dd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -325,6 +325,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm,
unsigned long id,
return NULL;
}
+
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
@@ -385,6 +386,7 @@ static int handle_global_purge(struct kvm_vcpu
*vcpu, struct kvm_run *kvm_run)
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+
call_data.ptc_g_data = p->u.ptc_g_data;
for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -398,8 +400,11 @@ static int handle_global_purge(struct kvm_vcpu
*vcpu, struct kvm_run *kvm_run)
if (kvm->vcpus[i]->cpu != -1) {
call_data.vcpu = kvm->vcpus[i];
- smp_call_function_single(kvm->vcpus[i]->cpu,
+ if (kvm->vcpus[i]->cpu != smp_processor_id())
+
smp_call_function_single(kvm->vcpus[i]->cpu,
vcpu_global_purge, &call_data,
1);
+ else
+ vcpu_global_purge(&call_data);
} else
printk(KERN_WARNING"kvm: Uninit vcpu received
ipi!\n");
@@ -418,33 +423,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
ktime_t kt;
long itc_diff;
unsigned long vcpu_now_itc;
-
unsigned long expires;
struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) +
vcpu->arch.itc_offset;
+ if (irqchip_in_kernel(vcpu->kvm)) {
- if (time_after(vcpu_now_itc, vpd->itm)) {
- vcpu->arch.timer_check = 1;
- return 1;
- }
- itc_diff = vpd->itm - vcpu_now_itc;
- if (itc_diff < 0)
- itc_diff = -itc_diff;
+ vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) +
vcpu->arch.itc_offset;
+
+ if (time_after(vcpu_now_itc, vpd->itm)) {
+ vcpu->arch.timer_check = 1;
+ return 1;
+ }
+ itc_diff = vpd->itm - vcpu_now_itc;
+ if (itc_diff < 0)
+ itc_diff = -itc_diff;
- expires = div64_u64(itc_diff, cyc_per_usec);
- kt = ktime_set(0, 1000 * expires);
- vcpu->arch.ht_active = 1;
- hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+ expires = div64_u64(itc_diff, cyc_per_usec);
+ kt = ktime_set(0, 1000 * expires);
+
+ down_read(&vcpu->kvm->slots_lock);
+ vcpu->arch.ht_active = 1;
+ hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
- if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
kvm_vcpu_block(vcpu);
hrtimer_cancel(p_ht);
vcpu->arch.ht_active = 0;
+ if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+ vcpu->arch.mp_state =
+ KVM_MP_STATE_RUNNABLE;
+ up_read(&vcpu->kvm->slots_lock);
+
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
return 1;
@@ -484,10 +497,6 @@ static int (*kvm_vti_exit_handlers[])(struct
kvm_vcpu *vcpu,
static const int kvm_vti_max_exit_handlers =
sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p_exit_data;
@@ -600,10 +609,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct
kvm_run *kvm_run)
again:
preempt_disable();
-
- kvm_prepare_guest_switch(vcpu);
local_irq_disable();
+
if (signal_pending(current)) {
local_irq_enable();
preempt_enable();
@@ -614,7 +622,7 @@ again:
vcpu->guest_mode = 1;
kvm_guest_enter();
-
+ down_read(&vcpu->kvm->slots_lock);
r = vti_vcpu_run(vcpu, kvm_run);
if (r < 0) {
local_irq_enable();
@@ -634,9 +642,8 @@ again:
* But we need to prevent reordering, hence this barrier():
*/
barrier();
-
kvm_guest_exit();
-
+ up_read(&vcpu->kvm->slots_lock);
preempt_enable();
r = kvm_handle_exit(kvm_run, vcpu);
@@ -673,6 +680,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED))
{
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu_put(vcpu);
return -EAGAIN;
}
@@ -1123,15 +1131,16 @@ static enum hrtimer_restart hlt_timer_fn(struct
hrtimer *data)
wait_queue_head_t *q;
vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+ q = &vcpu->wq;
+
if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
goto out;
- q = &vcpu->wq;
- if (waitqueue_active(q)) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(q))
wake_up_interruptible(q);
- }
+
out:
+ vcpu->arch.timer_fired = 1;
vcpu->arch.timer_check = 1;
return HRTIMER_NORESTART;
}
@@ -1700,12 +1709,14 @@ static void vcpu_kick_intr(void *info)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int ipi_pcpu = vcpu->cpu;
+ int cpu = get_cpu();
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- if (vcpu->guest_mode)
+ if (vcpu->guest_mode && cpu != ipi_pcpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu,
0);
+ put_cpu();
}
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1715,13 +1726,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8
vec, u8 trig)
if (!test_and_set_bit(vec, &vpd->irr[0])) {
vcpu->arch.irq_new_pending = 1;
- if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
- kvm_vcpu_kick(vcpu);
- else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
- }
+ kvm_vcpu_kick(vcpu);
return 1;
}
return 0;
@@ -1791,7 +1796,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return 0;
+ return vcpu->arch.timer_fired;
}
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 0c69d9e..cb7600b 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu
*vcpu)
return index;
}
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.timer_pending = 1;
+ vcpu->arch.timer_fired = 0;
+}
+
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct
kvm_run *run)
break;
case PAL_HALT_LIGHT:
{
- vcpu->arch.timer_pending = 1;
INIT_PAL_STATUS_SUCCESS(result);
+ prepare_for_halt(vcpu);
if (kvm_highest_pending_irq(vcpu) == -1)
ret = kvm_emulate_halt(vcpu);
-
}
break;
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 3417783..8008173 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
if (!(VCPU(v, itv) & (1 << 16))) {
vcpu_pend_interrupt(v, VCPU(v,
itv)
& 0xff);
- VMX(v, itc_check) = 0;
+ VMX(v, itc_check) = 0;
} else {
v->arch.timer_pending = 1;
}
--
1.5.1
[-- Attachment #2: 0001-kvm-ia64-Fix-halt-emulation-logic.patch --]
[-- Type: application/octet-stream, Size: 8112 bytes --]
From 82b5626f9e3e422e1cd4352ecdff08c1c0dd394e Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@intel.com>
Date: Wed, 15 Oct 2008 19:49:38 +0800
Subject: [PATCH] kvm/ia64: Fix halt emulation logic.
Common halt handling logic was changed on the x86 side; this patch
fixes it for the ia64 side. Otherwise, a guest may hang once configured
with more than 2 vcpus.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
---
arch/ia64/include/asm/kvm_host.h | 3 +-
arch/ia64/kvm/kvm-ia64.c | 79 ++++++++++++++++++++------------------
arch/ia64/kvm/kvm_fw.c | 9 +++-
arch/ia64/kvm/process.c | 2 +-
4 files changed, 52 insertions(+), 41 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 85db124..b9e3c7f 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
long itc_offset;
unsigned long itc_check;
unsigned long timer_check;
- unsigned long timer_pending;
+ unsigned int timer_pending;
+ unsigned int timer_fired;
unsigned long vrr[8];
unsigned long ibr[8];
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c0699f0..ae0c8dd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -325,6 +325,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
return NULL;
}
+
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
@@ -385,6 +386,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+
call_data.ptc_g_data = p->u.ptc_g_data;
for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -398,8 +400,11 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (kvm->vcpus[i]->cpu != -1) {
call_data.vcpu = kvm->vcpus[i];
- smp_call_function_single(kvm->vcpus[i]->cpu,
+ if (kvm->vcpus[i]->cpu != smp_processor_id())
+ smp_call_function_single(kvm->vcpus[i]->cpu,
vcpu_global_purge, &call_data, 1);
+ else
+ vcpu_global_purge(&call_data);
} else
printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
@@ -418,33 +423,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
ktime_t kt;
long itc_diff;
unsigned long vcpu_now_itc;
-
unsigned long expires;
struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+ if (irqchip_in_kernel(vcpu->kvm)) {
- if (time_after(vcpu_now_itc, vpd->itm)) {
- vcpu->arch.timer_check = 1;
- return 1;
- }
- itc_diff = vpd->itm - vcpu_now_itc;
- if (itc_diff < 0)
- itc_diff = -itc_diff;
+ vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+
+ if (time_after(vcpu_now_itc, vpd->itm)) {
+ vcpu->arch.timer_check = 1;
+ return 1;
+ }
+ itc_diff = vpd->itm - vcpu_now_itc;
+ if (itc_diff < 0)
+ itc_diff = -itc_diff;
- expires = div64_u64(itc_diff, cyc_per_usec);
- kt = ktime_set(0, 1000 * expires);
- vcpu->arch.ht_active = 1;
- hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+ expires = div64_u64(itc_diff, cyc_per_usec);
+ kt = ktime_set(0, 1000 * expires);
+
+ down_read(&vcpu->kvm->slots_lock);
+ vcpu->arch.ht_active = 1;
+ hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
- if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
kvm_vcpu_block(vcpu);
hrtimer_cancel(p_ht);
vcpu->arch.ht_active = 0;
+ if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+ vcpu->arch.mp_state =
+ KVM_MP_STATE_RUNNABLE;
+ up_read(&vcpu->kvm->slots_lock);
+
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
return 1;
@@ -484,10 +497,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
static const int kvm_vti_max_exit_handlers =
sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p_exit_data;
@@ -600,10 +609,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
again:
preempt_disable();
-
- kvm_prepare_guest_switch(vcpu);
local_irq_disable();
+
if (signal_pending(current)) {
local_irq_enable();
preempt_enable();
@@ -614,7 +622,7 @@ again:
vcpu->guest_mode = 1;
kvm_guest_enter();
-
+ down_read(&vcpu->kvm->slots_lock);
r = vti_vcpu_run(vcpu, kvm_run);
if (r < 0) {
local_irq_enable();
@@ -634,9 +642,8 @@ again:
* But we need to prevent reordering, hence this barrier():
*/
barrier();
-
kvm_guest_exit();
-
+ up_read(&vcpu->kvm->slots_lock);
preempt_enable();
r = kvm_handle_exit(kvm_run, vcpu);
@@ -673,6 +680,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu_put(vcpu);
return -EAGAIN;
}
@@ -1123,15 +1131,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
wait_queue_head_t *q;
vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+ q = &vcpu->wq;
+
if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
goto out;
- q = &vcpu->wq;
- if (waitqueue_active(q)) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(q))
wake_up_interruptible(q);
- }
+
out:
+ vcpu->arch.timer_fired = 1;
vcpu->arch.timer_check = 1;
return HRTIMER_NORESTART;
}
@@ -1700,12 +1709,14 @@ static void vcpu_kick_intr(void *info)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int ipi_pcpu = vcpu->cpu;
+ int cpu = get_cpu();
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- if (vcpu->guest_mode)
+ if (vcpu->guest_mode && cpu != ipi_pcpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+ put_cpu();
}
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1715,13 +1726,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
if (!test_and_set_bit(vec, &vpd->irr[0])) {
vcpu->arch.irq_new_pending = 1;
- if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
- kvm_vcpu_kick(vcpu);
- else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
- }
+ kvm_vcpu_kick(vcpu);
return 1;
}
return 0;
@@ -1791,7 +1796,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return 0;
+ return vcpu->arch.timer_fired;
}
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 0c69d9e..cb7600b 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
return index;
}
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.timer_pending = 1;
+ vcpu->arch.timer_fired = 0;
+}
+
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
break;
case PAL_HALT_LIGHT:
{
- vcpu->arch.timer_pending = 1;
INIT_PAL_STATUS_SUCCESS(result);
+ prepare_for_halt(vcpu);
if (kvm_highest_pending_irq(vcpu) == -1)
ret = kvm_emulate_halt(vcpu);
-
}
break;
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 3417783..8008173 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
if (!(VCPU(v, itv) & (1 << 16))) {
vcpu_pend_interrupt(v, VCPU(v, itv)
& 0xff);
- VMX(v, itc_check) = 0;
+ VMX(v, itc_check) = 0;
} else {
v->arch.timer_pending = 1;
}
--
1.5.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* RE: kvm/ia64: Fix halt emulation logic.
@ 2008-10-15 13:52 Zhang, Xiantao
0 siblings, 0 replies; 9+ messages in thread
From: Zhang, Xiantao @ 2008-10-15 13:52 UTC (permalink / raw)
To: Zhang, Xiantao, avi, kvm-ia64; +Cc: kvm
Forgot to mention that Jes had tested booting 16 vcpus and didn't hit
any issue. Sorry! :)
Acked-by : Jes Sorensen <jes@sgi.com>
Zhang, Xiantao wrote:
> Hi, Avi
> This is the key fix for 2.6.28 merge. Without this patch, guest
may
> hang once configured with more than 2 vcpus, it is because x86 side
> changed the halt handling's common logic, but it misses to change
> ia64 side. Thanks
> Xiantao
> From 82b5626f9e3e422e1cd4352ecdff08c1c0dd394e Mon Sep 17 00:00:00 2001
> From: Xiantao Zhang <xiantao.zhang@intel.com>
> Date: Wed, 15 Oct 2008 19:49:38 +0800
> Subject: [PATCH] kvm/ia64: Fix halt emulation logic.
>
> Common halt halding logic is changed by x86 side, this patch
> fix it for ia64 side. Otherwise, guest may hang once configured
> more than 2 vcpus.
> Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
> ---
> arch/ia64/include/asm/kvm_host.h | 3 +-
> arch/ia64/kvm/kvm-ia64.c | 79
> ++++++++++++++++++++------------------ arch/ia64/kvm/kvm_fw.c
> | 9 +++- arch/ia64/kvm/process.c | 2 +-
> 4 files changed, 52 insertions(+), 41 deletions(-)
>
> diff --git a/arch/ia64/include/asm/kvm_host.h
> b/arch/ia64/include/asm/kvm_host.h
> index 85db124..b9e3c7f 100644
> --- a/arch/ia64/include/asm/kvm_host.h
> +++ b/arch/ia64/include/asm/kvm_host.h
> @@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
> long itc_offset;
> unsigned long itc_check;
> unsigned long timer_check;
> - unsigned long timer_pending;
> + unsigned int timer_pending;
> + unsigned int timer_fired;
>
> unsigned long vrr[8];
> unsigned long ibr[8];
> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
> index c0699f0..ae0c8dd 100644
> --- a/arch/ia64/kvm/kvm-ia64.c
> +++ b/arch/ia64/kvm/kvm-ia64.c
> @@ -325,6 +325,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm
> *kvm, unsigned long id, return NULL;
> }
>
> +
> static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
> {
> struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
> @@ -385,6 +386,7 @@ static int handle_global_purge(struct kvm_vcpu
> *vcpu, struct kvm_run *kvm_run) struct kvm *kvm = vcpu->kvm;
> struct call_data call_data;
> int i;
> +
> call_data.ptc_g_data = p->u.ptc_g_data;
>
> for (i = 0; i < KVM_MAX_VCPUS; i++) {
> @@ -398,8 +400,11 @@ static int handle_global_purge(struct kvm_vcpu
> *vcpu, struct kvm_run *kvm_run)
>
> if (kvm->vcpus[i]->cpu != -1) {
> call_data.vcpu = kvm->vcpus[i];
> - smp_call_function_single(kvm->vcpus[i]->cpu,
> + if (kvm->vcpus[i]->cpu != smp_processor_id())
> +
smp_call_function_single(kvm->vcpus[i]->cpu,
> vcpu_global_purge, &call_data,
1);
> + else
> + vcpu_global_purge(&call_data);
> } else
> printk(KERN_WARNING"kvm: Uninit vcpu received
ipi!\n");
>
> @@ -418,33 +423,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
> ktime_t kt;
> long itc_diff;
> unsigned long vcpu_now_itc;
> -
> unsigned long expires;
> struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
> unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
> struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
>
> - vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) +
> vcpu->arch.itc_offset; + if (irqchip_in_kernel(vcpu->kvm)) {
>
> - if (time_after(vcpu_now_itc, vpd->itm)) {
> - vcpu->arch.timer_check = 1;
> - return 1;
> - }
> - itc_diff = vpd->itm - vcpu_now_itc;
> - if (itc_diff < 0)
> - itc_diff = -itc_diff;
> + vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) +
> vcpu->arch.itc_offset; +
> + if (time_after(vcpu_now_itc, vpd->itm)) {
> + vcpu->arch.timer_check = 1;
> + return 1;
> + }
> + itc_diff = vpd->itm - vcpu_now_itc;
> + if (itc_diff < 0)
> + itc_diff = -itc_diff;
>
> - expires = div64_u64(itc_diff, cyc_per_usec);
> - kt = ktime_set(0, 1000 * expires);
> - vcpu->arch.ht_active = 1;
> - hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
> + expires = div64_u64(itc_diff, cyc_per_usec);
> + kt = ktime_set(0, 1000 * expires);
> +
> + down_read(&vcpu->kvm->slots_lock);
> + vcpu->arch.ht_active = 1;
> + hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
>
> - if (irqchip_in_kernel(vcpu->kvm)) {
> vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
> kvm_vcpu_block(vcpu);
> hrtimer_cancel(p_ht);
> vcpu->arch.ht_active = 0;
>
> + if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
> + if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
> + vcpu->arch.mp_state =
> + KVM_MP_STATE_RUNNABLE;
> + up_read(&vcpu->kvm->slots_lock);
> +
> if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
> return -EINTR;
> return 1;
> @@ -484,10 +497,6 @@ static int (*kvm_vti_exit_handlers[])(struct
> kvm_vcpu *vcpu, static const int kvm_vti_max_exit_handlers =
>
sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
>
> -static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
> -{
> -}
> -
> static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
> {
> struct exit_ctl_data *p_exit_data;
> @@ -600,10 +609,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu,
> struct kvm_run *kvm_run)
>
> again:
> preempt_disable();
> -
> - kvm_prepare_guest_switch(vcpu);
> local_irq_disable();
>
> +
> if (signal_pending(current)) {
> local_irq_enable();
> preempt_enable();
> @@ -614,7 +622,7 @@ again:
>
> vcpu->guest_mode = 1;
> kvm_guest_enter();
> -
> + down_read(&vcpu->kvm->slots_lock);
> r = vti_vcpu_run(vcpu, kvm_run);
> if (r < 0) {
> local_irq_enable();
> @@ -634,9 +642,8 @@ again:
> * But we need to prevent reordering, hence this barrier():
> */
> barrier();
> -
> kvm_guest_exit();
> -
> + up_read(&vcpu->kvm->slots_lock);
> preempt_enable();
>
> r = kvm_handle_exit(kvm_run, vcpu);
> @@ -673,6 +680,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu
> *vcpu, struct kvm_run *kvm_run)
>
> if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED))
{
> kvm_vcpu_block(vcpu);
> + clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
> vcpu_put(vcpu);
> return -EAGAIN;
> }
> @@ -1123,15 +1131,16 @@ static enum hrtimer_restart
> hlt_timer_fn(struct hrtimer *data) wait_queue_head_t *q;
>
> vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
> + q = &vcpu->wq;
> +
> if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
> goto out;
>
> - q = &vcpu->wq;
> - if (waitqueue_active(q)) {
> - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
> + if (waitqueue_active(q))
> wake_up_interruptible(q);
> - }
> +
> out:
> + vcpu->arch.timer_fired = 1;
> vcpu->arch.timer_check = 1;
> return HRTIMER_NORESTART;
> }
> @@ -1700,12 +1709,14 @@ static void vcpu_kick_intr(void *info)
> void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
> {
> int ipi_pcpu = vcpu->cpu;
> + int cpu = get_cpu();
>
> if (waitqueue_active(&vcpu->wq))
> wake_up_interruptible(&vcpu->wq);
>
> - if (vcpu->guest_mode)
> + if (vcpu->guest_mode && cpu != ipi_pcpu)
> smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu,
0);
> + put_cpu();
> }
>
> int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
> @@ -1715,13 +1726,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8
> vec, u8 trig)
>
> if (!test_and_set_bit(vec, &vpd->irr[0])) {
> vcpu->arch.irq_new_pending = 1;
> - if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
> - kvm_vcpu_kick(vcpu);
> - else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
> - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
> - if (waitqueue_active(&vcpu->wq))
> - wake_up_interruptible(&vcpu->wq);
> - }
> + kvm_vcpu_kick(vcpu);
> return 1;
> }
> return 0;
> @@ -1791,7 +1796,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
>
> int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
> {
> - return 0;
> + return vcpu->arch.timer_fired;
> }
>
> gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
> diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
> index 0c69d9e..cb7600b 100644
> --- a/arch/ia64/kvm/kvm_fw.c
> +++ b/arch/ia64/kvm/kvm_fw.c
> @@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct
> kvm_vcpu *vcpu) return index;
> }
>
> +static void prepare_for_halt(struct kvm_vcpu *vcpu)
> +{
> + vcpu->arch.timer_pending = 1;
> + vcpu->arch.timer_fired = 0;
> +}
> +
> int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
> {
>
> @@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct
> kvm_run *run) break;
> case PAL_HALT_LIGHT:
> {
> - vcpu->arch.timer_pending = 1;
> INIT_PAL_STATUS_SUCCESS(result);
> + prepare_for_halt(vcpu);
> if (kvm_highest_pending_irq(vcpu) == -1)
> ret = kvm_emulate_halt(vcpu);
> -
> }
> break;
>
> diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
> index 3417783..8008173 100644
> --- a/arch/ia64/kvm/process.c
> +++ b/arch/ia64/kvm/process.c
> @@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
> if (!(VCPU(v, itv) & (1 << 16))) {
> vcpu_pend_interrupt(v, VCPU(v,
itv)
> & 0xff);
> - VMX(v, itc_check) = 0;
> + VMX(v, itc_check) = 0;
> } else {
> v->arch.timer_pending = 1;
> }
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: kvm/ia64: Fix halt emulation logic.
2008-10-15 13:47 kvm/ia64: Fix halt emulation logic Zhang, Xiantao
@ 2008-10-16 8:53 ` Avi Kivity
2008-10-16 8:57 ` Zhang, Xiantao
2008-10-18 15:27 ` Marcelo Tosatti
1 sibling, 1 reply; 9+ messages in thread
From: Avi Kivity @ 2008-10-16 8:53 UTC (permalink / raw)
To: Zhang, Xiantao; +Cc: kvm-ia64, kvm
Zhang, Xiantao wrote:
> Hi, Avi
> This is the key fix for 2.6.28 merge. Without this patch, guest
> may hang once configured with more than 2 vcpus, it is because x86 side
> changed the halt handling's common logic, but it misses to change ia64
> side.
>
> @@ -398,8 +400,11 @@ static int handle_global_purge(struct kvm_vcpu
> *vcpu, struct kvm_run *kvm_run)
>
> if (kvm->vcpus[i]->cpu != -1) {
> call_data.vcpu = kvm->vcpus[i];
> - smp_call_function_single(kvm->vcpus[i]->cpu,
> + if (kvm->vcpus[i]->cpu != smp_processor_id())
> +
> smp_call_function_single(kvm->vcpus[i]->cpu,
> vcpu_global_purge, &call_data,
> 1);
> + else
> + vcpu_global_purge(&call_data);
>
smp_call_function_single() will call the function on the current cpu if
instructed, so this change is unneeded.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 9+ messages in thread
* RE: kvm/ia64: Fix halt emulation logic.
2008-10-16 8:53 ` Avi Kivity
@ 2008-10-16 8:57 ` Zhang, Xiantao
2008-10-16 9:01 ` Avi Kivity
0 siblings, 1 reply; 9+ messages in thread
From: Zhang, Xiantao @ 2008-10-16 8:57 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-ia64, kvm
Avi Kivity wrote:
> Zhang, Xiantao wrote:
>> Hi, Avi
>> This is the key fix for 2.6.28 merge. Without this patch, guest
>> may hang once configured with more than 2 vcpus, it is because x86
>> side changed the halt handling's common logic, but it misses to
>> change ia64 side.
>>
>
>> @@ -398,8 +400,11 @@ static int handle_global_purge(struct kvm_vcpu
>> *vcpu, struct kvm_run *kvm_run)
>>
>> if (kvm->vcpus[i]->cpu != -1) {
>> call_data.vcpu = kvm->vcpus[i];
>> - smp_call_function_single(kvm->vcpus[i]->cpu,
>> + if (kvm->vcpus[i]->cpu != smp_processor_id())
>> +
>> smp_call_function_single(kvm->vcpus[i]->cpu,
>> vcpu_global_purge, &call_data,
>> 1);
>> + else
>> + vcpu_global_purge(&call_data);
>>
>
> smp_call_function_single() will call the function on the current cpu
> if instructed, so this change is unneeded.
Aha, thanks for pointing this out. Yes, it is unnecessary. Could you
remove it directly, or do you need me to resend?
Xiantao
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: kvm/ia64: Fix halt emulation logic.
2008-10-16 8:57 ` Zhang, Xiantao
@ 2008-10-16 9:01 ` Avi Kivity
2008-10-16 9:19 ` Zhang, Xiantao
0 siblings, 1 reply; 9+ messages in thread
From: Avi Kivity @ 2008-10-16 9:01 UTC (permalink / raw)
To: Zhang, Xiantao; +Cc: kvm-ia64, kvm
Zhang, Xiantao wrote:
>>
>> smp_call_function_single() will call the function on the current cpu
>> if instructed, so this change is unneeded.
>>
> Aha, Thanks for pointing this. Yes, it is unnecessary. Could you help to
> remove it directly ? or need me have a resend.
Please resend, since I can't test (or even compile) it.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 9+ messages in thread
* RE: kvm/ia64: Fix halt emulation logic.
2008-10-16 9:01 ` Avi Kivity
@ 2008-10-16 9:19 ` Zhang, Xiantao
2008-10-16 9:43 ` Avi Kivity
0 siblings, 1 reply; 9+ messages in thread
From: Zhang, Xiantao @ 2008-10-16 9:19 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-ia64, kvm
[-- Attachment #1: Type: text/plain, Size: 8312 bytes --]
Attached the updated one. Thanks!
Xiantao
>From 8d827b19ba03dee74edeb5e2bcc23bed7b9f38ea Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@intel.com>
Date: Thu, 16 Oct 2008 15:58:15 +0800
Subject: [PATCH] kvm/ia64: Fix halt emulation logic.
Common halt handling logic was changed on the x86 side; this patch
fixes it for the ia64 side. Otherwise, a guest may hang once configured
with more than 2 vcpus.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
---
arch/ia64/include/asm/kvm_host.h | 3 +-
arch/ia64/kvm/kvm-ia64.c | 72
+++++++++++++++++++-------------------
arch/ia64/kvm/kvm_fw.c | 9 ++++-
arch/ia64/kvm/process.c | 2 +-
4 files changed, 46 insertions(+), 40 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h
b/arch/ia64/include/asm/kvm_host.h
index 85db124..b9e3c7f 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
long itc_offset;
unsigned long itc_check;
unsigned long timer_check;
- unsigned long timer_pending;
+ unsigned int timer_pending;
+ unsigned int timer_fired;
unsigned long vrr[8];
unsigned long ibr[8];
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c0699f0..e66a3db 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu
*vcpu, struct kvm_run *kvm_run)
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+
call_data.ptc_g_data = p->u.ptc_g_data;
for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,33 +419,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
ktime_t kt;
long itc_diff;
unsigned long vcpu_now_itc;
-
unsigned long expires;
struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) +
vcpu->arch.itc_offset;
+ if (irqchip_in_kernel(vcpu->kvm)) {
- if (time_after(vcpu_now_itc, vpd->itm)) {
- vcpu->arch.timer_check = 1;
- return 1;
- }
- itc_diff = vpd->itm - vcpu_now_itc;
- if (itc_diff < 0)
- itc_diff = -itc_diff;
+ vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) +
vcpu->arch.itc_offset;
- expires = div64_u64(itc_diff, cyc_per_usec);
- kt = ktime_set(0, 1000 * expires);
- vcpu->arch.ht_active = 1;
- hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+ if (time_after(vcpu_now_itc, vpd->itm)) {
+ vcpu->arch.timer_check = 1;
+ return 1;
+ }
+ itc_diff = vpd->itm - vcpu_now_itc;
+ if (itc_diff < 0)
+ itc_diff = -itc_diff;
+
+ expires = div64_u64(itc_diff, cyc_per_usec);
+ kt = ktime_set(0, 1000 * expires);
+
+ down_read(&vcpu->kvm->slots_lock);
+ vcpu->arch.ht_active = 1;
+ hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
- if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
kvm_vcpu_block(vcpu);
hrtimer_cancel(p_ht);
vcpu->arch.ht_active = 0;
+ if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+ vcpu->arch.mp_state =
+ KVM_MP_STATE_RUNNABLE;
+ up_read(&vcpu->kvm->slots_lock);
+
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
return 1;
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct
kvm_vcpu *vcpu,
static const int kvm_vti_max_exit_handlers =
sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct
kvm_run *kvm_run)
again:
preempt_disable();
-
- kvm_prepare_guest_switch(vcpu);
local_irq_disable();
if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:
vcpu->guest_mode = 1;
kvm_guest_enter();
-
+ down_read(&vcpu->kvm->slots_lock);
r = vti_vcpu_run(vcpu, kvm_run);
if (r < 0) {
local_irq_enable();
@@ -634,9 +637,8 @@ again:
* But we need to prevent reordering, hence this barrier():
*/
barrier();
-
kvm_guest_exit();
-
+ up_read(&vcpu->kvm->slots_lock);
preempt_enable();
r = kvm_handle_exit(kvm_run, vcpu);
@@ -673,6 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED))
{
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu_put(vcpu);
return -EAGAIN;
}
@@ -1123,15 +1126,16 @@ static enum hrtimer_restart hlt_timer_fn(struct
hrtimer *data)
wait_queue_head_t *q;
vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+ q = &vcpu->wq;
+
if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
goto out;
- q = &vcpu->wq;
- if (waitqueue_active(q)) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(q))
wake_up_interruptible(q);
- }
+
out:
+ vcpu->arch.timer_fired = 1;
vcpu->arch.timer_check = 1;
return HRTIMER_NORESTART;
}
@@ -1700,12 +1704,14 @@ static void vcpu_kick_intr(void *info)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int ipi_pcpu = vcpu->cpu;
+ int cpu = get_cpu();
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- if (vcpu->guest_mode)
+ if (vcpu->guest_mode && cpu != ipi_pcpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu,
0);
+ put_cpu();
}
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1715,13 +1721,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8
vec, u8 trig)
if (!test_and_set_bit(vec, &vpd->irr[0])) {
vcpu->arch.irq_new_pending = 1;
- if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
- kvm_vcpu_kick(vcpu);
- else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
- }
+ kvm_vcpu_kick(vcpu);
return 1;
}
return 0;
@@ -1791,7 +1791,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return 0;
+ return vcpu->arch.timer_fired;
}
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 0c69d9e..cb7600b 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu
*vcpu)
return index;
}
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.timer_pending = 1;
+ vcpu->arch.timer_fired = 0;
+}
+
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct
kvm_run *run)
break;
case PAL_HALT_LIGHT:
{
- vcpu->arch.timer_pending = 1;
INIT_PAL_STATUS_SUCCESS(result);
+ prepare_for_halt(vcpu);
if (kvm_highest_pending_irq(vcpu) == -1)
ret = kvm_emulate_halt(vcpu);
-
}
break;
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 3417783..8008173 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
if (!(VCPU(v, itv) & (1 << 16))) {
vcpu_pend_interrupt(v, VCPU(v,
itv)
& 0xff);
- VMX(v, itc_check) = 0;
+ VMX(v, itc_check) = 0;
} else {
v->arch.timer_pending = 1;
}
--
1.5.1
-----Original Message-----
From: Avi Kivity [mailto:avi@redhat.com]
Sent: Thursday, October 16, 2008 5:01 PM
To: Zhang, Xiantao
Cc: kvm-ia64@vger.kernel.org; kvm@vger.kernel.org
Subject: Re: kvm/ia64: Fix halt emulation logic.
Zhang, Xiantao wrote:
>>
>> smp_call_function_single() will call the function on the current cpu
>> if instructed, so this change is unneeded.
>>
> Aha, thanks for pointing this out. Yes, it is unnecessary. Could you help
to
> remove it directly, or do you need me to resend?
Please resend, since I can't test (or even compile) it.
--
error compiling committee.c: too many arguments to function
[-- Attachment #2: 0001-kvm-ia64-Fix-halt-emulation-logic.patch --]
[-- Type: application/octet-stream, Size: 7346 bytes --]
From 8d827b19ba03dee74edeb5e2bcc23bed7b9f38ea Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@intel.com>
Date: Thu, 16 Oct 2008 15:58:15 +0800
Subject: [PATCH] kvm/ia64: Fix halt emulation logic.
Common halt handling logic was changed on the x86 side; this patch
fixes it for the ia64 side. Otherwise, the guest may hang once configured
with more than 2 vcpus.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
---
arch/ia64/include/asm/kvm_host.h | 3 +-
arch/ia64/kvm/kvm-ia64.c | 72 +++++++++++++++++++-------------------
arch/ia64/kvm/kvm_fw.c | 9 ++++-
arch/ia64/kvm/process.c | 2 +-
4 files changed, 46 insertions(+), 40 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 85db124..b9e3c7f 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
long itc_offset;
unsigned long itc_check;
unsigned long timer_check;
- unsigned long timer_pending;
+ unsigned int timer_pending;
+ unsigned int timer_fired;
unsigned long vrr[8];
unsigned long ibr[8];
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c0699f0..e66a3db 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+
call_data.ptc_g_data = p->u.ptc_g_data;
for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,33 +419,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
ktime_t kt;
long itc_diff;
unsigned long vcpu_now_itc;
-
unsigned long expires;
struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+ if (irqchip_in_kernel(vcpu->kvm)) {
- if (time_after(vcpu_now_itc, vpd->itm)) {
- vcpu->arch.timer_check = 1;
- return 1;
- }
- itc_diff = vpd->itm - vcpu_now_itc;
- if (itc_diff < 0)
- itc_diff = -itc_diff;
+ vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
- expires = div64_u64(itc_diff, cyc_per_usec);
- kt = ktime_set(0, 1000 * expires);
- vcpu->arch.ht_active = 1;
- hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+ if (time_after(vcpu_now_itc, vpd->itm)) {
+ vcpu->arch.timer_check = 1;
+ return 1;
+ }
+ itc_diff = vpd->itm - vcpu_now_itc;
+ if (itc_diff < 0)
+ itc_diff = -itc_diff;
+
+ expires = div64_u64(itc_diff, cyc_per_usec);
+ kt = ktime_set(0, 1000 * expires);
+
+ down_read(&vcpu->kvm->slots_lock);
+ vcpu->arch.ht_active = 1;
+ hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
- if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
kvm_vcpu_block(vcpu);
hrtimer_cancel(p_ht);
vcpu->arch.ht_active = 0;
+ if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+ vcpu->arch.mp_state =
+ KVM_MP_STATE_RUNNABLE;
+ up_read(&vcpu->kvm->slots_lock);
+
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
return 1;
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
static const int kvm_vti_max_exit_handlers =
sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
again:
preempt_disable();
-
- kvm_prepare_guest_switch(vcpu);
local_irq_disable();
if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:
vcpu->guest_mode = 1;
kvm_guest_enter();
-
+ down_read(&vcpu->kvm->slots_lock);
r = vti_vcpu_run(vcpu, kvm_run);
if (r < 0) {
local_irq_enable();
@@ -634,9 +637,8 @@ again:
* But we need to prevent reordering, hence this barrier():
*/
barrier();
-
kvm_guest_exit();
-
+ up_read(&vcpu->kvm->slots_lock);
preempt_enable();
r = kvm_handle_exit(kvm_run, vcpu);
@@ -673,6 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu_put(vcpu);
return -EAGAIN;
}
@@ -1123,15 +1126,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
wait_queue_head_t *q;
vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+ q = &vcpu->wq;
+
if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
goto out;
- q = &vcpu->wq;
- if (waitqueue_active(q)) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(q))
wake_up_interruptible(q);
- }
+
out:
+ vcpu->arch.timer_fired = 1;
vcpu->arch.timer_check = 1;
return HRTIMER_NORESTART;
}
@@ -1700,12 +1704,14 @@ static void vcpu_kick_intr(void *info)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int ipi_pcpu = vcpu->cpu;
+ int cpu = get_cpu();
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- if (vcpu->guest_mode)
+ if (vcpu->guest_mode && cpu != ipi_pcpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+ put_cpu();
}
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1715,13 +1721,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
if (!test_and_set_bit(vec, &vpd->irr[0])) {
vcpu->arch.irq_new_pending = 1;
- if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
- kvm_vcpu_kick(vcpu);
- else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
- }
+ kvm_vcpu_kick(vcpu);
return 1;
}
return 0;
@@ -1791,7 +1791,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return 0;
+ return vcpu->arch.timer_fired;
}
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 0c69d9e..cb7600b 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
return index;
}
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.timer_pending = 1;
+ vcpu->arch.timer_fired = 0;
+}
+
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
break;
case PAL_HALT_LIGHT:
{
- vcpu->arch.timer_pending = 1;
INIT_PAL_STATUS_SUCCESS(result);
+ prepare_for_halt(vcpu);
if (kvm_highest_pending_irq(vcpu) == -1)
ret = kvm_emulate_halt(vcpu);
-
}
break;
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 3417783..8008173 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
if (!(VCPU(v, itv) & (1 << 16))) {
vcpu_pend_interrupt(v, VCPU(v, itv)
& 0xff);
- VMX(v, itc_check) = 0;
+ VMX(v, itc_check) = 0;
} else {
v->arch.timer_pending = 1;
}
--
1.5.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: kvm/ia64: Fix halt emulation logic.
2008-10-16 9:19 ` Zhang, Xiantao
@ 2008-10-16 9:43 ` Avi Kivity
0 siblings, 0 replies; 9+ messages in thread
From: Avi Kivity @ 2008-10-16 9:43 UTC (permalink / raw)
To: Zhang, Xiantao; +Cc: kvm-ia64, kvm
Zhang, Xiantao wrote:
> Attached the updated one. Thanks!
> Xiantao
> From 8d827b19ba03dee74edeb5e2bcc23bed7b9f38ea Mon Sep 17 00:00:00 2001
> From: Xiantao Zhang <xiantao.zhang@intel.com>
> Date: Thu, 16 Oct 2008 15:58:15 +0800
> Subject: [PATCH] kvm/ia64: Fix halt emulation logic.
>
> Common halt handling logic was changed on the x86 side; this patch
> fixes it for the ia64 side. Otherwise, the guest may hang once configured
> with more than 2 vcpus.
>
Applied, and will queue for 2.6.28.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: kvm/ia64: Fix halt emulation logic.
2008-10-15 13:47 kvm/ia64: Fix halt emulation logic Zhang, Xiantao
2008-10-16 8:53 ` Avi Kivity
@ 2008-10-18 15:27 ` Marcelo Tosatti
2008-11-03 4:36 ` Zhang, Xiantao
1 sibling, 1 reply; 9+ messages in thread
From: Marcelo Tosatti @ 2008-10-18 15:27 UTC (permalink / raw)
To: Zhang, Xiantao; +Cc: avi, kvm-ia64, kvm
Hi Xiantao,
On Wed, Oct 15, 2008 at 09:47:24PM +0800, Zhang, Xiantao wrote:
> + expires = div64_u64(itc_diff, cyc_per_usec);
> + kt = ktime_set(0, 1000 * expires);
> +
> + down_read(&vcpu->kvm->slots_lock);
> + vcpu->arch.ht_active = 1;
> + hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
>
> - if (irqchip_in_kernel(vcpu->kvm)) {
> vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
> kvm_vcpu_block(vcpu);
> hrtimer_cancel(p_ht);
> vcpu->arch.ht_active = 0;
>
> + if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
> + if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
> + vcpu->arch.mp_state =
> + KVM_MP_STATE_RUNNABLE;
> + up_read(&vcpu->kvm->slots_lock);
You should release slots_lock when blocking via kvm_vcpu_block().
Otherwise paths that grab it for write will depend on these vcpus to
unhalt.
BTW, none of the data structures in this section of code should be
protected by slots_lock?
^ permalink raw reply [flat|nested] 9+ messages in thread
* RE: kvm/ia64: Fix halt emulation logic.
2008-10-18 15:27 ` Marcelo Tosatti
@ 2008-11-03 4:36 ` Zhang, Xiantao
0 siblings, 0 replies; 9+ messages in thread
From: Zhang, Xiantao @ 2008-11-03 4:36 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: avi, kvm-ia64@vger.kernel.org, kvm@vger.kernel.org
[-- Attachment #1: Type: text/plain, Size: 1270 bytes --]
Marcelo Tosatti wrote:
> Hi Xiantao,
>
> On Wed, Oct 15, 2008 at 09:47:24PM +0800, Zhang, Xiantao wrote:
>> + expires = div64_u64(itc_diff, cyc_per_usec);
>> + kt = ktime_set(0, 1000 * expires);
>> +
>> + down_read(&vcpu->kvm->slots_lock);
>> + vcpu->arch.ht_active = 1;
>> + hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
>>
>> - if (irqchip_in_kernel(vcpu->kvm)) {
>> vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
>> kvm_vcpu_block(vcpu);
>> hrtimer_cancel(p_ht);
>> vcpu->arch.ht_active = 0;
>>
>> + if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
>> + if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
>> + vcpu->arch.mp_state = + KVM_MP_STATE_RUNNABLE;
>> + up_read(&vcpu->kvm->slots_lock);
>
> You should release slots_lock when blocking via kvm_vcpu_block().
> Otherwise paths that grab it for write will depend on these vcpus to
> unhalt.
>
> BTW, none of the data structures in this section of code should be
> protected by slots_lock?
Hi, Marcelo
Agree, I have made a patch to remove it last week, but seems Avi hasn't commited it yet.
Avi,
Could you help commit the attached patch, even though it is just a potential issue (write locks are very rare in the current code)?
Thanks
Xiantao
[-- Attachment #2: Type: message/rfc822, Size: 7147 bytes --]
[-- Attachment #2.1.1: Type: text/plain, Size: 1284 bytes --]
>From 4858a5c47c5dce88a62a6edf427d8709f3ebda15 Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@intel.com>
Date: Thu, 23 Oct 2008 15:03:38 +0800
Subject: [PATCH] KVM: ia64: kvm halt logic doesn't need lock to protect.
Remove the lock protection from the kvm halt logic; otherwise,
when other vcpus want to acquire the lock, they have to
wait until all halted vcpus are woken up.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
---
arch/ia64/kvm/kvm-ia64.c | 2 --
1 files changed, 0 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 6b1e31b..93c7f18 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -439,7 +439,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
expires = div64_u64(itc_diff, cyc_per_usec);
kt = ktime_set(0, 1000 * expires);
- down_read(&vcpu->kvm->slots_lock);
vcpu->arch.ht_active = 1;
hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
@@ -452,7 +451,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
- up_read(&vcpu->kvm->slots_lock);
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
--
1.6.0
[-- Attachment #2.1.2: 0003-KVM-ia64-kvm-halt-logic-doesn-t-need-lock-to-prote.patch --]
[-- Type: application/octet-stream, Size: 1249 bytes --]
From 4858a5c47c5dce88a62a6edf427d8709f3ebda15 Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@intel.com>
Date: Thu, 23 Oct 2008 15:03:38 +0800
Subject: [PATCH] KVM: ia64: kvm halt logic doesn't need lock to protect.
Remove the lock protection from the kvm halt logic; otherwise,
when other vcpus want to acquire the lock, they have to
wait until all halted vcpus are woken up.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
---
arch/ia64/kvm/kvm-ia64.c | 2 --
1 files changed, 0 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 6b1e31b..93c7f18 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -439,7 +439,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
expires = div64_u64(itc_diff, cyc_per_usec);
kt = ktime_set(0, 1000 * expires);
- down_read(&vcpu->kvm->slots_lock);
vcpu->arch.ht_active = 1;
hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
@@ -452,7 +451,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
- up_read(&vcpu->kvm->slots_lock);
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
--
1.6.0
^ permalink raw reply related [flat|nested] 9+ messages in thread
end of thread, other threads:[~2008-11-03 4:36 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-10-15 13:47 kvm/ia64: Fix halt emulation logic Zhang, Xiantao
2008-10-16 8:53 ` Avi Kivity
2008-10-16 8:57 ` Zhang, Xiantao
2008-10-16 9:01 ` Avi Kivity
2008-10-16 9:19 ` Zhang, Xiantao
2008-10-16 9:43 ` Avi Kivity
2008-10-18 15:27 ` Marcelo Tosatti
2008-11-03 4:36 ` Zhang, Xiantao
-- strict thread matches above, loose matches on Subject: below --
2008-10-15 13:52 Zhang, Xiantao
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox