* [PATCH v2] KVM: Fix simultaneous NMIs
@ 2011-09-20 10:43 Avi Kivity
2011-09-20 13:25 ` Marcelo Tosatti
0 siblings, 1 reply; 10+ messages in thread
From: Avi Kivity @ 2011-09-20 10:43 UTC (permalink / raw)
To: Marcelo Tosatti, kvm, Jan Kiszka
If simultaneous NMIs happen, we're supposed to queue the second
and next (collapsing them), but currently we sometimes collapse
the second into the first.
Fix by using a counter for pending NMIs instead of a bool; since
the counter limit depends on whether the processor is currently
in an NMI handler, which can only be checked in vcpu context
(via the NMI mask), we add a new KVM_REQ_NMI to request recalculation
of the counter.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 5 ++-
arch/x86/kvm/x86.c | 48 +++++++++++++++++++++++++-------------
include/linux/kvm_host.h | 1 +
3 files changed, 35 insertions(+), 19 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ab4241..ab62711 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -413,8 +413,9 @@ struct kvm_vcpu_arch {
u32 tsc_catchup_mult;
s8 tsc_catchup_shift;
- bool nmi_pending;
- bool nmi_injected;
+ atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
+ unsigned nmi_pending; /* NMI queued after currently running handler */
+ bool nmi_injected; /* Trying to inject an NMI this entry */
struct mtrr_state_type mtrr_state;
u32 pat;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6b37f18..d51e407 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -83,6 +83,7 @@
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
+static void process_nmi(struct kvm_vcpu *vcpu);
struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -359,8 +360,8 @@ void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- vcpu->arch.nmi_pending = 1;
+ atomic_inc(&vcpu->arch.nmi_queued);
+ kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);
@@ -2827,6 +2828,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
+ process_nmi(vcpu);
events->exception.injected =
vcpu->arch.exception.pending &&
!kvm_exception_is_soft(vcpu->arch.exception.nr);
@@ -2844,7 +2846,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
events->nmi.injected = vcpu->arch.nmi_injected;
- events->nmi.pending = vcpu->arch.nmi_pending;
+ events->nmi.pending = vcpu->arch.nmi_pending != 0;
events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
events->nmi.pad = 0;
@@ -2864,6 +2866,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
| KVM_VCPUEVENT_VALID_SHADOW))
return -EINVAL;
+ process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;
vcpu->arch.exception.has_error_code = events->exception.has_error_code;
@@ -4763,7 +4766,7 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
kvm_set_rflags(vcpu, ctxt->eflags);
if (irq == NMI_VECTOR)
- vcpu->arch.nmi_pending = false;
+ vcpu->arch.nmi_pending = 0;
else
vcpu->arch.interrupt.pending = false;
@@ -5572,7 +5575,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
/* try to inject new event if pending */
if (vcpu->arch.nmi_pending) {
if (kvm_x86_ops->nmi_allowed(vcpu)) {
- vcpu->arch.nmi_pending = false;
+ --vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
}
@@ -5604,10 +5607,26 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
}
}
+static void process_nmi(struct kvm_vcpu *vcpu)
+{
+ unsigned limit = 2;
+
+ /*
+ * x86 is limited to one NMI running, and one NMI pending after it.
+ * If an NMI is already in progress, limit further NMIs to just one.
+ * Otherwise, allow two (and we'll inject the first one immediately).
+ */
+ if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
+ limit = 1;
+
+ vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
+ vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
+
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
- bool nmi_pending;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
@@ -5647,6 +5666,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
record_steal_time(vcpu);
+ if (kvm_check_request(KVM_REQ_NMI, vcpu))
+ process_nmi(vcpu);
}
@@ -5654,19 +5675,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (unlikely(r))
goto out;
- /*
- * An NMI can be injected between local nmi_pending read and
- * vcpu->arch.nmi_pending read inside inject_pending_event().
- * But in that case, KVM_REQ_EVENT will be set, which makes
- * the race described above benign.
- */
- nmi_pending = ACCESS_ONCE(vcpu->arch.nmi_pending);
-
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
inject_pending_event(vcpu);
/* enable NMI/IRQ window open exits if needed */
- if (nmi_pending)
+ if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
@@ -6374,7 +6387,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
- vcpu->arch.nmi_pending = false;
+ atomic_set(&vcpu->arch.nmi_queued, 0);
+ vcpu->arch.nmi_pending = 0;
vcpu->arch.nmi_injected = false;
vcpu->arch.switch_db_regs = 0;
@@ -6649,7 +6663,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
!vcpu->arch.apf.halted)
|| !list_empty_careful(&vcpu->async_pf.done)
|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
- || vcpu->arch.nmi_pending ||
+ || atomic_read(&vcpu->arch.nmi_queued) ||
(kvm_arch_interrupt_allowed(vcpu) &&
kvm_cpu_has_interrupt(vcpu));
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2a414f6..d526231 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -49,6 +49,7 @@
#define KVM_REQ_EVENT 11
#define KVM_REQ_APF_HALT 12
#define KVM_REQ_STEAL_UPDATE 13
+#define KVM_REQ_NMI 14
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
--
1.7.6.3
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-20 10:43 [PATCH v2] KVM: Fix simultaneous NMIs Avi Kivity
@ 2011-09-20 13:25 ` Marcelo Tosatti
2011-09-20 13:56 ` Avi Kivity
0 siblings, 1 reply; 10+ messages in thread
From: Marcelo Tosatti @ 2011-09-20 13:25 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, Jan Kiszka
On Tue, Sep 20, 2011 at 01:43:14PM +0300, Avi Kivity wrote:
> If simultaneous NMIs happen, we're supposed to queue the second
> and next (collapsing them), but currently we sometimes collapse
> the second into the first.
>
> Fix by using a counter for pending NMIs instead of a bool; since
> the counter limit depends on whether the processor is currently
> in an NMI handler, which can only be checked in vcpu context
> (via the NMI mask), we add a new KVM_REQ_NMI to request recalculation
> of the counter.
>
> Signed-off-by: Avi Kivity <avi@redhat.com>
> ---
> arch/x86/include/asm/kvm_host.h | 5 ++-
> arch/x86/kvm/x86.c | 48 +++++++++++++++++++++++++-------------
> include/linux/kvm_host.h | 1 +
> 3 files changed, 35 insertions(+), 19 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 6ab4241..ab62711 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -413,8 +413,9 @@ struct kvm_vcpu_arch {
> u32 tsc_catchup_mult;
> s8 tsc_catchup_shift;
>
> - bool nmi_pending;
> - bool nmi_injected;
> + atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
> + unsigned nmi_pending; /* NMI queued after currently running handler */
> + bool nmi_injected; /* Trying to inject an NMI this entry */
>
> struct mtrr_state_type mtrr_state;
> u32 pat;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 6b37f18..d51e407 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -83,6 +83,7 @@
> static void update_cr8_intercept(struct kvm_vcpu *vcpu);
> static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
> struct kvm_cpuid_entry2 __user *entries);
> +static void process_nmi(struct kvm_vcpu *vcpu);
>
> struct kvm_x86_ops *kvm_x86_ops;
> EXPORT_SYMBOL_GPL(kvm_x86_ops);
> @@ -359,8 +360,8 @@ void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
>
> void kvm_inject_nmi(struct kvm_vcpu *vcpu)
> {
> - kvm_make_request(KVM_REQ_EVENT, vcpu);
> - vcpu->arch.nmi_pending = 1;
> + atomic_inc(&vcpu->arch.nmi_queued);
> + kvm_make_request(KVM_REQ_NMI, vcpu);
> }
> EXPORT_SYMBOL_GPL(kvm_inject_nmi);
>
> @@ -2827,6 +2828,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
> static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> struct kvm_vcpu_events *events)
> {
> + process_nmi(vcpu);
> events->exception.injected =
> vcpu->arch.exception.pending &&
> !kvm_exception_is_soft(vcpu->arch.exception.nr);
> @@ -2844,7 +2846,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
>
> events->nmi.injected = vcpu->arch.nmi_injected;
> - events->nmi.pending = vcpu->arch.nmi_pending;
> + events->nmi.pending = vcpu->arch.nmi_pending != 0;
> events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
> events->nmi.pad = 0;
nmi_queued should also be saved and restored. Not sure if its necessary
though.
Should at least reset nmi_queued somewhere (set_vcpu_events?).
> @@ -2864,6 +2866,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
> | KVM_VCPUEVENT_VALID_SHADOW))
> return -EINVAL;
>
> + process_nmi(vcpu);
This should be after nmi fields are set, not before?
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-20 13:25 ` Marcelo Tosatti
@ 2011-09-20 13:56 ` Avi Kivity
2011-09-20 14:59 ` Marcelo Tosatti
0 siblings, 1 reply; 10+ messages in thread
From: Avi Kivity @ 2011-09-20 13:56 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm, Jan Kiszka
On 09/20/2011 04:25 PM, Marcelo Tosatti wrote:
> >
> > @@ -2827,6 +2828,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
> > static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> > struct kvm_vcpu_events *events)
> > {
> > + process_nmi(vcpu);
> > events->exception.injected =
> > vcpu->arch.exception.pending&&
> > !kvm_exception_is_soft(vcpu->arch.exception.nr);
> > @@ -2844,7 +2846,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> > KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
> >
> > events->nmi.injected = vcpu->arch.nmi_injected;
> > - events->nmi.pending = vcpu->arch.nmi_pending;
> > + events->nmi.pending = vcpu->arch.nmi_pending != 0;
> > events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
> > events->nmi.pad = 0;
>
> nmi_queued should also be saved and restored. Not sure if its necessary
> though.
>
> Should at least reset nmi_queued somewhere (set_vcpu_events?).
Did you miss the call to process_nmi()?
We do have a small issue. If we exit during NMI-blocked-by-STI and
nmi_pending == 2, then we lose the second interrupt. Should rarely
happen, since external interrupts never exit in that condition, but it's
a wart.
>
> > @@ -2864,6 +2866,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
> > | KVM_VCPUEVENT_VALID_SHADOW))
> > return -EINVAL;
> >
> > + process_nmi(vcpu);
>
> This should be after nmi fields are set, not before?
>
It's actually to clear queued NMIs in case we set nmi_pending (which
should never happen unless the machine is completely quiet, since it's
asynchronous to the vcpu; same as the IRR).
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-20 13:56 ` Avi Kivity
@ 2011-09-20 14:59 ` Marcelo Tosatti
2011-09-20 16:24 ` Avi Kivity
0 siblings, 1 reply; 10+ messages in thread
From: Marcelo Tosatti @ 2011-09-20 14:59 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, Jan Kiszka
On Tue, Sep 20, 2011 at 04:56:02PM +0300, Avi Kivity wrote:
> On 09/20/2011 04:25 PM, Marcelo Tosatti wrote:
> >>
> >> @@ -2827,6 +2828,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
> >> static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> >> struct kvm_vcpu_events *events)
> >> {
> >> + process_nmi(vcpu);
> >> events->exception.injected =
> >> vcpu->arch.exception.pending&&
> >> !kvm_exception_is_soft(vcpu->arch.exception.nr);
> >> @@ -2844,7 +2846,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> >> KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
> >>
> >> events->nmi.injected = vcpu->arch.nmi_injected;
> >> - events->nmi.pending = vcpu->arch.nmi_pending;
> >> + events->nmi.pending = vcpu->arch.nmi_pending != 0;
> >> events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
> >> events->nmi.pad = 0;
> >
> >nmi_queued should also be saved and restored. Not sure if its necessary
> >though.
> >
> >Should at least reset nmi_queued somewhere (set_vcpu_events?).
>
> Did you miss the call to process_nmi()?
It transfers nmi_queued to nmi_pending with capping. What i mean is that
upon system reset, nmi_queued (which refers to pre-reset system state)
should be zeroed.
> We do have a small issue. If we exit during NMI-blocked-by-STI and
> nmi_pending == 2, then we lose the second interrupt. Should rarely
> happen, since external interrupts never exit in that condition, but
> it's a wart.
And the above system reset case, you should be able to handle it by
saving/restoring nmi_queued (so that QEMU can zero it in vcpu_reset).
> >
> >> @@ -2864,6 +2866,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
> >> | KVM_VCPUEVENT_VALID_SHADOW))
> >> return -EINVAL;
> >>
> >> + process_nmi(vcpu);
> >
> >This should be after nmi fields are set, not before?
> >
>
> It's actually to clear queued NMIs in case we set nmi_pending (which
> should never happen unless the machine is completely quiet, since
> it's asynchronous to the vcpu; same as the IRR).
OK.
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-20 14:59 ` Marcelo Tosatti
@ 2011-09-20 16:24 ` Avi Kivity
2011-09-20 16:30 ` Marcelo Tosatti
0 siblings, 1 reply; 10+ messages in thread
From: Avi Kivity @ 2011-09-20 16:24 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm, Jan Kiszka
On 09/20/2011 05:59 PM, Marcelo Tosatti wrote:
> On Tue, Sep 20, 2011 at 04:56:02PM +0300, Avi Kivity wrote:
> > On 09/20/2011 04:25 PM, Marcelo Tosatti wrote:
> > >>
> > >> @@ -2827,6 +2828,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
> > >> static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> > >> struct kvm_vcpu_events *events)
> > >> {
> > >> + process_nmi(vcpu);
> > >> events->exception.injected =
> > >> vcpu->arch.exception.pending&&
> > >> !kvm_exception_is_soft(vcpu->arch.exception.nr);
> > >> @@ -2844,7 +2846,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> > >> KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
> > >>
> > >> events->nmi.injected = vcpu->arch.nmi_injected;
> > >> - events->nmi.pending = vcpu->arch.nmi_pending;
> > >> + events->nmi.pending = vcpu->arch.nmi_pending != 0;
> > >> events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
> > >> events->nmi.pad = 0;
> > >
> > >nmi_queued should also be saved and restored. Not sure if its necessary
> > >though.
> > >
> > >Should at least reset nmi_queued somewhere (set_vcpu_events?).
> >
> > Did you miss the call to process_nmi()?
>
> It transfers nmi_queued to nmi_pending with capping. What i mean is that
> upon system reset, nmi_queued (which refers to pre-reset system state)
> should be zeroed.
That is get_events(); for set_events(), process_nmi() does zero
nmi_queued (and then we overwrite nmi_pending).
>
> > We do have a small issue. If we exit during NMI-blocked-by-STI and
> > nmi_pending == 2, then we lose the second interrupt. Should rarely
> > happen, since external interrupts never exit in that condition, but
> > it's a wart.
>
> And the above system reset case, you should be able to handle it by
> saving/restoring nmi_queued (so that QEMU can zero it in vcpu_reset).
We could just add a KVM_CAP (and flag) that extends nmi_pending from a
bool to a counter.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-20 16:24 ` Avi Kivity
@ 2011-09-20 16:30 ` Marcelo Tosatti
2011-09-20 17:28 ` Avi Kivity
0 siblings, 1 reply; 10+ messages in thread
From: Marcelo Tosatti @ 2011-09-20 16:30 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, Jan Kiszka
On Tue, Sep 20, 2011 at 07:24:01PM +0300, Avi Kivity wrote:
> On 09/20/2011 05:59 PM, Marcelo Tosatti wrote:
> >On Tue, Sep 20, 2011 at 04:56:02PM +0300, Avi Kivity wrote:
> >> On 09/20/2011 04:25 PM, Marcelo Tosatti wrote:
> >> >>
> >> >> @@ -2827,6 +2828,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
> >> >> static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> >> >> struct kvm_vcpu_events *events)
> >> >> {
> >> >> + process_nmi(vcpu);
> >> >> events->exception.injected =
> >> >> vcpu->arch.exception.pending&&
> >> >> !kvm_exception_is_soft(vcpu->arch.exception.nr);
> >> >> @@ -2844,7 +2846,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
> >> >> KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
> >> >>
> >> >> events->nmi.injected = vcpu->arch.nmi_injected;
> >> >> - events->nmi.pending = vcpu->arch.nmi_pending;
> >> >> + events->nmi.pending = vcpu->arch.nmi_pending != 0;
> >> >> events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
> >> >> events->nmi.pad = 0;
> >> >
> >> >nmi_queued should also be saved and restored. Not sure if its necessary
> >> >though.
> >> >
> >> >Should at least reset nmi_queued somewhere (set_vcpu_events?).
> >>
> >> Did you miss the call to process_nmi()?
> >
> >It transfers nmi_queued to nmi_pending with capping. What i mean is that
> >upon system reset, nmi_queued (which refers to pre-reset system state)
> >should be zeroed.
>
> That is get_events(); for set_events(), process_nmi() does zero
> nmi_queued (and then we overwrite nmi_pending).
Right.
> >
> >> We do have a small issue. If we exit during NMI-blocked-by-STI and
> >> nmi_pending == 2, then we lose the second interrupt. Should rarely
> >> happen, since external interrupts never exit in that condition, but
> >> it's a wart.
> >
> >And the above system reset case, you should be able to handle it by
> >saving/restoring nmi_queued (so that QEMU can zero it in vcpu_reset).
>
> We could just add a KVM_CAP (and flag) that extends nmi_pending from
> a bool to a counter.
Or just add a new field to the pad.
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-20 16:30 ` Marcelo Tosatti
@ 2011-09-20 17:28 ` Avi Kivity
2011-09-21 8:46 ` Avi Kivity
0 siblings, 1 reply; 10+ messages in thread
From: Avi Kivity @ 2011-09-20 17:28 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm, Jan Kiszka
On 09/20/2011 07:30 PM, Marcelo Tosatti wrote:
> > >
> > >> We do have a small issue. If we exit during NMI-blocked-by-STI and
> > >> nmi_pending == 2, then we lose the second interrupt. Should rarely
> > >> happen, since external interrupts never exit in that condition, but
> > >> it's a wart.
> > >
> > >And the above system reset case, you should be able to handle it by
> > >saving/restoring nmi_queued (so that QEMU can zero it in vcpu_reset).
> >
> > We could just add a KVM_CAP (and flag) that extends nmi_pending from
> > a bool to a counter.
>
> Or just add a new field to the pad.
>
Okay; I'll address this in a follow-on patch (my preference is making
nmi_pending a counter).
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-20 17:28 ` Avi Kivity
@ 2011-09-21 8:46 ` Avi Kivity
2011-09-21 16:44 ` Marcelo Tosatti
0 siblings, 1 reply; 10+ messages in thread
From: Avi Kivity @ 2011-09-21 8:46 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm, Jan Kiszka
On 09/20/2011 08:28 PM, Avi Kivity wrote:
> On 09/20/2011 07:30 PM, Marcelo Tosatti wrote:
>> > >
>> > >> We do have a small issue. If we exit during
>> NMI-blocked-by-STI and
>> > >> nmi_pending == 2, then we lose the second interrupt. Should
>> rarely
>> > >> happen, since external interrupts never exit in that
>> condition, but
>> > >> it's a wart.
>> > >
>> > >And the above system reset case, you should be able to handle it by
>> > >saving/restoring nmi_queued (so that QEMU can zero it in vcpu_reset).
>> >
>> > We could just add a KVM_CAP (and flag) that extends nmi_pending from
>> > a bool to a counter.
>>
>> Or just add a new field to the pad.
>>
>
> Okay; I'll address this in a follow-on patch (my preference is making
> nmi_pending a counter).
>
Yet another way to do this is to redefine .injected (just in the API) to
mean: inject immediately, unless blocked by interrupt shadow; in this
case inject in the next instruction. No KVM_CAP or anything.
The drawback is that if we hit the corner case of two NMIs queued and
held back by interrupt shadow, an older kvm live migration target will
not run the guest (exit with invalid state). The advantage is that no
user space or API changes are necessary.
Given that to get into this corner you need an NMI intensive load AND a
sti; blah pair that spans two pages AND have the second page unavailable
when those NMIs hit, I think it's better to avoid the API change. Opinions?
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-21 8:46 ` Avi Kivity
@ 2011-09-21 16:44 ` Marcelo Tosatti
2011-09-23 11:54 ` Marcelo Tosatti
0 siblings, 1 reply; 10+ messages in thread
From: Marcelo Tosatti @ 2011-09-21 16:44 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, Jan Kiszka
On Wed, Sep 21, 2011 at 11:46:03AM +0300, Avi Kivity wrote:
> On 09/20/2011 08:28 PM, Avi Kivity wrote:
> >On 09/20/2011 07:30 PM, Marcelo Tosatti wrote:
> >>> >
> >>> >> We do have a small issue. If we exit during
> >>NMI-blocked-by-STI and
> >>> >> nmi_pending == 2, then we lose the second interrupt.
> >>Should rarely
> >>> >> happen, since external interrupts never exit in that
> >>condition, but
> >>> >> it's a wart.
Actually, exits in the window between
- the increase of nmi_queued
and
- the transfer to nmi_pending/nmi_injected
lose all nmi_queued values, no?
> >>> >
> >>> >And the above system reset case, you should be able to handle it by
> >>> >saving/restoring nmi_queued (so that QEMU can zero it in vcpu_reset).
> >>>
> >>> We could just add a KVM_CAP (and flag) that extends nmi_pending from
> >>> a bool to a counter.
> >>
> >>Or just add a new field to the pad.
> >>
> >
> >Okay; I'll address this in a follow-on patch (my preference is
> >making nmi_pending a counter).
> >
>
> Yet another way to do this is to redefine .injected (just in the
> API) to mean: inject immediately, unless blocked by interrupt
> shadow; in this case inject in the next instruction. No KVM_CAP or
> anything.
>
> The drawback is that if we hit the corner case of two NMIs queued
> and held back by interrupt shadow, an older kvm live migration
> target will not run the guest (exit with invalid state). The
> advantage is that no user space or API changes are necessary.
>
> Given that to get into this corner you need an NMI intensive load
> AND a sti; blah pair that spans two pages AND have the second page
> unavailable when those NMIs hit, I think it's better to avoid the
> API change. Opinions?
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH v2] KVM: Fix simultaneous NMIs
2011-09-21 16:44 ` Marcelo Tosatti
@ 2011-09-23 11:54 ` Marcelo Tosatti
0 siblings, 0 replies; 10+ messages in thread
From: Marcelo Tosatti @ 2011-09-23 11:54 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, Jan Kiszka
On Wed, Sep 21, 2011 at 01:44:49PM -0300, Marcelo Tosatti wrote:
> On Wed, Sep 21, 2011 at 11:46:03AM +0300, Avi Kivity wrote:
> > On 09/20/2011 08:28 PM, Avi Kivity wrote:
> > >On 09/20/2011 07:30 PM, Marcelo Tosatti wrote:
> > >>> >
> > >>> >> We do have a small issue. If we exit during
> > >>NMI-blocked-by-STI and
> > >>> >> nmi_pending == 2, then we lose the second interrupt.
> > >>Should rarely
> > >>> >> happen, since external interrupts never exit in that
> > >>condition, but
> > >>> >> it's a wart.
>
> Actually exits in the window between
>
> - increase of nmi_queued
> and
> - transfer to nmi_pending/nmi_injected
>
> Lose all nmi_queued values, no?
Applied the patch, please fix save/restore later, thanks.
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2011-09-23 12:38 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-09-20 10:43 [PATCH v2] KVM: Fix simultaneous NMIs Avi Kivity
2011-09-20 13:25 ` Marcelo Tosatti
2011-09-20 13:56 ` Avi Kivity
2011-09-20 14:59 ` Marcelo Tosatti
2011-09-20 16:24 ` Avi Kivity
2011-09-20 16:30 ` Marcelo Tosatti
2011-09-20 17:28 ` Avi Kivity
2011-09-21 8:46 ` Avi Kivity
2011-09-21 16:44 ` Marcelo Tosatti
2011-09-23 11:54 ` Marcelo Tosatti
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox