From mboxrd@z Thu Jan 1 00:00:00 1970 From: Paolo Bonzini Subject: Re: [PATCH v3 1/4] KVM: nVMX: Rework interception of IRQs and NMIs Date: Fri, 07 Mar 2014 20:48:00 +0100 Message-ID: <531A2270.6010406@redhat.com> References: Mime-Version: 1.0 Content-Type: text/plain; charset=ISO-8859-1; format=flowed Content-Transfer-Encoding: 7bit Cc: kvm To: Jan Kiszka , Gleb Natapov , Marcelo Tosatti Return-path: Received: from mail-ea0-f174.google.com ([209.85.215.174]:46102 "EHLO mail-ea0-f174.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752342AbaCGTsG (ORCPT ); Fri, 7 Mar 2014 14:48:06 -0500 Received: by mail-ea0-f174.google.com with SMTP id f15so2534153eak.33 for ; Fri, 07 Mar 2014 11:48:04 -0800 (PST) In-Reply-To: Sender: kvm-owner@vger.kernel.org List-ID: Il 07/03/2014 20:03, Jan Kiszka ha scritto: > @@ -4631,22 +4631,8 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) > > static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) > { > - if (is_guest_mode(vcpu)) { > - if (to_vmx(vcpu)->nested.nested_run_pending) > - return 0; > - if (nested_exit_on_nmi(vcpu)) { > - nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, > - NMI_VECTOR | INTR_TYPE_NMI_INTR | > - INTR_INFO_VALID_MASK, 0); > - /* > - * The NMI-triggered VM exit counts as injection: > - * clear this one and block further NMIs. 
> - */ > - vcpu->arch.nmi_pending = 0; > - vmx_set_nmi_mask(vcpu, true); > - return 0; > - } > - } > + if (to_vmx(vcpu)->nested.nested_run_pending) > + return 0; > > if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) > return 0; > @@ -4658,19 +4644,8 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) > > static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) > { > - if (is_guest_mode(vcpu)) { > - if (to_vmx(vcpu)->nested.nested_run_pending) > - return 0; > - if (nested_exit_on_intr(vcpu)) { > - nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, > - 0, 0); > - /* > - * fall through to normal code, but now in L1, not L2 > - */ > - } > - } > - > - return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && > + return (!to_vmx(vcpu)->nested.nested_run_pending && > + vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && > !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & > (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));

The checks on nested_run_pending are not needed anymore and can be replaced with a WARN_ON. Otherwise,

Reviewed-by: Paolo Bonzini

Paolo