* [PATCH] KVM: VMX: enable acknowledge interrupt on vmexit
@ 2013-01-16 9:00 Yang Zhang
2013-01-16 15:42 ` Gleb Natapov
0 siblings, 1 reply; 5+ messages in thread
From: Yang Zhang @ 2013-01-16 9:00 UTC (permalink / raw)
To: kvm; +Cc: gleb, haitao.shan, mtosatti, xiantao.zhang, Yang Zhang
From: Yang Zhang <yang.z.zhang@Intel.com>
The "acknowledge interrupt on exit" feature controls processor behavior
for external interrupt acknowledgement. When this control is set, the
processor acknowledges the interrupt controller to acquire the
interrupt vector on VM exit.
This feature is required by Posted Interrupt. It will be turned on only
when posted interrupt is enabled.
Refer to Intel SDM volume 3, chapter 33.2.
Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
---
arch/x86/kvm/vmx.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++--
1 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index dd2a85c..d1ed9ae 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2565,7 +2565,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
#ifdef CONFIG_X86_64
min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
- opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
+ opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT | VM_EXIT_ACK_INTR_ON_EXIT;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
&_vmexit_control) < 0)
return -EIO;
@@ -3926,7 +3926,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
- vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl & ~VM_EXIT_ACK_INTR_ON_EXIT);
/* 22.2.1, 20.8.1 */
vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
@@ -6096,6 +6096,52 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
}
}
+
+static noinline void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
+{
+ u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_EXT_INTR &&
+ (exit_intr_info & INTR_INFO_VALID_MASK) ) {
+ unsigned int vector;
+ unsigned long entry;
+ struct desc_ptr dt;
+ gate_desc *desc;
+
+ native_store_idt(&dt);
+
+ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+ desc = (void *)dt.address + vector * 16;
+
+ entry = gate_offset(*desc);
+ asm(
+ "mov %0, %%" _ASM_DX "\n\t"
+ "mov %%" _ASM_SP ", %%" _ASM_BX "\n\t"
+#ifdef CONFIG_X86_64
+ "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
+#endif
+ "mov %%ss, %%" _ASM_AX "\n\t"
+ "push %%" _ASM_AX "\n\t"
+ "push %%" _ASM_BX "\n\t"
+ "pushf\n\t"
+ "mov %%cs, %%" _ASM_AX "\n\t"
+ "push %%" _ASM_AX "\n\t"
+ "push intr_return\n\t"
+ "jmp *%% " _ASM_DX "\n\t"
+ ".pushsection .rodata \n\t"
+ ".global intr_return \n\t"
+ "intr_return: " _ASM_PTR " 1b \n\t"
+ ".popsection\n\t"
+ : :"m"(entry) :
+#ifdef CONFIG_X86_64
+ "rax", "rbx", "rdx"
+#else
+ "eax", "ebx", "edx"
+#endif
+ );
+ }
+}
+
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
@@ -6431,6 +6477,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_complete_atomic_exit(vmx);
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
+ vmx_handle_external_intr(vcpu);
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
--
1.7.1
^ permalink raw reply related [flat|nested] 5+ messages in thread* Re: [PATCH] KVM: VMX: enable acknowledge interrupt on vmexit
2013-01-16 9:00 [PATCH] KVM: VMX: enable acknowledge interrupt on vmexit Yang Zhang
@ 2013-01-16 15:42 ` Gleb Natapov
2013-01-17 1:36 ` Zhang, Yang Z
0 siblings, 1 reply; 5+ messages in thread
From: Gleb Natapov @ 2013-01-16 15:42 UTC (permalink / raw)
To: Yang Zhang; +Cc: kvm, haitao.shan, mtosatti, xiantao.zhang
On Wed, Jan 16, 2013 at 05:00:48PM +0800, Yang Zhang wrote:
> From: Yang Zhang <yang.z.zhang@Intel.com>
>
> The "acknowledge interrupt on exit" feature controls processor behavior
> for external interrupt acknowledgement. When this control is set, the
> processor acknowledges the interrupt controller to acquire the
> interrupt vector on VM exit.
>
> This feature is required by Posted Interrupt. It will be turned on only
> when posted interrupt is enabled.
>
Why? Always enable it. It is faster than current approach.
> Refer to Intel SDM volume 3, chapter 33.2.
>
Please CC H. Peter Anvin <hpa@linux.intel.com> on the next version.
> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
> ---
> arch/x86/kvm/vmx.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++--
> 1 files changed, 49 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index dd2a85c..d1ed9ae 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2565,7 +2565,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
> #ifdef CONFIG_X86_64
> min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
> #endif
> - opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
> + opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT | VM_EXIT_ACK_INTR_ON_EXIT;
> if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
> &_vmexit_control) < 0)
> return -EIO;
> @@ -3926,7 +3926,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
> ++vmx->nmsrs;
> }
>
> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
> + vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl & ~VM_EXIT_ACK_INTR_ON_EXIT);
>
> /* 22.2.1, 20.8.1 */
> vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
> @@ -6096,6 +6096,52 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
> }
> }
>
> +
> +static noinline void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
> +{
> + u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
> +
> + if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_EXT_INTR &&
> + (exit_intr_info & INTR_INFO_VALID_MASK) ) {
if (exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)
== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR))
> + unsigned int vector;
> + unsigned long entry;
> + struct desc_ptr dt;
> + gate_desc *desc;
> +
> + native_store_idt(&dt);
This does not change. Store it in vcpu during vcpu creation instead of
reading it each time.
> +
> + vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
> + desc = (void *)dt.address + vector * 16;
> +
> + entry = gate_offset(*desc);
> + asm(
> + "mov %0, %%" _ASM_DX "\n\t"
> + "mov %%" _ASM_SP ", %%" _ASM_BX "\n\t"
> +#ifdef CONFIG_X86_64
> + "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
> +#endif
> + "mov %%ss, %%" _ASM_AX "\n\t"
> + "push %%" _ASM_AX "\n\t"
> + "push %%" _ASM_BX "\n\t"
For 32bit you do not need to save ss:esp.
> + "pushf\n\t"
> + "mov %%cs, %%" _ASM_AX "\n\t"
> + "push %%" _ASM_AX "\n\t"
> + "push intr_return\n\t"
> + "jmp *%% " _ASM_DX "\n\t"
> + ".pushsection .rodata \n\t"
> + ".global intr_return \n\t"
> + "intr_return: " _ASM_PTR " 1b \n\t"
> + ".popsection\n\t"
> + : :"m"(entry) :
> +#ifdef CONFIG_X86_64
> + "rax", "rbx", "rdx"
> +#else
> + "eax", "ebx", "edx"
> +#endif
> + );
> + }
> +}
> +
> static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
> {
> u32 exit_intr_info;
> @@ -6431,6 +6477,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
> vmx_complete_atomic_exit(vmx);
> vmx_recover_nmi_blocking(vmx);
> vmx_complete_interrupts(vmx);
> + vmx_handle_external_intr(vcpu);
This should be done just before enabling interrupts, otherwise we are
taking an interrupt before vmexit is fully complete.
--
Gleb.
^ permalink raw reply [flat|nested] 5+ messages in thread* RE: [PATCH] KVM: VMX: enable acknowledge interrupt on vmexit
2013-01-16 15:42 ` Gleb Natapov
@ 2013-01-17 1:36 ` Zhang, Yang Z
2013-01-17 6:27 ` Gleb Natapov
0 siblings, 1 reply; 5+ messages in thread
From: Zhang, Yang Z @ 2013-01-17 1:36 UTC (permalink / raw)
To: Gleb Natapov
Cc: kvm@vger.kernel.org, Shan, Haitao, mtosatti@redhat.com,
Zhang, Xiantao
Gleb Natapov wrote on 2013-01-16:
> On Wed, Jan 16, 2013 at 05:00:48PM +0800, Yang Zhang wrote:
>> From: Yang Zhang <yang.z.zhang@Intel.com>
>>
>> The "acknowledge interrupt on exit" feature controls processor behavior
>> for external interrupt acknowledgement. When this control is set, the
>> processor acknowledges the interrupt controller to acquire the
>> interrupt vector on VM exit.
>>
>> This feature is required by Posted Interrupt. It will be turned on only
>> when posted interrupt is enabled.
>>
> Why? Always enable it. It is faster than current approach.
I don't think it will faster. It should be same.
>> Refer to Intel SDM volume 3, chapter 33.2.
>>
> Please CC H. Peter Anvin <hpa@linux.intel.com> on the next version.
>
>> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
>> ---
>> arch/x86/kvm/vmx.c | 51
>> +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 files changed,
>> 49 insertions(+), 2 deletions(-)
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index dd2a85c..d1ed9ae 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -2565,7 +2565,7 @@ static __init int setup_vmcs_config(struct
> vmcs_config *vmcs_conf)
>> #ifdef CONFIG_X86_64
>> min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
>> #endif
>> - opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
>> + opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
> VM_EXIT_ACK_INTR_ON_EXIT;
>> if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
>> &_vmexit_control) < 0) return -EIO; @@ -3926,7 +3926,7 @@ static
>> int vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; }
>> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
>> + vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl &
>> ~VM_EXIT_ACK_INTR_ON_EXIT);
>>
>> /* 22.2.1, 20.8.1 */
>> vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
>> @@ -6096,6 +6096,52 @@ static void vmx_complete_atomic_exit(struct
> vcpu_vmx *vmx)
>> }
>> }
>> + +static noinline void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
>> +{ + u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + + if
>> ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_EXT_INTR &&
>> + (exit_intr_info & INTR_INFO_VALID_MASK) ) {
> if (exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)
> == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR))
>
>> + unsigned int vector;
>> + unsigned long entry;
>> + struct desc_ptr dt;
>> + gate_desc *desc;
>> +
>> + native_store_idt(&dt);
> This does not change. Store it in vcpu during vcpu creation instead of
> reading it each time.
Right. It will reduce the cost.
>> +
>> + vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
>> + desc = (void *)dt.address + vector * 16;
>> +
>> + entry = gate_offset(*desc);
>> + asm(
>> + "mov %0, %%" _ASM_DX "\n\t"
>> + "mov %%" _ASM_SP ", %%" _ASM_BX "\n\t"
>> +#ifdef CONFIG_X86_64
>> + "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
>> +#endif
>> + "mov %%ss, %%" _ASM_AX "\n\t"
>> + "push %%" _ASM_AX "\n\t"
>> + "push %%" _ASM_BX "\n\t"
> For 32bit you do not need to save ss:esp.
You are right. No need to save it for 32bit.
>> + "pushf\n\t"
>> + "mov %%cs, %%" _ASM_AX "\n\t"
>> + "push %%" _ASM_AX "\n\t"
>> + "push intr_return\n\t"
>> + "jmp *%% " _ASM_DX "\n\t"
>> + ".pushsection .rodata \n\t"
>> + ".global intr_return \n\t"
>> + "intr_return: " _ASM_PTR " 1b \n\t"
>> + ".popsection\n\t"
>> + : :"m"(entry) :
>> +#ifdef CONFIG_X86_64
>> + "rax", "rbx", "rdx"
>> +#else
>> + "eax", "ebx", "edx"
>> +#endif
>> + );
>> + }
>> +}
>> +
>> static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
>> {
>> u32 exit_intr_info;
>> @@ -6431,6 +6477,7 @@ static void __noclone vmx_vcpu_run(struct
> kvm_vcpu *vcpu)
>> vmx_complete_atomic_exit(vmx);
>> vmx_recover_nmi_blocking(vmx);
>> vmx_complete_interrupts(vmx);
>> + vmx_handle_external_intr(vcpu);
> This should be done just before enabling interrupts, otherwise we are
> taking an interrupt before vmexit is fully complete.
How about to put it just after set vcpu->mode to OUTSIDE_GUEST_MODE? At that point, the vmexit is considered as fully complete.
Best regards,
Yang
^ permalink raw reply [flat|nested] 5+ messages in thread* Re: [PATCH] KVM: VMX: enable acknowledge interrupt on vmexit
2013-01-17 1:36 ` Zhang, Yang Z
@ 2013-01-17 6:27 ` Gleb Natapov
2013-01-17 6:34 ` Zhang, Yang Z
0 siblings, 1 reply; 5+ messages in thread
From: Gleb Natapov @ 2013-01-17 6:27 UTC (permalink / raw)
To: Zhang, Yang Z
Cc: kvm@vger.kernel.org, Shan, Haitao, mtosatti@redhat.com,
Zhang, Xiantao
On Thu, Jan 17, 2013 at 01:36:36AM +0000, Zhang, Yang Z wrote:
> Gleb Natapov wrote on 2013-01-16:
> > On Wed, Jan 16, 2013 at 05:00:48PM +0800, Yang Zhang wrote:
> >> From: Yang Zhang <yang.z.zhang@Intel.com>
> >>
> >> The "acknowledge interrupt on exit" feature controls processor behavior
> >> for external interrupt acknowledgement. When this control is set, the
> >> processor acknowledges the interrupt controller to acquire the
> >> interrupt vector on VM exit.
> >>
> >> This feature is required by Posted Interrupt. It will be turned on only
> >> when posted interrupt is enabled.
> >>
> > Why? Always enable it. It is faster than current approach.
> I don't think it will faster. It should be same.
>
Interrupt delivered->vmexit->interrupt enabled->interrupt
redelivered->acked->dispatched through idt.
vs
Interrupt delivered->acked->vmexit->dispatched through idt.
Anyway even if it is not slower it is better to use the same code path
to make sure it is well tested.
> >> Refer to Intel SDM volume 3, chapter 33.2.
> >>
> > Please CC H. Peter Anvin <hpa@linux.intel.com> on the next version.
> >
> >> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
> >> ---
> >> arch/x86/kvm/vmx.c | 51
> >> +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 files changed,
> >> 49 insertions(+), 2 deletions(-)
> >> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> >> index dd2a85c..d1ed9ae 100644
> >> --- a/arch/x86/kvm/vmx.c
> >> +++ b/arch/x86/kvm/vmx.c
> >> @@ -2565,7 +2565,7 @@ static __init int setup_vmcs_config(struct
> > vmcs_config *vmcs_conf)
> >> #ifdef CONFIG_X86_64
> >> min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
> >> #endif
> >> - opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
> >> + opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
> > VM_EXIT_ACK_INTR_ON_EXIT;
> >> if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
> >> &_vmexit_control) < 0) return -EIO; @@ -3926,7 +3926,7 @@ static
> >> int vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; }
> >> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
> >> + vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl &
> >> ~VM_EXIT_ACK_INTR_ON_EXIT);
> >>
> >> /* 22.2.1, 20.8.1 */
> >> vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
> >> @@ -6096,6 +6096,52 @@ static void vmx_complete_atomic_exit(struct
> > vcpu_vmx *vmx)
> >> }
> >> }
> >> + +static noinline void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
> >> +{ + u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + + if
> >> ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_EXT_INTR &&
> >> + (exit_intr_info & INTR_INFO_VALID_MASK) ) {
> > if (exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)
> > == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR))
> >
> >> + unsigned int vector;
> >> + unsigned long entry;
> >> + struct desc_ptr dt;
> >> + gate_desc *desc;
> >> +
> >> + native_store_idt(&dt);
> > This does not change. Store it in vcpu during vcpu creation instead of
> > reading it each time.
> Right. It will reduce the cost.
>
> >> +
> >> + vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
> >> + desc = (void *)dt.address + vector * 16;
> >> +
> >> + entry = gate_offset(*desc);
> >> + asm(
> >> + "mov %0, %%" _ASM_DX "\n\t"
> >> + "mov %%" _ASM_SP ", %%" _ASM_BX "\n\t"
> >> +#ifdef CONFIG_X86_64
> >> + "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
> >> +#endif
> >> + "mov %%ss, %%" _ASM_AX "\n\t"
> >> + "push %%" _ASM_AX "\n\t"
> >> + "push %%" _ASM_BX "\n\t"
> > For 32bit you do not need to save ss:esp.
> You are right. No need to save it for 32bit.
>
> >> + "pushf\n\t"
> >> + "mov %%cs, %%" _ASM_AX "\n\t"
> >> + "push %%" _ASM_AX "\n\t"
> >> + "push intr_return\n\t"
> >> + "jmp *%% " _ASM_DX "\n\t"
> >> + ".pushsection .rodata \n\t"
> >> + ".global intr_return \n\t"
> >> + "intr_return: " _ASM_PTR " 1b \n\t"
> >> + ".popsection\n\t"
> >> + : :"m"(entry) :
> >> +#ifdef CONFIG_X86_64
> >> + "rax", "rbx", "rdx"
> >> +#else
> >> + "eax", "ebx", "edx"
> >> +#endif
> >> + );
> >> + }
> >> +}
> >> +
> >> static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
> >> {
> >> u32 exit_intr_info;
> >> @@ -6431,6 +6477,7 @@ static void __noclone vmx_vcpu_run(struct
> > kvm_vcpu *vcpu)
> >> vmx_complete_atomic_exit(vmx);
> >> vmx_recover_nmi_blocking(vmx);
> >> vmx_complete_interrupts(vmx);
> >> + vmx_handle_external_intr(vcpu);
> > This should be done just before enabling interrupts, otherwise we are
> > taking an interrupt before vmexit is fully complete.
> How about to put it just after set vcpu->mode to OUTSIDE_GUEST_MODE? At that point, the vmexit is considered as fully complete.
>
As I said, right before local_irq_enable().
--
Gleb.
^ permalink raw reply [flat|nested] 5+ messages in thread* RE: [PATCH] KVM: VMX: enable acknowledge interrupt on vmexit
2013-01-17 6:27 ` Gleb Natapov
@ 2013-01-17 6:34 ` Zhang, Yang Z
0 siblings, 0 replies; 5+ messages in thread
From: Zhang, Yang Z @ 2013-01-17 6:34 UTC (permalink / raw)
To: Gleb Natapov
Cc: kvm@vger.kernel.org, Shan, Haitao, mtosatti@redhat.com,
Zhang, Xiantao
Gleb Natapov wrote on 2013-01-17:
> On Thu, Jan 17, 2013 at 01:36:36AM +0000, Zhang, Yang Z wrote:
>> Gleb Natapov wrote on 2013-01-16:
>>> On Wed, Jan 16, 2013 at 05:00:48PM +0800, Yang Zhang wrote:
>>>> From: Yang Zhang <yang.z.zhang@Intel.com>
>>>>
>>>> The "acknowledge interrupt on exit" feature controls processor behavior
>>>> for external interrupt acknowledgement. When this control is set, the
>>>> processor acknowledges the interrupt controller to acquire the
>>>> interrupt vector on VM exit.
>>>>
>>>> This feature is required by Posted Interrupt. It will be turned on only
>>>> when posted interrupt is enabled.
>>>>
>>> Why? Always enable it. It is faster than current approach.
>> I don't think it will faster. It should be same.
>>
> Interrupt delivered->vmexit->interrupt enabled->interrupt
> redelivered->acked->dispatched through idt.
>
> vs
>
> Interrupt delivered->acked->vmexit->dispatched through idt.
>
> Anyway even if it is not slower it is better to use the same code path
> to make sure it is well tested.
Ok.
>>>> Refer to Intel SDM volume 3, chapter 33.2.
>>>>
>>> Please CC H. Peter Anvin <hpa@linux.intel.com> on the next version.
>>>
>>>> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
>>>> ---
>>>> arch/x86/kvm/vmx.c | 51
>>>> +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 files changed,
>>>> 49 insertions(+), 2 deletions(-)
>>>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>>>> index dd2a85c..d1ed9ae 100644
>>>> --- a/arch/x86/kvm/vmx.c
>>>> +++ b/arch/x86/kvm/vmx.c
>>>> @@ -2565,7 +2565,7 @@ static __init int setup_vmcs_config(struct
>>> vmcs_config *vmcs_conf)
>>>> #ifdef CONFIG_X86_64
>>>> min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
>>>> #endif
>>>> - opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
>>>> + opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
>>> VM_EXIT_ACK_INTR_ON_EXIT;
>>>> if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
>>>> &_vmexit_control) < 0) return -EIO; @@ -3926,7 +3926,7 @@
>>>> static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs;
> }
>>>> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
>>>> + vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl &
>>>> ~VM_EXIT_ACK_INTR_ON_EXIT);
>>>>
>>>> /* 22.2.1, 20.8.1 */
>>>> vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
>>>> @@ -6096,6 +6096,52 @@ static void vmx_complete_atomic_exit(struct
>>> vcpu_vmx *vmx)
>>>> }
>>>> }
>>>> + +static noinline void vmx_handle_external_intr(struct kvm_vcpu
>>>> *vcpu) +{ + u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); +
>>>> + if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) ==
>>>> INTR_TYPE_EXT_INTR && + (exit_intr_info & INTR_INFO_VALID_MASK) )
>>>> {
>>> if (exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)
>>> == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR))
>>>
>>>> + unsigned int vector;
>>>> + unsigned long entry;
>>>> + struct desc_ptr dt;
>>>> + gate_desc *desc;
>>>> +
>>>> + native_store_idt(&dt);
>>> This does not change. Store it in vcpu during vcpu creation instead of
>>> reading it each time.
>> Right. It will reduce the cost.
>>
>>>> +
>>>> + vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
>>>> + desc = (void *)dt.address + vector * 16;
>>>> +
>>>> + entry = gate_offset(*desc);
>>>> + asm(
>>>> + "mov %0, %%" _ASM_DX "\n\t"
>>>> + "mov %%" _ASM_SP ", %%" _ASM_BX "\n\t"
>>>> +#ifdef CONFIG_X86_64
>>>> + "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
>>>> +#endif
>>>> + "mov %%ss, %%" _ASM_AX "\n\t"
>>>> + "push %%" _ASM_AX "\n\t"
>>>> + "push %%" _ASM_BX "\n\t"
>>> For 32bit you do not need to save ss:esp.
>> You are right. No need to save it for 32bit.
>>
>>>> + "pushf\n\t"
>>>> + "mov %%cs, %%" _ASM_AX "\n\t"
>>>> + "push %%" _ASM_AX "\n\t"
>>>> + "push intr_return\n\t"
>>>> + "jmp *%% " _ASM_DX "\n\t"
>>>> + ".pushsection .rodata \n\t"
>>>> + ".global intr_return \n\t"
>>>> + "intr_return: " _ASM_PTR " 1b \n\t"
>>>> + ".popsection\n\t"
>>>> + : :"m"(entry) :
>>>> +#ifdef CONFIG_X86_64
>>>> + "rax", "rbx", "rdx"
>>>> +#else
>>>> + "eax", "ebx", "edx"
>>>> +#endif
>>>> + );
>>>> + }
>>>> +}
>>>> +
>>>> static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
>>>> {
>>>> u32 exit_intr_info;
>>>> @@ -6431,6 +6477,7 @@ static void __noclone vmx_vcpu_run(struct
>>> kvm_vcpu *vcpu)
>>>> vmx_complete_atomic_exit(vmx);
>>>> vmx_recover_nmi_blocking(vmx);
>>>> vmx_complete_interrupts(vmx);
>>>> + vmx_handle_external_intr(vcpu);
>>> This should be done just before enabling interrupts, otherwise we are
>>> taking an interrupt before vmexit is fully complete.
>> How about to put it just after set vcpu->mode to OUTSIDE_GUEST_MODE? At
>> that point, the vmexit is considered as fully complete.
>>
> As I said, right before local_irq_enable().
Ok.
Best regards,
Yang
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2013-01-17 6:34 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-01-16 9:00 [PATCH] KVM: VMX: enable acknowledge interrupt on vmexit Yang Zhang
2013-01-16 15:42 ` Gleb Natapov
2013-01-17 1:36 ` Zhang, Yang Z
2013-01-17 6:27 ` Gleb Natapov
2013-01-17 6:34 ` Zhang, Yang Z
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox