* [PATCH] KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field
@ 2013-11-25 13:37 Gleb Natapov
2013-11-27 16:58 ` Paolo Bonzini
0 siblings, 1 reply; 6+ messages in thread
From: Gleb Natapov @ 2013-11-25 13:37 UTC (permalink / raw)
To: kvm; +Cc: pbonzini
VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
entry, but most of the time this can be avoided since the values do not change.
Keep a copy of the fields in memory to avoid unnecessary reads from the vmcs.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b2fe1c2..29c1c7f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -418,6 +418,8 @@ struct vcpu_vmx {
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
+ u32 vm_entry_controls_shadow;
+ u32 vm_exit_controls_shadow;
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
* non-nested (L1) guest, it always points to vmcs01. For a nested
@@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
vmcs_writel(field, vmcs_readl(field) | mask);
}
+static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VM_ENTRY_CONTROLS, val);
+ vmx->vm_entry_controls_shadow = val;
+}
+
+static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+ if (vmx->vm_entry_controls_shadow != val)
+ vm_entry_controls_init(vmx, val);
+}
+
+static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
+{
+ return vmx->vm_entry_controls_shadow;
+}
+
+
+static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
+}
+
+static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
+}
+
+static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VM_EXIT_CONTROLS, val);
+ vmx->vm_exit_controls_shadow = val;
+}
+
+static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+ if (vmx->vm_exit_controls_shadow != val)
+ vm_exit_controls_init(vmx, val);
+}
+
+static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
+{
+ return vmx->vm_exit_controls_shadow;
+}
+
+
+static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
+}
+
+static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
+}
+
static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
vmx->segment_cache.bitmask = 0;
@@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
vmcs_write32(EXCEPTION_BITMAP, eb);
}
-static void clear_atomic_switch_msr_special(unsigned long entry,
- unsigned long exit)
+static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+ unsigned long entry, unsigned long exit)
{
- vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
- vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+ vm_entry_controls_clearbit(vmx, entry);
+ vm_exit_controls_clearbit(vmx, exit);
}
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
@@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
switch (msr) {
case MSR_EFER:
if (cpu_has_load_ia32_efer) {
- clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+ clear_atomic_switch_msr_special(vmx,
+ VM_ENTRY_LOAD_IA32_EFER,
VM_EXIT_LOAD_IA32_EFER);
return;
}
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
if (cpu_has_load_perf_global_ctrl) {
- clear_atomic_switch_msr_special(
+ clear_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
return;
@@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}
-static void add_atomic_switch_msr_special(unsigned long entry,
- unsigned long exit, unsigned long guest_val_vmcs,
- unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+ unsigned long entry, unsigned long exit,
+ unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
+ u64 guest_val, u64 host_val)
{
vmcs_write64(guest_val_vmcs, guest_val);
vmcs_write64(host_val_vmcs, host_val);
- vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
- vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+ vm_entry_controls_setbit(vmx, entry);
+ vm_exit_controls_setbit(vmx, exit);
}
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
@@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
switch (msr) {
case MSR_EFER:
if (cpu_has_load_ia32_efer) {
- add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+ add_atomic_switch_msr_special(vmx,
+ VM_ENTRY_LOAD_IA32_EFER,
VM_EXIT_LOAD_IA32_EFER,
GUEST_IA32_EFER,
HOST_IA32_EFER,
@@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
if (cpu_has_load_perf_global_ctrl) {
- add_atomic_switch_msr_special(
+ add_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
GUEST_IA32_PERF_GLOBAL_CTRL,
@@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
vmx_load_host_state(to_vmx(vcpu));
vcpu->arch.efer = efer;
if (efer & EFER_LMA) {
- vmcs_write32(VM_ENTRY_CONTROLS,
- vmcs_read32(VM_ENTRY_CONTROLS) |
- VM_ENTRY_IA32E_MODE);
+ vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
msr->data = efer;
} else {
- vmcs_write32(VM_ENTRY_CONTROLS,
- vmcs_read32(VM_ENTRY_CONTROLS) &
- ~VM_ENTRY_IA32E_MODE);
+ vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
msr->data = efer & ~EFER_LME;
}
@@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
static void exit_lmode(struct kvm_vcpu *vcpu)
{
- vmcs_write32(VM_ENTRY_CONTROLS,
- vmcs_read32(VM_ENTRY_CONTROLS)
- & ~VM_ENTRY_IA32E_MODE);
+ vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}
@@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
- vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+
+ vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
/* 22.2.1, 20.8.1 */
- vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
+ vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
set_cr4_guest_host_mask(vmx);
@@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exit_control = vmcs_config.vmexit_ctrl;
if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
- vmcs_write32(VM_EXIT_CONTROLS, exit_control);
+ vm_exit_controls_init(vmx, exit_control);
/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
* emulated by vmx_set_efer(), below.
*/
- vmcs_write32(VM_ENTRY_CONTROLS,
+ vm_entry_controls_init(vmx,
(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
~VM_ENTRY_IA32E_MODE) |
(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
@@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->vm_entry_controls =
(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
- (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
+ (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
/* TODO: These cannot have changed unless we have MSR bitmaps and
* the relevant bit asks not to trap the change */
@@ -8390,6 +8446,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
vcpu->cpu = cpu;
put_cpu();
+ vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
vmx_segment_cache_clear(vmx);
/* if no vmcs02 cache requested, remove the one we used */
--
Gleb.
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field
2013-11-25 13:37 [PATCH] KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field Gleb Natapov
@ 2013-11-27 16:58 ` Paolo Bonzini
2013-12-08 13:00 ` Jan Kiszka
0 siblings, 1 reply; 6+ messages in thread
From: Paolo Bonzini @ 2013-11-27 16:58 UTC (permalink / raw)
To: Gleb Natapov; +Cc: kvm
Il 25/11/2013 14:37, Gleb Natapov ha scritto:
> VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
> entry, but most of the time this can be avoided since the values do not change.
> Keep a copy of the fields in memory to avoid unnecessary reads from the vmcs.
>
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index b2fe1c2..29c1c7f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -418,6 +418,8 @@ struct vcpu_vmx {
> u64 msr_host_kernel_gs_base;
> u64 msr_guest_kernel_gs_base;
> #endif
> + u32 vm_entry_controls_shadow;
> + u32 vm_exit_controls_shadow;
> /*
> * loaded_vmcs points to the VMCS currently used in this vcpu. For a
> * non-nested (L1) guest, it always points to vmcs01. For a nested
> @@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
> vmcs_writel(field, vmcs_readl(field) | mask);
> }
>
> +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
> +{
> + vmcs_write32(VM_ENTRY_CONTROLS, val);
> + vmx->vm_entry_controls_shadow = val;
> +}
> +
> +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
> +{
> + if (vmx->vm_entry_controls_shadow != val)
> + vm_entry_controls_init(vmx, val);
> +}
> +
> +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
> +{
> + return vmx->vm_entry_controls_shadow;
> +}
> +
> +
> +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
> +{
> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
> +}
> +
> +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
> +{
> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
> +}
> +
> +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
> +{
> + vmcs_write32(VM_EXIT_CONTROLS, val);
> + vmx->vm_exit_controls_shadow = val;
> +}
> +
> +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
> +{
> + if (vmx->vm_exit_controls_shadow != val)
> + vm_exit_controls_init(vmx, val);
> +}
> +
> +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
> +{
> + return vmx->vm_exit_controls_shadow;
> +}
> +
> +
> +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
> +{
> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
> +}
> +
> +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
> +{
> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
> +}
> +
> static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
> {
> vmx->segment_cache.bitmask = 0;
> @@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
> vmcs_write32(EXCEPTION_BITMAP, eb);
> }
>
> -static void clear_atomic_switch_msr_special(unsigned long entry,
> - unsigned long exit)
> +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
> + unsigned long entry, unsigned long exit)
> {
> - vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
> - vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
> + vm_entry_controls_clearbit(vmx, entry);
> + vm_exit_controls_clearbit(vmx, exit);
> }
>
> static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
> @@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
> switch (msr) {
> case MSR_EFER:
> if (cpu_has_load_ia32_efer) {
> - clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
> + clear_atomic_switch_msr_special(vmx,
> + VM_ENTRY_LOAD_IA32_EFER,
> VM_EXIT_LOAD_IA32_EFER);
> return;
> }
> break;
> case MSR_CORE_PERF_GLOBAL_CTRL:
> if (cpu_has_load_perf_global_ctrl) {
> - clear_atomic_switch_msr_special(
> + clear_atomic_switch_msr_special(vmx,
> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
> return;
> @@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
> vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
> }
>
> -static void add_atomic_switch_msr_special(unsigned long entry,
> - unsigned long exit, unsigned long guest_val_vmcs,
> - unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
> +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
> + unsigned long entry, unsigned long exit,
> + unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
> + u64 guest_val, u64 host_val)
> {
> vmcs_write64(guest_val_vmcs, guest_val);
> vmcs_write64(host_val_vmcs, host_val);
> - vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
> - vmcs_set_bits(VM_EXIT_CONTROLS, exit);
> + vm_entry_controls_setbit(vmx, entry);
> + vm_exit_controls_setbit(vmx, exit);
> }
>
> static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
> @@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
> switch (msr) {
> case MSR_EFER:
> if (cpu_has_load_ia32_efer) {
> - add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
> + add_atomic_switch_msr_special(vmx,
> + VM_ENTRY_LOAD_IA32_EFER,
> VM_EXIT_LOAD_IA32_EFER,
> GUEST_IA32_EFER,
> HOST_IA32_EFER,
> @@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
> break;
> case MSR_CORE_PERF_GLOBAL_CTRL:
> if (cpu_has_load_perf_global_ctrl) {
> - add_atomic_switch_msr_special(
> + add_atomic_switch_msr_special(vmx,
> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
> GUEST_IA32_PERF_GLOBAL_CTRL,
> @@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
> vmx_load_host_state(to_vmx(vcpu));
> vcpu->arch.efer = efer;
> if (efer & EFER_LMA) {
> - vmcs_write32(VM_ENTRY_CONTROLS,
> - vmcs_read32(VM_ENTRY_CONTROLS) |
> - VM_ENTRY_IA32E_MODE);
> + vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
> msr->data = efer;
> } else {
> - vmcs_write32(VM_ENTRY_CONTROLS,
> - vmcs_read32(VM_ENTRY_CONTROLS) &
> - ~VM_ENTRY_IA32E_MODE);
> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>
> msr->data = efer & ~EFER_LME;
> }
> @@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
>
> static void exit_lmode(struct kvm_vcpu *vcpu)
> {
> - vmcs_write32(VM_ENTRY_CONTROLS,
> - vmcs_read32(VM_ENTRY_CONTROLS)
> - & ~VM_ENTRY_IA32E_MODE);
> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
> vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
> }
>
> @@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
> ++vmx->nmsrs;
> }
>
> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
> +
> + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
>
> /* 22.2.1, 20.8.1 */
> - vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
> + vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
>
> vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
> set_cr4_guest_host_mask(vmx);
> @@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
> exit_control = vmcs_config.vmexit_ctrl;
> if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
> exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
> - vmcs_write32(VM_EXIT_CONTROLS, exit_control);
> + vm_exit_controls_init(vmx, exit_control);
>
> /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
> * emulated by vmx_set_efer(), below.
> */
> - vmcs_write32(VM_ENTRY_CONTROLS,
> + vm_entry_controls_init(vmx,
> (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
> ~VM_ENTRY_IA32E_MODE) |
> (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
> @@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>
> vmcs12->vm_entry_controls =
> (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
> - (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
> + (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
>
> /* TODO: These cannot have changed unless we have MSR bitmaps and
> * the relevant bit asks not to trap the change */
> @@ -8390,6 +8446,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
> vcpu->cpu = cpu;
> put_cpu();
>
> + vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
> vmx_segment_cache_clear(vmx);
>
> /* if no vmcs02 cache requested, remove the one we used */
> --
> Gleb.
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
>
Applied to kvm/queue, thanks.
Paolo
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field
2013-11-27 16:58 ` Paolo Bonzini
@ 2013-12-08 13:00 ` Jan Kiszka
2013-12-09 6:55 ` Gleb Natapov
0 siblings, 1 reply; 6+ messages in thread
From: Jan Kiszka @ 2013-12-08 13:00 UTC (permalink / raw)
To: Paolo Bonzini, Gleb Natapov; +Cc: kvm
[-- Attachment #1: Type: text/plain, Size: 11049 bytes --]
On 2013-11-27 17:58, Paolo Bonzini wrote:
> Il 25/11/2013 14:37, Gleb Natapov ha scritto:
>> VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
>> entry, but most of the time this can be avoided since the values do not change.
>> Keep a copy of the fields in memory to avoid unnecessary reads from the vmcs.
>>
>> Signed-off-by: Gleb Natapov <gleb@redhat.com>
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index b2fe1c2..29c1c7f 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -418,6 +418,8 @@ struct vcpu_vmx {
>> u64 msr_host_kernel_gs_base;
>> u64 msr_guest_kernel_gs_base;
>> #endif
>> + u32 vm_entry_controls_shadow;
>> + u32 vm_exit_controls_shadow;
>> /*
>> * loaded_vmcs points to the VMCS currently used in this vcpu. For a
>> * non-nested (L1) guest, it always points to vmcs01. For a nested
>> @@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
>> vmcs_writel(field, vmcs_readl(field) | mask);
>> }
>>
>> +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
>> +{
>> + vmcs_write32(VM_ENTRY_CONTROLS, val);
>> + vmx->vm_entry_controls_shadow = val;
>> +}
>> +
>> +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
>> +{
>> + if (vmx->vm_entry_controls_shadow != val)
>> + vm_entry_controls_init(vmx, val);
>> +}
>> +
>> +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
>> +{
>> + return vmx->vm_entry_controls_shadow;
>> +}
>> +
>> +
>> +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
>> +{
>> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
>> +}
>> +
>> +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
>> +{
>> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
>> +}
>> +
>> +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
>> +{
>> + vmcs_write32(VM_EXIT_CONTROLS, val);
>> + vmx->vm_exit_controls_shadow = val;
>> +}
>> +
>> +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
>> +{
>> + if (vmx->vm_exit_controls_shadow != val)
>> + vm_exit_controls_init(vmx, val);
>> +}
>> +
>> +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
>> +{
>> + return vmx->vm_exit_controls_shadow;
>> +}
>> +
>> +
>> +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
>> +{
>> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
>> +}
>> +
>> +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
>> +{
>> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
>> +}
>> +
>> static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
>> {
>> vmx->segment_cache.bitmask = 0;
>> @@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
>> vmcs_write32(EXCEPTION_BITMAP, eb);
>> }
>>
>> -static void clear_atomic_switch_msr_special(unsigned long entry,
>> - unsigned long exit)
>> +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
>> + unsigned long entry, unsigned long exit)
>> {
>> - vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
>> - vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
>> + vm_entry_controls_clearbit(vmx, entry);
>> + vm_exit_controls_clearbit(vmx, exit);
>> }
>>
>> static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>> @@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>> switch (msr) {
>> case MSR_EFER:
>> if (cpu_has_load_ia32_efer) {
>> - clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
>> + clear_atomic_switch_msr_special(vmx,
>> + VM_ENTRY_LOAD_IA32_EFER,
>> VM_EXIT_LOAD_IA32_EFER);
>> return;
>> }
>> break;
>> case MSR_CORE_PERF_GLOBAL_CTRL:
>> if (cpu_has_load_perf_global_ctrl) {
>> - clear_atomic_switch_msr_special(
>> + clear_atomic_switch_msr_special(vmx,
>> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
>> return;
>> @@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>> vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
>> }
>>
>> -static void add_atomic_switch_msr_special(unsigned long entry,
>> - unsigned long exit, unsigned long guest_val_vmcs,
>> - unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
>> +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
>> + unsigned long entry, unsigned long exit,
>> + unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
>> + u64 guest_val, u64 host_val)
>> {
>> vmcs_write64(guest_val_vmcs, guest_val);
>> vmcs_write64(host_val_vmcs, host_val);
>> - vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
>> - vmcs_set_bits(VM_EXIT_CONTROLS, exit);
>> + vm_entry_controls_setbit(vmx, entry);
>> + vm_exit_controls_setbit(vmx, exit);
>> }
>>
>> static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>> @@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>> switch (msr) {
>> case MSR_EFER:
>> if (cpu_has_load_ia32_efer) {
>> - add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
>> + add_atomic_switch_msr_special(vmx,
>> + VM_ENTRY_LOAD_IA32_EFER,
>> VM_EXIT_LOAD_IA32_EFER,
>> GUEST_IA32_EFER,
>> HOST_IA32_EFER,
>> @@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>> break;
>> case MSR_CORE_PERF_GLOBAL_CTRL:
>> if (cpu_has_load_perf_global_ctrl) {
>> - add_atomic_switch_msr_special(
>> + add_atomic_switch_msr_special(vmx,
>> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
>> GUEST_IA32_PERF_GLOBAL_CTRL,
>> @@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
>> vmx_load_host_state(to_vmx(vcpu));
>> vcpu->arch.efer = efer;
>> if (efer & EFER_LMA) {
>> - vmcs_write32(VM_ENTRY_CONTROLS,
>> - vmcs_read32(VM_ENTRY_CONTROLS) |
>> - VM_ENTRY_IA32E_MODE);
>> + vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>> msr->data = efer;
>> } else {
>> - vmcs_write32(VM_ENTRY_CONTROLS,
>> - vmcs_read32(VM_ENTRY_CONTROLS) &
>> - ~VM_ENTRY_IA32E_MODE);
>> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>
>> msr->data = efer & ~EFER_LME;
>> }
>> @@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
>>
>> static void exit_lmode(struct kvm_vcpu *vcpu)
>> {
>> - vmcs_write32(VM_ENTRY_CONTROLS,
>> - vmcs_read32(VM_ENTRY_CONTROLS)
>> - & ~VM_ENTRY_IA32E_MODE);
>> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>> vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
>> }
>>
>> @@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>> ++vmx->nmsrs;
>> }
>>
>> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
>> +
>> + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
>>
>> /* 22.2.1, 20.8.1 */
>> - vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
>> + vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
>>
>> vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
>> set_cr4_guest_host_mask(vmx);
>> @@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>> exit_control = vmcs_config.vmexit_ctrl;
>> if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
>> exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
>> - vmcs_write32(VM_EXIT_CONTROLS, exit_control);
>> + vm_exit_controls_init(vmx, exit_control);
>>
>> /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
>> * emulated by vmx_set_efer(), below.
>> */
>> - vmcs_write32(VM_ENTRY_CONTROLS,
>> + vm_entry_controls_init(vmx,
>> (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
>> ~VM_ENTRY_IA32E_MODE) |
>> (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
>> @@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>>
>> vmcs12->vm_entry_controls =
>> (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
>> - (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
>> + (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
>>
>> /* TODO: These cannot have changed unless we have MSR bitmaps and
>> * the relevant bit asks not to trap the change */
>> @@ -8390,6 +8446,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
>> vcpu->cpu = cpu;
>> put_cpu();
>>
>> + vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
>> vmx_segment_cache_clear(vmx);
>>
>> /* if no vmcs02 cache requested, remove the one we used */
>> --
>> Gleb.
>> --
>> To unsubscribe from this list: send the line "unsubscribe kvm" in
>> the body of a message to majordomo@vger.kernel.org
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
>>
>
> Applied to kvm/queue, thanks.
Seems to have or cause an issue with nVMX. The (still not merged) vmx
unit test breaks here after applying this commit:
enabling apic
paging enabled
cr0 = 80010011
cr3 = 7fff000
cr4 = 20
PASS: test vmx capability
PASS: test vmxon
PASS: test vmptrld
PASS: test vmclear
PASS: test vmptrst
Hello World, this is null_guest_main!
PASS: test vmxoff
Test suite : vmenter
PASS: test vmlaunch
PASS: test vmresume
Test suite : preemption timer
KVM: entry failed, hardware error 0x7
RAX=000000002ced133a RBX=0000000000000000 RCX=0000000000000000 RDX=0000000000000000
RSI=0000000000000000 RDI=0000000000000000 RBP=0000000000000000 RSP=000000000044f9a0
R8 =0000000000000000 R9 =0000000000000000 R10=0000000000000000 R11=0000000000000000
R12=0000000000000000 R13=0000000000000000 R14=0000000000000000 R15=0000000000000000
RIP=0000000000400c63 RFL=00000002 [-------] CPL=0 II=0 A20=1 SMM=0 HLT=0
ES =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS [-WA]
CS =0008 0000000000000000 ffffffff 00a09b00 DPL=0 CS64 [-RA]
SS =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS [-WA]
DS =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS [-WA]
FS =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS [-WA]
GS =0010 0000000000000000 ffffffff 00c09300 DPL=0 DS [-WA]
LDT=0000 0000000000000000 0000ffff 00008200 DPL=0 LDT
TR =0048 000000000040b052 00000067 00008b00 DPL=0 TSS64-busy
GDT= 000000000040b000 00000447
IDT= 0000000000400296 00000fff
CR0=80010031 CR2=0000000000000000 CR3=0000000007fff000 CR4=00002021
DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 DR3=0000000000000000
DR6=00000000ffff0ff0 DR7=0000000000000400
EFER=0000000000000500
Code=34 25 00 0c 45 00 9d 0f 01 c2 eb 03 0f 01 c3 0f 96 44 24 0c <48> 87 04 25 40 0c 45 00 48 87 1c 25 48 0c 45 00 48 87 0c 25 50 0c 45 00 48 87 14 25 58 0c
Jan
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 263 bytes --]
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field
2013-12-08 13:00 ` Jan Kiszka
@ 2013-12-09 6:55 ` Gleb Natapov
2013-12-09 13:48 ` Jan Kiszka
2013-12-12 9:50 ` Paolo Bonzini
0 siblings, 2 replies; 6+ messages in thread
From: Gleb Natapov @ 2013-12-09 6:55 UTC (permalink / raw)
To: Jan Kiszka; +Cc: Paolo Bonzini, kvm
On Sun, Dec 08, 2013 at 02:00:11PM +0100, Jan Kiszka wrote:
> On 2013-11-27 17:58, Paolo Bonzini wrote:
> > Il 25/11/2013 14:37, Gleb Natapov ha scritto:
> >> VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
> >> entry but most times it can be avoided since values do not change.
> >> Keep a copy of the fields in memory to avoid unnecessary reads from vmcs.
> >>
> >> Signed-off-by: Gleb Natapov <gleb@redhat.com>
> >> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> >> index b2fe1c2..29c1c7f 100644
> >> --- a/arch/x86/kvm/vmx.c
> >> +++ b/arch/x86/kvm/vmx.c
> >> @@ -418,6 +418,8 @@ struct vcpu_vmx {
> >> u64 msr_host_kernel_gs_base;
> >> u64 msr_guest_kernel_gs_base;
> >> #endif
> >> + u32 vm_entry_controls_shadow;
> >> + u32 vm_exit_controls_shadow;
> >> /*
> >> * loaded_vmcs points to the VMCS currently used in this vcpu. For a
> >> * non-nested (L1) guest, it always points to vmcs01. For a nested
> >> @@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
> >> vmcs_writel(field, vmcs_readl(field) | mask);
> >> }
> >>
> >> +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
> >> +{
> >> + vmcs_write32(VM_ENTRY_CONTROLS, val);
> >> + vmx->vm_entry_controls_shadow = val;
> >> +}
> >> +
> >> +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
> >> +{
> >> + if (vmx->vm_entry_controls_shadow != val)
> >> + vm_entry_controls_init(vmx, val);
> >> +}
> >> +
> >> +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
> >> +{
> >> + return vmx->vm_entry_controls_shadow;
> >> +}
> >> +
> >> +
> >> +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
> >> +{
> >> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
> >> +}
> >> +
> >> +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
> >> +{
> >> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
> >> +}
> >> +
> >> +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
> >> +{
> >> + vmcs_write32(VM_EXIT_CONTROLS, val);
> >> + vmx->vm_exit_controls_shadow = val;
> >> +}
> >> +
> >> +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
> >> +{
> >> + if (vmx->vm_exit_controls_shadow != val)
> >> + vm_exit_controls_init(vmx, val);
> >> +}
> >> +
> >> +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
> >> +{
> >> + return vmx->vm_exit_controls_shadow;
> >> +}
> >> +
> >> +
> >> +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
> >> +{
> >> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
> >> +}
> >> +
> >> +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
> >> +{
> >> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
> >> +}
> >> +
> >> static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
> >> {
> >> vmx->segment_cache.bitmask = 0;
> >> @@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
> >> vmcs_write32(EXCEPTION_BITMAP, eb);
> >> }
> >>
> >> -static void clear_atomic_switch_msr_special(unsigned long entry,
> >> - unsigned long exit)
> >> +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
> >> + unsigned long entry, unsigned long exit)
> >> {
> >> - vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
> >> - vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
> >> + vm_entry_controls_clearbit(vmx, entry);
> >> + vm_exit_controls_clearbit(vmx, exit);
> >> }
> >>
> >> static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
> >> @@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
> >> switch (msr) {
> >> case MSR_EFER:
> >> if (cpu_has_load_ia32_efer) {
> >> - clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
> >> + clear_atomic_switch_msr_special(vmx,
> >> + VM_ENTRY_LOAD_IA32_EFER,
> >> VM_EXIT_LOAD_IA32_EFER);
> >> return;
> >> }
> >> break;
> >> case MSR_CORE_PERF_GLOBAL_CTRL:
> >> if (cpu_has_load_perf_global_ctrl) {
> >> - clear_atomic_switch_msr_special(
> >> + clear_atomic_switch_msr_special(vmx,
> >> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
> >> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
> >> return;
> >> @@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
> >> vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
> >> }
> >>
> >> -static void add_atomic_switch_msr_special(unsigned long entry,
> >> - unsigned long exit, unsigned long guest_val_vmcs,
> >> - unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
> >> +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
> >> + unsigned long entry, unsigned long exit,
> >> + unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
> >> + u64 guest_val, u64 host_val)
> >> {
> >> vmcs_write64(guest_val_vmcs, guest_val);
> >> vmcs_write64(host_val_vmcs, host_val);
> >> - vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
> >> - vmcs_set_bits(VM_EXIT_CONTROLS, exit);
> >> + vm_entry_controls_setbit(vmx, entry);
> >> + vm_exit_controls_setbit(vmx, exit);
> >> }
> >>
> >> static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
> >> @@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
> >> switch (msr) {
> >> case MSR_EFER:
> >> if (cpu_has_load_ia32_efer) {
> >> - add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
> >> + add_atomic_switch_msr_special(vmx,
> >> + VM_ENTRY_LOAD_IA32_EFER,
> >> VM_EXIT_LOAD_IA32_EFER,
> >> GUEST_IA32_EFER,
> >> HOST_IA32_EFER,
> >> @@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
> >> break;
> >> case MSR_CORE_PERF_GLOBAL_CTRL:
> >> if (cpu_has_load_perf_global_ctrl) {
> >> - add_atomic_switch_msr_special(
> >> + add_atomic_switch_msr_special(vmx,
> >> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
> >> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
> >> GUEST_IA32_PERF_GLOBAL_CTRL,
> >> @@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
> >> vmx_load_host_state(to_vmx(vcpu));
> >> vcpu->arch.efer = efer;
> >> if (efer & EFER_LMA) {
> >> - vmcs_write32(VM_ENTRY_CONTROLS,
> >> - vmcs_read32(VM_ENTRY_CONTROLS) |
> >> - VM_ENTRY_IA32E_MODE);
> >> + vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
> >> msr->data = efer;
> >> } else {
> >> - vmcs_write32(VM_ENTRY_CONTROLS,
> >> - vmcs_read32(VM_ENTRY_CONTROLS) &
> >> - ~VM_ENTRY_IA32E_MODE);
> >> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
> >>
> >> msr->data = efer & ~EFER_LME;
> >> }
> >> @@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
> >>
> >> static void exit_lmode(struct kvm_vcpu *vcpu)
> >> {
> >> - vmcs_write32(VM_ENTRY_CONTROLS,
> >> - vmcs_read32(VM_ENTRY_CONTROLS)
> >> - & ~VM_ENTRY_IA32E_MODE);
> >> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
> >> vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
> >> }
> >>
> >> @@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
> >> ++vmx->nmsrs;
> >> }
> >>
> >> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
> >> +
> >> + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
> >>
> >> /* 22.2.1, 20.8.1 */
> >> - vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
> >> + vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
> >>
> >> vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
> >> set_cr4_guest_host_mask(vmx);
> >> @@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
> >> exit_control = vmcs_config.vmexit_ctrl;
> >> if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
> >> exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
> >> - vmcs_write32(VM_EXIT_CONTROLS, exit_control);
> >> + vm_exit_controls_init(vmx, exit_control);
> >>
> >> /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
> >> * emulated by vmx_set_efer(), below.
> >> */
> >> - vmcs_write32(VM_ENTRY_CONTROLS,
> >> + vm_entry_controls_init(vmx,
> >> (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
> >> ~VM_ENTRY_IA32E_MODE) |
> >> (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
> >> @@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
> >>
> >> vmcs12->vm_entry_controls =
> >> (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
> >> - (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
> >> + (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
> >>
> >> /* TODO: These cannot have changed unless we have MSR bitmaps and
> >> * the relevant bit asks not to trap the change */
> >> @@ -8390,6 +8446,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
> >> vcpu->cpu = cpu;
> >> put_cpu();
> >>
> >> + vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
> >> vmx_segment_cache_clear(vmx);
> >>
> >> /* if no vmcs02 cache requested, remove the one we used */
> >> --
> >> Gleb.
> >> --
> >> To unsubscribe from this list: send the line "unsubscribe kvm" in
> >> the body of a message to majordomo@vger.kernel.org
> >> More majordomo info at http://vger.kernel.org/majordomo-info.html
> >>
> >
> > Applied to kvm/queue, thanks.
>
> Seems to have or cause an issue with nVMX. The (still not merged) vmx
> unit test breaks here after applying this commit:
>
What about this patch?
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d95384..f90320b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8452,6 +8452,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
put_cpu();
vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
+ vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
vmx_segment_cache_clear(vmx);
/* if no vmcs02 cache requested, remove the one we used */
--
Gleb.
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field
2013-12-09 6:55 ` Gleb Natapov
@ 2013-12-09 13:48 ` Jan Kiszka
2013-12-12 9:50 ` Paolo Bonzini
1 sibling, 0 replies; 6+ messages in thread
From: Jan Kiszka @ 2013-12-09 13:48 UTC (permalink / raw)
To: Gleb Natapov; +Cc: Paolo Bonzini, kvm
[-- Attachment #1: Type: text/plain, Size: 10489 bytes --]
On 2013-12-09 07:55, Gleb Natapov wrote:
> On Sun, Dec 08, 2013 at 02:00:11PM +0100, Jan Kiszka wrote:
>> On 2013-11-27 17:58, Paolo Bonzini wrote:
>>> Il 25/11/2013 14:37, Gleb Natapov ha scritto:
>>>> VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
>>>> entry but most times it can be avoided since values do not change.
>>>> Keep a copy of the fields in memory to avoid unnecessary reads from vmcs.
>>>>
>>>> Signed-off-by: Gleb Natapov <gleb@redhat.com>
>>>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>>>> index b2fe1c2..29c1c7f 100644
>>>> --- a/arch/x86/kvm/vmx.c
>>>> +++ b/arch/x86/kvm/vmx.c
>>>> @@ -418,6 +418,8 @@ struct vcpu_vmx {
>>>> u64 msr_host_kernel_gs_base;
>>>> u64 msr_guest_kernel_gs_base;
>>>> #endif
>>>> + u32 vm_entry_controls_shadow;
>>>> + u32 vm_exit_controls_shadow;
>>>> /*
>>>> * loaded_vmcs points to the VMCS currently used in this vcpu. For a
>>>> * non-nested (L1) guest, it always points to vmcs01. For a nested
>>>> @@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
>>>> vmcs_writel(field, vmcs_readl(field) | mask);
>>>> }
>>>>
>>>> +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vmcs_write32(VM_ENTRY_CONTROLS, val);
>>>> + vmx->vm_entry_controls_shadow = val;
>>>> +}
>>>> +
>>>> +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + if (vmx->vm_entry_controls_shadow != val)
>>>> + vm_entry_controls_init(vmx, val);
>>>> +}
>>>> +
>>>> +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
>>>> +{
>>>> + return vmx->vm_entry_controls_shadow;
>>>> +}
>>>> +
>>>> +
>>>> +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
>>>> +}
>>>> +
>>>> +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
>>>> +}
>>>> +
>>>> +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vmcs_write32(VM_EXIT_CONTROLS, val);
>>>> + vmx->vm_exit_controls_shadow = val;
>>>> +}
>>>> +
>>>> +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + if (vmx->vm_exit_controls_shadow != val)
>>>> + vm_exit_controls_init(vmx, val);
>>>> +}
>>>> +
>>>> +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
>>>> +{
>>>> + return vmx->vm_exit_controls_shadow;
>>>> +}
>>>> +
>>>> +
>>>> +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
>>>> +}
>>>> +
>>>> +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
>>>> +}
>>>> +
>>>> static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
>>>> {
>>>> vmx->segment_cache.bitmask = 0;
>>>> @@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
>>>> vmcs_write32(EXCEPTION_BITMAP, eb);
>>>> }
>>>>
>>>> -static void clear_atomic_switch_msr_special(unsigned long entry,
>>>> - unsigned long exit)
>>>> +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
>>>> + unsigned long entry, unsigned long exit)
>>>> {
>>>> - vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
>>>> - vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
>>>> + vm_entry_controls_clearbit(vmx, entry);
>>>> + vm_exit_controls_clearbit(vmx, exit);
>>>> }
>>>>
>>>> static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>>>> @@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>>>> switch (msr) {
>>>> case MSR_EFER:
>>>> if (cpu_has_load_ia32_efer) {
>>>> - clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
>>>> + clear_atomic_switch_msr_special(vmx,
>>>> + VM_ENTRY_LOAD_IA32_EFER,
>>>> VM_EXIT_LOAD_IA32_EFER);
>>>> return;
>>>> }
>>>> break;
>>>> case MSR_CORE_PERF_GLOBAL_CTRL:
>>>> if (cpu_has_load_perf_global_ctrl) {
>>>> - clear_atomic_switch_msr_special(
>>>> + clear_atomic_switch_msr_special(vmx,
>>>> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>>>> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
>>>> return;
>>>> @@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>>>> vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
>>>> }
>>>>
>>>> -static void add_atomic_switch_msr_special(unsigned long entry,
>>>> - unsigned long exit, unsigned long guest_val_vmcs,
>>>> - unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
>>>> +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
>>>> + unsigned long entry, unsigned long exit,
>>>> + unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
>>>> + u64 guest_val, u64 host_val)
>>>> {
>>>> vmcs_write64(guest_val_vmcs, guest_val);
>>>> vmcs_write64(host_val_vmcs, host_val);
>>>> - vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
>>>> - vmcs_set_bits(VM_EXIT_CONTROLS, exit);
>>>> + vm_entry_controls_setbit(vmx, entry);
>>>> + vm_exit_controls_setbit(vmx, exit);
>>>> }
>>>>
>>>> static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>>>> @@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>>>> switch (msr) {
>>>> case MSR_EFER:
>>>> if (cpu_has_load_ia32_efer) {
>>>> - add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
>>>> + add_atomic_switch_msr_special(vmx,
>>>> + VM_ENTRY_LOAD_IA32_EFER,
>>>> VM_EXIT_LOAD_IA32_EFER,
>>>> GUEST_IA32_EFER,
>>>> HOST_IA32_EFER,
>>>> @@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>>>> break;
>>>> case MSR_CORE_PERF_GLOBAL_CTRL:
>>>> if (cpu_has_load_perf_global_ctrl) {
>>>> - add_atomic_switch_msr_special(
>>>> + add_atomic_switch_msr_special(vmx,
>>>> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>>>> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
>>>> GUEST_IA32_PERF_GLOBAL_CTRL,
>>>> @@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
>>>> vmx_load_host_state(to_vmx(vcpu));
>>>> vcpu->arch.efer = efer;
>>>> if (efer & EFER_LMA) {
>>>> - vmcs_write32(VM_ENTRY_CONTROLS,
>>>> - vmcs_read32(VM_ENTRY_CONTROLS) |
>>>> - VM_ENTRY_IA32E_MODE);
>>>> + vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>>> msr->data = efer;
>>>> } else {
>>>> - vmcs_write32(VM_ENTRY_CONTROLS,
>>>> - vmcs_read32(VM_ENTRY_CONTROLS) &
>>>> - ~VM_ENTRY_IA32E_MODE);
>>>> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>>>
>>>> msr->data = efer & ~EFER_LME;
>>>> }
>>>> @@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
>>>>
>>>> static void exit_lmode(struct kvm_vcpu *vcpu)
>>>> {
>>>> - vmcs_write32(VM_ENTRY_CONTROLS,
>>>> - vmcs_read32(VM_ENTRY_CONTROLS)
>>>> - & ~VM_ENTRY_IA32E_MODE);
>>>> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>>> vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
>>>> }
>>>>
>>>> @@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>>>> ++vmx->nmsrs;
>>>> }
>>>>
>>>> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
>>>> +
>>>> + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
>>>>
>>>> /* 22.2.1, 20.8.1 */
>>>> - vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
>>>> + vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
>>>>
>>>> vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
>>>> set_cr4_guest_host_mask(vmx);
>>>> @@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>>>> exit_control = vmcs_config.vmexit_ctrl;
>>>> if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
>>>> exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
>>>> - vmcs_write32(VM_EXIT_CONTROLS, exit_control);
>>>> + vm_exit_controls_init(vmx, exit_control);
>>>>
>>>> /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
>>>> * emulated by vmx_set_efer(), below.
>>>> */
>>>> - vmcs_write32(VM_ENTRY_CONTROLS,
>>>> + vm_entry_controls_init(vmx,
>>>> (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
>>>> ~VM_ENTRY_IA32E_MODE) |
>>>> (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
>>>> @@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>>>>
>>>> vmcs12->vm_entry_controls =
>>>> (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
>>>> - (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
>>>> + (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
>>>>
>>>> /* TODO: These cannot have changed unless we have MSR bitmaps and
>>>> * the relevant bit asks not to trap the change */
>>>> @@ -8390,6 +8446,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
>>>> vcpu->cpu = cpu;
>>>> put_cpu();
>>>>
>>>> + vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
>>>> vmx_segment_cache_clear(vmx);
>>>>
>>>> /* if no vmcs02 cache requested, remove the one we used */
>>>> --
>>>> Gleb.
>>>> --
>>>> To unsubscribe from this list: send the line "unsubscribe kvm" in
>>>> the body of a message to majordomo@vger.kernel.org
>>>> More majordomo info at http://vger.kernel.org/majordomo-info.html
>>>>
>>>
>>> Applied to kvm/queue, thanks.
>>
>> Seems to have or cause an issue with nVMX. The (still not merged) vmx
>> unit test breaks here after applying this commit:
>>
> What about this patch?
>
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 1d95384..f90320b 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -8452,6 +8452,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
> put_cpu();
>
> vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
> + vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
> vmx_segment_cache_clear(vmx);
>
> /* if no vmcs02 cache requested, remove the one we used */
> --
> Gleb.
>
Yep, works and makes sense as well.
Thanks,
Jan
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 263 bytes --]
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field
2013-12-09 6:55 ` Gleb Natapov
2013-12-09 13:48 ` Jan Kiszka
@ 2013-12-12 9:50 ` Paolo Bonzini
1 sibling, 0 replies; 6+ messages in thread
From: Paolo Bonzini @ 2013-12-12 9:50 UTC (permalink / raw)
To: Gleb Natapov; +Cc: Jan Kiszka, kvm
Il 09/12/2013 07:55, Gleb Natapov ha scritto:
> On Sun, Dec 08, 2013 at 02:00:11PM +0100, Jan Kiszka wrote:
>> On 2013-11-27 17:58, Paolo Bonzini wrote:
>>> Il 25/11/2013 14:37, Gleb Natapov ha scritto:
>>>> VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
>>>> entry but most times it can be avoided since values do not change.
>>>> Keep a copy of the fields in memory to avoid unnecessary reads from vmcs.
>>>>
>>>> Signed-off-by: Gleb Natapov <gleb@redhat.com>
>>>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>>>> index b2fe1c2..29c1c7f 100644
>>>> --- a/arch/x86/kvm/vmx.c
>>>> +++ b/arch/x86/kvm/vmx.c
>>>> @@ -418,6 +418,8 @@ struct vcpu_vmx {
>>>> u64 msr_host_kernel_gs_base;
>>>> u64 msr_guest_kernel_gs_base;
>>>> #endif
>>>> + u32 vm_entry_controls_shadow;
>>>> + u32 vm_exit_controls_shadow;
>>>> /*
>>>> * loaded_vmcs points to the VMCS currently used in this vcpu. For a
>>>> * non-nested (L1) guest, it always points to vmcs01. For a nested
>>>> @@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
>>>> vmcs_writel(field, vmcs_readl(field) | mask);
>>>> }
>>>>
>>>> +static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vmcs_write32(VM_ENTRY_CONTROLS, val);
>>>> + vmx->vm_entry_controls_shadow = val;
>>>> +}
>>>> +
>>>> +static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + if (vmx->vm_entry_controls_shadow != val)
>>>> + vm_entry_controls_init(vmx, val);
>>>> +}
>>>> +
>>>> +static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
>>>> +{
>>>> + return vmx->vm_entry_controls_shadow;
>>>> +}
>>>> +
>>>> +
>>>> +static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
>>>> +}
>>>> +
>>>> +static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
>>>> +}
>>>> +
>>>> +static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vmcs_write32(VM_EXIT_CONTROLS, val);
>>>> + vmx->vm_exit_controls_shadow = val;
>>>> +}
>>>> +
>>>> +static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + if (vmx->vm_exit_controls_shadow != val)
>>>> + vm_exit_controls_init(vmx, val);
>>>> +}
>>>> +
>>>> +static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
>>>> +{
>>>> + return vmx->vm_exit_controls_shadow;
>>>> +}
>>>> +
>>>> +
>>>> +static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
>>>> +}
>>>> +
>>>> +static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
>>>> +{
>>>> + vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
>>>> +}
>>>> +
>>>> static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
>>>> {
>>>> vmx->segment_cache.bitmask = 0;
>>>> @@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
>>>> vmcs_write32(EXCEPTION_BITMAP, eb);
>>>> }
>>>>
>>>> -static void clear_atomic_switch_msr_special(unsigned long entry,
>>>> - unsigned long exit)
>>>> +static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
>>>> + unsigned long entry, unsigned long exit)
>>>> {
>>>> - vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
>>>> - vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
>>>> + vm_entry_controls_clearbit(vmx, entry);
>>>> + vm_exit_controls_clearbit(vmx, exit);
>>>> }
>>>>
>>>> static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>>>> @@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>>>> switch (msr) {
>>>> case MSR_EFER:
>>>> if (cpu_has_load_ia32_efer) {
>>>> - clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
>>>> + clear_atomic_switch_msr_special(vmx,
>>>> + VM_ENTRY_LOAD_IA32_EFER,
>>>> VM_EXIT_LOAD_IA32_EFER);
>>>> return;
>>>> }
>>>> break;
>>>> case MSR_CORE_PERF_GLOBAL_CTRL:
>>>> if (cpu_has_load_perf_global_ctrl) {
>>>> - clear_atomic_switch_msr_special(
>>>> + clear_atomic_switch_msr_special(vmx,
>>>> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>>>> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
>>>> return;
>>>> @@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
>>>> vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
>>>> }
>>>>
>>>> -static void add_atomic_switch_msr_special(unsigned long entry,
>>>> - unsigned long exit, unsigned long guest_val_vmcs,
>>>> - unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
>>>> +static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
>>>> + unsigned long entry, unsigned long exit,
>>>> + unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
>>>> + u64 guest_val, u64 host_val)
>>>> {
>>>> vmcs_write64(guest_val_vmcs, guest_val);
>>>> vmcs_write64(host_val_vmcs, host_val);
>>>> - vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
>>>> - vmcs_set_bits(VM_EXIT_CONTROLS, exit);
>>>> + vm_entry_controls_setbit(vmx, entry);
>>>> + vm_exit_controls_setbit(vmx, exit);
>>>> }
>>>>
>>>> static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>>>> @@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>>>> switch (msr) {
>>>> case MSR_EFER:
>>>> if (cpu_has_load_ia32_efer) {
>>>> - add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
>>>> + add_atomic_switch_msr_special(vmx,
>>>> + VM_ENTRY_LOAD_IA32_EFER,
>>>> VM_EXIT_LOAD_IA32_EFER,
>>>> GUEST_IA32_EFER,
>>>> HOST_IA32_EFER,
>>>> @@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
>>>> break;
>>>> case MSR_CORE_PERF_GLOBAL_CTRL:
>>>> if (cpu_has_load_perf_global_ctrl) {
>>>> - add_atomic_switch_msr_special(
>>>> + add_atomic_switch_msr_special(vmx,
>>>> VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
>>>> VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
>>>> GUEST_IA32_PERF_GLOBAL_CTRL,
>>>> @@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
>>>> vmx_load_host_state(to_vmx(vcpu));
>>>> vcpu->arch.efer = efer;
>>>> if (efer & EFER_LMA) {
>>>> - vmcs_write32(VM_ENTRY_CONTROLS,
>>>> - vmcs_read32(VM_ENTRY_CONTROLS) |
>>>> - VM_ENTRY_IA32E_MODE);
>>>> + vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>>> msr->data = efer;
>>>> } else {
>>>> - vmcs_write32(VM_ENTRY_CONTROLS,
>>>> - vmcs_read32(VM_ENTRY_CONTROLS) &
>>>> - ~VM_ENTRY_IA32E_MODE);
>>>> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>>>
>>>> msr->data = efer & ~EFER_LME;
>>>> }
>>>> @@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
>>>>
>>>> static void exit_lmode(struct kvm_vcpu *vcpu)
>>>> {
>>>> - vmcs_write32(VM_ENTRY_CONTROLS,
>>>> - vmcs_read32(VM_ENTRY_CONTROLS)
>>>> - & ~VM_ENTRY_IA32E_MODE);
>>>> + vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
>>>> vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
>>>> }
>>>>
>>>> @@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>>>> ++vmx->nmsrs;
>>>> }
>>>>
>>>> - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
>>>> +
>>>> + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
>>>>
>>>> /* 22.2.1, 20.8.1 */
>>>> - vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
>>>> + vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
>>>>
>>>> vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
>>>> set_cr4_guest_host_mask(vmx);
>>>> @@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>>>> exit_control = vmcs_config.vmexit_ctrl;
>>>> if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
>>>> exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
>>>> - vmcs_write32(VM_EXIT_CONTROLS, exit_control);
>>>> + vm_exit_controls_init(vmx, exit_control);
>>>>
>>>> /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
>>>> * emulated by vmx_set_efer(), below.
>>>> */
>>>> - vmcs_write32(VM_ENTRY_CONTROLS,
>>>> + vm_entry_controls_init(vmx,
>>>> (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
>>>> ~VM_ENTRY_IA32E_MODE) |
>>>> (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
>>>> @@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>>>>
>>>> vmcs12->vm_entry_controls =
>>>> (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
>>>> - (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
>>>> + (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
>>>>
>>>> /* TODO: These cannot have changed unless we have MSR bitmaps and
>>>> * the relevant bit asks not to trap the change */
>>>> @@ -8390,6 +8446,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
>>>> vcpu->cpu = cpu;
>>>> put_cpu();
>>>>
>>>> + vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
>>>> vmx_segment_cache_clear(vmx);
>>>>
>>>> /* if no vmcs02 cache requested, remove the one we used */
>>>> --
>>>> Gleb.
>>>> --
>>>> To unsubscribe from this list: send the line "unsubscribe kvm" in
>>>> the body of a message to majordomo@vger.kernel.org
>>>> More majordomo info at http://vger.kernel.org/majordomo-info.html
>>>>
>>>
>>> Applied to kvm/queue, thanks.
>>
>> Seems to have or cause an issue with nVMX. The (still not merged) vmx
>> unit test breaks here after applying this commit:
>>
> What about this patch?
>
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 1d95384..f90320b 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -8452,6 +8452,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
> put_cpu();
>
> vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
> + vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
> vmx_segment_cache_clear(vmx);
>
> /* if no vmcs02 cache requested, remove the one we used */
> --
> Gleb.
Squashed in the first patch, and merged to kvm-next.
Paolo
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2013-12-12 9:50 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-11-25 13:37 [PATCH] KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field Gleb Natapov
2013-11-27 16:58 ` Paolo Bonzini
2013-12-08 13:00 ` Jan Kiszka
2013-12-09 6:55 ` Gleb Natapov
2013-12-09 13:48 ` Jan Kiszka
2013-12-12 9:50 ` Paolo Bonzini
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox