diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 62a7d519fbaf..033c7070a0c5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -858,6 +858,7 @@ struct kvm_vcpu_arch { struct kvm_mmu_memory_cache mmu_external_spt_cache; struct page *pml_page; + bool update_cpu_dirty_logging_pending; /* * QEMU userspace and the guest each have their own FPU state. @@ -1863,7 +1864,7 @@ struct kvm_x86_ops { struct x86_exception *exception); void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu); - void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu); + void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu, bool enable); const struct kvm_x86_nested_ops *nested_ops; diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 36a8786db291..81ad4160b418 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -237,6 +237,13 @@ static inline void leave_guest_mode(struct kvm_vcpu *vcpu) kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); } + /* Also see kvm_vcpu_update_cpu_dirty_logging() */ + if (vcpu->arch.update_cpu_dirty_logging_pending) { + vcpu->arch.update_cpu_dirty_logging_pending = false; + kvm_x86_call(update_cpu_dirty_logging)(vcpu, + atomic_read(&vcpu->kvm->nr_memslots_dirty_logging)); + } + vcpu->stat.guest_mode = 0; } diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index b37a1bb938e0..bd3f5539153c 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1180,12 +1180,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm) svm_update_lbrv(vcpu); } - /* Update dirty logging that might have changed while L2 ran */ - if (svm->nested.update_vmcb01_cpu_dirty_logging) { - svm->nested.update_vmcb01_cpu_dirty_logging = false; - svm_update_cpu_dirty_logging(vcpu); - } - if (vnmi) { if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK) vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 
042fca4dc0f8..009cef2477f0 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -178,8 +178,7 @@ module_param(intercept_smi, bool, 0444); bool vnmi = true; module_param(vnmi, bool, 0444); -bool pml = true; -module_param(pml, bool, 0444); +module_param_named(pml, enable_pml, bool, 0444); static bool svm_gp_erratum_intercept = true; @@ -1223,7 +1222,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu) if (vcpu->kvm->arch.bus_lock_detection_enabled) svm_set_intercept(svm, INTERCEPT_BUSLOCK); - if (pml) { + if (enable_pml) { /* * Populate the page address and index here, PML is enabled * when dirty logging is enabled on the memslot through @@ -1309,7 +1308,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) goto error_free_vmcb_page; } - if (pml) { + if (enable_pml) { vcpu->arch.pml_page = snp_safe_alloc_page(); if (!vcpu->arch.pml_page) goto error_free_vmsa_page; @@ -1361,7 +1360,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu) sev_free_vcpu(vcpu); - if (pml) + if (enable_pml) __free_page(vcpu->arch.pml_page); __free_page(__sme_pa_to_page(svm->vmcb01.pa)); @@ -3231,27 +3230,14 @@ static int bus_lock_exit(struct kvm_vcpu *vcpu) return 0; } -void svm_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) +void svm_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable) { struct vcpu_svm *svm = to_svm(vcpu); - if (WARN_ON_ONCE(!pml)) - return; - - if (is_guest_mode(vcpu)) { - svm->nested.update_vmcb01_cpu_dirty_logging = true; - return; - } - - /* - * Note, nr_memslots_dirty_logging can be changed concurrently with this - * code, but in that case another update request will be made and so the - * guest will never run with a stale PML value. 
- */ - if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging)) + if (enable) svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_PML_ENABLE; else svm->vmcb->control.nested_ctl &= ~SVM_NESTED_CTL_PML_ENABLE; } static void svm_flush_pml_buffer(struct kvm_vcpu *vcpu) @@ -5472,7 +5456,7 @@ static __init int svm_hardware_setup(void) nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS); - pml = pml && npt_enabled && cpu_feature_enabled(X86_FEATURE_PML); + enable_pml = enable_pml && npt_enabled && cpu_feature_enabled(X86_FEATURE_PML); - if (pml) + if (enable_pml) pr_info("Page modification logging supported\n"); diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c index bb5f182f6788..2390cdb98e25 100644 --- a/arch/x86/kvm/vmx/main.c +++ b/arch/x86/kvm/vmx/main.c @@ -103,7 +103,7 @@ static void vt_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vmx_vcpu_load(vcpu, cpu); } -static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) +static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable) { /* * Basic TDX does not support feature PML. 
KVM does not enable PML in @@ -112,7 +112,7 @@ static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) if (WARN_ON_ONCE(is_td_vcpu(vcpu))) return; - vmx_update_cpu_dirty_logging(vcpu); + vmx_update_cpu_dirty_logging(vcpu, enable); } static void vt_prepare_switch_to_guest(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index b8ea1969113d..8532c7a63d7f 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5064,11 +5064,6 @@ void __nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, vmx_set_virtual_apic_mode(vcpu); } - if (vmx->nested.update_vmcs01_cpu_dirty_logging) { - vmx->nested.update_vmcs01_cpu_dirty_logging = false; - vmx_update_cpu_dirty_logging(vcpu); - } - nested_put_vmcs12_pages(vcpu); if (vmx->nested.reload_vmcs01_apic_access_page) { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9520e11b08d0..fd9844ff0af0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -127,7 +127,6 @@ module_param(enable_device_posted_irqs, bool, 0444); static bool __read_mostly nested = 1; module_param(nested, bool, 0444); -bool __read_mostly enable_pml = 1; module_param_named(pml, enable_pml, bool, 0444); static bool __read_mostly error_on_inconsistent_vmcs_config = true; @@ -8071,27 +8070,12 @@ void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) } #endif -void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) +void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (WARN_ON_ONCE(!enable_pml)) - return; - - if (is_guest_mode(vcpu)) { - vmx->nested.update_vmcs01_cpu_dirty_logging = true; - return; - } - - /* - * Note, nr_memslots_dirty_logging can be changed concurrent with this - * code, but in that case another update request will be made and so - * the guest will never run with a stale PML value. 
- */ - if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging)) - secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML); + if (enable) + secondary_exec_controls_setbit(to_vmx(vcpu), SECONDARY_EXEC_ENABLE_PML); else - secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML); + secondary_exec_controls_clearbit(to_vmx(vcpu), SECONDARY_EXEC_ENABLE_PML); } void vmx_setup_mce(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 6fafb6228c17..d35d7c25c16f 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -133,7 +133,6 @@ struct nested_vmx { bool change_vmcs01_virtual_apic_mode; bool reload_vmcs01_apic_access_page; - bool update_vmcs01_cpu_dirty_logging; bool update_vmcs01_apicv_status; bool update_vmcs01_hwapic_isr; @@ -398,7 +397,7 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu); gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); -void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu); +void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable); u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated); bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated); diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h index 4c70f56c57c8..f30610737a0e 100644 --- a/arch/x86/kvm/vmx/x86_ops.h +++ b/arch/x86/kvm/vmx/x86_ops.h @@ -113,7 +113,7 @@ u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu); u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu); void vmx_write_tsc_offset(struct kvm_vcpu *vcpu); void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu); -void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu); +void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable); #ifdef CONFIG_X86_64 int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, bool *expired); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index afa7f8b46416..95843c854b11 100644 --- a/arch/x86/kvm/x86.c +++ 
b/arch/x86/kvm/x86.c @@ -232,6 +232,9 @@ EXPORT_SYMBOL_GPL(enable_ipiv); bool __read_mostly enable_device_posted_irqs = true; EXPORT_SYMBOL_GPL(enable_device_posted_irqs); +bool __read_mostly enable_pml = true; +EXPORT_SYMBOL_GPL(enable_pml); + const struct _kvm_stats_desc kvm_vm_stats_desc[] = { KVM_GENERIC_VM_STATS(), STATS_DESC_COUNTER(VM, mmu_shadow_zapped), @@ -10665,6 +10668,25 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) kvm_x86_call(set_apic_access_page_addr)(vcpu); } +static void kvm_vcpu_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) +{ + if (WARN_ON_ONCE(!enable_pml)) + return; + + if (is_guest_mode(vcpu)) { + vcpu->arch.update_cpu_dirty_logging_pending = true; + return; + } + + /* + * Note, nr_memslots_dirty_logging can be changed concurrently with this + * code, but in that case another update request will be made and so the + * guest will never run with a stale PML value. + */ + kvm_x86_call(update_cpu_dirty_logging)(vcpu, + atomic_read(&vcpu->kvm->nr_memslots_dirty_logging)); +} + /* * Called within kvm->srcu read side. * Returns 1 to let vcpu_run() continue the guest execution loop without @@ -10836,7 +10858,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) kvm_x86_call(recalc_msr_intercepts)(vcpu); if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) - kvm_x86_call(update_cpu_dirty_logging)(vcpu); + kvm_vcpu_update_cpu_dirty_logging(vcpu); if (kvm_check_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) { kvm_vcpu_reset(vcpu, true);