* [PATCH] KVM: x86: Replace cpu_dirty_logging_count with nr_memslots_dirty_logging
@ 2023-01-05 16:54 David Matlack
From: David Matlack @ 2023-01-05 16:54 UTC
To: Paolo Bonzini; +Cc: Sean Christopherson, kvm, David Matlack
Drop cpu_dirty_logging_count in favor of nr_memslots_dirty_logging.
Both fields count the number of memslots that have dirty-logging enabled,
with the only difference being that cpu_dirty_logging_count is only
incremented when using PML. So while nr_memslots_dirty_logging is not a
direct replacement for cpu_dirty_logging_count, it can be combined with
enable_pml to get the same information.
Signed-off-by: David Matlack <dmatlack@google.com>
---
arch/x86/include/asm/kvm_host.h | 1 -
arch/x86/kvm/vmx/vmx.c | 8 +++++---
arch/x86/kvm/x86.c | 8 ++------
3 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2f5bf581d00a..f328007ea05a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1329,7 +1329,6 @@ struct kvm_arch {
u32 bsp_vcpu_id;
u64 disabled_quirks;
- int cpu_dirty_logging_count;
enum kvm_irqchip_mode irqchip_mode;
u8 nr_reserved_ioapic_pins;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c788aa382611..9c1bf4dfafcc 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4606,7 +4606,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
* it needs to be set here when dirty logging is already active, e.g.
* if this vCPU was created after dirty logging was enabled.
*/
- if (!vcpu->kvm->arch.cpu_dirty_logging_count)
+ if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
if (cpu_has_vmx_xsaves()) {
@@ -7993,12 +7993,14 @@ void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
return;
}
+ WARN_ON_ONCE(!enable_pml);
+
/*
- * Note, cpu_dirty_logging_count can be changed concurrent with this
+ * Note, nr_memslots_dirty_logging can be changed concurrent with this
* code, but in that case another update request will be made and so
* the guest will never run with a stale PML value.
*/
- if (vcpu->kvm->arch.cpu_dirty_logging_count)
+ if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
else
secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c936f8d28a53..ee89a85bbd4e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12482,16 +12482,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
{
- struct kvm_arch *ka = &kvm->arch;
-
if (!kvm_x86_ops.cpu_dirty_log_size)
return;
- if ((enable && ++ka->cpu_dirty_logging_count == 1) ||
- (!enable && --ka->cpu_dirty_logging_count == 0))
+ if ((enable && atomic_read(&kvm->nr_memslots_dirty_logging) == 1) ||
+ (!enable && atomic_read(&kvm->nr_memslots_dirty_logging) == 0))
kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
-
- WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0);
}
static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
base-commit: 91dc252b0dbb6879e4067f614df1e397fec532a1
--
2.39.0.314.g84b9a713c41-goog
* Re: [PATCH] KVM: x86: Replace cpu_dirty_logging_count with nr_memslots_dirty_logging
From: Sean Christopherson @ 2023-01-05 18:08 UTC
To: David Matlack; +Cc: Paolo Bonzini, kvm
On Thu, Jan 05, 2023, David Matlack wrote:
> Drop cpu_dirty_logging_count in favor of nr_memslots_dirty_logging.
> Both fields count the number of memslots that have dirty-logging enabled,
> with the only difference being that cpu_dirty_logging_count is only
> incremented when using PML. So while nr_memslots_dirty_logging is not a
> direct replacement for cpu_dirty_logging_count, it can be combined with
> enable_pml to get the same information.
>
> Signed-off-by: David Matlack <dmatlack@google.com>
> ---
> arch/x86/include/asm/kvm_host.h | 1 -
> arch/x86/kvm/vmx/vmx.c | 8 +++++---
> arch/x86/kvm/x86.c | 8 ++------
> 3 files changed, 7 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 2f5bf581d00a..f328007ea05a 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1329,7 +1329,6 @@ struct kvm_arch {
> u32 bsp_vcpu_id;
>
> u64 disabled_quirks;
> - int cpu_dirty_logging_count;
>
> enum kvm_irqchip_mode irqchip_mode;
> u8 nr_reserved_ioapic_pins;
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index c788aa382611..9c1bf4dfafcc 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -4606,7 +4606,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
> * it needs to be set here when dirty logging is already active, e.g.
> * if this vCPU was created after dirty logging was enabled.
> */
> - if (!vcpu->kvm->arch.cpu_dirty_logging_count)
> + if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
> exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
>
> if (cpu_has_vmx_xsaves()) {
> @@ -7993,12 +7993,14 @@ void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
> return;
> }
>
> + WARN_ON_ONCE(!enable_pml);
If you're going to add a WARN, might as well bail and do nothing if !enable_pml.
Setting the VMCS bit could lead to a VMWRITE error and/or corrupt memory due to
enabling PML with a garbage buffer.
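A minimal sketch of that early bail, purely to illustrate the suggestion (the
exact shape is of course up to you):

	if (WARN_ON_ONCE(!enable_pml))
		return;

i.e. warn once and leave the PML exec control untouched rather than run the
vCPU with SECONDARY_EXEC_ENABLE_PML set and no valid PML buffer behind it.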
> +
> /*
> - * Note, cpu_dirty_logging_count can be changed concurrent with this
> + * Note, nr_memslots_dirty_logging can be changed concurrent with this
> * code, but in that case another update request will be made and so
> * the guest will never run with a stale PML value.
> */
> - if (vcpu->kvm->arch.cpu_dirty_logging_count)
> + if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
> secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
> else
> secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index c936f8d28a53..ee89a85bbd4e 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -12482,16 +12482,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
>
> static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
> {
> - struct kvm_arch *ka = &kvm->arch;
> -
> if (!kvm_x86_ops.cpu_dirty_log_size)
> return;
>
> - if ((enable && ++ka->cpu_dirty_logging_count == 1) ||
> - (!enable && --ka->cpu_dirty_logging_count == 0))
> + if ((enable && atomic_read(&kvm->nr_memslots_dirty_logging) == 1) ||
> + (!enable && atomic_read(&kvm->nr_memslots_dirty_logging) == 0))
There's no need to force multiple reads of nr_memslots_dirty_logging. And the
!enable check is unnecessary since this helper is called iff there's a change
(and we have bigger problems if the count wraps).
E.g. this could be
	int nr_slots;

	if (!kvm_x86_ops.cpu_dirty_log_size)
		return;

	nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
	if ((enable && nr_slots == 1) || !nr_slots)
		kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
Or if we want to be unnecessarily clever :-)
	if (((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) &&
	    kvm_x86_ops.cpu_dirty_log_size &&
	    !(atomic_read(&kvm->nr_memslots_dirty_logging) - log_dirty_pages))
		kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
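(Context for the clever variant, with the caveat that the wiring below is my
assumption rather than something spelled out above: it only works if the check
is folded into kvm_mmu_slot_apply_flags(), where old_flags, new_flags and
log_dirty_pages are already computed as locals, roughly:

	u32 old_flags = old ? old->flags : 0;
	u32 new_flags = new ? new->flags : 0;
	bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;

With that, "nr_memslots_dirty_logging - log_dirty_pages" is zero only when the
slot being toggled is the first to enable or the last to disable dirty
logging, which is exactly when a KVM_REQ_UPDATE_CPU_DIRTY_LOGGING kick is
needed.)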