From: Claudio Imbrenda <imbrenda@linux.ibm.com>
To: Marc Zyngier <maz@kernel.org>
Cc: kvm@vger.kernel.org, linux-mips@vger.kernel.org,
kvmarm@lists.cs.columbia.edu, linuxppc-dev@lists.ozlabs.org,
Huacai Chen <chenhuacai@kernel.org>,
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
Anup Patel <anup.patel@wdc.com>,
Atish Patra <atish.patra@wdc.com>,
Christian Borntraeger <borntraeger@de.ibm.com>,
Janosch Frank <frankja@linux.ibm.com>,
David Hildenbrand <david@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Juergen Gross <jgross@suse.com>,
Nicholas Piggin <npiggin@gmail.com>,
Sean Christopherson <seanjc@google.com>,
Paul Mackerras <paulus@samba.org>,
Michael Ellerman <mpe@ellerman.id.au>,
James Morse <james.morse@arm.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
Alexandru Elisei <alexandru.elisei@arm.com>,
kernel-team@android.com
Subject: Re: [PATCH 1/5] KVM: Move wiping of the kvm->vcpus array to common code
Date: Mon, 8 Nov 2021 13:12:02 +0100 [thread overview]
Message-ID: <20211108131202.774812b9@p-imbrenda> (raw)
In-Reply-To: <20211105192101.3862492-2-maz@kernel.org>
On Fri, 5 Nov 2021 19:20:57 +0000
Marc Zyngier <maz@kernel.org> wrote:
> All architectures have similar loops iterating over the vcpus,
> freeing one vcpu at a time, and eventually wiping the reference
> off the vcpus array. They are also inconsistently taking
> the kvm->lock mutex when wiping the references from the array.
>
> Make this code common, which will simplify further changes.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
No objections.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
> ---
> arch/arm64/kvm/arm.c | 10 +---------
> arch/mips/kvm/mips.c | 21 +--------------------
> arch/powerpc/kvm/powerpc.c | 10 +---------
> arch/riscv/kvm/vm.c | 10 +---------
> arch/s390/kvm/kvm-s390.c | 18 +-----------------
> arch/x86/kvm/x86.c | 9 +--------
> include/linux/kvm_host.h | 2 +-
> virt/kvm/kvm_main.c | 20 ++++++++++++++++++--
> 8 files changed, 25 insertions(+), 75 deletions(-)
>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index f5490afe1ebf..75bb7215da03 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -175,19 +175,11 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
> */
> void kvm_arch_destroy_vm(struct kvm *kvm)
> {
> - int i;
> -
> bitmap_free(kvm->arch.pmu_filter);
>
> kvm_vgic_destroy(kvm);
>
> - for (i = 0; i < KVM_MAX_VCPUS; ++i) {
> - if (kvm->vcpus[i]) {
> - kvm_vcpu_destroy(kvm->vcpus[i]);
> - kvm->vcpus[i] = NULL;
> - }
> - }
> - atomic_set(&kvm->online_vcpus, 0);
> + kvm_destroy_vcpus(kvm);
> }
>
> int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
> diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
> index 562aa878b266..ceacca74f808 100644
> --- a/arch/mips/kvm/mips.c
> +++ b/arch/mips/kvm/mips.c
> @@ -171,25 +171,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
> return 0;
> }
>
> -void kvm_mips_free_vcpus(struct kvm *kvm)
> -{
> - unsigned int i;
> - struct kvm_vcpu *vcpu;
> -
> - kvm_for_each_vcpu(i, vcpu, kvm) {
> - kvm_vcpu_destroy(vcpu);
> - }
> -
> - mutex_lock(&kvm->lock);
> -
> - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
> - kvm->vcpus[i] = NULL;
> -
> - atomic_set(&kvm->online_vcpus, 0);
> -
> - mutex_unlock(&kvm->lock);
> -}
> -
> static void kvm_mips_free_gpa_pt(struct kvm *kvm)
> {
> /* It should always be safe to remove after flushing the whole range */
> @@ -199,7 +180,7 @@ static void kvm_mips_free_gpa_pt(struct kvm *kvm)
>
> void kvm_arch_destroy_vm(struct kvm *kvm)
> {
> - kvm_mips_free_vcpus(kvm);
> + kvm_destroy_vcpus(kvm);
> kvm_mips_free_gpa_pt(kvm);
> }
>
> diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
> index 35e9cccdeef9..492e4a4121cb 100644
> --- a/arch/powerpc/kvm/powerpc.c
> +++ b/arch/powerpc/kvm/powerpc.c
> @@ -463,9 +463,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>
> void kvm_arch_destroy_vm(struct kvm *kvm)
> {
> - unsigned int i;
> - struct kvm_vcpu *vcpu;
> -
> #ifdef CONFIG_KVM_XICS
> /*
> * We call kick_all_cpus_sync() to ensure that all
> @@ -476,14 +473,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
> kick_all_cpus_sync();
> #endif
>
> - kvm_for_each_vcpu(i, vcpu, kvm)
> - kvm_vcpu_destroy(vcpu);
> + kvm_destroy_vcpus(kvm);
>
> mutex_lock(&kvm->lock);
> - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
> - kvm->vcpus[i] = NULL;
> -
> - atomic_set(&kvm->online_vcpus, 0);
>
> kvmppc_core_destroy_vm(kvm);
>
> diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
> index 26399df15b63..6af6cde295eb 100644
> --- a/arch/riscv/kvm/vm.c
> +++ b/arch/riscv/kvm/vm.c
> @@ -46,15 +46,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>
> void kvm_arch_destroy_vm(struct kvm *kvm)
> {
> - int i;
> -
> - for (i = 0; i < KVM_MAX_VCPUS; ++i) {
> - if (kvm->vcpus[i]) {
> - kvm_vcpu_destroy(kvm->vcpus[i]);
> - kvm->vcpus[i] = NULL;
> - }
> - }
> - atomic_set(&kvm->online_vcpus, 0);
> + kvm_destroy_vcpus(kvm);
> }
>
> int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index c6257f625929..7af53b8788fa 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -2819,27 +2819,11 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
> free_page((unsigned long)(vcpu->arch.sie_block));
> }
>
> -static void kvm_free_vcpus(struct kvm *kvm)
> -{
> - unsigned int i;
> - struct kvm_vcpu *vcpu;
> -
> - kvm_for_each_vcpu(i, vcpu, kvm)
> - kvm_vcpu_destroy(vcpu);
> -
> - mutex_lock(&kvm->lock);
> - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
> - kvm->vcpus[i] = NULL;
> -
> - atomic_set(&kvm->online_vcpus, 0);
> - mutex_unlock(&kvm->lock);
> -}
> -
> void kvm_arch_destroy_vm(struct kvm *kvm)
> {
> u16 rc, rrc;
>
> - kvm_free_vcpus(kvm);
> + kvm_destroy_vcpus(kvm);
> sca_dispose(kvm);
> kvm_s390_gisa_destroy(kvm);
> /*
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index c1c4e2b05a63..498a43126615 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -11302,15 +11302,8 @@ static void kvm_free_vcpus(struct kvm *kvm)
> kvm_clear_async_pf_completion_queue(vcpu);
> kvm_unload_vcpu_mmu(vcpu);
> }
> - kvm_for_each_vcpu(i, vcpu, kvm)
> - kvm_vcpu_destroy(vcpu);
> -
> - mutex_lock(&kvm->lock);
> - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
> - kvm->vcpus[i] = NULL;
>
> - atomic_set(&kvm->online_vcpus, 0);
> - mutex_unlock(&kvm->lock);
> + kvm_destroy_vcpus(kvm);
> }
>
> void kvm_arch_sync_events(struct kvm *kvm)
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 60a35d9fe259..36967291b8c6 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -725,7 +725,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
> if (WARN_ON_ONCE(!memslot->npages)) { \
> } else
>
> -void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
> +void kvm_destroy_vcpus(struct kvm *kvm);
>
> void vcpu_load(struct kvm_vcpu *vcpu);
> void vcpu_put(struct kvm_vcpu *vcpu);
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 3f6d450355f0..d83553eeea21 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -435,7 +435,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
> vcpu->last_used_slot = 0;
> }
>
> -void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
> +static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
> {
> kvm_dirty_ring_free(&vcpu->dirty_ring);
> kvm_arch_vcpu_destroy(vcpu);
> @@ -450,7 +450,23 @@ void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
> free_page((unsigned long)vcpu->run);
> kmem_cache_free(kvm_vcpu_cache, vcpu);
> }
> -EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
> +
> +void kvm_destroy_vcpus(struct kvm *kvm)
> +{
> + unsigned int i;
> + struct kvm_vcpu *vcpu;
> +
> + kvm_for_each_vcpu(i, vcpu, kvm)
> + kvm_vcpu_destroy(vcpu);
> +
> + mutex_lock(&kvm->lock);
> + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
> + kvm->vcpus[i] = NULL;
> +
> + atomic_set(&kvm->online_vcpus, 0);
> + mutex_unlock(&kvm->lock);
> +}
> +EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
>
> #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
> static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
next prev parent reply other threads:[~2021-11-08 13:19 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-11-05 19:20 [PATCH 0/5] KVM: Turn the vcpu array into an xarray Marc Zyngier
2021-11-05 19:20 ` [PATCH 1/5] KVM: Move wiping of the kvm->vcpus array to common code Marc Zyngier
2021-11-05 20:12 ` Sean Christopherson
2021-11-06 11:17 ` Marc Zyngier
2021-11-16 13:49 ` Paolo Bonzini
2021-11-08 12:12 ` Claudio Imbrenda [this message]
2021-11-05 19:20 ` [PATCH 2/5] KVM: mips: Use kvm_get_vcpu() instead of open-coded access Marc Zyngier
2021-11-06 15:56 ` Philippe Mathieu-Daudé
2021-11-05 19:20 ` [PATCH 3/5] KVM: s390: " Marc Zyngier
2021-11-08 12:13 ` Claudio Imbrenda
2021-11-05 19:21 ` [PATCH 4/5] KVM: x86: " Marc Zyngier
2021-11-05 20:03 ` Sean Christopherson
2021-11-16 14:04 ` Paolo Bonzini
2021-11-16 16:07 ` Sean Christopherson
2021-11-16 16:48 ` Paolo Bonzini
2021-11-05 19:21 ` [PATCH 5/5] KVM: Convert the kvm->vcpus array to a xarray Marc Zyngier
2021-11-05 20:21 ` Sean Christopherson
2021-11-06 11:48 ` Marc Zyngier
2021-11-08 8:23 ` Marc Zyngier
2021-11-16 14:13 ` [PATCH 0/5] KVM: Turn the vcpu array into an xarray Juergen Gross
2021-11-16 14:21 ` Paolo Bonzini
2021-11-16 14:54 ` Juergen Gross
2021-11-16 15:03 ` Paolo Bonzini
2021-11-16 15:40 ` Marc Zyngier
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20211108131202.774812b9@p-imbrenda \
--to=imbrenda@linux.ibm.com \
--cc=aleksandar.qemu.devel@gmail.com \
--cc=alexandru.elisei@arm.com \
--cc=anup.patel@wdc.com \
--cc=atish.patra@wdc.com \
--cc=borntraeger@de.ibm.com \
--cc=chenhuacai@kernel.org \
--cc=david@redhat.com \
--cc=frankja@linux.ibm.com \
--cc=james.morse@arm.com \
--cc=jgross@suse.com \
--cc=kernel-team@android.com \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.cs.columbia.edu \
--cc=linux-mips@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=maz@kernel.org \
--cc=mpe@ellerman.id.au \
--cc=npiggin@gmail.com \
--cc=paulus@samba.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=suzuki.poulose@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).