From: Maxim Levitsky <mlevitsk@redhat.com>
To: kvm@vger.kernel.org
Cc: Alexander Potapenko <glider@google.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
kvm-riscv@lists.infradead.org,
Oliver Upton <oliver.upton@linux.dev>,
Dave Hansen <dave.hansen@linux.intel.com>,
Jing Zhang <jingzhangos@google.com>,
Waiman Long <longman@redhat.com>,
x86@kernel.org, Kunkun Jiang <jiangkunkun@huawei.com>,
Boqun Feng <boqun.feng@gmail.com>,
Anup Patel <anup@brainfault.org>,
Albert Ou <aou@eecs.berkeley.edu>,
kvmarm@lists.linux.dev, linux-kernel@vger.kernel.org,
Zenghui Yu <yuzenghui@huawei.com>, Borislav Petkov <bp@alien8.de>,
Alexandre Ghiti <alex@ghiti.fr>,
Keisuke Nishimura <keisuke.nishimura@inria.fr>,
Sebastian Ott <sebott@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Atish Patra <atishp@atishpatra.org>,
Paul Walmsley <paul.walmsley@sifive.com>,
Randy Dunlap <rdunlap@infradead.org>,
Will Deacon <will@kernel.org>,
Palmer Dabbelt <palmer@dabbelt.com>,
linux-riscv@lists.infradead.org, Marc Zyngier <maz@kernel.org>,
linux-arm-kernel@lists.infradead.org,
Joey Gouly <joey.gouly@arm.com>,
Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Andre Przywara <andre.przywara@arm.com>,
Thomas Gleixner <tglx@linutronix.de>,
Sean Christopherson <seanjc@google.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Maxim Levitsky <mlevitsk@redhat.com>,
Bjorn Helgaas <bhelgaas@google.com>
Subject: [PATCH v2 2/4] KVM: x86: move sev_lock/unlock_vcpus_for_migration to kvm_main.c
Date: Tue, 8 Apr 2025 21:41:34 -0400
Message-ID: <20250409014136.2816971-3-mlevitsk@redhat.com>
In-Reply-To: <20250409014136.2816971-1-mlevitsk@redhat.com>
Move sev_lock/unlock_vcpus_for_migration to kvm_main.c and rename the
new functions to kvm_lock_all_vcpus/kvm_unlock_all_vcpus and
kvm_lock_all_vcpus_nested.

This makes it possible to lock all vCPUs without triggering a lockdep
warning about reaching MAX_LOCK_DEPTH, by coercing lockdep into
thinking that we release all locks other than vcpu 0's lock
immediately after taking them.
No functional change intended.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
arch/x86/kvm/svm/sev.c | 65 +++---------------------------------
include/linux/kvm_host.h | 6 ++++
virt/kvm/kvm_main.c | 71 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 81 insertions(+), 61 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0bc708ee2788..7adc54b1f741 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1889,63 +1889,6 @@ enum sev_migration_role {
SEV_NR_MIGRATION_ROLES,
};
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
- enum sev_migration_role role)
-{
- struct kvm_vcpu *vcpu;
- unsigned long i, j;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (mutex_lock_killable_nested(&vcpu->mutex, role))
- goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
- if (!i)
- /*
- * Reset the role to one that avoids colliding with
- * the role used for the first vcpu mutex.
- */
- role = SEV_NR_MIGRATION_ROLES;
- else
- mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
- }
-
- return 0;
-
-out_unlock:
-
- kvm_for_each_vcpu(j, vcpu, kvm) {
- if (i == j)
- break;
-
-#ifdef CONFIG_PROVE_LOCKING
- if (j)
- mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
- mutex_unlock(&vcpu->mutex);
- }
- return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
- struct kvm_vcpu *vcpu;
- unsigned long i;
- bool first = true;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (first)
- first = false;
- else
- mutex_acquire(&vcpu->mutex.dep_map,
- SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
- mutex_unlock(&vcpu->mutex);
- }
-}
-
static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
{
struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2083,10 +2026,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
charged = true;
}
- ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+ ret = kvm_lock_all_vcpus_nested(kvm, false, SEV_MIGRATION_SOURCE);
if (ret)
goto out_dst_cgroup;
- ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+ ret = kvm_lock_all_vcpus_nested(source_kvm, false, SEV_MIGRATION_TARGET);
if (ret)
goto out_dst_vcpu;
@@ -2100,9 +2043,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
ret = 0;
out_source_vcpu:
- sev_unlock_vcpus_for_migration(source_kvm);
+ kvm_unlock_all_vcpus(source_kvm);
out_dst_vcpu:
- sev_unlock_vcpus_for_migration(kvm);
+ kvm_unlock_all_vcpus(kvm);
out_dst_cgroup:
/* Operates on the source on success, on the destination on failure. */
if (charged)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1dedc421b3e3..30cf28bf5c80 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1015,6 +1015,12 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
void kvm_destroy_vcpus(struct kvm *kvm);
+int kvm_lock_all_vcpus_nested(struct kvm *kvm, bool trylock, unsigned int role);
+void kvm_unlock_all_vcpus(struct kvm *kvm);
+
+#define kvm_lock_all_vcpus(kvm, trylock) \
+ kvm_lock_all_vcpus_nested(kvm, trylock, 0)
+
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 69782df3617f..71c0d8c35b4b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1368,6 +1368,77 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
return 0;
}
+
+/*
+ * Lock all VM vCPUs.
+ * Can be used nested (to lock the vCPUs of two VMs, for example).
+ */
+int kvm_lock_all_vcpus_nested(struct kvm *kvm, bool trylock, unsigned int role)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i, j;
+
+ lockdep_assert_held(&kvm->lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+
+ if (trylock && !mutex_trylock_nested(&vcpu->mutex, role))
+ goto out_unlock;
+ else if (!trylock && mutex_lock_killable_nested(&vcpu->mutex, role))
+ goto out_unlock;
+
+#ifdef CONFIG_PROVE_LOCKING
+ if (!i)
+ /*
+ * Reset the role to one that avoids colliding with
+ * the role used for the first vcpu mutex.
+ */
+ role = MAX_LOCK_DEPTH - 1;
+ else
+ mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
+#endif
+ }
+
+ return 0;
+
+out_unlock:
+
+ kvm_for_each_vcpu(j, vcpu, kvm) {
+ if (i == j)
+ break;
+
+#ifdef CONFIG_PROVE_LOCKING
+ if (j)
+ mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
+#endif
+
+ mutex_unlock(&vcpu->mutex);
+ }
+ return -EINTR;
+}
+EXPORT_SYMBOL_GPL(kvm_lock_all_vcpus_nested);
+
+void kvm_unlock_all_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+ bool first = true;
+
+ lockdep_assert_held(&kvm->lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (first)
+ first = false;
+ else
+ mutex_acquire(&vcpu->mutex.dep_map,
+ MAX_LOCK_DEPTH - 1, 0, _THIS_IP_);
+
+ mutex_unlock(&vcpu->mutex);
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_unlock_all_vcpus);
+
+
/*
* Allocation size is twice as large as the actual dirty bitmap size.
* See kvm_vm_ioctl_get_dirty_log() why this is needed.
--
2.26.3