* [PATCH 0/3] KVM: x86/mmu: small locking cleanups
@ 2023-09-28 16:29 Paolo Bonzini
2023-09-28 16:29 ` [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions Paolo Bonzini
` (2 more replies)
0 siblings, 3 replies; 10+ messages in thread
From: Paolo Bonzini @ 2023-09-28 16:29 UTC (permalink / raw)
To: linux-kernel, kvm
Remove "bool shared" argument from functions and iterators that need
not know if the lock is taken for read or write. This is common because
protection is achieved via RCU and tdp_mmu_pages_lock or because the
argument is only used for assertions that can be written by hand.
Also always take tdp_mmu_pages_lock even if mmu_lock is currently taken
for write.
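For quick orientation, the shape of the change for callers is roughly the
following (a sketch distilled from the diffs below, using mmu_free_root_page()
as the example call site):

	/* Before: the caller had to say how mmu_lock is held. */
	kvm_tdp_mmu_put_root(kvm, sp, false);

	/* After: the argument is gone; the callee just asserts mmu_lock is held. */
	kvm_tdp_mmu_put_root(kvm, sp);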
Paolo Bonzini (3):
KVM: x86/mmu: remove unnecessary "bool shared" argument from functions
KVM: x86/mmu: remove unnecessary "bool shared" argument from iterators
KVM: x86/mmu: always take tdp_mmu_pages_lock
Documentation/virt/kvm/locking.rst | 6 +-
arch/x86/kvm/mmu/mmu.c | 2 +-
arch/x86/kvm/mmu/tdp_mmu.c | 93 +++++++++++++++---------------
arch/x86/kvm/mmu/tdp_mmu.h | 3 +-
4 files changed, 52 insertions(+), 52 deletions(-)
--
2.39.1
* [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions
2023-09-28 16:29 [PATCH 0/3] KVM: x86/mmu: small locking cleanups Paolo Bonzini
@ 2023-09-28 16:29 ` Paolo Bonzini
2023-09-28 16:46 ` Maxim Levitsky
2023-09-29 16:11 ` Sean Christopherson
2023-09-28 16:29 ` [PATCH 2/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from iterators Paolo Bonzini
2023-09-28 16:29 ` [PATCH 3/3] KVM: x86/mmu: always take tdp_mmu_pages_lock Paolo Bonzini
2 siblings, 2 replies; 10+ messages in thread
From: Paolo Bonzini @ 2023-09-28 16:29 UTC (permalink / raw)
To: linux-kernel, kvm
Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root needs to know
if the lock is taken for read or write. Either way, protection
is achieved via RCU and tdp_mmu_pages_lock. Remove the argument
and just assert that the lock is taken.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch/x86/kvm/mmu/mmu.c | 2 +-
arch/x86/kvm/mmu/tdp_mmu.c | 34 +++++++++++++++++++++-------------
arch/x86/kvm/mmu/tdp_mmu.h | 3 +--
3 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f7901cb4d2fa..64b1bdba943e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3548,7 +3548,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
return;
if (is_tdp_mmu_page(sp))
- kvm_tdp_mmu_put_root(kvm, sp, false);
+ kvm_tdp_mmu_put_root(kvm, sp);
else if (!--sp->root_count && sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 6cd4dd631a2f..ab0876015be7 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
tdp_mmu_free_sp(sp);
}
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
- kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+ /*
+ * Either read or write is okay, but the lock is needed because
+ * writers might not take tdp_mmu_pages_lock.
+ */
+ lockdep_assert_held(&kvm->mmu_lock);
if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
return;
@@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
*/
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
struct kvm_mmu_page *prev_root,
- bool shared, bool only_valid)
+ bool only_valid)
{
struct kvm_mmu_page *next_root;
+ /*
+ * While the roots themselves are RCU-protected, fields such as
+ * role.invalid are protected by mmu_lock.
+ */
+ lockdep_assert_held(&kvm->mmu_lock);
+
rcu_read_lock();
if (prev_root)
@@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
rcu_read_unlock();
if (prev_root)
- kvm_tdp_mmu_put_root(kvm, prev_root, shared);
+ kvm_tdp_mmu_put_root(kvm, prev_root);
return next_root;
}
@@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* recent root. (Unless keeping a live reference is desirable.)
*
* If shared is set, this function is operating under the MMU lock in read
- * mode. In the unlikely event that this thread must free a root, the lock
- * will be temporarily dropped and reacquired in write mode.
+ * mode.
*/
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
- for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \
- _root; \
- _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
+ _root; \
+ _root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \
kvm_mmu_page_as_id(_root) != _as_id) { \
} else
@@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
- for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \
- _root; \
- _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
+ _root; \
+ _root = tdp_mmu_next_root(_kvm, _root, false))
if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
} else
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 733a3aef3a96..20d97aa46c49 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -17,8 +17,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared);
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
--
2.39.1
* [PATCH 2/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from iterators
2023-09-28 16:29 [PATCH 0/3] KVM: x86/mmu: small locking cleanups Paolo Bonzini
2023-09-28 16:29 ` [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions Paolo Bonzini
@ 2023-09-28 16:29 ` Paolo Bonzini
2023-09-28 16:55 ` Maxim Levitsky
2023-09-28 16:29 ` [PATCH 3/3] KVM: x86/mmu: always take tdp_mmu_pages_lock Paolo Bonzini
2 siblings, 1 reply; 10+ messages in thread
From: Paolo Bonzini @ 2023-09-28 16:29 UTC (permalink / raw)
To: linux-kernel, kvm
The "bool shared" argument is more or less unnecessary in the
for_each_*_tdp_mmu_root_yield_safe() macros. Many users check for
the lock before calling it; all of them either call small functions
that do the check, or end up calling tdp_mmu_set_spte_atomic() and
tdp_mmu_iter_set_spte(). Add a few assertions to make up for the
lost check in for_each_*_tdp_mmu_root_yield_safe(), but even this
is probably overkill and mostly for documentation reasons.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch/x86/kvm/mmu/tdp_mmu.c | 42 +++++++++++++++++++-------------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index ab0876015be7..b9abfa78808a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -155,23 +155,20 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* If shared is set, this function is operating under the MMU lock in read
* mode.
*/
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
_root; \
_root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
- if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \
- kvm_mmu_page_as_id(_root) != _as_id) { \
+ if (kvm_mmu_page_as_id(_root) != _as_id) { \
} else
-#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
- __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
+#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
+ __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
_root; \
_root = tdp_mmu_next_root(_kvm, _root, false))
- if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
- } else
/*
* Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
@@ -840,7 +837,8 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
{
struct kvm_mmu_page *root;
- for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ for_each_tdp_mmu_root_yield_safe(kvm, root)
flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
return flush;
@@ -862,7 +860,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
* is being destroyed or the userspace VMM has exited. In both cases,
* KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
*/
- for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ for_each_tdp_mmu_root_yield_safe(kvm, root)
tdp_mmu_zap_root(kvm, root, false);
}
@@ -876,7 +875,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
read_lock(&kvm->mmu_lock);
- for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root) {
if (!root->tdp_mmu_scheduled_root_to_zap)
continue;
@@ -899,7 +898,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
* the root must be reachable by mmu_notifiers while it's being
* zapped
*/
- kvm_tdp_mmu_put_root(kvm, root, true);
+ kvm_tdp_mmu_put_root(kvm, root);
}
read_unlock(&kvm->mmu_lock);
@@ -1133,7 +1132,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
{
struct kvm_mmu_page *root;
- __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
range->may_block, flush);
@@ -1322,7 +1323,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
lockdep_assert_held_read(&kvm->mmu_lock);
- for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages, min_level);
@@ -1354,6 +1355,8 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
{
struct kvm_mmu_page *sp;
+ kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+
/*
* Since we are allocating while under the MMU lock we have to be
* careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
@@ -1504,11 +1507,10 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
int r = 0;
kvm_lockdep_assert_mmu_lock_held(kvm, shared);
-
- for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
+ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
if (r) {
- kvm_tdp_mmu_put_root(kvm, root, shared);
+ kvm_tdp_mmu_put_root(kvm, root);
break;
}
}
@@ -1568,8 +1570,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
bool spte_set = false;
lockdep_assert_held_read(&kvm->mmu_lock);
-
- for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
@@ -1703,8 +1704,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_mmu_page *root;
lockdep_assert_held_read(&kvm->mmu_lock);
-
- for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
zap_collapsible_spte_range(kvm, root, slot);
}
--
2.39.1
* [PATCH 3/3] KVM: x86/mmu: always take tdp_mmu_pages_lock
2023-09-28 16:29 [PATCH 0/3] KVM: x86/mmu: small locking cleanups Paolo Bonzini
2023-09-28 16:29 ` [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions Paolo Bonzini
2023-09-28 16:29 ` [PATCH 2/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from iterators Paolo Bonzini
@ 2023-09-28 16:29 ` Paolo Bonzini
2023-09-29 7:30 ` kernel test robot
2 siblings, 1 reply; 10+ messages in thread
From: Paolo Bonzini @ 2023-09-28 16:29 UTC (permalink / raw)
To: linux-kernel, kvm
It is cheap to take tdp_mmu_pages_lock in all write-side critical sections.
We already do it all the time when zapping with read_lock(), so it is not
a problem to do it from the kvm_tdp_mmu_zap_all() path (aka
kvm_arch_flush_shadow_all(), aka VM destruction and MMU notifier release).
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
Documentation/virt/kvm/locking.rst | 6 ++----
arch/x86/kvm/mmu/tdp_mmu.c | 17 ++++++-----------
2 files changed, 8 insertions(+), 15 deletions(-)
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 3a034db5e55f..381eb0e7d947 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -43,10 +43,8 @@ On x86:
- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock
-- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
- kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
- cannot be taken without already holding kvm->arch.mmu_lock (typically with
- ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
+- kvm->arch.mmu_lock is an rwlock and is taken outside
+ kvm->arch.tdp_mmu_pages_lock and kvm->arch.mmu_unsync_pages_lock
Everything else is a leaf: no other lock is taken inside the critical
sections.
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index b9abfa78808a..f61bc842067f 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -285,24 +285,19 @@ static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
* the MMU lock and the operation must synchronize with other
* threads that might be adding or removing pages.
*/
-static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
- bool shared)
+static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
+ lockdep_assert_held(&kvm->mmu_lock);
+
tdp_unaccount_mmu_page(kvm, sp);
if (!sp->nx_huge_page_disallowed)
return;
- if (shared)
- spin_lock(&kvm->arch.tdp_mmu_pages_lock);
- else
- lockdep_assert_held_write(&kvm->mmu_lock);
-
+ spin_lock(&kvm->arch.tdp_mmu_pages_lock);
sp->nx_huge_page_disallowed = false;
untrack_possible_nx_huge_page(kvm, sp);
-
- if (shared)
- spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+ spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}
/**
@@ -331,7 +326,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
trace_kvm_mmu_prepare_zap_page(sp);
- tdp_mmu_unlink_sp(kvm, sp, shared);
+ tdp_mmu_unlink_sp(kvm, sp);
for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
tdp_ptep_t sptep = pt + i;
--
2.39.1
* Re: [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions
2023-09-28 16:29 ` [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions Paolo Bonzini
@ 2023-09-28 16:46 ` Maxim Levitsky
2023-09-29 16:11 ` Sean Christopherson
1 sibling, 0 replies; 10+ messages in thread
From: Maxim Levitsky @ 2023-09-28 16:46 UTC (permalink / raw)
To: Paolo Bonzini, linux-kernel, kvm
On Thu, 2023-09-28 at 12:29 -0400, Paolo Bonzini wrote:
> Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root needs to know
> if the lock is taken for read or write. Either way, protection
> is achieved via RCU and tdp_mmu_pages_lock. Remove the argument
> and just assert that the lock is taken.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> arch/x86/kvm/mmu/mmu.c | 2 +-
> arch/x86/kvm/mmu/tdp_mmu.c | 34 +++++++++++++++++++++-------------
> arch/x86/kvm/mmu/tdp_mmu.h | 3 +--
> 3 files changed, 23 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index f7901cb4d2fa..64b1bdba943e 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3548,7 +3548,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
> return;
>
> if (is_tdp_mmu_page(sp))
> - kvm_tdp_mmu_put_root(kvm, sp, false);
> + kvm_tdp_mmu_put_root(kvm, sp);
> else if (!--sp->root_count && sp->role.invalid)
> kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 6cd4dd631a2f..ab0876015be7 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
> tdp_mmu_free_sp(sp);
> }
>
> -void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
> - bool shared)
> +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
> {
> - kvm_lockdep_assert_mmu_lock_held(kvm, shared);
> + /*
> + * Either read or write is okay, but the lock is needed because
> + * writers might not take tdp_mmu_pages_lock.
> + */
> + lockdep_assert_held(&kvm->mmu_lock);
I double-checked all callers and indeed at least the read lock is held.
>
> if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
> return;
> @@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
> */
> static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> struct kvm_mmu_page *prev_root,
> - bool shared, bool only_valid)
> + bool only_valid)
> {
> struct kvm_mmu_page *next_root;
>
> + /*
> + * While the roots themselves are RCU-protected, fields such as
> + * role.invalid are protected by mmu_lock.
> + */
> + lockdep_assert_held(&kvm->mmu_lock);
> +
> rcu_read_lock();
>
> if (prev_root)
> @@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> rcu_read_unlock();
>
> if (prev_root)
> - kvm_tdp_mmu_put_root(kvm, prev_root, shared);
> + kvm_tdp_mmu_put_root(kvm, prev_root);
>
> return next_root;
> }
> @@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> * recent root. (Unless keeping a live reference is desirable.)
> *
> * If shared is set, this function is operating under the MMU lock in read
> - * mode. In the unlikely event that this thread must free a root, the lock
> - * will be temporarily dropped and reacquired in write mode.
> + * mode.
> */
> #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
> - for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \
> - _root; \
> - _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \
> + for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
> + _root; \
> + _root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
> if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \
> kvm_mmu_page_as_id(_root) != _as_id) { \
> } else
> @@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
>
> #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
> - for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \
> - _root; \
> - _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \
> + for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
> + _root; \
> + _root = tdp_mmu_next_root(_kvm, _root, false))
> if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
> } else
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index 733a3aef3a96..20d97aa46c49 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -17,8 +17,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
> return refcount_inc_not_zero(&root->tdp_mmu_root_count);
> }
>
> -void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
> - bool shared);
> +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
>
> bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
> bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
I don't know all of the details of the KVM MMU, so I might have missed something,
but I still need to get back to reviewing it...
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Best regards,
Maxim Levitsky
* Re: [PATCH 2/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from iterators
2023-09-28 16:29 ` [PATCH 2/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from iterators Paolo Bonzini
@ 2023-09-28 16:55 ` Maxim Levitsky
2023-09-29 16:14 ` Sean Christopherson
0 siblings, 1 reply; 10+ messages in thread
From: Maxim Levitsky @ 2023-09-28 16:55 UTC (permalink / raw)
To: Paolo Bonzini, linux-kernel, kvm
On Thu, 2023-09-28 at 12:29 -0400, Paolo Bonzini wrote:
> The "bool shared" argument is more or less unnecessary in the
> for_each_*_tdp_mmu_root_yield_safe() macros. Many users check for
> the lock before calling it; all of them either call small functions
> that do the check, or end up calling tdp_mmu_set_spte_atomic() and
> tdp_mmu_iter_set_spte(). Add a few assertions to make up for the
> lost check in for_each_*_tdp_mmu_root_yield_safe(), but even this
> is probably overkill and mostly for documentation reasons.
Why not keep 'kvm_lockdep_assert_mmu_lock_held' but drop the shared argument from it,
and then use lockdep_assert_held inside? If I am not mistaken, lockdep_assert_held
asserts that the lock is held either for read or for write.
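Something along these lines, perhaps (an untested sketch, keeping the existing
helper's shape but relying on lockdep_assert_held() accepting both read and
write holders):

	static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm)
	{
		/* Accept mmu_lock held for either read or write. */
		lockdep_assert_held(&kvm->mmu_lock);
		return true;
	}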
Best regards,
Maxim Levitsky
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> arch/x86/kvm/mmu/tdp_mmu.c | 42 +++++++++++++++++++-------------------
> 1 file changed, 21 insertions(+), 21 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index ab0876015be7..b9abfa78808a 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -155,23 +155,20 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> * If shared is set, this function is operating under the MMU lock in read
> * mode.
> */
> -#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
> +#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
> for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
> _root; \
> _root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
> - if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \
> - kvm_mmu_page_as_id(_root) != _as_id) { \
> + if (kvm_mmu_page_as_id(_root) != _as_id) { \
> } else
>
> -#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
> - __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
> +#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
> + __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
>
> -#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
> +#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
> for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
> _root; \
> _root = tdp_mmu_next_root(_kvm, _root, false))
> - if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
> - } else
>
> /*
> * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
> @@ -840,7 +837,8 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
> {
> struct kvm_mmu_page *root;
>
> - for_each_tdp_mmu_root_yield_safe(kvm, root, false)
> + lockdep_assert_held_write(&kvm->mmu_lock);
> + for_each_tdp_mmu_root_yield_safe(kvm, root)
> flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
>
> return flush;
> @@ -862,7 +860,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
> * is being destroyed or the userspace VMM has exited. In both cases,
> * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
> */
> - for_each_tdp_mmu_root_yield_safe(kvm, root, false)
> + lockdep_assert_held_write(&kvm->mmu_lock);
> + for_each_tdp_mmu_root_yield_safe(kvm, root)
> tdp_mmu_zap_root(kvm, root, false);
> }
>
> @@ -876,7 +875,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
>
> read_lock(&kvm->mmu_lock);
>
> - for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
> + for_each_tdp_mmu_root_yield_safe(kvm, root) {
> if (!root->tdp_mmu_scheduled_root_to_zap)
> continue;
>
> @@ -899,7 +898,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
> * the root must be reachable by mmu_notifiers while it's being
> * zapped
> */
> - kvm_tdp_mmu_put_root(kvm, root, true);
> + kvm_tdp_mmu_put_root(kvm, root);
> }
>
> read_unlock(&kvm->mmu_lock);
> @@ -1133,7 +1132,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
> {
> struct kvm_mmu_page *root;
>
> - __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
> + lockdep_assert_held_write(&kvm->mmu_lock);
> +
> + __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
> flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
> range->may_block, flush);
>
> @@ -1322,7 +1323,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
>
> lockdep_assert_held_read(&kvm->mmu_lock);
>
> - for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
> + for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
> spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
> slot->base_gfn + slot->npages, min_level);
>
> @@ -1354,6 +1355,8 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
> {
> struct kvm_mmu_page *sp;
>
> + kvm_lockdep_assert_mmu_lock_held(kvm, shared);
> +
> /*
> * Since we are allocating while under the MMU lock we have to be
> * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
> @@ -1504,11 +1507,10 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
> int r = 0;
>
> kvm_lockdep_assert_mmu_lock_held(kvm, shared);
> -
> - for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
> + for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
> r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
> if (r) {
> - kvm_tdp_mmu_put_root(kvm, root, shared);
> + kvm_tdp_mmu_put_root(kvm, root);
> break;
> }
> }
> @@ -1568,8 +1570,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
> bool spte_set = false;
>
> lockdep_assert_held_read(&kvm->mmu_lock);
> -
> - for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
> + for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
> spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
> slot->base_gfn + slot->npages);
>
> @@ -1703,8 +1704,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
> struct kvm_mmu_page *root;
>
> lockdep_assert_held_read(&kvm->mmu_lock);
> -
> - for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
> + for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
> zap_collapsible_spte_range(kvm, root, slot);
> }
>
* Re: [PATCH 3/3] KVM: x86/mmu: always take tdp_mmu_pages_lock
2023-09-28 16:29 ` [PATCH 3/3] KVM: x86/mmu: always take tdp_mmu_pages_lock Paolo Bonzini
@ 2023-09-29 7:30 ` kernel test robot
2023-09-29 16:16 ` Sean Christopherson
0 siblings, 1 reply; 10+ messages in thread
From: kernel test robot @ 2023-09-29 7:30 UTC (permalink / raw)
To: Paolo Bonzini, linux-kernel, kvm; +Cc: oe-kbuild-all
Hi Paolo,
kernel test robot noticed the following build warnings:
[auto build test WARNING on kvm/queue]
[also build test WARNING on linus/master v6.6-rc3 next-20230929]
[cannot apply to mst-vhost/linux-next kvm/linux-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Paolo-Bonzini/KVM-x86-mmu-remove-unnecessary-bool-shared-argument-from-functions/20230929-003259
base: https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
patch link: https://lore.kernel.org/r/20230928162959.1514661-4-pbonzini%40redhat.com
patch subject: [PATCH 3/3] KVM: x86/mmu: always take tdp_mmu_pages_lock
config: x86_64-buildonly-randconfig-004-20230929 (https://download.01.org/0day-ci/archive/20230929/202309291557.Eq3JDvT6-lkp@intel.com/config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230929/202309291557.Eq3JDvT6-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309291557.Eq3JDvT6-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> arch/x86/kvm/mmu/tdp_mmu.c:289: warning: Excess function parameter 'shared' description in 'tdp_mmu_unlink_sp'
vim +289 arch/x86/kvm/mmu/tdp_mmu.c
43a063cab325ee7 Yosry Ahmed 2022-08-23 278
a9442f594147f95 Ben Gardon 2021-02-02 279 /**
c298a30c2821cb0 David Matlack 2022-01-19 280 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
a9442f594147f95 Ben Gardon 2021-02-02 281 *
a9442f594147f95 Ben Gardon 2021-02-02 282 * @kvm: kvm instance
a9442f594147f95 Ben Gardon 2021-02-02 283 * @sp: the page to be removed
9a77daacc87dee9 Ben Gardon 2021-02-02 284 * @shared: This operation may not be running under the exclusive use of
9a77daacc87dee9 Ben Gardon 2021-02-02 285 * the MMU lock and the operation must synchronize with other
9a77daacc87dee9 Ben Gardon 2021-02-02 286 * threads that might be adding or removing pages.
a9442f594147f95 Ben Gardon 2021-02-02 287 */
44f1ce87ebc1ca1 Paolo Bonzini 2023-09-28 288 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
a9442f594147f95 Ben Gardon 2021-02-02 @289 {
44f1ce87ebc1ca1 Paolo Bonzini 2023-09-28 290 lockdep_assert_held(&kvm->mmu_lock);
44f1ce87ebc1ca1 Paolo Bonzini 2023-09-28 291
43a063cab325ee7 Yosry Ahmed 2022-08-23 292 tdp_unaccount_mmu_page(kvm, sp);
d25ceb9264364dc Sean Christopherson 2022-10-19 293
d25ceb9264364dc Sean Christopherson 2022-10-19 294 if (!sp->nx_huge_page_disallowed)
d25ceb9264364dc Sean Christopherson 2022-10-19 295 return;
d25ceb9264364dc Sean Christopherson 2022-10-19 296
9a77daacc87dee9 Ben Gardon 2021-02-02 297 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
61f94478547bb4f Sean Christopherson 2022-10-19 298 sp->nx_huge_page_disallowed = false;
61f94478547bb4f Sean Christopherson 2022-10-19 299 untrack_possible_nx_huge_page(kvm, sp);
9a77daacc87dee9 Ben Gardon 2021-02-02 300 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
a9442f594147f95 Ben Gardon 2021-02-02 301 }
a9442f594147f95 Ben Gardon 2021-02-02 302
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions
2023-09-28 16:29 ` [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions Paolo Bonzini
2023-09-28 16:46 ` Maxim Levitsky
@ 2023-09-29 16:11 ` Sean Christopherson
1 sibling, 0 replies; 10+ messages in thread
From: Sean Christopherson @ 2023-09-29 16:11 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: linux-kernel, kvm
On Thu, Sep 28, 2023, Paolo Bonzini wrote:
> Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root needs to know
> if the lock is taken for read or write. Either way, protection
> is achieved via RCU and tdp_mmu_pages_lock. Remove the argument
> and just assert that the lock is taken.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
> {
> - kvm_lockdep_assert_mmu_lock_held(kvm, shared);
> + /*
> + * Either read or write is okay, but the lock is needed because
> + * writers might not take tdp_mmu_pages_lock.
> + */
Nit, I'd prefer to say mmu_lock instead of "the lock", and be very explicit about
writers not needing to take tdp_mmu_pages_lock, e.g.
/*
* Either read or write is okay, but mmu_lock must be held as writers
* are not required to take tdp_mmu_pages_lock.
*/
> + lockdep_assert_held(&kvm->mmu_lock);
>
> if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
> return;
* Re: [PATCH 2/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from iterators
2023-09-28 16:55 ` Maxim Levitsky
@ 2023-09-29 16:14 ` Sean Christopherson
0 siblings, 0 replies; 10+ messages in thread
From: Sean Christopherson @ 2023-09-29 16:14 UTC (permalink / raw)
To: Maxim Levitsky; +Cc: Paolo Bonzini, linux-kernel, kvm
On Thu, Sep 28, 2023, Maxim Levitsky wrote:
> On Thu, 2023-09-28 at 12:29 -0400, Paolo Bonzini wrote:
> > The "bool shared" argument is more or less unnecessary in the
> > for_each_*_tdp_mmu_root_yield_safe() macros. Many users check for
> > the lock before calling it; all of them either call small functions
> > that do the check, or end up calling tdp_mmu_set_spte_atomic() and
> > tdp_mmu_iter_set_spte(). Add a few assertions to make up for the
> > lost check in for_each_*_tdp_mmu_root_yield_safe(), but even this
> > is probably overkill and mostly for documentation reasons.
>
> Why not to leave the 'kvm_lockdep_assert_mmu_lock_held' but drop the shared
> argument from it? and then use lockdep_assert_held. If I am not mistaken,
> lockdep_assert_held should assert if the lock is held for read or write.
+1, I don't see any downside to asserting that mmu_lock is held when iterating.
It'll be a redundant assertion 99% of the time, but it's not like performance
matters all that much when running with lockdep enabled. And I find lockdep
assertions to be wonderful documentation.
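E.g. something like this (an untested sketch), folding a read-or-write
assertion into the macro's loop condition so that it fires on every iteration:

	#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
		for (_root = tdp_mmu_next_root(_kvm, NULL, false);		\
		     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
		     _root = tdp_mmu_next_root(_kvm, _root, false))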
* Re: [PATCH 3/3] KVM: x86/mmu: always take tdp_mmu_pages_lock
2023-09-29 7:30 ` kernel test robot
@ 2023-09-29 16:16 ` Sean Christopherson
0 siblings, 0 replies; 10+ messages in thread
From: Sean Christopherson @ 2023-09-29 16:16 UTC (permalink / raw)
To: kernel test robot; +Cc: Paolo Bonzini, linux-kernel, kvm, oe-kbuild-all
On Fri, Sep 29, 2023, kernel test robot wrote:
> Hi Paolo,
>
> kernel test robot noticed the following build warnings:
>
> [auto build test WARNING on kvm/queue]
> [also build test WARNING on linus/master v6.6-rc3 next-20230929]
> [cannot apply to mst-vhost/linux-next kvm/linux-next]
> [If your patch is applied to the wrong git tree, kindly drop us a note.
> And when submitting patch, we suggest to use '--base' as documented in
> https://git-scm.com/docs/git-format-patch#_base_tree_information]
>
> url: https://github.com/intel-lab-lkp/linux/commits/Paolo-Bonzini/KVM-x86-mmu-remove-unnecessary-bool-shared-argument-from-functions/20230929-003259
> base: https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
> patch link: https://lore.kernel.org/r/20230928162959.1514661-4-pbonzini%40redhat.com
> patch subject: [PATCH 3/3] KVM: x86/mmu: always take tdp_mmu_pages_lock
> config: x86_64-buildonly-randconfig-004-20230929 (https://download.01.org/0day-ci/archive/20230929/202309291557.Eq3JDvT6-lkp@intel.com/config)
> compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230929/202309291557.Eq3JDvT6-lkp@intel.com/reproduce)
>
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <lkp@intel.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202309291557.Eq3JDvT6-lkp@intel.com/
>
> All warnings (new ones prefixed by >>):
>
> >> arch/x86/kvm/mmu/tdp_mmu.c:289: warning: Excess function parameter 'shared' description in 'tdp_mmu_unlink_sp'
>
>
> vim +289 arch/x86/kvm/mmu/tdp_mmu.c
>
> 43a063cab325ee7 Yosry Ahmed 2022-08-23 278
> a9442f594147f95 Ben Gardon 2021-02-02 279 /**
> c298a30c2821cb0 David Matlack 2022-01-19 280 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
> a9442f594147f95 Ben Gardon 2021-02-02 281 *
> a9442f594147f95 Ben Gardon 2021-02-02 282 * @kvm: kvm instance
> a9442f594147f95 Ben Gardon 2021-02-02 283 * @sp: the page to be removed
> 9a77daacc87dee9 Ben Gardon 2021-02-02 284 * @shared: This operation may not be running under the exclusive use of
> 9a77daacc87dee9 Ben Gardon 2021-02-02 285 * the MMU lock and the operation must synchronize with other
> 9a77daacc87dee9 Ben Gardon 2021-02-02 286 * threads that might be adding or removing pages.
> a9442f594147f95 Ben Gardon 2021-02-02 287 */
The bot is complaining about the kernel doc, i.e. the above @shared documentation
needs to be deleted. Took me a few seconds to understand what the complaint was
about...
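I.e. something like the below on top (untested), just dropping the stale
@shared line from the comment:

	/**
	 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
	 *
	 * @kvm: kvm instance
	 * @sp: the page to be removed
	 */
	static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)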