public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/4] locking/lockdep: Use hashtable.h for lock_keys_hash
@ 2025-03-20 14:31 Sasha Levin
  2025-03-20 14:31 ` [PATCH 2/4] locking/lockdep: Use hashtable.h for classhash_table Sasha Levin
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Sasha Levin @ 2025-03-20 14:31 UTC (permalink / raw)
  To: peterz, mingo, will, boqun.feng; +Cc: longman, linux-kernel, Sasha Levin

Convert the lock_keys_hash array in lockdep.c to use the generic
hashtable implementation from hashtable.h instead of the manual
hlist_head array implementation.

This simplifies the code and makes it more maintainable by using the
standard hashtable API defined in hashtable.h, while preserving the
RCU-safe behavior with proper RCU variants of the hashtable functions.

Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/locking/lockdep.c | 34 ++++++++++++++++------------------
 1 file changed, 16 insertions(+), 18 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4470680f02269..160cf8310eda0 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -57,6 +57,7 @@
 #include <linux/lockdep.h>
 #include <linux/context_tracking.h>
 #include <linux/console.h>
+#include <linux/hashtable.h>
 
 #include <asm/sections.h>
 
@@ -212,8 +213,7 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
  * in use.
  */
 #define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
-#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
-static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
+static DEFINE_HASHTABLE(lock_keys_hash, KEYHASH_BITS);
 unsigned long nr_lock_classes;
 unsigned long nr_zapped_classes;
 unsigned long max_lock_class_idx;
@@ -1209,32 +1209,28 @@ static void init_data_structures_once(void)
 	init_chain_block_buckets();
 }
 
-static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
-{
-	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
-
-	return lock_keys_hash + hash;
-}
 
 /* Register a dynamically allocated key. */
 void lockdep_register_key(struct lock_class_key *key)
 {
-	struct hlist_head *hash_head;
 	struct lock_class_key *k;
+	unsigned long hash;
 	unsigned long flags;
 
 	if (WARN_ON_ONCE(static_obj(key)))
 		return;
-	hash_head = keyhashentry(key);
+
+	hash = hash_long((uintptr_t)key, KEYHASH_BITS);
 
 	raw_local_irq_save(flags);
 	if (!graph_lock())
 		goto restore_irqs;
-	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+
+	hash_for_each_possible_rcu(lock_keys_hash, k, hash_entry, hash) {
 		if (WARN_ON_ONCE(k == key))
 			goto out_unlock;
 	}
-	hlist_add_head_rcu(&key->hash_entry, hash_head);
+	hash_add_rcu(lock_keys_hash, &key->hash_entry, hash);
 out_unlock:
 	graph_unlock();
 restore_irqs:
@@ -1245,8 +1241,8 @@ EXPORT_SYMBOL_GPL(lockdep_register_key);
 /* Check whether a key has been registered as a dynamic key. */
 static bool is_dynamic_key(const struct lock_class_key *key)
 {
-	struct hlist_head *hash_head;
 	struct lock_class_key *k;
+	unsigned long hash;
 	bool found = false;
 
 	if (WARN_ON_ONCE(static_obj(key)))
@@ -1260,10 +1256,10 @@ static bool is_dynamic_key(const struct lock_class_key *key)
 	if (!debug_locks)
 		return true;
 
-	hash_head = keyhashentry(key);
+	hash = hash_long((uintptr_t)key, KEYHASH_BITS);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+	hash_for_each_possible_rcu(lock_keys_hash, k, hash_entry, hash) {
 		if (k == key) {
 			found = true;
 			break;
@@ -6561,9 +6557,9 @@ void lockdep_reset_lock(struct lockdep_map *lock)
  */
 void lockdep_unregister_key(struct lock_class_key *key)
 {
-	struct hlist_head *hash_head = keyhashentry(key);
 	struct lock_class_key *k;
 	struct pending_free *pf;
+	unsigned long hash;
 	unsigned long flags;
 	bool found = false;
 	bool need_callback = false;
@@ -6573,12 +6569,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (WARN_ON_ONCE(static_obj(key)))
 		return;
 
+	hash = hash_long((uintptr_t)key, KEYHASH_BITS);
+
 	raw_local_irq_save(flags);
 	lockdep_lock();
 
-	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+	hash_for_each_possible(lock_keys_hash, k, hash_entry, hash) {
 		if (k == key) {
-			hlist_del_rcu(&k->hash_entry);
+			hash_del_rcu(&k->hash_entry);
 			found = true;
 			break;
 		}
-- 
2.39.5


^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2025-04-29 18:30 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-03-20 14:31 [PATCH 1/4] locking/lockdep: Use hashtable.h for lock_keys_hash Sasha Levin
2025-03-20 14:31 ` [PATCH 2/4] locking/lockdep: Use hashtable.h for classhash_table Sasha Levin
2025-03-20 14:31 ` [PATCH 3/4] locking/lockdep: Use hashtable.h for chainhash_table Sasha Levin
2025-03-20 14:31 ` [PATCH 4/4] locking/lockdep: Use hashtable.h for stack_trace_hash Sasha Levin
2025-04-29 18:30 ` [PATCH 1/4] locking/lockdep: Use hashtable.h for lock_keys_hash Boqun Feng

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox