From: Thomas Gleixner <tglx@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: "Mathieu Desnoyers" <mathieu.desnoyers@efficios.com>,
"André Almeida" <andrealmeid@igalia.com>,
"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
"Carlos O'Donell" <carlos@redhat.com>,
"Peter Zijlstra" <peterz@infradead.org>,
"Florian Weimer" <fweimer@redhat.com>,
"Rich Felker" <dalias@aerifal.cx>,
"Torvald Riegel" <triegel@redhat.com>,
"Darren Hart" <dvhart@infradead.org>,
"Ingo Molnar" <mingo@kernel.org>,
"Davidlohr Bueso" <dave@stgolabs.net>,
"Arnd Bergmann" <arnd@arndb.de>,
"Liam R . Howlett" <Liam.Howlett@oracle.com>
Subject: [patch 2/8] futex: Move futex related mm_struct data into a struct
Date: Mon, 16 Mar 2026 18:13:02 +0100 [thread overview]
Message-ID: <20260316164951.073076616@kernel.org> (raw)
In-Reply-To: <20260316162316.356674433@kernel.org> (raw)
Having all these members in mm_struct along with the required #ifdeffery is
annoying, does not allow efficient initialization of the data with
memset(), and makes extending it tedious.
Move it into a data structure and fix up all usage sites.
Signed-off-by: Thomas Gleixner <tglx@kernel.org>
---
include/linux/futex_types.h | 22 +++++++
include/linux/mm_types.h | 11 ---
kernel/futex/core.c | 123 ++++++++++++++++++++------------------------
3 files changed, 80 insertions(+), 76 deletions(-)
--- a/include/linux/futex_types.h
+++ b/include/linux/futex_types.h
@@ -31,4 +31,26 @@ struct futex_ctrl {
struct futex_ctrl { };
#endif /* !CONFIG_FUTEX */
+/**
+ * struct futex_mm_data - Futex related per MM data
+ * @phash_lock: Mutex to protect the private hash operations
+ * @phash: RCU managed pointer to the private hash
+ * @phash_new: Pointer to a newly allocated private hash
+ * @phash_batches: Batch state for RCU synchronization
+ * @phash_rcu: RCU head for call_rcu()
+ * @phash_atomic: Aggregate value for @phash_ref
+ * @phash_ref: Per CPU reference counter for a private hash
+ */
+struct futex_mm_data {
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+ struct mutex phash_lock;
+ struct futex_private_hash __rcu *phash;
+ struct futex_private_hash *phash_new;
+ unsigned long phash_batches;
+ struct rcu_head phash_rcu;
+ atomic_long_t phash_atomic;
+ unsigned int __percpu *phash_ref;
+#endif
+};
+
#endif /* _LINUX_FUTEX_TYPES_H */
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1221,16 +1221,7 @@ struct mm_struct {
*/
seqcount_t mm_lock_seq;
#endif
-#ifdef CONFIG_FUTEX_PRIVATE_HASH
- struct mutex futex_hash_lock;
- struct futex_private_hash __rcu *futex_phash;
- struct futex_private_hash *futex_phash_new;
- /* futex-ref */
- unsigned long futex_batches;
- struct rcu_head futex_rcu;
- atomic_long_t futex_atomic;
- unsigned int __percpu *futex_ref;
-#endif
+ struct futex_mm_data futex;
unsigned long hiwater_rss; /* High-watermark of RSS usage */
unsigned long hiwater_vm; /* High-water virtual memory usage */
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -188,13 +188,13 @@ static struct futex_hash_bucket *
return NULL;
if (!fph)
- fph = rcu_dereference(key->private.mm->futex_phash);
+ fph = rcu_dereference(key->private.mm->futex.phash);
if (!fph || !fph->hash_mask)
return NULL;
- hash = jhash2((void *)&key->private.address,
- sizeof(key->private.address) / 4,
+ hash = jhash2((void *)&key->private.address, sizeof(key->private.address) / 4,
key->both.offset);
+
return &fph->queues[hash & fph->hash_mask];
}
@@ -238,13 +238,12 @@ static bool __futex_pivot_hash(struct mm
{
struct futex_private_hash *fph;
- WARN_ON_ONCE(mm->futex_phash_new);
+ WARN_ON_ONCE(mm->futex.phash_new);
- fph = rcu_dereference_protected(mm->futex_phash,
- lockdep_is_held(&mm->futex_hash_lock));
+ fph = rcu_dereference_protected(mm->futex.phash, lockdep_is_held(&mm->futex.phash_lock));
if (fph) {
if (!futex_ref_is_dead(fph)) {
- mm->futex_phash_new = new;
+ mm->futex.phash_new = new;
return false;
}
@@ -252,8 +251,8 @@ static bool __futex_pivot_hash(struct mm
}
new->state = FR_PERCPU;
scoped_guard(rcu) {
- mm->futex_batches = get_state_synchronize_rcu();
- rcu_assign_pointer(mm->futex_phash, new);
+ mm->futex.phash_batches = get_state_synchronize_rcu();
+ rcu_assign_pointer(mm->futex.phash, new);
}
kvfree_rcu(fph, rcu);
return true;
@@ -261,12 +260,12 @@ static bool __futex_pivot_hash(struct mm
static void futex_pivot_hash(struct mm_struct *mm)
{
- scoped_guard(mutex, &mm->futex_hash_lock) {
+ scoped_guard(mutex, &mm->futex.phash_lock) {
struct futex_private_hash *fph;
- fph = mm->futex_phash_new;
+ fph = mm->futex.phash_new;
if (fph) {
- mm->futex_phash_new = NULL;
+ mm->futex.phash_new = NULL;
__futex_pivot_hash(mm, fph);
}
}
@@ -289,7 +288,7 @@ struct futex_private_hash *futex_private
scoped_guard(rcu) {
struct futex_private_hash *fph;
- fph = rcu_dereference(mm->futex_phash);
+ fph = rcu_dereference(mm->futex.phash);
if (!fph)
return NULL;
@@ -412,8 +411,7 @@ static int futex_mpol(struct mm_struct *
* private hash) is returned if existing. Otherwise a hash bucket from the
* global hash is returned.
*/
-static struct futex_hash_bucket *
-__futex_hash(union futex_key *key, struct futex_private_hash *fph)
+static struct futex_hash_bucket *__futex_hash(union futex_key *key, struct futex_private_hash *fph)
{
int node = key->both.node;
u32 hash;
@@ -426,8 +424,7 @@ static struct futex_hash_bucket *
return hb;
}
- hash = jhash2((u32 *)key,
- offsetof(typeof(*key), both.offset) / sizeof(u32),
+ hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / sizeof(u32),
key->both.offset);
if (node == FUTEX_NO_NODE) {
@@ -442,8 +439,7 @@ static struct futex_hash_bucket *
*/
node = (hash >> futex_hashshift) % nr_node_ids;
if (!node_possible(node)) {
- node = find_next_bit_wrap(node_possible_map.bits,
- nr_node_ids, node);
+ node = find_next_bit_wrap(node_possible_map.bits, nr_node_ids, node);
}
}
@@ -460,9 +456,8 @@ static struct futex_hash_bucket *
* Return: Initialized hrtimer_sleeper structure or NULL if no timeout
* value given
*/
-struct hrtimer_sleeper *
-futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
- int flags, u64 range_ns)
+struct hrtimer_sleeper *futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
+ int flags, u64 range_ns)
{
if (!time)
return NULL;
@@ -1551,17 +1546,17 @@ static void __futex_ref_atomic_begin(str
* otherwise it would be impossible for it to have reported success
* from futex_ref_is_dead().
*/
- WARN_ON_ONCE(atomic_long_read(&mm->futex_atomic) != 0);
+ WARN_ON_ONCE(atomic_long_read(&mm->futex.phash_atomic) != 0);
/*
* Set the atomic to the bias value such that futex_ref_{get,put}()
* will never observe 0. Will be fixed up in __futex_ref_atomic_end()
* when folding in the percpu count.
*/
- atomic_long_set(&mm->futex_atomic, LONG_MAX);
+ atomic_long_set(&mm->futex.phash_atomic, LONG_MAX);
smp_store_release(&fph->state, FR_ATOMIC);
- call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
+ call_rcu_hurry(&mm->futex.phash_rcu, futex_ref_rcu);
}
static void __futex_ref_atomic_end(struct futex_private_hash *fph)
@@ -1582,7 +1577,7 @@ static void __futex_ref_atomic_end(struc
* Therefore the per-cpu counter is now stable, sum and reset.
*/
for_each_possible_cpu(cpu) {
- unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu);
+ unsigned int *ptr = per_cpu_ptr(mm->futex.phash_ref, cpu);
count += *ptr;
*ptr = 0;
}
@@ -1590,7 +1585,7 @@ static void __futex_ref_atomic_end(struc
/*
* Re-init for the next cycle.
*/
- this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
+ this_cpu_inc(*mm->futex.phash_ref); /* 0 -> 1 */
/*
* Add actual count, subtract bias and initial refcount.
@@ -1598,7 +1593,7 @@ static void __futex_ref_atomic_end(struc
* The moment this atomic operation happens, futex_ref_is_dead() can
* become true.
*/
- ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic);
+ ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex.phash_atomic);
if (!ret)
wake_up_var(mm);
@@ -1608,8 +1603,8 @@ static void __futex_ref_atomic_end(struc
static void futex_ref_rcu(struct rcu_head *head)
{
- struct mm_struct *mm = container_of(head, struct mm_struct, futex_rcu);
- struct futex_private_hash *fph = rcu_dereference_raw(mm->futex_phash);
+ struct mm_struct *mm = container_of(head, struct mm_struct, futex.phash_rcu);
+ struct futex_private_hash *fph = rcu_dereference_raw(mm->futex.phash);
if (fph->state == FR_PERCPU) {
/*
@@ -1638,7 +1633,7 @@ static void futex_ref_drop(struct futex_
/*
* Can only transition the current fph;
*/
- WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph);
+ WARN_ON_ONCE(rcu_dereference_raw(mm->futex.phash) != fph);
/*
* We enqueue at least one RCU callback. Ensure mm stays if the task
* exits before the transition is completed.
@@ -1650,8 +1645,8 @@ static void futex_ref_drop(struct futex_
*
* futex_hash() __futex_pivot_hash()
* guard(rcu); guard(mm->futex_hash_lock);
- * fph = mm->futex_phash;
- * rcu_assign_pointer(&mm->futex_phash, new);
+ * fph = mm->futex.phash;
+ * rcu_assign_pointer(&mm->futex.phash, new);
* futex_hash_allocate()
* futex_ref_drop()
* fph->state = FR_ATOMIC;
@@ -1666,7 +1661,7 @@ static void futex_ref_drop(struct futex_
* There must be at least one full grace-period between publishing a
* new fph and trying to replace it.
*/
- if (poll_state_synchronize_rcu(mm->futex_batches)) {
+ if (poll_state_synchronize_rcu(mm->futex.phash_batches)) {
/*
* There was a grace-period, we can begin now.
*/
@@ -1674,7 +1669,7 @@ static void futex_ref_drop(struct futex_
return;
}
- call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
+ call_rcu_hurry(&mm->futex.phash_rcu, futex_ref_rcu);
}
static bool futex_ref_get(struct futex_private_hash *fph)
@@ -1684,11 +1679,11 @@ static bool futex_ref_get(struct futex_p
guard(preempt)();
if (READ_ONCE(fph->state) == FR_PERCPU) {
- __this_cpu_inc(*mm->futex_ref);
+ __this_cpu_inc(*mm->futex.phash_ref);
return true;
}
- return atomic_long_inc_not_zero(&mm->futex_atomic);
+ return atomic_long_inc_not_zero(&mm->futex.phash_atomic);
}
static bool futex_ref_put(struct futex_private_hash *fph)
@@ -1698,11 +1693,11 @@ static bool futex_ref_put(struct futex_p
guard(preempt)();
if (READ_ONCE(fph->state) == FR_PERCPU) {
- __this_cpu_dec(*mm->futex_ref);
+ __this_cpu_dec(*mm->futex.phash_ref);
return false;
}
- return atomic_long_dec_and_test(&mm->futex_atomic);
+ return atomic_long_dec_and_test(&mm->futex.phash_atomic);
}
static bool futex_ref_is_dead(struct futex_private_hash *fph)
@@ -1714,18 +1709,14 @@ static bool futex_ref_is_dead(struct fut
if (smp_load_acquire(&fph->state) == FR_PERCPU)
return false;
- return atomic_long_read(&mm->futex_atomic) == 0;
+ return atomic_long_read(&mm->futex.phash_atomic) == 0;
}
int futex_mm_init(struct mm_struct *mm)
{
- mutex_init(&mm->futex_hash_lock);
- RCU_INIT_POINTER(mm->futex_phash, NULL);
- mm->futex_phash_new = NULL;
- /* futex-ref */
- mm->futex_ref = NULL;
- atomic_long_set(&mm->futex_atomic, 0);
- mm->futex_batches = get_state_synchronize_rcu();
+ memset(&mm->futex, 0, sizeof(mm->futex));
+ mutex_init(&mm->futex.phash_lock);
+ mm->futex.phash_batches = get_state_synchronize_rcu();
return 0;
}
@@ -1733,9 +1724,9 @@ void futex_hash_free(struct mm_struct *m
{
struct futex_private_hash *fph;
- free_percpu(mm->futex_ref);
- kvfree(mm->futex_phash_new);
- fph = rcu_dereference_raw(mm->futex_phash);
+ free_percpu(mm->futex.phash_ref);
+ kvfree(mm->futex.phash_new);
+ fph = rcu_dereference_raw(mm->futex.phash);
if (fph)
kvfree(fph);
}
@@ -1746,10 +1737,10 @@ static bool futex_pivot_pending(struct m
guard(rcu)();
- if (!mm->futex_phash_new)
+ if (!mm->futex.phash_new)
return true;
- fph = rcu_dereference(mm->futex_phash);
+ fph = rcu_dereference(mm->futex.phash);
return futex_ref_is_dead(fph);
}
@@ -1791,7 +1782,7 @@ static int futex_hash_allocate(unsigned
* Once we've disabled the global hash there is no way back.
*/
scoped_guard(rcu) {
- fph = rcu_dereference(mm->futex_phash);
+ fph = rcu_dereference(mm->futex.phash);
if (fph && !fph->hash_mask) {
if (custom)
return -EBUSY;
@@ -1799,15 +1790,15 @@ static int futex_hash_allocate(unsigned
}
}
- if (!mm->futex_ref) {
+ if (!mm->futex.phash_ref) {
/*
* This will always be allocated by the first thread and
* therefore requires no locking.
*/
- mm->futex_ref = alloc_percpu(unsigned int);
- if (!mm->futex_ref)
+ mm->futex.phash_ref = alloc_percpu(unsigned int);
+ if (!mm->futex.phash_ref)
return -ENOMEM;
- this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
+ this_cpu_inc(*mm->futex.phash_ref); /* 0 -> 1 */
}
fph = kvzalloc(struct_size(fph, queues, hash_slots),
@@ -1830,14 +1821,14 @@ static int futex_hash_allocate(unsigned
wait_var_event(mm, futex_pivot_pending(mm));
}
- scoped_guard(mutex, &mm->futex_hash_lock) {
+ scoped_guard(mutex, &mm->futex.phash_lock) {
struct futex_private_hash *free __free(kvfree) = NULL;
struct futex_private_hash *cur, *new;
- cur = rcu_dereference_protected(mm->futex_phash,
- lockdep_is_held(&mm->futex_hash_lock));
- new = mm->futex_phash_new;
- mm->futex_phash_new = NULL;
+ cur = rcu_dereference_protected(mm->futex.phash,
+ lockdep_is_held(&mm->futex.phash_lock));
+ new = mm->futex.phash_new;
+ mm->futex.phash_new = NULL;
if (fph) {
if (cur && !cur->hash_mask) {
@@ -1847,7 +1838,7 @@ static int futex_hash_allocate(unsigned
* the second one returns here.
*/
free = fph;
- mm->futex_phash_new = new;
+ mm->futex.phash_new = new;
return -EBUSY;
}
if (cur && !new) {
@@ -1877,7 +1868,7 @@ static int futex_hash_allocate(unsigned
if (new) {
/*
- * Will set mm->futex_phash_new on failure;
+ * Will set mm->futex.phash_new on failure;
* futex_private_hash_get() will try again.
*/
if (!__futex_pivot_hash(mm, new) && custom)
@@ -1900,7 +1891,7 @@ int futex_hash_allocate_default(void)
get_nr_threads(current),
num_online_cpus());
- fph = rcu_dereference(current->mm->futex_phash);
+ fph = rcu_dereference(current->mm->futex.phash);
if (fph) {
if (fph->custom)
return 0;
@@ -1927,7 +1918,7 @@ static int futex_hash_get_slots(void)
struct futex_private_hash *fph;
guard(rcu)();
- fph = rcu_dereference(current->mm->futex_phash);
+ fph = rcu_dereference(current->mm->futex.phash);
if (fph && fph->hash_mask)
return fph->hash_mask + 1;
return 0;
next prev parent reply other threads:[~2026-03-16 17:13 UTC|newest]
Thread overview: 57+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-16 17:12 [patch 0/8] futex: Address the robust futex unlock race for real Thomas Gleixner
2026-03-16 17:12 ` [patch 1/8] futex: Move futex task related data into a struct Thomas Gleixner
2026-03-16 17:55 ` Mathieu Desnoyers
2026-03-17 2:24 ` André Almeida
2026-03-17 9:52 ` Thomas Gleixner
2026-03-16 17:13 ` Thomas Gleixner [this message]
2026-03-16 18:00 ` [patch 2/8] futex: Move futex related mm_struct " Mathieu Desnoyers
2026-03-16 17:13 ` [patch 3/8] futex: Provide UABI defines for robust list entry modifiers Thomas Gleixner
2026-03-16 18:02 ` Mathieu Desnoyers
2026-03-17 2:38 ` André Almeida
2026-03-17 9:53 ` Thomas Gleixner
2026-03-16 17:13 ` [patch 4/8] futex: Add support for unlocking robust futexes Thomas Gleixner
2026-03-16 18:24 ` Mathieu Desnoyers
2026-03-17 16:17 ` André Almeida
2026-03-17 20:46 ` Peter Zijlstra
2026-03-17 22:40 ` Thomas Gleixner
2026-03-18 8:02 ` Peter Zijlstra
2026-03-18 8:06 ` Florian Weimer
2026-03-18 14:47 ` Peter Zijlstra
2026-03-18 16:03 ` Thomas Gleixner
2026-03-16 17:13 ` [patch 5/8] futex: Add robust futex unlock IP range Thomas Gleixner
2026-03-16 18:36 ` Mathieu Desnoyers
2026-03-17 19:19 ` André Almeida
2026-03-16 17:13 ` [patch 6/8] futex: Provide infrastructure to plug the non contended robust futex unlock race Thomas Gleixner
2026-03-16 18:35 ` Mathieu Desnoyers
2026-03-16 20:29 ` Thomas Gleixner
2026-03-16 20:52 ` Mathieu Desnoyers
2026-03-16 17:13 ` [patch 7/8] x86/vdso: Prepare for robust futex unlock support Thomas Gleixner
2026-03-16 17:13 ` [patch 8/8] x86/vdso: Implement __vdso_futex_robust_try_unlock() Thomas Gleixner
2026-03-16 19:19 ` Mathieu Desnoyers
2026-03-16 21:02 ` Thomas Gleixner
2026-03-16 22:35 ` Mathieu Desnoyers
2026-03-16 21:14 ` Thomas Gleixner
2026-03-16 21:29 ` Thomas Gleixner
2026-03-17 7:25 ` Thomas Weißschuh
2026-03-17 9:51 ` Thomas Gleixner
2026-03-17 11:17 ` Thomas Weißschuh
2026-03-18 16:17 ` Thomas Gleixner
2026-03-19 7:41 ` Thomas Weißschuh
2026-03-19 8:53 ` Florian Weimer
2026-03-19 9:04 ` Thomas Weißschuh
2026-03-19 9:08 ` Peter Zijlstra
2026-03-19 23:31 ` Thomas Gleixner
2026-03-19 10:36 ` Sebastian Andrzej Siewior
2026-03-19 10:49 ` Thomas Weißschuh
2026-03-19 10:55 ` Sebastian Andrzej Siewior
2026-03-17 8:28 ` Florian Weimer
2026-03-17 9:36 ` Thomas Gleixner
2026-03-17 10:37 ` Florian Weimer
2026-03-17 22:32 ` Thomas Gleixner
2026-03-18 22:08 ` Thomas Gleixner
2026-03-18 22:10 ` Peter Zijlstra
2026-03-19 2:05 ` André Almeida
2026-03-19 7:10 ` Thomas Gleixner
2026-03-17 15:33 ` Uros Bizjak
2026-03-18 8:21 ` Thomas Gleixner
2026-03-18 8:32 ` Uros Bizjak
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260316164951.073076616@kernel.org \
--to=tglx@kernel.org \
--cc=Liam.Howlett@oracle.com \
--cc=andrealmeid@igalia.com \
--cc=arnd@arndb.de \
--cc=bigeasy@linutronix.de \
--cc=carlos@redhat.com \
--cc=dalias@aerifal.cx \
--cc=dave@stgolabs.net \
--cc=dvhart@infradead.org \
--cc=fweimer@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mathieu.desnoyers@efficios.com \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
--cc=triegel@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox