From: Thomas Gleixner <tglx@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: "Mathieu Desnoyers" <mathieu.desnoyers@efficios.com>,
"André Almeida" <andrealmeid@igalia.com>,
"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
"Carlos O'Donell" <carlos@redhat.com>,
"Peter Zijlstra" <peterz@infradead.org>,
"Florian Weimer" <fweimer@redhat.com>,
"Rich Felker" <dalias@aerifal.cx>,
"Torvald Riegel" <triegel@redhat.com>,
"Darren Hart" <dvhart@infradead.org>,
"Ingo Molnar" <mingo@kernel.org>,
"Davidlohr Bueso" <dave@stgolabs.net>,
"Arnd Bergmann" <arnd@arndb.de>,
"Liam R . Howlett" <Liam.Howlett@oracle.com>,
"Uros Bizjak" <ubizjak@gmail.com>,
"Thomas Weißschuh" <linux@weissschuh.net>
Subject: [patch V4 14/14] selftests: futex: Add tests for robust release operations
Date: Thu, 02 Apr 2026 17:22:11 +0200 [thread overview]
Message-ID: <20260402151940.554181645@kernel.org> (raw)
In-Reply-To: 20260402151131.876492985@kernel.org
From: André Almeida <andrealmeid@igalia.com>
Add tests for __vdso_futex_robust_listXX_try_unlock() and for the futex()
op FUTEX_ROBUST_UNLOCK.
Test the contended and uncontended cases for the vDSO functions and all
ops combinations for FUTEX_ROBUST_UNLOCK.
[ tglx: Replace the VDSO function lookup ]
Signed-off-by: André Almeida <andrealmeid@igalia.com>
Signed-off-by: Thomas Gleixner <tglx@kernel.org>
Link: https://patch.msgid.link/20260329-tonyk-vdso_test-v2-2-b7db810e44a1@igalia.com
---
V3:
Replaced the VDSO lookup
Change from v2:
- Add test variants for FUTEX_ROBUST_LIST32
- Skip 64 bit tests for 32 bit builds
---
tools/testing/selftests/futex/functional/robust_list.c | 239 +++++++++++++++++
tools/testing/selftests/futex/include/futextest.h | 6
2 files changed, 245 insertions(+)
--- a/tools/testing/selftests/futex/functional/robust_list.c
+++ b/tools/testing/selftests/futex/functional/robust_list.c
@@ -27,12 +27,15 @@
#include "futextest.h"
#include "../../kselftest_harness.h"
+#include <dlfcn.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
+#include <stdint.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
+#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/wait.h>
@@ -42,6 +45,10 @@
#define SLEEP_US 100
+#if UINTPTR_MAX == 0xffffffffffffffff
+# define BUILD_64
+#endif
+
static pthread_barrier_t barrier, barrier2;
static int set_robust_list(struct robust_list_head *head, size_t len)
@@ -54,6 +61,12 @@ static int get_robust_list(int pid, stru
return syscall(SYS_get_robust_list, pid, head, len_ptr);
}
+static int sys_futex_robust_unlock(_Atomic(uint32_t) *uaddr, unsigned int op, int val,
+ void *list_op_pending, unsigned int val3)
+{
+ return syscall(SYS_futex, uaddr, op, val, NULL, list_op_pending, val3, 0);
+}
+
/*
* Basic lock struct, contains just the futex word and the robust list element
* Real implementations have also a *prev to easily walk in the list
@@ -549,4 +562,230 @@ TEST(test_circular_list)
ksft_test_result_pass("%s\n", __func__);
}
+/*
+ * Below are tests for the fix of robust release race condition. Please read the following
+ * thread to learn more about the issue in the first place and why the following functions fix it:
+ * https://lore.kernel.org/lkml/20260316162316.356674433@kernel.org/
+ */
+
+/*
+ * Auxiliary code for binding the vDSO functions
+ */
+static void *get_vdso_func_addr(const char *function)
+{
+ const char *vdso_names[] = {
+ "linux-vdso.so.1", "linux-gate.so.1", "linux-vdso32.so.1", "linux-vdso64.so.1",
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(vdso_names); i++) {
+ void *vdso = dlopen(vdso_names[i], RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+
+ if (vdso)
+ return dlsym(vdso, function);
+ }
+ return NULL;
+}
+
+/*
+ * These are the real vDSO function signatures:
+ *
+ * __vdso_futex_robust_list64_try_unlock(__u32 *lock, __u32 tid, __u64 *pop)
+ * __vdso_futex_robust_list32_try_unlock(__u32 *lock, __u32 tid, __u32 *pop)
+ *
+ * So for the generic entry point we need to use a void pointer as the last argument
+ */
+FIXTURE(vdso_unlock)
+{
+ uint32_t (*vdso)(_Atomic(uint32_t) *lock, uint32_t tid, void *pop);
+};
+
+FIXTURE_VARIANT(vdso_unlock)
+{
+ bool is_32;
+ char func_name[];
+};
+
+FIXTURE_SETUP(vdso_unlock)
+{
+ self->vdso = get_vdso_func_addr(variant->func_name);
+}
+
+FIXTURE_TEARDOWN(vdso_unlock) {}
+
+FIXTURE_VARIANT_ADD(vdso_unlock, 32)
+{
+ .func_name = "__vdso_futex_robust_list32_try_unlock",
+ .is_32 = true,
+};
+
+FIXTURE_VARIANT_ADD(vdso_unlock, 64)
+{
+ .func_name = "__vdso_futex_robust_list64_try_unlock",
+ .is_32 = false,
+};
+
+/*
+ * Test the vDSO robust_listXX_try_unlock() for the uncontended case. The virtual syscall should
+ * return the thread ID of the lock owner, the lock word must be 0 and the list_op_pending should
+ * be NULL.
+ */
+TEST_F(vdso_unlock, test_robust_try_unlock_uncontended)
+{
+ struct lock_struct lock = { .futex = 0 };
+ _Atomic(unsigned int) *futex = &lock.futex;
+ struct robust_list_head head;
+ uintptr_t exp = (uintptr_t) NULL;
+ pid_t tid = gettid();
+ int ret;
+
+ if (!self->vdso) {
+ ksft_test_result_skip("%s not found\n", variant->func_name);
+ return;
+ }
+
+ *futex = tid;
+
+ ret = set_list(&head);
+ if (ret)
+ ksft_test_result_fail("set_robust_list error\n");
+
+ head.list_op_pending = &lock.list;
+
+ ret = self->vdso(futex, tid, &head.list_op_pending);
+
+ ASSERT_EQ(ret, tid);
+ ASSERT_EQ(*futex, 0);
+
+ /* Check only the lower 32 bits for the 32-bit entry point */
+ if (variant->is_32) {
+ exp = (uintptr_t)(unsigned long)&lock.list;
+ exp &= ~0xFFFFFFFFULL;
+ }
+
+ ASSERT_EQ((uintptr_t)(unsigned long)head.list_op_pending, exp);
+}
+
+/*
+ * If the lock is contended, the operation fails. The return value is the value found at the
+ * futex word (tid | FUTEX_WAITERS), the futex word is not modified and the list_op_pending is
+ * not cleared.
+ */
+TEST_F(vdso_unlock, test_robust_try_unlock_contended)
+{
+ struct lock_struct lock = { .futex = 0 };
+ _Atomic(unsigned int) *futex = &lock.futex;
+ struct robust_list_head head;
+ pid_t tid = gettid();
+ int ret;
+
+ if (!self->vdso) {
+ ksft_test_result_skip("%s not found\n", variant->func_name);
+ return;
+ }
+
+ *futex = tid | FUTEX_WAITERS;
+
+ ret = set_list(&head);
+ if (ret)
+ ksft_test_result_fail("set_robust_list error\n");
+
+ head.list_op_pending = &lock.list;
+
+ ret = self->vdso(futex, tid, &head.list_op_pending);
+
+ ASSERT_EQ(ret, tid | FUTEX_WAITERS);
+ ASSERT_EQ(*futex, tid | FUTEX_WAITERS);
+ ASSERT_EQ(head.list_op_pending, &lock.list);
+}
+
+FIXTURE(futex_op) {};
+
+FIXTURE_VARIANT(futex_op)
+{
+ unsigned int op;
+ unsigned int val3;
+};
+
+FIXTURE_SETUP(futex_op) {}
+
+FIXTURE_TEARDOWN(futex_op) {}
+
+FIXTURE_VARIANT_ADD(futex_op, wake)
+{
+ .op = FUTEX_WAKE,
+ .val3 = 0,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, wake_bitset)
+{
+ .op = FUTEX_WAKE_BITSET,
+ .val3 = FUTEX_BITSET_MATCH_ANY,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, unlock_pi)
+{
+ .op = FUTEX_UNLOCK_PI,
+ .val3 = 0,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, wake32)
+{
+ .op = FUTEX_WAKE | FUTEX_ROBUST_LIST32,
+ .val3 = 0,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, wake_bitset32)
+{
+ .op = FUTEX_WAKE_BITSET | FUTEX_ROBUST_LIST32,
+ .val3 = FUTEX_BITSET_MATCH_ANY,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, unlock_pi32)
+{
+ .op = FUTEX_UNLOCK_PI | FUTEX_ROBUST_LIST32,
+ .val3 = 0,
+};
+
+/*
+ * The syscall should return the number of tasks woken (for this test, 0), clear the futex word and
+ * clear list_op_pending
+ */
+TEST_F(futex_op, test_futex_robust_unlock)
+{
+ struct lock_struct lock = { .futex = 0 };
+ _Atomic(unsigned int) *futex = &lock.futex;
+ uintptr_t exp = (uintptr_t) NULL;
+ struct robust_list_head head;
+ pid_t tid = gettid();
+ int ret;
+
+#ifndef BUILD_64
+ if (!(variant->op & FUTEX_ROBUST_LIST32)) {
+ ksft_test_result_skip("Not supported for 32 bit build\n");
+ return;
+ }
+#endif
+
+ *futex = tid | FUTEX_WAITERS;
+
+ ret = set_list(&head);
+ if (ret)
+ ksft_test_result_fail("set_robust_list error\n");
+
+ head.list_op_pending = &lock.list;
+
+ ret = sys_futex_robust_unlock(futex, FUTEX_ROBUST_UNLOCK | variant->op, tid,
+ &head.list_op_pending, variant->val3);
+
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(*futex, 0);
+
+ if (variant->op & FUTEX_ROBUST_LIST32) {
+ exp = (uint64_t)(unsigned long)&lock.list;
+ exp &= ~0xFFFFFFFFULL;
+ }
+
+ ASSERT_EQ((uintptr_t)(unsigned long)head.list_op_pending, exp);
+}
+
TEST_HARNESS_MAIN
--- a/tools/testing/selftests/futex/include/futextest.h
+++ b/tools/testing/selftests/futex/include/futextest.h
@@ -38,6 +38,12 @@ typedef volatile u_int32_t futex_t;
#ifndef FUTEX_CMP_REQUEUE_PI
#define FUTEX_CMP_REQUEUE_PI 12
#endif
+#ifndef FUTEX_ROBUST_UNLOCK
+#define FUTEX_ROBUST_UNLOCK 512
+#endif
+#ifndef FUTEX_ROBUST_LIST32
+#define FUTEX_ROBUST_LIST32 1024
+#endif
#ifndef FUTEX_WAIT_REQUEUE_PI_PRIVATE
#define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \
FUTEX_PRIVATE_FLAG)
next prev parent reply other threads:[~2026-04-02 15:22 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-02 15:21 [patch V4 00/14] futex: Address the robust futex unlock race for real Thomas Gleixner
2026-04-02 15:21 ` [patch V4 01/14] futex: Move futex task related data into a struct Thomas Gleixner
2026-04-02 15:21 ` [patch V4 02/14] futex: Make futex_mm_init() void Thomas Gleixner
2026-04-02 15:21 ` [patch V4 03/14] futex: Move futex related mm_struct data into a struct Thomas Gleixner
2026-04-02 15:21 ` [patch V4 04/14] futex: Provide UABI defines for robust list entry modifiers Thomas Gleixner
2026-04-02 15:21 ` [patch V4 05/14] uaccess: Provide unsafe_atomic_store_release_user() Thomas Gleixner
2026-04-02 15:21 ` [patch V4 06/14] x86: Select ARCH_MEMORY_ORDER_TSO Thomas Gleixner
2026-04-02 15:21 ` [patch V4 07/14] futex: Cleanup UAPI defines Thomas Gleixner
2026-04-02 15:21 ` [patch V4 08/14] futex: Add support for unlocking robust futexes Thomas Gleixner
2026-04-02 15:21 ` [patch V4 09/14] futex: Add robust futex unlock IP range Thomas Gleixner
2026-04-02 15:21 ` [patch V4 10/14] futex: Provide infrastructure to plug the non contended robust futex unlock race Thomas Gleixner
2026-04-02 15:21 ` [patch V4 11/14] x86/vdso: Prepare for robust futex unlock support Thomas Gleixner
2026-04-02 15:22 ` [patch V4 12/14] x86/vdso: Implement __vdso_futex_robust_try_unlock() Thomas Gleixner
2026-04-02 15:22 ` [patch V4 13/14] Documentation: futex: Add a note about robust list race condition Thomas Gleixner
2026-04-02 15:22 ` Thomas Gleixner [this message]
2026-04-04 9:39 ` [PATCH 15/14] selftests: futex: Add tests for robust unlock within the critical section Sebastian Andrzej Siewior
2026-04-04 20:13 ` Thomas Gleixner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260402151940.554181645@kernel.org \
--to=tglx@kernel.org \
--cc=Liam.Howlett@oracle.com \
--cc=andrealmeid@igalia.com \
--cc=arnd@arndb.de \
--cc=bigeasy@linutronix.de \
--cc=carlos@redhat.com \
--cc=dalias@aerifal.cx \
--cc=dave@stgolabs.net \
--cc=dvhart@infradead.org \
--cc=fweimer@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux@weissschuh.net \
--cc=mathieu.desnoyers@efficios.com \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
--cc=triegel@redhat.com \
--cc=ubizjak@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox