From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id D99211F8AC5 for ; Mon, 16 Mar 2026 17:13:11 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1773681191; cv=none; b=Bwlyw0u86sh0zZ+lfxlEEGCoEG2SdG1VqOCNhPIOa8vQ+l/apY/r2IjodIxzl69RKS3OK36RRX8/zh+3GeiYQXFnRnVi5G4fiIyQdIZyPZ7ttVR9q6vEs9CkJh82WdLxffli67Ov2zQ6PMLIbkl7iLThTGr9/xohu9ZxHwYXJY0= ARC-Message-Signature:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1773681191; c=relaxed/simple; bh=0Bq4q6JEc8CXmF5E+eLm61dqJMwU3RJtb7rzPGnG4AY=; h=Date:Message-ID:From:To:Cc:Subject:References:MIME-Version: Content-Type; b=nJEaRuptZZzqd9jTuP8ht/rYh6elMgupLLXrrpVuzHcC3lMhwISBD1K0euX+u1eJowCvnNmV09BQEMNMYPQPB/qOiYGFMGqvZQR8DBMpRZL8PF1xbiGnRDdn5E2rH/+JMlkVfRJfvr1oUhvHqbNR52oKw0AWbiPB8jDE57zAqAI= ARC-Authentication-Results:i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=blHaHkG7; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="blHaHkG7" Received: by smtp.kernel.org (Postfix) with ESMTPSA id B01F1C19421; Mon, 16 Mar 2026 17:13:10 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1773681191; bh=0Bq4q6JEc8CXmF5E+eLm61dqJMwU3RJtb7rzPGnG4AY=; h=Date:From:To:Cc:Subject:References:From; b=blHaHkG79Xw0SavuhqHK6199e8NOcBKW5Y75D+1GL7UZHDIWFZDs/hUeWwFh88twX kLcu/Qc595ipqs+hrOKLjeOGv6WV8wsVDHek+gxwZJx37mr0p0KE6Rqm9nPGrvri5Y TM73Wje252381rRx135UIBRG7DHkHCd+AyndN2lc5lLf+aKaBuT1w2h9z2Pdpvpn9P TdGprrm2xxNt+z1bMrNAFDSS5CcuThgjWigMyPBPhb9dKW+esIGqaU0xU5t5s//j4L 
Ra6kHxnKxV9xSMBvEd8hGIAf/0FrCRcOtLA8LInxOePDNEmnCyMEe/3nZIz5+2xf2t ntT+mR8qyXdmQ== Date: Mon, 16 Mar 2026 18:13:07 +0100 Message-ID: <20260316164951.141212347@kernel.org> User-Agent: quilt/0.68 From: Thomas Gleixner To: LKML Cc: Mathieu Desnoyers , =?UTF-8?q?Andr=C3=A9=20Almeida?= , Sebastian Andrzej Siewior , Carlos O'Donell , Peter Zijlstra , Florian Weimer , Rich Felker , Torvald Riegel , Darren Hart , Ingo Molnar , Davidlohr Bueso , Arnd Bergmann , "Liam R . Howlett" Subject: [patch 3/8] futex: Provide UABI defines for robust list entry modifiers References: <20260316162316.356674433@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 The marker for PI futexes in the robust list is a hardcoded 0x1 which lacks any sensible form of documentation. Provide proper defines for the bit and the mask and fix up the usage sites. Signed-off-by: Thomas Gleixner --- include/uapi/linux/futex.h | 4 +++ kernel/futex/core.c | 53 +++++++++++++++++++++------------------------ 2 files changed, 29 insertions(+), 28 deletions(-) --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -177,6 +177,10 @@ struct robust_list_head { */ #define ROBUST_LIST_LIMIT 2048 +/* Modifiers for robust_list_head::list_op_pending */ +#define FUTEX_ROBUST_MOD_PI (0x1UL) +#define FUTEX_ROBUST_MOD_MASK (FUTEX_ROBUST_MOD_PI) + /* * bitset with all bits set for the FUTEX_xxx_BITSET OPs to request a * match of any bit. 
--- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -1009,8 +1009,9 @@ void futex_unqueue_pi(struct futex_q *q) * dying task, and do notification if so: */ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, - bool pi, bool pending_op) + unsigned int mod, bool pending_op) { + bool pi = !!(mod & FUTEX_ROBUST_MOD_PI); u32 uval, nval, mval; pid_t owner; int err; @@ -1128,21 +1129,21 @@ static int handle_futex_death(u32 __user */ static inline int fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, - unsigned int *pi) + unsigned int *mod) { unsigned long uentry; if (get_user(uentry, (unsigned long __user *)head)) return -EFAULT; - *entry = (void __user *)(uentry & ~1UL); - *pi = uentry & 1; + *entry = (void __user *)(uentry & ~FUTEX_ROBUST_MOD_MASK); + *mod = uentry & FUTEX_ROBUST_MOD_MASK; return 0; } /* - * Walk curr->robust_list (very carefully, it's a userspace list!) + * Walk curr->futex.robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of list-walking problem. 
@@ -1150,9 +1151,8 @@ static inline int fetch_robust_entry(str static void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->futex.robust_list; + unsigned int limit = ROBUST_LIST_LIMIT, cur_mod, next_mod, pend_mod; struct robust_list __user *entry, *next_entry, *pending; - unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; - unsigned int next_pi; unsigned long futex_offset; int rc; @@ -1160,7 +1160,7 @@ static void exit_robust_list(struct task * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ - if (fetch_robust_entry(&entry, &head->list.next, &pi)) + if (fetch_robust_entry(&entry, &head->list.next, &cur_mod)) return; /* * Fetch the relative futex offset: @@ -1171,7 +1171,7 @@ static void exit_robust_list(struct task * Fetch any possibly pending lock-add first, and handle it * if it exists: */ - if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) + if (fetch_robust_entry(&pending, &head->list_op_pending, &pend_mod)) return; next_entry = NULL; /* avoid warning with gcc */ @@ -1180,20 +1180,20 @@ static void exit_robust_list(struct task * Fetch the next entry in the list before calling * handle_futex_death: */ - rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); + rc = fetch_robust_entry(&next_entry, &entry->next, &next_mod); /* * A pending lock might already be on the list, so * don't process it twice: */ if (entry != pending) { if (handle_futex_death((void __user *)entry + futex_offset, - curr, pi, HANDLE_DEATH_LIST)) + curr, cur_mod, HANDLE_DEATH_LIST)) return; } if (rc) return; entry = next_entry; - pi = next_pi; + cur_mod = next_mod; /* * Avoid excessively long or circular lists: */ @@ -1205,7 +1205,7 @@ static void exit_robust_list(struct task if (pending) { handle_futex_death((void __user *)pending + futex_offset, - curr, pip, HANDLE_DEATH_PENDING); + curr, pend_mod, HANDLE_DEATH_PENDING); } } @@ -1224,29 +1224,28 @@ static void __user *futex_uaddr(struct r */ 
static inline int compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, - compat_uptr_t __user *head, unsigned int *pi) + compat_uptr_t __user *head, unsigned int *mod) { if (get_user(*uentry, head)) return -EFAULT; - *entry = compat_ptr((*uentry) & ~1); - *pi = (unsigned int)(*uentry) & 1; + *entry = compat_ptr((*uentry) & ~FUTEX_ROBUST_MOD_MASK); + *mod = (unsigned int)(*uentry) & FUTEX_ROBUST_MOD_MASK; return 0; } /* - * Walk curr->robust_list (very carefully, it's a userspace list!) + * Walk curr->futex.robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of list-walking problem. */ static void compat_exit_robust_list(struct task_struct *curr) { - struct compat_robust_list_head __user *head = curr->futex.compat_robust_list; + struct compat_robust_list_head __user *head = curr->futex.compat_robust_list; + unsigned int limit = ROBUST_LIST_LIMIT, cur_mod, next_mod, pend_mod; struct robust_list __user *entry, *next_entry, *pending; - unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; - unsigned int next_pi; compat_uptr_t uentry, next_uentry, upending; compat_long_t futex_offset; int rc; @@ -1255,7 +1254,7 @@ static void compat_exit_robust_list(stru * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ - if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) + if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &cur_mod)) return; /* * Fetch the relative futex offset: @@ -1266,8 +1265,7 @@ static void compat_exit_robust_list(stru * Fetch any possibly pending lock-add first, and handle it * if it exists: */ - if (compat_fetch_robust_entry(&upending, &pending, - &head->list_op_pending, &pip)) + if (compat_fetch_robust_entry(&upending, &pending, &head->list_op_pending, &pend_mod)) return; next_entry = NULL; /* avoid warning with gcc */ @@ -1277,7 +1275,7 @@ static void 
compat_exit_robust_list(stru * handle_futex_death: */ rc = compat_fetch_robust_entry(&next_uentry, &next_entry, - (compat_uptr_t __user *)&entry->next, &next_pi); + (compat_uptr_t __user *)&entry->next, &next_mod); /* * A pending lock might already be on the list, so * dont process it twice: @@ -1285,15 +1283,14 @@ static void compat_exit_robust_list(stru if (entry != pending) { void __user *uaddr = futex_uaddr(entry, futex_offset); - if (handle_futex_death(uaddr, curr, pi, - HANDLE_DEATH_LIST)) + if (handle_futex_death(uaddr, curr, cur_mod, HANDLE_DEATH_LIST)) return; } if (rc) return; uentry = next_uentry; entry = next_entry; - pi = next_pi; + cur_mod = next_mod; /* * Avoid excessively long or circular lists: */ @@ -1305,7 +1302,7 @@ static void compat_exit_robust_list(stru if (pending) { void __user *uaddr = futex_uaddr(pending, futex_offset); - handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING); + handle_futex_death(uaddr, curr, pend_mod, HANDLE_DEATH_PENDING); } } #endif