From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id D5CB938B147; Tue, 28 Apr 2026 23:34:18 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777419258; cv=none; b=jZRc8QeDsdPuYY8gyd+5L7cu/bHbiWGGBoxoEcJEFy254kDq/xqfnVEoSKGjBdpO/MdnOVjNK1XbMaQGUuev9rdCNH/az6ZWJqwbDFW0ZaPGY3EeoEAIfz56Opt51br8Y1Ruz+ZxQLqza2QQYxpKh5uEJexldiiW0ImQF5iyIqg= ARC-Message-Signature:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777419258; c=relaxed/simple; bh=VOV//gWQkHADVLYCgIfwzZhW3FXmMvBwfAlH8Np/HUA=; h=Date:Message-ID:From:To:Cc:Subject:References:MIME-Version: Content-Type; b=YM+WrGX1G5KFoXYDUqKH2ThPCb6ZHu79SkjjKQiw4qzbV7zzeL1k7grKv3/ZFJok8mcHbFJ/dnUlN6liv3jL6DGe8UGV0MLXCcfbk59DNqiPQz+k8HWRV9uMhrjGNuSQiSE+J/CqMjLbMUU4ltjipRXJvwRpjmFoZNYQ3P1N+e4= ARC-Authentication-Results:i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=PXWvOoar; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="PXWvOoar" Received: by smtp.kernel.org (Postfix) with ESMTPSA id E2557C2BCAF; Tue, 28 Apr 2026 23:34:17 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777419258; bh=VOV//gWQkHADVLYCgIfwzZhW3FXmMvBwfAlH8Np/HUA=; h=Date:From:To:Cc:Subject:References:From; b=PXWvOoarTT7mMHSp04TtzWAGxpts7zSy5mpyu647WXK1xtMmlLJoCk+U84PxoJaTo CidrQJlQMKs74UQk0+UNbPEXaYuNqML5byOgBZyFHFkIyLYpNpTt7qCkeL5TDwmGuc ++W15qofgto6pyYJKnO0e9FWyvWemeam3KNnJkWyJznZOXpXDTioldomUpwthB3m0f zC7D5O1rLA2X1hbdsHQnAO7dWdakFBaZpadBye6na75zs/zG5G8cWEOsRw2q2Ano3Z 
wGAzgAi3d+ulCTpI7ajwChVupvy8zhfo1RpZaQlb7wfsK358K/cUga0d1KkwnAbu8b Oi99PTEYp/77Q== Date: Wed, 29 Apr 2026 01:34:15 +0200 Message-ID: <20260428224427.845230956@kernel.org> User-Agent: quilt/0.68 From: Thomas Gleixner To: LKML Cc: Mathias Stearn , Dmitry Vyukov , Peter Zijlstra , linux-man@vger.kernel.org, Mark Rutland , Mathieu Desnoyers , Chris Kennelly , regressions@lists.linux.dev, Ingo Molnar , Blake Oler , Florian Weimer , Rich Felker , Matthew Wilcox , Greg Kroah-Hartman , Linus Torvalds Subject: [patch 08/10] rseq: Implement read only ABI enforcement for optimized RSEQ V2 mode References: <20260428221058.149538293@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 The optimized RSEQ V2 mode requires that user space adheres to the ABI specification and does not modify the read-only fields cpu_id_start, cpu_id, node_id and mm_cid behind the kernel's back. While the kernel does not rely on these fields, the adherence to this is a fundamental prerequisite to allow multiple entities, e.g. libraries, in an application to utilize the full potential of RSEQ without stepping on each other's toes. Validate this adherence on every update of these fields. If the kernel detects that user space modified the fields, the application is force terminated. 
Fixes: d6200245c75e ("rseq: Allow registering RSEQ with slice extension") Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org --- include/linux/rseq_entry.h | 71 +++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 43 deletions(-) --- a/include/linux/rseq_entry.h +++ b/include/linux/rseq_entry.h @@ -248,7 +248,6 @@ static __always_inline bool rseq_grant_s #endif /* !CONFIG_RSEQ_SLICE_EXTENSION */ bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr); -bool rseq_debug_validate_ids(struct task_struct *t); static __always_inline void rseq_note_user_irq_entry(void) { @@ -368,43 +367,6 @@ bool rseq_debug_update_user_cs(struct ta return false; } -/* - * On debug kernels validate that user space did not mess with it if the - * debug branch is enabled. - */ -bool rseq_debug_validate_ids(struct task_struct *t) -{ - struct rseq __user *rseq = t->rseq.usrptr; - u32 cpu_id, uval, node_id; - - /* - * On the first exit after registering the rseq region CPU ID is - * RSEQ_CPU_ID_UNINITIALIZED and node_id in user space is 0! - */ - node_id = t->rseq.ids.cpu_id != RSEQ_CPU_ID_UNINITIALIZED ? 
- cpu_to_node(t->rseq.ids.cpu_id) : 0; - - scoped_user_read_access(rseq, efault) { - unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault); - if (cpu_id != t->rseq.ids.cpu_id) - goto die; - unsafe_get_user(uval, &rseq->cpu_id, efault); - if (uval != cpu_id) - goto die; - unsafe_get_user(uval, &rseq->node_id, efault); - if (uval != node_id) - goto die; - unsafe_get_user(uval, &rseq->mm_cid, efault); - if (uval != t->rseq.ids.mm_cid) - goto die; - } - return true; -die: - t->rseq.event.fatal = true; -efault: - return false; -} - #endif /* RSEQ_BUILD_SLOW_PATH */ /* @@ -519,12 +481,32 @@ bool rseq_set_ids_get_csaddr(struct task { struct rseq __user *rseq = t->rseq.usrptr; - if (static_branch_unlikely(&rseq_debug_enabled)) { - if (!rseq_debug_validate_ids(t)) - return false; - } - scoped_user_rw_access(rseq, efault) { + /* Validate the R/O fields for debug and optimized mode */ + if (static_branch_unlikely(&rseq_debug_enabled) || rseq_v2(t)) { + u32 cpu_id, uval, node_id; + + /* + * On the first exit after registering the rseq region CPU ID is + * RSEQ_CPU_ID_UNINITIALIZED and node_id in user space is 0! + */ + node_id = t->rseq.ids.cpu_id != RSEQ_CPU_ID_UNINITIALIZED ? + cpu_to_node(t->rseq.ids.cpu_id) : 0; + + unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault); + if (cpu_id != t->rseq.ids.cpu_id) + goto die; + unsafe_get_user(uval, &rseq->cpu_id, efault); + if (uval != cpu_id) + goto die; + unsafe_get_user(uval, &rseq->node_id, efault); + if (uval != node_id) + goto die; + unsafe_get_user(uval, &rseq->mm_cid, efault); + if (uval != t->rseq.ids.mm_cid) + goto die; + } + unsafe_put_user(ids->cpu_id, &rseq->cpu_id_start, efault); unsafe_put_user(ids->cpu_id, &rseq->cpu_id, efault); unsafe_put_user(node_id, &rseq->node_id, efault); @@ -543,6 +525,9 @@ bool rseq_set_ids_get_csaddr(struct task rseq_stat_inc(rseq_stats.ids); rseq_trace_update(t, ids); return true; + +die: + t->rseq.event.fatal = true; efault: return false; }