Date: Sat, 6 Sep 2025 04:23:27 +0000
From: Elle Rhumsaa
To: Boqun Feng
Cc: rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org,
    lkmm@lists.linux.dev, Will Deacon, Peter Zijlstra, Mark Rutland,
    Ingo Molnar, Thomas Gleixner, "Paul E. McKenney",
    stern@rowland.harvard.edu, Miguel Ojeda, alex.gaynor@gmail.com,
    Gary Guo, Björn Roy Baron, Benno Lossin, Alice Ryhl, Trevor Gross,
    Danilo Krummrich, Andreas Hindborg
Subject: Re: [PATCH 05/14] rust: sync: atomic: Add atomic {cmp,}xchg operations
References: <20250905044141.77868-1-boqun.feng@gmail.com>
 <20250905044141.77868-6-boqun.feng@gmail.com>
In-Reply-To: <20250905044141.77868-6-boqun.feng@gmail.com>

On Thu, Sep 04, 2025 at 09:41:32PM -0700, Boqun Feng wrote:
> xchg() and cmpxchg() are basic operations on atomic. Provide these based
> on C APIs.
>
> Note that cmpxchg() uses a similar function signature to
> compare_exchange() in Rust std: it returns a `Result`, where `Ok(old)`
> means the operation succeeded and `Err(old)` means the operation failed.
>
> Reviewed-by: Alice Ryhl
> Reviewed-by: Benno Lossin
> Signed-off-by: Boqun Feng
> Link: https://lore.kernel.org/all/20250719030827.61357-6-boqun.feng@gmail.com/
> ---
>  rust/kernel/sync/atomic.rs | 168 ++++++++++++++++++++++++++++++++++++-
>  1 file changed, 167 insertions(+), 1 deletion(-)
>
> diff --git a/rust/kernel/sync/atomic.rs b/rust/kernel/sync/atomic.rs
> index ea5782b6ee95..4c32d12dc61e 100644
> --- a/rust/kernel/sync/atomic.rs
> +++ b/rust/kernel/sync/atomic.rs
> @@ -25,7 +25,7 @@
>  pub use ordering::{Acquire, Full, Relaxed, Release};
>
>  use crate::build_error;
> -use internal::{AtomicBasicOps, AtomicRepr};
> +use internal::{AtomicBasicOps, AtomicExchangeOps, AtomicRepr};
>  use ordering::OrderingType;
>
>  /// A memory location which can be safely modified from multiple execution contexts.
> @@ -293,3 +293,169 @@ pub fn store(&self, v: T, _: Ordering) {
>          }
>      }
>  }
> +
> +impl Atomic
> +where
> +    T::Repr: AtomicExchangeOps,
> +{
> +    /// Atomic exchange.
> +    ///
> +    /// Atomically updates `*self` to `v` and returns the old value of `*self`.
> +    ///
> +    /// # Examples
> +    ///
> +    /// ```
> +    /// use kernel::sync::atomic::{Atomic, Acquire, Relaxed};
> +    ///
> +    /// let x = Atomic::new(42);
> +    ///
> +    /// assert_eq!(42, x.xchg(52, Acquire));
> +    /// assert_eq!(52, x.load(Relaxed));
> +    /// ```
> +    #[doc(alias("atomic_xchg", "atomic64_xchg", "swap"))]
> +    #[inline(always)]
> +    pub fn xchg(&self, v: T, _: Ordering) -> T {
> +        let v = into_repr(v);
> +
> +        // INVARIANT: `self.0` is a valid `T` after `atomic_xchg*()` because `v` is transmutable to
> +        // `T`.
> +        let ret = {
> +            match Ordering::TYPE {
> +                OrderingType::Full => T::Repr::atomic_xchg(&self.0, v),
> +                OrderingType::Acquire => T::Repr::atomic_xchg_acquire(&self.0, v),
> +                OrderingType::Release => T::Repr::atomic_xchg_release(&self.0, v),
> +                OrderingType::Relaxed => T::Repr::atomic_xchg_relaxed(&self.0, v),
> +            }
> +        };
> +
> +        // SAFETY: `ret` comes from reading `*self`, which is a valid `T` per type invariants.
> +        unsafe { from_repr(ret) }
> +    }
> +
> +    /// Atomic compare and exchange.
> +    ///
> +    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
> +    /// modified.
> +    ///
> +    /// Compare: The comparison is done via a byte-level comparison between `*self` and `old`.
> +    ///
> +    /// Ordering: On success, provides the ordering indicated by the `Ordering` type parameter;
> +    /// a failed cmpxchg doesn't provide any ordering, i.e. the load part of a failed cmpxchg is
> +    /// a [`Relaxed`] load.
> +    ///
> +    /// Returns `Ok(value)` if the cmpxchg succeeds, where `value` is guaranteed to be equal to
> +    /// `old`; otherwise returns `Err(value)`, where `value` is the current value of `*self`.
> +    ///
> +    /// # Examples
> +    ///
> +    /// ```
> +    /// use kernel::sync::atomic::{Atomic, Full, Relaxed};
> +    ///
> +    /// let x = Atomic::new(42);
> +    ///
> +    /// // Checks whether cmpxchg succeeded.
> +    /// let success = x.cmpxchg(52, 64, Relaxed).is_ok();
> +    /// # assert!(!success);
> +    ///
> +    /// // Checks whether cmpxchg failed.
> +    /// let failure = x.cmpxchg(52, 64, Relaxed).is_err();
> +    /// # assert!(failure);
> +    ///
> +    /// // Uses the old value on failure, e.g. to retry the cmpxchg.
> +    /// match x.cmpxchg(52, 64, Relaxed) {
> +    ///     Ok(_) => { },
> +    ///     Err(old) => {
> +    ///         // do something with `old`.
> +    /// # assert_eq!(old, 42);
> +    ///     }
> +    /// }
> +    ///
> +    /// // Uses the latest value regardless of success, same as atomic_cmpxchg() in C.
> +    /// let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
> +    /// # assert_eq!(42, latest);
> +    /// assert_eq!(64, x.load(Relaxed));
> +    /// ```
> +    ///
> +    /// [`Relaxed`]: ordering::Relaxed
> +    #[doc(alias(
> +        "atomic_cmpxchg",
> +        "atomic64_cmpxchg",
> +        "atomic_try_cmpxchg",
> +        "atomic64_try_cmpxchg",
> +        "compare_exchange"
> +    ))]
> +    #[inline(always)]
> +    pub fn cmpxchg(
> +        &self,
> +        mut old: T,
> +        new: T,
> +        o: Ordering,
> +    ) -> Result {
> +        // Note on code generation:
> +        //
> +        // try_cmpxchg() is used to implement cmpxchg(), and if the helper functions are inlined,
> +        // the compiler is able to figure out that the branch is not needed if the users don't
> +        // care about whether the operation succeeds or not. One exception is x86: due to commit
> +        // 44fe84459faf ("locking/atomic: Fix atomic_try_cmpxchg() semantics"), the
> +        // atomic_try_cmpxchg() on x86 has a branch even if the caller doesn't care about the
> +        // success of cmpxchg and only wants to use the old value. For example, for code like:
> +        //
> +        //     let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
> +        //
> +        // it will still generate code:
> +        //
> +        //     movl     $0x40, %ecx
> +        //     movl     $0x34, %eax
> +        //     lock
> +        //     cmpxchgl %ecx, 0x4(%rsp)
> +        //     jne      1f
> +        //     2:
> +        //     ...
> +        //     1:  movl %eax, %ecx
> +        //         jmp  2b
> +        //
> +        // This might be "fixed" by introducing a try_cmpxchg_exclusive() that knows the "*old"
> +        // location in the C function is always safe to write.
> +        if self.try_cmpxchg(&mut old, new, o) {
> +            Ok(old)
> +        } else {
> +            Err(old)
> +        }
> +    }
> +
> +    /// Atomic compare and exchange, returning whether the operation succeeded.
> +    ///
> +    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
> +    /// modified, and `*old` is updated to the current value of `*self`.
> +    ///
> +    /// The "Compare" and "Ordering" parts are the same as for [`Atomic::cmpxchg()`].
> +    ///
> +    /// Returns `true` if the cmpxchg succeeds, otherwise returns `false`.
> +    #[inline(always)]
> +    fn try_cmpxchg(&self, old: &mut T, new: T, _: Ordering) -> bool {
> +        let mut tmp = into_repr(*old);
> +        let new = into_repr(new);
> +
> +        // INVARIANT: `self.0` is a valid `T` after `atomic_try_cmpxchg*()` because `new` is
> +        // transmutable to `T`.
> +        let ret = {
> +            match Ordering::TYPE {
> +                OrderingType::Full => T::Repr::atomic_try_cmpxchg(&self.0, &mut tmp, new),
> +                OrderingType::Acquire => {
> +                    T::Repr::atomic_try_cmpxchg_acquire(&self.0, &mut tmp, new)
> +                }
> +                OrderingType::Release => {
> +                    T::Repr::atomic_try_cmpxchg_release(&self.0, &mut tmp, new)
> +                }
> +                OrderingType::Relaxed => {
> +                    T::Repr::atomic_try_cmpxchg_relaxed(&self.0, &mut tmp, new)
> +                }
> +            }
> +        };
> +
> +        // SAFETY: `tmp` comes from reading `*self`, which is a valid `T` per type invariants.
> +        *old = unsafe { from_repr(tmp) };
> +
> +        ret
> +    }
> +}
> --
> 2.51.0
>

Reviewed-by: Elle Rhumsaa
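
As a small aside for future users of this API: below is a minimal retry-loop
sketch of how I read the cmpxchg() contract while reviewing. The add_capped()
helper, its cap parameter, the i32 value type and the choice of Full ordering
are my own illustration and not part of this series; only the Atomic type and
its load()/cmpxchg() operations are from the patches.

use kernel::sync::atomic::{Atomic, Full, Relaxed};

/// Hypothetical helper (not part of this patch): increment `v`, but never
/// past `cap`. Returns `true` if the increment was published.
fn add_capped(v: &Atomic<i32>, cap: i32) -> bool {
    // A relaxed read is fine here; the cmpxchg() below re-validates the value.
    let mut cur = v.load(Relaxed);

    while cur < cap {
        match v.cmpxchg(cur, cur + 1, Full) {
            // The compare-exchange succeeded, so our increment is visible.
            Ok(_) => return true,
            // Someone else updated `*v` first; retry with the value returned
            // in `Err(..)`, as described in the commit message.
            Err(old) => cur = old,
        }
    }

    false
}

Nothing here goes beyond what the doc examples already demonstrate; it just
exercises the Err(old) retry path in a loop.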