From: Gary Guo
To: Miguel Ojeda, Alex Gaynor, Boqun Feng, Gary Guo, Björn Roy Baron,
	Benno Lossin, Andreas Hindborg, Alice Ryhl, Trevor Gross,
	Danilo Krummrich, Will Deacon, Peter Zijlstra, Mark Rutland,
	Tamir Duberstein, Ingo Molnar, Mitchell Levy, Lyude Paul,
	Wedson Almeida Filho
Cc: rust-for-linux@vger.kernel.org, Fiona Behrens, linux-kernel@vger.kernel.org
Subject: [PATCH v4 1/5] rust: implement `kernel::sync::Refcount`
Date: Sun, 22 Jun 2025 13:57:27 +0100
Message-ID: <20250622125802.3224264-2-gary@kernel.org>
In-Reply-To: <20250622125802.3224264-1-gary@kernel.org>
References: <20250622125802.3224264-1-gary@kernel.org>
X-Mailer: git-send-email 2.49.0
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

From: Gary Guo

This is a wrapping layer of `include/linux/refcount.h`. The kernel
refcount is already used by `Arc`, but it currently calls into the FFI
directly.
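
For illustration only, a minimal sketch of how the new API is meant to be
used (the `MyObject` type and its release step below are hypothetical; only
the `Refcount` calls come from this patch):

    use kernel::sync::Refcount;

    struct MyObject {
        refcount: Refcount,
        // ... payload ...
    }

    impl MyObject {
        fn new() -> Self {
            // The creator starts out holding the single reference.
            Self { refcount: Refcount::new(1) }
        }

        fn get(&self) {
            // Take an extra reference; `inc` needs no ordering because the
            // caller already holds a reference.
            self.refcount.inc();
        }

        fn put(&self) {
            // Drop a reference; the acquire ordering that `dec_and_test`
            // provides on success makes it safe to free the object once the
            // count reaches zero.
            if self.refcount.dec_and_test() {
                // ... free the object here ...
            }
        }
    }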
Reviewed-by: Alice Ryhl
Reviewed-by: Boqun Feng
Reviewed-by: Fiona Behrens
Signed-off-by: Gary Guo
---
 rust/helpers/refcount.c      | 10 +++++
 rust/kernel/sync.rs          |  2 +
 rust/kernel/sync/refcount.rs | 86 ++++++++++++++++++++++++++++++++++++
 3 files changed, 98 insertions(+)
 create mode 100644 rust/kernel/sync/refcount.rs

diff --git a/rust/helpers/refcount.c b/rust/helpers/refcount.c
index d6adbd2e45a1..d175898ad7b8 100644
--- a/rust/helpers/refcount.c
+++ b/rust/helpers/refcount.c
@@ -7,11 +7,21 @@ refcount_t rust_helper_REFCOUNT_INIT(int n)
 	return (refcount_t)REFCOUNT_INIT(n);
 }
 
+void rust_helper_refcount_set(refcount_t *r, int n)
+{
+	refcount_set(r, n);
+}
+
 void rust_helper_refcount_inc(refcount_t *r)
 {
 	refcount_inc(r);
 }
 
+void rust_helper_refcount_dec(refcount_t *r)
+{
+	refcount_dec(r);
+}
+
 bool rust_helper_refcount_dec_and_test(refcount_t *r)
 {
 	return refcount_dec_and_test(r);
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 36a719015583..09e81a1c2966 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -15,6 +15,7 @@ mod locked_by;
 pub mod poll;
 pub mod rcu;
+mod refcount;
 
 pub use arc::{Arc, ArcBorrow, UniqueArc};
 pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
@@ -22,6 +23,7 @@ pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
 pub use lock::spinlock::{new_spinlock, SpinLock, SpinLockGuard};
 pub use locked_by::LockedBy;
+pub use refcount::Refcount;
 
 /// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
 #[repr(transparent)]
diff --git a/rust/kernel/sync/refcount.rs b/rust/kernel/sync/refcount.rs
new file mode 100644
index 000000000000..a0fc22f6d645
--- /dev/null
+++ b/rust/kernel/sync/refcount.rs
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Atomic reference counting.
+//!
+//! C header: [`include/linux/refcount.h`](srctree/include/linux/refcount.h)
+
+use crate::types::Opaque;
+
+/// Atomic reference counter.
+///
+/// This type is conceptually an atomic integer, but, unlike normal atomic integers, it provides
+/// saturation semantics: values in the negative range, when viewed as a signed integer, are
+/// saturation (bad) values. For details about the saturation semantics, please refer to the top
+/// of [`include/linux/refcount.h`](srctree/include/linux/refcount.h).
+///
+/// Wraps the kernel's C `refcount_t`.
+#[repr(transparent)]
+pub struct Refcount(Opaque<bindings::refcount_t>);
+
+impl Refcount {
+    /// Construct a new [`Refcount`] from an initial value.
+    #[inline]
+    pub fn new(value: i32) -> Self {
+        // SAFETY: There are no safety requirements for this FFI call.
+        Self(Opaque::new(unsafe { bindings::REFCOUNT_INIT(value) }))
+    }
+
+    #[inline]
+    fn as_ptr(&self) -> *mut bindings::refcount_t {
+        self.0.get()
+    }
+
+    /// Set a refcount's value.
+    #[inline]
+    pub fn set(&self, value: i32) {
+        // SAFETY: `self.as_ptr()` is valid.
+        unsafe { bindings::refcount_set(self.as_ptr(), value) }
+    }
+
+    /// Increment a refcount.
+    ///
+    /// It will saturate on overflow and `WARN`. It will also `WARN` if the refcount is 0, as this
+    /// represents a possible use-after-free condition.
+    ///
+    /// Provides no memory ordering; it is assumed that the caller already has a reference on the
+    /// object.
+    #[inline]
+    pub fn inc(&self) {
+        // SAFETY: `self.as_ptr()` is valid.
+        unsafe { bindings::refcount_inc(self.as_ptr()) }
+    }
+
+    /// Decrement a refcount.
+    ///
+    /// It will `WARN` on underflow and fail to decrement when saturated.
+    ///
+    /// Provides release memory ordering, such that prior loads and stores are done before
+    /// the decrement.
+    #[inline]
+    pub fn dec(&self) {
+        // SAFETY: `self.as_ptr()` is valid.
+        unsafe { bindings::refcount_dec(self.as_ptr()) }
+    }
+
+    /// Decrement a refcount and test if it is 0.
+    ///
+    /// It will `WARN` on underflow and fail to decrement when saturated.
+    ///
+    /// Provides release memory ordering, such that prior loads and stores are done before the
+    /// decrement, and provides an acquire ordering on success such that memory deallocation
+    /// must come after.
+    ///
+    /// Returns `true` if the resulting refcount is 0, and `false` otherwise.
+    #[inline]
+    #[must_use = "use `dec` instead if you do not need to test if it is 0"]
+    pub fn dec_and_test(&self) -> bool {
+        // SAFETY: `self.as_ptr()` is valid.
+        unsafe { bindings::refcount_dec_and_test(self.as_ptr()) }
+    }
+}
+
+// SAFETY: `refcount_t` is thread-safe.
+unsafe impl Send for Refcount {}
+
+// SAFETY: `refcount_t` is thread-safe.
+unsafe impl Sync for Refcount {}
-- 
2.49.0