From: Tamir Duberstein <tamird@gmail.com>
Date: Tue, 01 Jul 2025 12:27:19 -0400
Subject: [PATCH 3/3] rust: xarray: add `insert` and `reserve`
Message-Id: <20250701-xarray-insert-reserve-v1-3-25df2b0d706a@gmail.com>
In-Reply-To: <20250701-xarray-insert-reserve-v1-0-25df2b0d706a@gmail.com>
References: <20250701-xarray-insert-reserve-v1-0-25df2b0d706a@gmail.com>
To: Andreas Hindborg, Miguel Ojeda, Alex Gaynor, Boqun Feng, Gary Guo,
 Björn Roy Baron, Benno Lossin, Alice Ryhl, Trevor Gross, Danilo Krummrich,
 Matthew Wilcox, Andrew Morton
Cc: rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org,
 linux-fsdevel@vger.kernel.org, linux-mm@kvack.org, Daniel Almeida,
 Tamir Duberstein <tamird@gmail.com>
X-Mailer: b4 0.15-dev

Add `Guard::{insert,reserve}` and `Guard::{insert,reserve}_limit`, which
are akin to `__xa_{insert,alloc}` in C.

Note that unlike `xa_reserve`, which only ensures that memory is
allocated, the semantics of `Reservation` are stricter and require
precise management of the reservation. Indices which have been reserved
can still be overwritten with `Guard::store`, which allows for C-like
semantics if desired.

`__xa_cmpxchg_raw` is exported to facilitate the semantics described
above.

Signed-off-by: Tamir Duberstein <tamird@gmail.com>
---
 include/linux/xarray.h |   2 +
 lib/xarray.c           |  28 +++-
 rust/helpers/xarray.c  |   5 +
 rust/kernel/xarray.rs  | 419 ++++++++++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 447 insertions(+), 7 deletions(-)
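
For reviewers, a rough usage sketch of the new API (illustrative only,
not part of the patch; assumes an allocating `XArray<KBox<u32>>` behind
`xa` and elides error handling beyond `?`):

    let mut guard = xa.lock();

    // `insert` fails if the index is already occupied; on failure the
    // value travels back to the caller inside `StoreError`.
    guard.insert(7, KBox::new(42, GFP_KERNEL)?, GFP_KERNEL)?;

    // `reserve_limit` claims a free index somewhere in the array. The
    // reservation can be filled later while the lock is still held;
    // dropping it unfilled releases the slot.
    let reservation = guard.reserve_limit(.., GFP_KERNEL)?;
    let index = reservation.index();
    reservation.fill_locked(&mut guard, KBox::new(13, GFP_KERNEL)?)?;
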
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index be850174e802..64f2a5e06ceb 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -563,6 +563,8 @@ void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index,
 			void *old, void *entry, gfp_t);
+void *__xa_cmpxchg_raw(struct xarray *, unsigned long index, void *old,
+		void *entry, gfp_t);
 int __must_check __xa_insert(struct xarray *, unsigned long index,
 		void *entry, gfp_t);
 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
diff --git a/lib/xarray.c b/lib/xarray.c
index 76dde3a1cacf..58202b6fbb59 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1738,9 +1738,6 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 }
 EXPORT_SYMBOL(xa_store);
 
-static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
-		void *old, void *entry, gfp_t gfp);
-
 /**
  * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
  * @xa: XArray.
@@ -1767,7 +1764,29 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 }
 EXPORT_SYMBOL(__xa_cmpxchg);
 
-static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+/**
+ * __xa_cmpxchg_raw() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * This function is the same as __xa_cmpxchg() except that it does not coerce
+ * XA_ZERO_ENTRY to NULL on egress.
+ *
+ * Context: Any context.  Expects xa_lock to be held on entry.  May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
 		void *old, void *entry, gfp_t gfp)
 {
 	XA_STATE(xas, xa, index);
@@ -1787,6 +1806,7 @@ static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
 
 	return xas_result(&xas, curr);
 }
+EXPORT_SYMBOL(__xa_cmpxchg_raw);
 
 /**
  * __xa_insert() - Store this entry in the XArray if no entry is present.
diff --git a/rust/helpers/xarray.c b/rust/helpers/xarray.c
index 60b299f11451..b6c078e6a343 100644
--- a/rust/helpers/xarray.c
+++ b/rust/helpers/xarray.c
@@ -2,6 +2,11 @@
 
 #include <linux/xarray.h>
 
+void *rust_helper_xa_zero_entry(void)
+{
+	return XA_ZERO_ENTRY;
+}
+
 int rust_helper_xa_err(void *entry)
 {
 	return xa_err(entry);
diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs
index bbce54ec695c..87fa3259cdd7 100644
--- a/rust/kernel/xarray.rs
+++ b/rust/kernel/xarray.rs
@@ -9,7 +9,12 @@
     prelude::*,
     types::{ForeignOwnable, NotThreadSafe, Opaque},
 };
-use core::{iter, marker::PhantomData, mem, ptr::NonNull};
+use core::{
+    fmt, iter,
+    marker::PhantomData,
+    mem, ops,
+    ptr::{null_mut, NonNull},
+};
 
 /// An array which efficiently maps sparse integer indices to owned objects.
 ///
@@ -126,6 +131,19 @@ fn iter(&self) -> impl Iterator<Item = NonNull<T::PointedTo>> + '_ {
             .map_while(|ptr| NonNull::new(ptr.cast()))
     }
 
+    fn with_guard<F, U>(&self, guard: Option<&mut Guard<'_, T>>, f: F) -> U
+    where
+        F: FnOnce(&mut Guard<'_, T>) -> U,
+    {
+        match guard {
+            None => f(&mut self.lock()),
+            Some(guard) => {
+                assert_eq!(guard.xa.xa.get(), self.xa.get());
+                f(guard)
+            }
+        }
+    }
+
     /// Attempts to lock the [`XArray`] for exclusive access.
     pub fn try_lock(&self) -> Option<Guard<'_, T>> {
         // SAFETY: `self.xa` is always valid by the type invariant.
@@ -172,6 +190,7 @@ fn drop(&mut self) {
 /// The error returned by [`store`](Guard::store).
 ///
 /// Contains the underlying error and the value that was not stored.
+#[derive(Debug)]
 pub struct StoreError<T> {
     /// The error that occurred.
     pub error: Error,
@@ -185,6 +204,11 @@ fn from(value: StoreError<T>) -> Self {
     }
 }
 
+fn to_usize(i: u32) -> usize {
+    i.try_into()
+        .unwrap_or_else(|_| build_error!("cannot convert u32 to usize"))
+}
+
 impl<'a, T: ForeignOwnable> Guard<'a, T> {
     fn load<F, U>(&self, index: usize, f: F) -> Option<U>
     where
@@ -219,7 +243,7 @@ pub fn remove(&mut self, index: usize) -> Option<T> {
         // - The caller holds the lock.
         let ptr = unsafe { bindings::__xa_erase(self.xa.xa.get(), index) }.cast();
         // SAFETY:
-        // - `ptr` is either NULL or came from `T::into_foreign`.
+        // - `ptr` is either `NULL` or came from `T::into_foreign`.
         // - `&mut self` guarantees that the lifetimes of [`T::Borrowed`] and [`T::BorrowedMut`]
         //   borrowed from `self` have ended.
         unsafe { T::try_from_foreign(ptr) }
@@ -267,13 +291,272 @@ pub fn store(
             })
         } else {
             let old = old.cast();
-            // SAFETY: `ptr` is either NULL or came from `T::into_foreign`.
+            // SAFETY: `ptr` is either `NULL` or came from `T::into_foreign`.
             //
             // NB: `XA_ZERO_ENTRY` is never returned by functions belonging to the Normal XArray
             //     API; such entries present as `NULL`.
             Ok(unsafe { T::try_from_foreign(old) })
         }
     }
+
+    /// Stores an element at the given index if no entry is present.
+    ///
+    /// May drop the lock if needed to allocate memory, and then reacquire it afterwards.
+    ///
+    /// On failure, returns the element which was attempted to be stored.
+    pub fn insert(
+        &mut self,
+        index: usize,
+        value: T,
+        gfp: alloc::Flags,
+    ) -> Result<(), StoreError<T>> {
+        build_assert!(
+            mem::align_of::<T::PointedTo>() >= 4,
+            "pointers stored in XArray must be 4-byte aligned"
+        );
+        let ptr = value.into_foreign();
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        //
+        // INVARIANT: `ptr` came from `T::into_foreign`.
+        match unsafe { bindings::__xa_insert(self.xa.xa.get(), index, ptr.cast(), gfp.as_raw()) } {
+            0 => Ok(()),
+            errno => {
+                // SAFETY: `ptr` came from `T::into_foreign` and `__xa_insert` does not take
+                // ownership of the value on error.
+                let value = unsafe { T::from_foreign(ptr) };
+                Err(StoreError {
+                    value,
+                    error: Error::from_errno(errno),
+                })
+            }
+        }
+    }
+
+    /// Wrapper around `__xa_alloc`.
+    ///
+    /// On success, takes ownership of the pointer passed in `ptr`.
+    ///
+    /// On failure, ownership returns to the caller.
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
+    unsafe fn alloc(
+        &mut self,
+        limit: impl ops::RangeBounds<u32>,
+        ptr: *mut T::PointedTo,
+        gfp: alloc::Flags,
+    ) -> Result<usize> {
+        // NB: `xa_limit::{max,min}` are inclusive.
+        let limit = bindings::xa_limit {
+            max: match limit.end_bound() {
+                ops::Bound::Included(&end) => end,
+                ops::Bound::Excluded(&end) => end - 1,
+                ops::Bound::Unbounded => u32::MAX,
+            },
+            min: match limit.start_bound() {
+                ops::Bound::Included(&start) => start,
+                ops::Bound::Excluded(&start) => start + 1,
+                ops::Bound::Unbounded => 0,
+            },
+        };
+
+        let mut index = u32::MAX;
+
+        // SAFETY:
+        // - `self.xa` is always valid by the type invariant.
+        // - `self.xa` was initialized with `XA_FLAGS_ALLOC` or `XA_FLAGS_ALLOC1`.
+        //
+        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign`.
+        match unsafe {
+            bindings::__xa_alloc(
+                self.xa.xa.get(),
+                &mut index,
+                ptr.cast(),
+                limit,
+                gfp.as_raw(),
+            )
+        } {
+            0 => Ok(to_usize(index)),
+            errno => Err(Error::from_errno(errno)),
+        }
+    }
+
+    /// Allocates an entry somewhere in the array.
+    ///
+    /// On success, returns the index at which the entry was stored.
+    ///
+    /// On failure, returns the entry which was attempted to be stored.
+    pub fn insert_limit(
+        &mut self,
+        limit: impl ops::RangeBounds<u32>,
+        value: T,
+        gfp: alloc::Flags,
+    ) -> Result<usize, StoreError<T>> {
+        build_assert!(
+            mem::align_of::<T::PointedTo>() >= 4,
+            "pointers stored in XArray must be 4-byte aligned"
+        );
+        let ptr = value.into_foreign();
+        // SAFETY: `ptr` came from `T::into_foreign`.
+        unsafe { self.alloc(limit, ptr, gfp) }.map_err(|error| {
+            // SAFETY: `ptr` came from `T::into_foreign` and `self.alloc` does not take ownership of
+            // the value on error.
+            let value = unsafe { T::from_foreign(ptr) };
+            StoreError { value, error }
+        })
+    }
+
+    /// Reserves an entry in the array.
+    pub fn reserve(&mut self, index: usize, gfp: alloc::Flags) -> Result<Reservation<'a, T>> {
+        // NB: `__xa_insert` internally coerces `NULL` to `XA_ZERO_ENTRY` on ingress.
+        let ptr = null_mut();
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        //
+        // INVARIANT: `ptr` is `NULL`.
+        match unsafe { bindings::__xa_insert(self.xa.xa.get(), index, ptr, gfp.as_raw()) } {
+            0 => Ok(Reservation { xa: self.xa, index }),
+            errno => Err(Error::from_errno(errno)),
+        }
+    }
+
+    /// Reserves an entry somewhere in the array.
+    pub fn reserve_limit(
+        &mut self,
+        limit: impl ops::RangeBounds<u32>,
+        gfp: alloc::Flags,
+    ) -> Result<Reservation<'a, T>> {
+        // NB: `__xa_alloc` internally coerces `NULL` to `XA_ZERO_ENTRY` on ingress.
+        let ptr = null_mut();
+        // SAFETY: `ptr` is `NULL`.
+        unsafe { self.alloc(limit, ptr, gfp) }.map(|index| Reservation { xa: self.xa, index })
+    }
+}
+
+/// A reserved slot in an array.
+///
+/// The slot is released when the reservation goes out of scope.
+///
+/// Note that the array lock *must not* be held when the reservation is filled or dropped as this
+/// will lead to deadlock. [`Reservation::fill_locked`] and [`Reservation::release_locked`] can be
+/// used in contexts where the array lock is held.
+#[must_use = "the reservation is released immediately when the reservation is unused"]
+pub struct Reservation<'a, T: ForeignOwnable> {
+    xa: &'a XArray<T>,
+    index: usize,
+}
+
+impl<T: ForeignOwnable> fmt::Debug for Reservation<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Reservation")
+            .field("index", &self.index())
+            .finish()
+    }
+}
+
+impl<T: ForeignOwnable> Reservation<'_, T> {
+    /// Returns the index of the reservation.
+    pub fn index(&self) -> usize {
+        self.index
+    }
+
+    /// Replaces the reserved entry with the given entry.
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be `NULL` or have come from a previous call to `T::into_foreign`.
+    unsafe fn replace(guard: &mut Guard<'_, T>, index: usize, ptr: *mut T::PointedTo) -> Result {
+        // SAFETY: `xa_zero_entry` wraps `XA_ZERO_ENTRY` which is always safe to use.
+        let old = unsafe { bindings::xa_zero_entry() };
+
+        // NB: `__xa_cmpxchg_raw` is used over `__xa_cmpxchg` because the latter coerces
+        // `XA_ZERO_ENTRY` to `NULL` on egress, which would prevent us from determining whether a
+        // replacement was made.
+        //
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        //
+        // INVARIANT: `ptr` is either `NULL` or came from `T::into_foreign` and `old` is
+        // `XA_ZERO_ENTRY`.
+        let ret =
+            unsafe { bindings::__xa_cmpxchg_raw(guard.xa.xa.get(), index, old, ptr.cast(), 0) };
+
+        // SAFETY: `__xa_cmpxchg_raw` returns the old entry at this index on success or `xa_err` if
+        // an error happened.
+        match unsafe { bindings::xa_err(ret) } {
+            0 => {
+                if ret == old {
+                    Ok(())
+                } else {
+                    Err(EBUSY)
+                }
+            }
+            errno => Err(Error::from_errno(errno)),
+        }
+    }
+
+    fn fill_inner(&self, guard: Option<&mut Guard<'_, T>>, value: T) -> Result<(), StoreError<T>> {
+        let Self { xa, index } = self;
+        let index = *index;
+
+        let ptr = value.into_foreign();
+        xa.with_guard(guard, |guard| {
+            // SAFETY: `ptr` came from `T::into_foreign`.
+            unsafe { Self::replace(guard, index, ptr) }
+        })
+        .map_err(|error| {
+            // SAFETY: `ptr` came from `T::into_foreign` and `Self::replace` does not take ownership
+            // of the value on error.
+            let value = unsafe { T::from_foreign(ptr) };
+            StoreError { value, error }
+        })
+    }
+
+    /// Fills the reservation.
+    pub fn fill(self, value: T) -> Result<(), StoreError<T>> {
+        let result = self.fill_inner(None, value);
+        mem::forget(self);
+        result
+    }
+
+    /// Fills the reservation without acquiring the array lock.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the passed guard locks a different array.
+    pub fn fill_locked(self, guard: &mut Guard<'_, T>, value: T) -> Result<(), StoreError<T>> {
+        let result = self.fill_inner(Some(guard), value);
+        mem::forget(self);
+        result
+    }
+
+    fn release_inner(&self, guard: Option<&mut Guard<'_, T>>) -> Result {
+        let Self { xa, index } = self;
+        let index = *index;
+
+        xa.with_guard(guard, |guard| {
+            let ptr = null_mut();
+            // SAFETY: `ptr` is `NULL`.
+            unsafe { Self::replace(guard, index, ptr) }
+        })
+    }
+
+    /// Releases the reservation without acquiring the array lock.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the passed guard locks a different array.
+    pub fn release_locked(self, guard: &mut Guard<'_, T>) -> Result {
+        let result = self.release_inner(Some(guard));
+        mem::forget(self);
+        result
+    }
+}
+
+impl<T: ForeignOwnable> Drop for Reservation<'_, T> {
+    fn drop(&mut self) {
+        // NB: Errors here are possible since `Guard::store` does not honor reservations.
+        let _: Result = self.release_inner(None);
+    }
 }
 
 // SAFETY: `XArray` has no shared mutable state so it is `Send` iff `T` is `Send`.
@@ -282,3 +565,133 @@ unsafe impl<T: ForeignOwnable + Send> Send for XArray<T> {}
 
 // SAFETY: `XArray` serialises the interior mutability it provides so it is `Sync` iff `T` is
 // `Send`.
 unsafe impl<T: ForeignOwnable + Send> Sync for XArray<T> {}
+
+#[macros::kunit_tests(rust_xarray_kunit)]
+mod tests {
+    use super::*;
+    use pin_init::stack_pin_init;
+
+    fn new_kbox<T>(value: T) -> Result<KBox<T>> {
+        KBox::new(value, GFP_KERNEL).map_err(Into::into)
+    }
+
+    #[test]
+    fn test_alloc_kind_alloc() -> Result {
+        test_alloc_kind(AllocKind::Alloc, 0)
+    }
+
+    #[test]
+    fn test_alloc_kind_alloc1() -> Result {
+        test_alloc_kind(AllocKind::Alloc1, 1)
+    }
+
+    fn test_alloc_kind(kind: AllocKind, expected_index: usize) -> Result {
+        stack_pin_init!(let xa = XArray::new(kind));
+        let mut guard = xa.lock();
+
+        let reservation = guard.reserve_limit(.., GFP_KERNEL)?;
+        assert_eq!(reservation.index(), expected_index);
+        reservation.release_locked(&mut guard)?;
+
+        let insertion = guard.insert_limit(.., new_kbox(0x1337)?, GFP_KERNEL);
+        assert!(insertion.is_ok());
+        let insertion_index = insertion.unwrap();
+        assert_eq!(insertion_index, expected_index);
+
+        Ok(())
+    }
+
+    const IDX: usize = 0x1337;
+
+    fn insert<T: ForeignOwnable>(guard: &mut Guard<'_, T>, value: T) -> Result<(), StoreError<T>> {
+        guard.insert(IDX, value, GFP_KERNEL)
+    }
+
+    fn reserve<'a, T: ForeignOwnable>(guard: &mut Guard<'a, T>) -> Result<Reservation<'a, T>> {
+        guard.reserve(IDX, GFP_KERNEL)
+    }
+
+    #[track_caller]
+    fn check_not_vacant<'a>(guard: &mut Guard<'a, KBox<usize>>) -> Result {
+        // Insertion fails.
+        {
+            let beef = new_kbox(0xbeef)?;
+            let ret = insert(guard, beef);
+            assert!(ret.is_err());
+            let StoreError { error, value } = ret.unwrap_err();
+            assert_eq!(error, EBUSY);
+            assert_eq!(*value, 0xbeef);
+        }
+
+        // Reservation fails.
+        {
+            let ret = reserve(guard);
+            assert!(ret.is_err());
+            assert_eq!(ret.unwrap_err(), EBUSY);
+        }
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_insert_and_reserve_interaction() -> Result {
+        stack_pin_init!(let xa = XArray::new(Default::default()));
+        let mut guard = xa.lock();
+
+        // Vacant.
+        assert_eq!(guard.get(IDX), None);
+
+        // Reservation succeeds.
+        let reservation = {
+            let ret = reserve(&mut guard);
+            assert!(ret.is_ok());
+            ret.unwrap()
+        };
+
+        // Reserved presents as vacant.
+        assert_eq!(guard.get(IDX), None);
+
+        check_not_vacant(&mut guard)?;
+
+        // Release reservation.
+        {
+            let ret = reservation.release_locked(&mut guard);
+            assert!(ret.is_ok());
+            let () = ret.unwrap();
+        }
+
+        // Vacant again.
+        assert_eq!(guard.get(IDX), None);
+
+        // Insert succeeds.
+        {
+            let dead = new_kbox(0xdead)?;
+            let ret = insert(&mut guard, dead);
+            assert!(ret.is_ok());
+            let () = ret.unwrap();
+        }
+
+        check_not_vacant(&mut guard)?;
+
+        // Remove.
+        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xdead));
+
+        // Reserve and fill.
+        {
+            let beef = new_kbox(0xbeef)?;
+            let ret = reserve(&mut guard);
+            assert!(ret.is_ok());
+            let reservation = ret.unwrap();
+            let ret = reservation.fill_locked(&mut guard, beef);
+            assert!(ret.is_ok());
+            let () = ret.unwrap();
+        };
+
+        check_not_vacant(&mut guard)?;
+
+        // Remove.
+        assert_eq!(guard.remove(IDX).as_deref(), Some(&0xbeef));
+
+        Ok(())
+    }
+}

-- 
2.50.0