The Linux Kernel Mailing List
 help / color / mirror / Atom feed
* [PATCH v1] rust: workqueue: add cancel_sync support
@ 2026-05-10  8:21 Onur Özkan
  2026-05-10 13:43 ` Alice Ryhl
  0 siblings, 1 reply; 3+ messages in thread
From: Onur Özkan @ 2026-05-10  8:21 UTC (permalink / raw)
  To: rust-for-linux, linux-kernel
  Cc: ojeda, boqun, gary, bjorn3_gh, lossin, a.hindborg, aliceryhl,
	tmgross, dakr, peterz, fujita.tomonori, tamird, Onur Özkan

Drivers can use this during teardown to cancel pending work and wait for
running work to finish before dropping related resources.

This is not implemented for Pin<KBox<T>> because queuing a boxed work
item transfers ownership of the box to the workqueue. There is therefore
no separate safe owner that can cancel the boxed work while it is pending.

The immediate motivation is the Tyr reset infrastructure [1], which needs
to cancel pending reset work and wait for any running reset work during
teardown before dropping the resources used by that work.

[1]: https://lore.kernel.org/all/20260416171728.205141-1-work@onurozkan.dev

Signed-off-by: Onur Özkan <work@onurozkan.dev>
---
 rust/kernel/workqueue.rs | 134 ++++++++++++++++++++++++++++++++-------
 1 file changed, 112 insertions(+), 22 deletions(-)

diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 7e253b6f299c..a10daa2763ac 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -442,23 +442,44 @@ pub unsafe trait RawDelayedWorkItem<const ID: u64>: RawWorkItem<ID> {}
 ///
 /// # Safety
 ///
-/// Implementers must ensure that [`__enqueue`] uses a `work_struct` initialized with the [`run`]
-/// method of this trait as the function pointer.
+/// Implementers must ensure that [`__enqueue`] uses a `work_struct` initialized with [`run`] as
+/// its function pointer, and that [`from_raw_work`] rebuilds the exact ownership transferred by
+/// a successful [`__enqueue`] call.
 ///
 /// [`__enqueue`]: RawWorkItem::__enqueue
+/// [`from_raw_work`]: WorkItemPointer::from_raw_work
 /// [`run`]: WorkItemPointer::run
-pub unsafe trait WorkItemPointer<const ID: u64>: RawWorkItem<ID> {
-    /// Run this work item.
+pub unsafe trait WorkItemPointer<const ID: u64>: RawWorkItem<ID> + Sized {
+    /// The work item type containing the embedded `work_struct`.
+    type Item: WorkItem<ID, Pointer = Self> + ?Sized;
+
+    /// Rebuild this work item's pointer from its embedded `work_struct`.
     ///
     /// # Safety
     ///
-    /// The provided `work_struct` pointer must originate from a previous call to [`__enqueue`]
-    /// where the `queue_work_on` closure returned true, and the pointer must still be valid.
+    /// The provided `work_struct` pointer must originate from a previous call to
+    /// [`RawWorkItem::__enqueue`] where the `queue_work_on` closure returned true,
+    /// and the pointer must still be valid.
+    unsafe fn from_raw_work(ptr: *mut bindings::work_struct) -> Self;
+
+    /// Run this work item.
     ///
-    /// [`__enqueue`]: RawWorkItem::__enqueue
-    unsafe extern "C" fn run(ptr: *mut bindings::work_struct);
+    /// # Safety
+    ///
+    /// The provided `work_struct` pointer must satisfy the same requirements as
+    /// [`WorkItemPointer::from_raw_work`].
+    #[inline]
+    unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+        <Self::Item as WorkItem<ID>>::run(
+            // SAFETY: The requirements for `run` are exactly those of `from_raw_work`.
+            unsafe { Self::from_raw_work(ptr) },
+        );
+    }
 }
 
+/// Marker for work item types that support cancellation.
+pub trait SupportsCancelling<const ID: u64>: WorkItemPointer<ID> {}
+
 /// Defines the method that should be called when this work item is executed.
 ///
 /// This trait is used when the `work_struct` field is defined using the [`Work`] helper.
@@ -523,6 +544,32 @@ pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit
         })
     }
 
+    /// Cancels this work item if it is pending and waits for any running execution to finish.
+    ///
+    /// On return, the work item is guaranteed not to be pending or executing as long as there are
+    /// no racing re-enqueues.
+    ///
+    /// # Note
+    ///
+    /// Should be called from a sleepable context if the work was last queued on a non-BH
+    /// workqueue.
+    #[inline]
+    pub fn cancel_sync(&self) -> Option<T::Pointer>
+    where
+        T: WorkItem<ID>,
+        T::Pointer: SupportsCancelling<ID>,
+    {
+        let ptr = self.work.get();
+        // SAFETY: `ptr` is a valid embedded `work_struct`.
+        if unsafe { bindings::cancel_work_sync(ptr) } {
+            // SAFETY: A `true` return means the work was pending and got canceled, so the queued
+            // ownership transfer performed by `__enqueue` is reclaimed here.
+            Some(unsafe { T::Pointer::from_raw_work(ptr) })
+        } else {
+            None
+        }
+    }
+
     /// Get a pointer to the inner `work_struct`.
     ///
     /// # Safety
@@ -709,6 +756,34 @@ pub fn new(
         })
     }
 
+    /// Cancels this delayed work item if it is pending and waits for any running execution to
+    /// finish.
+    ///
+    /// On return, the work item is guaranteed not to be pending or executing as long as there are
+    /// no racing re-enqueues.
+    ///
+    /// # Note
+    ///
+    /// Should be called from a sleepable context if the work was last queued on a non-BH
+    /// workqueue.
+    #[inline]
+    pub fn cancel_sync(&self) -> Option<T::Pointer>
+    where
+        T: WorkItem<ID>,
+        T::Pointer: SupportsCancelling<ID>,
+    {
+        let ptr = self.dwork.get();
+
+        // SAFETY: `ptr` is a valid embedded `delayed_work`.
+        if unsafe { bindings::cancel_delayed_work_sync(ptr) } {
+            // SAFETY: A `true` return means the work was pending and got canceled, so the queued
+            // ownership transfer performed by `__enqueue` is reclaimed here.
+            Some(unsafe { T::Pointer::from_raw_work(core::ptr::addr_of_mut!((*ptr).work)) })
+        } else {
+            None
+        }
+    }
+
     /// Get a pointer to the inner `delayed_work`.
     ///
     /// # Safety
@@ -824,25 +899,32 @@ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
     T: WorkItem<ID, Pointer = Self>,
     T: HasWork<T, ID>,
 {
-    unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+    type Item = T;
+
+    unsafe fn from_raw_work(ptr: *mut bindings::work_struct) -> Self {
         // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
         let ptr = ptr.cast::<Work<T, ID>>();
         // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
         let ptr = unsafe { T::work_container_of(ptr) };
         // SAFETY: This pointer comes from `Arc::into_raw` and we've been given back ownership.
-        let arc = unsafe { Arc::from_raw(ptr) };
-
-        T::run(arc)
+        unsafe { Arc::from_raw(ptr) }
     }
 }
 
+impl<T, const ID: u64> SupportsCancelling<ID> for Arc<T>
+where
+    T: WorkItem<ID, Pointer = Self>,
+    T: HasWork<T, ID>,
+{
+}
+
 // SAFETY: The `work_struct` raw pointer is guaranteed to be valid for the duration of the call to
 // the closure because we get it from an `Arc`, which means that the ref count will be at least 1,
 // and we don't drop the `Arc` ourselves. If `queue_work_on` returns true, it is further guaranteed
 // to be valid until a call to the function pointer in `work_struct` because we leak the memory it
 // points to, and only reclaim it if the closure returns false, or in `WorkItemPointer::run`, which
 // is what the function pointer in the `work_struct` must be pointing to, according to the safety
-// requirements of `WorkItemPointer`.
+// requirements of `WorkItemPointer`, or after a successful cancellation.
 unsafe impl<T, const ID: u64> RawWorkItem<ID> for Arc<T>
 where
     T: WorkItem<ID, Pointer = Self>,
@@ -887,7 +969,9 @@ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<KBox<T>>
     T: WorkItem<ID, Pointer = Self>,
     T: HasWork<T, ID>,
 {
-    unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+    type Item = T;
+
+    unsafe fn from_raw_work(ptr: *mut bindings::work_struct) -> Self {
         // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
         let ptr = ptr.cast::<Work<T, ID>>();
         // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
@@ -895,9 +979,7 @@ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<KBox<T>>
         // SAFETY: This pointer comes from `Arc::into_raw` and we've been given back ownership.
         let boxed = unsafe { KBox::from_raw(ptr) };
         // SAFETY: The box was already pinned when it was enqueued.
-        let pinned = unsafe { Pin::new_unchecked(boxed) };
-
-        T::run(pinned)
+        unsafe { Pin::new_unchecked(boxed) }
     }
 }
 
@@ -958,7 +1040,9 @@ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for ARef<T>
     T: WorkItem<ID, Pointer = Self>,
     T: HasWork<T, ID>,
 {
-    unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+    type Item = T;
+
+    unsafe fn from_raw_work(ptr: *mut bindings::work_struct) -> Self {
         // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
         let ptr = ptr.cast::<Work<T, ID>>();
 
@@ -972,19 +1056,25 @@ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for ARef<T>
 
         // SAFETY: This pointer comes from `ARef::into_raw` and we've been given
         // back ownership.
-        let aref = unsafe { ARef::from_raw(ptr) };
-
-        T::run(aref)
+        unsafe { ARef::from_raw(ptr) }
     }
 }
 
+impl<T, const ID: u64> SupportsCancelling<ID> for ARef<T>
+where
+    T: AlwaysRefCounted,
+    T: WorkItem<ID, Pointer = Self>,
+    T: HasWork<T, ID>,
+{
+}
+
 // SAFETY: The `work_struct` raw pointer is guaranteed to be valid for the duration of the call to
 // the closure because we get it from an `ARef`, which means that the ref count will be at least 1,
 // and we don't drop the `ARef` ourselves. If `queue_work_on` returns true, it is further guaranteed
 // to be valid until a call to the function pointer in `work_struct` because we leak the memory it
 // points to, and only reclaim it if the closure returns false, or in `WorkItemPointer::run`, which
 // is what the function pointer in the `work_struct` must be pointing to, according to the safety
-// requirements of `WorkItemPointer`.
+// requirements of `WorkItemPointer`, or after a successful cancellation.
 unsafe impl<T, const ID: u64> RawWorkItem<ID> for ARef<T>
 where
     T: AlwaysRefCounted,
-- 
2.51.2


^ permalink raw reply related	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-05-10 15:53 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-05-10  8:21 [PATCH v1] rust: workqueue: add cancel_sync support Onur Özkan
2026-05-10 13:43 ` Alice Ryhl
2026-05-10 15:53   ` Onur Özkan

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox