Add workqueue support for the ARef<T> smart pointer by implementing WorkItemPointer and RawWorkItem for it. This allows an instance of ARef<T> to be enqueued and to handle deferred work directly, which can be convenient or even necessary, depending on the specifics of the driver or subsystem.
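
For illustration only (not part of this patch), driver-side usage could look roughly like the sketch below. `MyDevice` is a hypothetical type that is assumed to already implement `AlwaysRefCounted`, to be `Send + Sync`, and to have a `Work<MyDevice>` field wired up via `impl_has_work!` and `new_work!`; the `WorkItem` impl with `Pointer = ARef<...>` and the enqueue call are what this patch enables:

    use kernel::prelude::*;
    use kernel::types::ARef;
    use kernel::workqueue::{self, WorkItem};

    impl WorkItem for MyDevice {
        // New with this patch: an ARef<T> owns the work item while it
        // is queued.
        type Pointer = ARef<MyDevice>;

        fn run(this: ARef<MyDevice>) {
            // Runs on the workqueue with a reference count held on
            // `MyDevice` for the duration of the call.
            pr_info!("running deferred work\n");
        }
    }

    fn schedule_work(dev: &MyDevice) {
        // Take a reference and hand it to the system workqueue. On
        // failure the ARef is handed back and simply dropped here.
        let dev: ARef<MyDevice> = dev.into();
        let _ = workqueue::system().enqueue(dev);
    }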
The implementation is similar to that of Arc<T>, and a subsequent patch
will implement support for drm::Device as the first user. This is
notably important for work items that need access to the drm device, as
it was previously not possible to enqueue work on an
ARef<drm::Device<T>> without running afoul of the orphan rule.

Signed-off-by: Daniel Almeida <[email protected]>
---
 rust/kernel/workqueue.rs | 85 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 79 insertions(+), 6 deletions(-)

diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 706e833e9702..6ae7f3fb3496 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -192,9 +192,9 @@
     sync::Arc,
     sync::LockClassKey,
     time::Jiffies,
-    types::Opaque,
+    types::{ARef, AlwaysRefCounted, Opaque},
 };
-use core::marker::PhantomData;
+use core::{marker::PhantomData, ptr::NonNull};
 
 /// Creates a [`Work`] initialiser with the given name and a newly-created lock class.
 #[macro_export]
@@ -425,10 +425,11 @@ pub unsafe trait RawDelayedWorkItem<const ID: u64>: RawWorkItem<ID> {}
 
 /// Defines the method that should be called directly when a work item is executed.
 ///
-/// This trait is implemented by `Pin<KBox<T>>` and [`Arc<T>`], and is mainly intended to be
-/// implemented for smart pointer types. For your own structs, you would implement [`WorkItem`]
-/// instead. The [`run`] method on this trait will usually just perform the appropriate
-/// `container_of` translation and then call into the [`run`][WorkItem::run] method from the
+/// This trait is implemented by `Pin<KBox<T>>`, [`Arc<T>`] and [`ARef<T>`], and
+/// is mainly intended to be implemented for smart pointer types. For your own
+/// structs, you would implement [`WorkItem`] instead. The [`run`] method on
+/// this trait will usually just perform the appropriate `container_of`
+/// translation and then call into the [`run`][WorkItem::run] method from the
 /// [`WorkItem`] trait.
 ///
 /// This trait is used when the `work_struct` field is defined using the [`Work`] helper.
@@ -934,6 +935,78 @@ unsafe impl<T, const ID: u64> RawDelayedWorkItem<ID> for Pin<KBox<T>>
 {
 }
 
+// SAFETY: Like the `Arc<T>` implementation, the `__enqueue` implementation for
+// `ARef<T>` obtains a `work_struct` from the `Work` field using
+// `T::raw_get_work`, so the same safety reasoning applies:
+//
+// - `__enqueue` gets the `work_struct` from the `Work` field, using `T::raw_get_work`.
+// - The only safe way to create a `Work` object is through `Work::new`.
+// - `Work::new` makes sure that `T::Pointer::run` is passed to `init_work_with_key`.
+// - Finally `Work` and `RawWorkItem` guarantee that the correct `Work` field
+//   will be used because of the ID const generic bound. This makes sure that `T::raw_get_work`
+//   uses the correct offset for the `Work` field, and `Work::new` picks the correct
+//   implementation of `WorkItemPointer` for `ARef<T>`.
+unsafe impl<T, const ID: u64> WorkItemPointer<ID> for ARef<T>
+where
+    T: AlwaysRefCounted,
+    T: WorkItem<ID, Pointer = Self>,
+    T: HasWork<T, ID>,
+{
+    unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+        // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+        let ptr = ptr.cast::<Work<T, ID>>();
+
+        // SAFETY: This computes the pointer that `__enqueue` got from
+        // `ARef::into_raw`.
+        let ptr = unsafe { T::work_container_of(ptr) };
+
+        // SAFETY: The safety contract of `work_container_of` ensures that it
+        // returns a valid non-null pointer.
+        let ptr = unsafe { NonNull::new_unchecked(ptr) };
+
+        // SAFETY: This pointer comes from `ARef::into_raw` and we've been given
+        // back ownership.
+        let aref = unsafe { ARef::from_raw(ptr) };
+
+        T::run(aref)
+    }
+}
+
+// SAFETY: The `work_struct` raw pointer is guaranteed to be valid for the duration of the call to
+// the closure because we get it from an `ARef`, which means that the ref count will be at least 1,
+// and we don't drop the `ARef` ourselves. If `queue_work_on` returns true, it is further guaranteed
+// to be valid until a call to the function pointer in `work_struct` because we leak the memory it
+// points to, and only reclaim it if the closure returns false, or in `WorkItemPointer::run`, which
+// is what the function pointer in the `work_struct` must be pointing to, according to the safety
+// requirements of `WorkItemPointer`.
+unsafe impl<T, const ID: u64> RawWorkItem<ID> for ARef<T>
+where
+    T: AlwaysRefCounted,
+    T: WorkItem<ID, Pointer = Self>,
+    T: HasWork<T, ID>,
+{
+    type EnqueueOutput = Result<(), Self>;
+
+    unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+    where
+        F: FnOnce(*mut bindings::work_struct) -> bool,
+    {
+        let ptr = ARef::into_raw(self);
+
+        // SAFETY: Pointers from `ARef::into_raw` are valid and non-null.
+        let work_ptr = unsafe { T::raw_get_work(ptr.as_ptr()) };
+        // SAFETY: `raw_get_work` returns a pointer to a valid value.
+        let work_ptr = unsafe { Work::raw_get(work_ptr) };
+
+        if queue_work_on(work_ptr) {
+            Ok(())
+        } else {
+            // SAFETY: The work queue has not taken ownership of the pointer.
+            Err(unsafe { ARef::from_raw(ptr) })
+        }
+    }
+}
+
 /// Returns the system work queue (`system_wq`).
 ///
 /// It is the one used by `schedule[_delayed]_work[_on]()`. Multi-CPU multi-threaded. There are
-- 
2.52.0
