* [PATCH v5 1/6] rust: drm: add base GPUVM immediate mode abstraction
2026-03-20 16:08 [PATCH v5 0/6] Rust GPUVM immediate mode Alice Ryhl
@ 2026-03-20 16:08 ` Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 2/6] rust: helpers: Add bindings/wrappers for dma_resv_lock Alice Ryhl
` (4 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Alice Ryhl @ 2026-03-20 16:08 UTC (permalink / raw)
To: Danilo Krummrich, Daniel Almeida
Cc: Boris Brezillon, Janne Grunau, Matthew Brost,
Thomas Hellström, Lyude Paul, Asahi Lina, Sumit Semwal,
Christian König, dri-devel, linux-kernel, rust-for-linux,
linux-media, Alice Ryhl
From: Asahi Lina <lina+kernel@asahilina.net>
Add a GPUVM abstraction to be used by Rust GPU drivers.
GPUVM keeps track of a GPU's virtual address (VA) space and manages the
corresponding virtual mappings represented by "GPU VA" objects. It also
keeps track of the gem::Object<T> used to back the mappings through
GpuVmBo<T>.
This abstraction is only usable by drivers that wish to use GPUVM in
immediate mode. This allows us to build the locking scheme into the API
design. It means that the GEM mutex is used for the GEM gpuva list, and
that the resv lock is used for the extobj list. The evicted list is not
yet used in this version.
This abstraction provides a special handle called the UniqueRefGpuVm,
which is a wrapper around ARef<GpuVm> that provides access to the
interval tree. Generally, all changes to the address space require
mutable access to this unique handle.
Signed-off-by: Asahi Lina <lina+kernel@asahilina.net>
Co-developed-by: Daniel Almeida <daniel.almeida@collabora.com>
Signed-off-by: Daniel Almeida <daniel.almeida@collabora.com>
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
Co-developed-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
MAINTAINERS | 2 +
rust/bindings/bindings_helper.h | 1 +
rust/helpers/drm_gpuvm.c | 20 ++++
rust/helpers/helpers.c | 1 +
rust/kernel/drm/gpuvm/mod.rs | 238 ++++++++++++++++++++++++++++++++++++++++
rust/kernel/drm/mod.rs | 1 +
6 files changed, 263 insertions(+)
diff --git a/MAINTAINERS b/MAINTAINERS
index 96ea84948d76..b40923a18066 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8840,6 +8840,8 @@ S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/drm_gpuvm.c
F: include/drm/drm_gpuvm.h
+F: rust/helpers/drm_gpuvm.c
+F: rust/kernel/drm/gpuvm/
DRM LOG
M: Jocelyn Falempe <jfalempe@redhat.com>
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 083cc44aa952..ef0f010c268b 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -33,6 +33,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
#include <drm/drm_ioctl.h>
#include <kunit/test.h>
#include <linux/auxiliary_bus.h>
diff --git a/rust/helpers/drm_gpuvm.c b/rust/helpers/drm_gpuvm.c
new file mode 100644
index 000000000000..18cf104a8bc7
--- /dev/null
+++ b/rust/helpers/drm_gpuvm.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+#ifdef CONFIG_DRM_GPUVM
+
+#include <drm/drm_gpuvm.h>
+
+__rust_helper
+struct drm_gpuvm *rust_helper_drm_gpuvm_get(struct drm_gpuvm *obj)
+{
+ return drm_gpuvm_get(obj);
+}
+
+__rust_helper
+bool rust_helper_drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ return drm_gpuvm_is_extobj(gpuvm, obj);
+}
+
+#endif // CONFIG_DRM_GPUVM
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index a3c42e51f00a..a83060f8500c 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -29,6 +29,7 @@
#include "device.c"
#include "dma.c"
#include "drm.c"
+#include "drm_gpuvm.c"
#include "err.c"
#include "irq.c"
#include "fs.c"
diff --git a/rust/kernel/drm/gpuvm/mod.rs b/rust/kernel/drm/gpuvm/mod.rs
new file mode 100644
index 000000000000..5086625bbbe7
--- /dev/null
+++ b/rust/kernel/drm/gpuvm/mod.rs
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+#![cfg(CONFIG_DRM_GPUVM = "y")]
+
+//! DRM GPUVM in immediate mode
+//!
+//! Rust abstractions for using GPUVM in immediate mode. This is when the GPUVM state is updated
+//! during `run_job()`, i.e., in the DMA fence signalling critical path, to ensure that the GPUVM
+//! and the GPU's virtual address space have the same state at all times.
+//!
+//! C header: [`include/drm/drm_gpuvm.h`](srctree/include/drm/drm_gpuvm.h)
+
+use kernel::{
+ alloc::AllocError,
+ bindings,
+ drm,
+ drm::gem::IntoGEMObject,
+ prelude::*,
+ sync::aref::{
+ ARef,
+ AlwaysRefCounted, //
+ },
+ types::Opaque, //
+};
+
+use core::{
+ cell::UnsafeCell,
+ ops::{
+ Deref,
+ Range, //
+ },
+ ptr::NonNull, //
+};
+
+/// A DRM GPU VA manager.
+///
+/// This object is refcounted, but the locations of mapped ranges may only be accessed or changed
+/// via the special unique handle [`UniqueRefGpuVm`].
+///
+/// # Invariants
+///
+/// * Stored in an allocation managed by the refcount in `self.vm`.
+/// * Access to `data` and the gpuvm interval tree is controlled via the [`UniqueRefGpuVm`] type.
+#[pin_data]
+pub struct GpuVm<T: DriverGpuVm> {
+ #[pin]
+ vm: Opaque<bindings::drm_gpuvm>,
+ /// Accessed only through the [`UniqueRefGpuVm`] reference.
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: The GPUVM api does not assume that it is tied to a specific thread.
+unsafe impl<T: DriverGpuVm> Send for GpuVm<T> {}
+// SAFETY: The GPUVM api is designed to allow &self methods to be called in parallel.
+unsafe impl<T: DriverGpuVm> Sync for GpuVm<T> {}
+
+// SAFETY: By type invariants, the allocation is managed by the refcount in `self.vm`.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVm<T> {
+ fn inc_ref(&self) {
+ // SAFETY: By type invariants, the allocation is managed by the refcount in `self.vm`.
+ unsafe { bindings::drm_gpuvm_get(self.vm.get()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: By type invariants, the allocation is managed by the refcount in `self.vm`.
+ unsafe { bindings::drm_gpuvm_put((*obj.as_ptr()).vm.get()) };
+ }
+}
+
+impl<T: DriverGpuVm> GpuVm<T> {
+ const fn vtable() -> &'static bindings::drm_gpuvm_ops {
+ &bindings::drm_gpuvm_ops {
+ vm_free: Some(Self::vm_free),
+ op_alloc: None,
+ op_free: None,
+ vm_bo_alloc: None,
+ vm_bo_free: None,
+ vm_bo_validate: None,
+ sm_step_map: None,
+ sm_step_unmap: None,
+ sm_step_remap: None,
+ }
+ }
+
+ /// Creates a GPUVM instance.
+ #[expect(clippy::new_ret_no_self)]
+ pub fn new<E>(
+ name: &'static CStr,
+ dev: &drm::Device<T::Driver>,
+ r_obj: &T::Object,
+ range: Range<u64>,
+ reserve_range: Range<u64>,
+ data: T,
+ ) -> Result<UniqueRefGpuVm<T>, E>
+ where
+ E: From<AllocError>,
+ E: From<core::convert::Infallible>,
+ {
+ let obj = KBox::try_pin_init::<E>(
+ try_pin_init!(Self {
+ data: UnsafeCell::new(data),
+ vm <- Opaque::ffi_init(|vm| {
+ // SAFETY: These arguments are valid. `vm` is valid until refcount drops to
+ // zero.
+ unsafe {
+ bindings::drm_gpuvm_init(
+ vm,
+ name.as_char_ptr(),
+ bindings::drm_gpuvm_flags_DRM_GPUVM_IMMEDIATE_MODE
+ | bindings::drm_gpuvm_flags_DRM_GPUVM_RESV_PROTECTED,
+ dev.as_raw(),
+ r_obj.as_raw(),
+ range.start,
+ range.end - range.start,
+ reserve_range.start,
+ reserve_range.end - reserve_range.start,
+ const { Self::vtable() },
+ )
+ }
+ }),
+ }? E),
+ GFP_KERNEL,
+ )?;
+ // SAFETY: This transfers the initial refcount to the ARef.
+ let aref = unsafe {
+ ARef::from_raw(NonNull::new_unchecked(KBox::into_raw(
+ Pin::into_inner_unchecked(obj),
+ )))
+ };
+ // INVARIANT: This reference is unique.
+ Ok(UniqueRefGpuVm(aref))
+ }
+
+ /// Access this [`GpuVm`] from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must reference the `struct drm_gpuvm` in a valid [`GpuVm<T>`] that remains
+ /// valid for at least `'a`.
+ #[inline]
+ pub unsafe fn from_raw<'a>(ptr: *mut bindings::drm_gpuvm) -> &'a Self {
+ // SAFETY: Caller passes a pointer to the `drm_gpuvm` in a `GpuVm<T>`. Caller ensures the
+ // pointer is valid for 'a.
+ unsafe { &*kernel::container_of!(Opaque::cast_from(ptr), Self, vm) }
+ }
+
+ /// Returns a raw pointer to the embedded `struct drm_gpuvm`.
+ #[inline]
+ pub fn as_raw(&self) -> *mut bindings::drm_gpuvm {
+ self.vm.get()
+ }
+
+ /// The start of the VA space.
+ #[inline]
+ pub fn va_start(&self) -> u64 {
+ // SAFETY: The `mm_start` field is immutable.
+ unsafe { (*self.as_raw()).mm_start }
+ }
+
+ /// The length of the GPU's virtual address space.
+ #[inline]
+ pub fn va_length(&self) -> u64 {
+ // SAFETY: The `mm_range` field is immutable.
+ unsafe { (*self.as_raw()).mm_range }
+ }
+
+ /// Returns the range of the GPU virtual address space.
+ #[inline]
+ pub fn va_range(&self) -> Range<u64> {
+ let start = self.va_start();
+ // OVERFLOW: This reconstructs the Range<u64> passed to the constructor, so it won't fail.
+ let end = start + self.va_length();
+ Range { start, end }
+ }
+
+ /// Clean up buffer objects that are no longer used.
+ #[inline]
+ pub fn deferred_cleanup(&self) {
+ // SAFETY: This GPUVM uses immediate mode.
+ unsafe { bindings::drm_gpuvm_bo_deferred_cleanup(self.as_raw()) }
+ }
+
+ /// Check if this GEM object is an external object for this GPUVM.
+ #[inline]
+ pub fn is_extobj(&self, obj: &T::Object) -> bool {
+ // SAFETY: We may call this with any GPUVM and GEM object.
+ unsafe { bindings::drm_gpuvm_is_extobj(self.as_raw(), obj.as_raw()) }
+ }
+
+ /// Free this GPUVM.
+ ///
+ /// # Safety
+ ///
+ /// Called when refcount hits zero.
+ unsafe extern "C" fn vm_free(me: *mut bindings::drm_gpuvm) {
+ // SAFETY: Caller passes a pointer to the `drm_gpuvm` in a `GpuVm<T>`.
+ let me = unsafe { kernel::container_of!(Opaque::cast_from(me), Self, vm).cast_mut() };
+ // SAFETY: By type invariants we can free it when refcount hits zero.
+ drop(unsafe { KBox::from_raw(me) })
+ }
+}
+
+/// The manager for a GPUVM.
+pub trait DriverGpuVm: Sized {
+ /// Parent `Driver` for this object.
+ type Driver: drm::Driver<Object = Self::Object>;
+
+ /// The kind of GEM object stored in this GPUVM.
+ type Object: IntoGEMObject;
+}
+
+/// The core of the DRM GPU VA manager.
+///
+/// This object is a unique reference to the VM that can access the interval tree and the Rust
+/// `data` field.
+///
+/// # Invariants
+///
+/// Each `GpuVm` instance has at most one `UniqueRefGpuVm` reference.
+pub struct UniqueRefGpuVm<T: DriverGpuVm>(ARef<GpuVm<T>>);
+
+impl<T: DriverGpuVm> UniqueRefGpuVm<T> {
+ /// Access the core data of this GPUVM.
+ #[inline]
+ pub fn data(&mut self) -> &mut T {
+ // SAFETY: By the type invariants we may access `core`.
+ unsafe { &mut *self.0.data.get() }
+ }
+}
+
+impl<T: DriverGpuVm> Deref for UniqueRefGpuVm<T> {
+ type Target = GpuVm<T>;
+
+ #[inline]
+ fn deref(&self) -> &GpuVm<T> {
+ &self.0
+ }
+}
diff --git a/rust/kernel/drm/mod.rs b/rust/kernel/drm/mod.rs
index 1b82b6945edf..a4b6c5430198 100644
--- a/rust/kernel/drm/mod.rs
+++ b/rust/kernel/drm/mod.rs
@@ -6,6 +6,7 @@
pub mod driver;
pub mod file;
pub mod gem;
+pub mod gpuvm;
pub mod ioctl;
pub use self::device::Device;
--
2.53.0.959.g497ff81fa9-goog
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH v5 2/6] rust: helpers: Add bindings/wrappers for dma_resv_lock
2026-03-20 16:08 [PATCH v5 0/6] Rust GPUVM immediate mode Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 1/6] rust: drm: add base GPUVM immediate mode abstraction Alice Ryhl
@ 2026-03-20 16:08 ` Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 3/6] rust: gpuvm: add GpuVm::obtain() Alice Ryhl
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Alice Ryhl @ 2026-03-20 16:08 UTC (permalink / raw)
To: Danilo Krummrich, Daniel Almeida
Cc: Boris Brezillon, Janne Grunau, Matthew Brost,
Thomas Hellström, Lyude Paul, Asahi Lina, Sumit Semwal,
Christian König, dri-devel, linux-kernel, rust-for-linux,
linux-media, Alice Ryhl
From: Asahi Lina <lina+kernel@asahilina.net>
This is just for basic usage in the DRM shmem abstractions for implied
locking, not intended as a full DMA Reservation abstraction yet.
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Christian König <christian.koenig@amd.com>
Signed-off-by: Asahi Lina <lina+kernel@asahilina.net>
Signed-off-by: Daniel Almeida <daniel.almeida@collabora.com>
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Janne Grunau <j@jannau.net>
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
Taken from:
https://lore.kernel.org/all/20251202220924.520644-3-lyude@redhat.com/
with __rust_helper and MAINTAINERS file added.
---
MAINTAINERS | 1 +
rust/bindings/bindings_helper.h | 1 +
rust/helpers/dma-resv.c | 14 ++++++++++++++
rust/helpers/helpers.c | 1 +
4 files changed, 17 insertions(+)
diff --git a/MAINTAINERS b/MAINTAINERS
index b40923a18066..5d013a36f46f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7534,6 +7534,7 @@ F: include/linux/*fence.h
F: include/linux/dma-buf.h
F: include/linux/dma-buf/
F: include/linux/dma-resv.h
+F: rust/helpers/dma-resv.c
K: \bdma_(?:buf|fence|resv)\b
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index ef0f010c268b..47279e993934 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -52,6 +52,7 @@
#include <linux/device/faux.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-resv.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
#include <linux/fdtable.h>
diff --git a/rust/helpers/dma-resv.c b/rust/helpers/dma-resv.c
new file mode 100644
index 000000000000..71914d8241e2
--- /dev/null
+++ b/rust/helpers/dma-resv.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-resv.h>
+
+__rust_helper
+int rust_helper_dma_resv_lock(struct dma_resv *obj, struct ww_acquire_ctx *ctx)
+{
+ return dma_resv_lock(obj, ctx);
+}
+
+__rust_helper void rust_helper_dma_resv_unlock(struct dma_resv *obj)
+{
+ dma_resv_unlock(obj);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index a83060f8500c..fa56d62c9014 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -28,6 +28,7 @@
#include "cred.c"
#include "device.c"
#include "dma.c"
+#include "dma-resv.c"
#include "drm.c"
#include "drm_gpuvm.c"
#include "err.c"
--
2.53.0.959.g497ff81fa9-goog
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH v5 3/6] rust: gpuvm: add GpuVm::obtain()
2026-03-20 16:08 [PATCH v5 0/6] Rust GPUVM immediate mode Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 1/6] rust: drm: add base GPUVM immediate mode abstraction Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 2/6] rust: helpers: Add bindings/wrappers for dma_resv_lock Alice Ryhl
@ 2026-03-20 16:08 ` Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 4/6] rust: gpuvm: add GpuVa struct Alice Ryhl
` (2 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Alice Ryhl @ 2026-03-20 16:08 UTC (permalink / raw)
To: Danilo Krummrich, Daniel Almeida
Cc: Boris Brezillon, Janne Grunau, Matthew Brost,
Thomas Hellström, Lyude Paul, Asahi Lina, Sumit Semwal,
Christian König, dri-devel, linux-kernel, rust-for-linux,
linux-media, Alice Ryhl
This provides a mechanism to create (or look up) VMBO instances, which
represent the mapping between GPUVM and GEM objects.
The GpuVmBoRegistered<T> type can be considered like ARef<GpuVm<T>>,
except that no way to increment the refcount is provided.
The GpuVmBoAlloc<T> type is more akin to a pre-allocated GpuVmBo<T>, so
it's not really a GpuVmBo<T> yet. Its destructor could call
drm_gpuvm_bo_destroy_not_in_lists(), but as the type is currently
private and never called anywhere, this perf optimization does not need
to happen now.
Pre-allocating and obtaining the gpuvm_bo object is exposed as a single
step. This could theoretically be a problem if one wanted to call
drm_gpuvm_bo_obtain_prealloc() during the fence signalling critical
path, but that's not a possibility because:
1. Adding the BO to the extobj list requires the resv lock, so it cannot
happen during the fence signalling critical path.
2. obtain() requires that the BO is not in the extobj list, so obtain()
must be called before adding the BO to the extobj list.
Thus, drm_gpuvm_bo_obtain_prealloc() cannot be called during the fence
signalling critical path. (For extobjs at least.)
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
rust/helpers/drm_gpuvm.c | 6 ++
rust/kernel/drm/gpuvm/mod.rs | 32 +++++-
rust/kernel/drm/gpuvm/vm_bo.rs | 227 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 262 insertions(+), 3 deletions(-)
diff --git a/rust/helpers/drm_gpuvm.c b/rust/helpers/drm_gpuvm.c
index 18cf104a8bc7..ca959d9a66f6 100644
--- a/rust/helpers/drm_gpuvm.c
+++ b/rust/helpers/drm_gpuvm.c
@@ -4,6 +4,12 @@
#include <drm/drm_gpuvm.h>
+__rust_helper
+struct drm_gpuvm_bo *rust_helper_drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+ return drm_gpuvm_bo_get(vm_bo);
+}
+
__rust_helper
struct drm_gpuvm *rust_helper_drm_gpuvm_get(struct drm_gpuvm *obj)
{
diff --git a/rust/kernel/drm/gpuvm/mod.rs b/rust/kernel/drm/gpuvm/mod.rs
index 5086625bbbe7..40446e679844 100644
--- a/rust/kernel/drm/gpuvm/mod.rs
+++ b/rust/kernel/drm/gpuvm/mod.rs
@@ -25,13 +25,20 @@
use core::{
cell::UnsafeCell,
+ mem::ManuallyDrop,
ops::{
Deref,
Range, //
},
- ptr::NonNull, //
+ ptr::{
+ self,
+ NonNull, //
+ }, //
};
+mod vm_bo;
+pub use self::vm_bo::*;
+
/// A DRM GPU VA manager.
///
/// This object is refcounted, but the locations of mapped ranges may only be accessed or changed
@@ -73,8 +80,8 @@ const fn vtable() -> &'static bindings::drm_gpuvm_ops {
vm_free: Some(Self::vm_free),
op_alloc: None,
op_free: None,
- vm_bo_alloc: None,
- vm_bo_free: None,
+ vm_bo_alloc: GpuVmBo::<T>::ALLOC_FN,
+ vm_bo_free: GpuVmBo::<T>::FREE_FN,
vm_bo_validate: None,
sm_step_map: None,
sm_step_unmap: None,
@@ -173,6 +180,16 @@ pub fn va_range(&self) -> Range<u64> {
Range { start, end }
}
+ /// Get or create the [`GpuVmBo`] for this gem object.
+ #[inline]
+ pub fn obtain(
+ &self,
+ obj: &T::Object,
+ data: impl PinInit<T::VmBoData>,
+ ) -> Result<ARef<GpuVmBo<T>>, AllocError> {
+ Ok(GpuVmBoAlloc::new(self, obj, data)?.obtain())
+ }
+
/// Clean up buffer objects that are no longer used.
#[inline]
pub fn deferred_cleanup(&self) {
@@ -198,6 +215,12 @@ pub fn is_extobj(&self, obj: &T::Object) -> bool {
// SAFETY: By type invariants we can free it when refcount hits zero.
drop(unsafe { KBox::from_raw(me) })
}
+
+ #[inline]
+ fn raw_resv(&self) -> *mut bindings::dma_resv {
+ // SAFETY: `r_obj` is immutable and valid for duration of GPUVM.
+ unsafe { (*(*self.as_raw()).r_obj).resv }
+ }
}
/// The manager for a GPUVM.
@@ -207,6 +230,9 @@ pub trait DriverGpuVm: Sized {
/// The kind of GEM object stored in this GPUVM.
type Object: IntoGEMObject;
+
+ /// Data stored with each `struct drm_gpuvm_bo`.
+ type VmBoData;
}
/// The core of the DRM GPU VA manager.
diff --git a/rust/kernel/drm/gpuvm/vm_bo.rs b/rust/kernel/drm/gpuvm/vm_bo.rs
new file mode 100644
index 000000000000..b51c6f106185
--- /dev/null
+++ b/rust/kernel/drm/gpuvm/vm_bo.rs
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+use super::*;
+
+/// Represents that a given GEM object has at least one mapping on this [`GpuVm`] instance.
+///
+/// Does not assume that GEM lock is held.
+///
+/// # Invariants
+///
+/// * Allocated with `kmalloc` and refcounted via `inner`.
+/// * Is present in the gem list.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVmBo<T: DriverGpuVm> {
+ #[pin]
+ inner: Opaque<bindings::drm_gpuvm_bo>,
+ #[pin]
+ data: T::VmBoData,
+}
+
+// SAFETY: By type invariants, the allocation is managed by the refcount in `self.inner`.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVmBo<T> {
+ fn inc_ref(&self) {
+ // SAFETY: By type invariants, the allocation is managed by the refcount in `self.inner`.
+ unsafe { bindings::drm_gpuvm_bo_get(self.inner.get()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // CAST: `drm_gpuvm_bo` is first field of repr(C) struct.
+ // SAFETY: By type invariants, the allocation is managed by the refcount in `self.inner`.
+ // This GPUVM instance uses immediate mode, so we may put the refcount using the deferred
+ // mechanism.
+ unsafe { bindings::drm_gpuvm_bo_put_deferred(obj.as_ptr().cast()) };
+ }
+}
+
+impl<T: DriverGpuVm> GpuVmBo<T> {
+ /// The function pointer for allocating a GpuVmBo stored in the gpuvm vtable.
+ ///
+ /// Allocation is always implemented according to [`Self::vm_bo_alloc`], but it is set to
+ /// `None` if the default gpuvm behavior is the same as `vm_bo_alloc`.
+ ///
+ /// This may be `Some` even if `FREE_FN` is `None`, or vice-versa.
+ pub(super) const ALLOC_FN: Option<unsafe extern "C" fn() -> *mut bindings::drm_gpuvm_bo> = {
+ use core::alloc::Layout;
+ let base = Layout::new::<bindings::drm_gpuvm_bo>();
+ let rust = Layout::new::<Self>();
+ assert!(base.size() <= rust.size());
+ if base.size() != rust.size() || base.align() != rust.align() {
+ Some(Self::vm_bo_alloc)
+ } else {
+ // This causes GPUVM to allocate a `GpuVmBo<T>` with `kzalloc(sizeof(drm_gpuvm_bo))`.
+ None
+ }
+ };
+
+ /// The function pointer for freeing a GpuVmBo stored in the gpuvm vtable.
+ ///
+ /// Freeing is always implemented according to [`Self::vm_bo_free`], but it is set to `None` if
+ /// the default gpuvm behavior is the same as `vm_bo_free`.
+ ///
+ /// This may be `Some` even if `ALLOC_FN` is `None`, or vice-versa.
+ pub(super) const FREE_FN: Option<unsafe extern "C" fn(*mut bindings::drm_gpuvm_bo)> = {
+ if core::mem::needs_drop::<Self>() {
+ Some(Self::vm_bo_free)
+ } else {
+ // This causes GPUVM to free a `GpuVmBo<T>` with `kfree`.
+ None
+ }
+ };
+
+ /// Custom function for allocating a `drm_gpuvm_bo`.
+ ///
+ /// # Safety
+ ///
+ /// Always safe to call.
+ unsafe extern "C" fn vm_bo_alloc() -> *mut bindings::drm_gpuvm_bo {
+ let raw_ptr = KBox::<Self>::new_uninit(GFP_KERNEL | __GFP_ZERO)
+ .map(KBox::into_raw)
+ .unwrap_or(ptr::null_mut());
+
+ // CAST: `drm_gpuvm_bo` is first field of `Self`.
+ raw_ptr.cast()
+ }
+
+ /// Custom function for freeing a `drm_gpuvm_bo`.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been allocated with [`GpuVmBo::ALLOC_FN`], and must not be used after
+ /// this call.
+ unsafe extern "C" fn vm_bo_free(ptr: *mut bindings::drm_gpuvm_bo) {
+ // CAST: `drm_gpuvm_bo` is first field of `Self`.
+ // SAFETY:
+ // * The ptr was allocated from kmalloc with the layout of `GpuVmBo<T>`.
+ // * `ptr->inner` has no destructor.
+ // * `ptr->data` contains a valid `T::VmBoData` that we can drop.
+ drop(unsafe { KBox::<Self>::from_raw(ptr.cast()) });
+ }
+
+ /// Access this [`GpuVmBo`] from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// For the duration of `'a`, the pointer must reference a valid `drm_gpuvm_bo` associated with
+ /// a [`GpuVm<T>`]. The BO must also be present in the GEM list.
+ #[inline]
+ #[expect(dead_code)]
+ pub(crate) unsafe fn from_raw<'a>(ptr: *mut bindings::drm_gpuvm_bo) -> &'a Self {
+ // SAFETY: `drm_gpuvm_bo` is first field and `repr(C)`.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Returns a raw pointer to underlying C value.
+ #[inline]
+ pub fn as_raw(&self) -> *mut bindings::drm_gpuvm_bo {
+ self.inner.get()
+ }
+
+ /// The [`GpuVm`] that this GEM object is mapped in.
+ #[inline]
+ pub fn gpuvm(&self) -> &GpuVm<T> {
+ // SAFETY: The `obj` pointer is guaranteed to be valid.
+ unsafe { GpuVm::<T>::from_raw((*self.inner.get()).vm) }
+ }
+
+ /// The [`drm_gem_object`](crate::gem::Object) for these mappings.
+ #[inline]
+ pub fn obj(&self) -> &T::Object {
+ // SAFETY: The `obj` pointer is guaranteed to be valid.
+ unsafe { <T::Object as IntoGEMObject>::from_raw((*self.inner.get()).obj) }
+ }
+
+ /// The driver data with this buffer object.
+ #[inline]
+ pub fn data(&self) -> &T::VmBoData {
+ &self.data
+ }
+}
+
+/// A pre-allocated [`GpuVmBo`] object.
+///
+/// # Invariants
+///
+/// Points at a `drm_gpuvm_bo` that contains a valid `T::VmBoData`, has a refcount of one, and is
+/// absent from any gem, extobj, or evict lists.
+pub(super) struct GpuVmBoAlloc<T: DriverGpuVm>(NonNull<GpuVmBo<T>>);
+
+impl<T: DriverGpuVm> GpuVmBoAlloc<T> {
+ /// Create a new pre-allocated [`GpuVmBo`].
+ ///
+ /// It's intentional that the initializer is infallible because `drm_gpuvm_bo_put` will call
+ /// drop on the data, so we don't have a way to free it when the data is missing.
+ #[inline]
+ pub(super) fn new(
+ gpuvm: &GpuVm<T>,
+ gem: &T::Object,
+ value: impl PinInit<T::VmBoData>,
+ ) -> Result<GpuVmBoAlloc<T>, AllocError> {
+ // CAST: `GpuVmBoAlloc::vm_bo_alloc` ensures that this memory was allocated with the layout
+ // of `GpuVmBo<T>`. The type is repr(C), so `container_of` is not required.
+ // SAFETY: The provided gpuvm and gem ptrs are valid for the duration of this call.
+ let raw_ptr = unsafe {
+ bindings::drm_gpuvm_bo_create(gpuvm.as_raw(), gem.as_raw()).cast::<GpuVmBo<T>>()
+ };
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ // SAFETY: `ptr->data` is a valid pinned location.
+ let Ok(()) = unsafe { value.__pinned_init(&raw mut (*raw_ptr).data) };
+ // INVARIANTS: We just created the vm_bo so it's absent from lists, and the data is valid
+ // as we just initialized it.
+ Ok(GpuVmBoAlloc(ptr))
+ }
+
+ /// Returns a raw pointer to underlying C value.
+ #[inline]
+ pub(super) fn as_raw(&self) -> *mut bindings::drm_gpuvm_bo {
+ // SAFETY: The pointer references a valid `drm_gpuvm_bo`.
+ unsafe { (*self.0.as_ptr()).inner.get() }
+ }
+
+ /// Look up whether there is an existing [`GpuVmBo`] for this gem object.
+ #[inline]
+ pub(super) fn obtain(self) -> ARef<GpuVmBo<T>> {
+ let me = ManuallyDrop::new(self);
+ // SAFETY: Valid `drm_gpuvm_bo` not already in the lists.
+ let ptr = unsafe { bindings::drm_gpuvm_bo_obtain_prealloc(me.as_raw()) };
+
+ // Add the vm_bo to the extobj list if it's an external object, and if the vm_bo does not
+ // already exist. (If we are using an existing vm_bo, it's already in the extobj list.)
+ if ptr::eq(ptr, me.as_raw()) && me.gpuvm().is_extobj(me.obj()) {
+ let resv_lock = me.gpuvm().raw_resv();
+ // TODO: Use a proper lock guard here once a dma_resv lock abstraction exists.
+ // SAFETY: The GPUVM is still alive, so its resv lock is too.
+ unsafe { bindings::dma_resv_lock(resv_lock, ptr::null_mut()) };
+ // SAFETY: We hold the GPUVM's resv lock.
+ unsafe { bindings::drm_gpuvm_bo_extobj_add(ptr) };
+ // SAFETY: We took the lock, so we can unlock it.
+ unsafe { bindings::dma_resv_unlock(resv_lock) };
+ }
+
+ // SAFETY: `drm_gpuvm_bo_obtain_prealloc` always returns a non-null ptr
+ let ptr = unsafe { NonNull::new_unchecked(ptr.cast()) };
+
+ // INVARIANTS: `drm_gpuvm_bo_obtain_prealloc` ensures that the bo is in the GEM list.
+ // SAFETY: We received one refcount from `drm_gpuvm_bo_obtain_prealloc`.
+ unsafe { ARef::from_raw(ptr) }
+ }
+}
+
+impl<T: DriverGpuVm> Deref for GpuVmBoAlloc<T> {
+ type Target = GpuVmBo<T>;
+ #[inline]
+ fn deref(&self) -> &GpuVmBo<T> {
+ // SAFETY: By the type invariants we may deref while `Self` exists.
+ unsafe { self.0.as_ref() }
+ }
+}
+
+impl<T: DriverGpuVm> Drop for GpuVmBoAlloc<T> {
+ #[inline]
+ fn drop(&mut self) {
+ // TODO: Call drm_gpuvm_bo_destroy_not_in_lists() directly.
+ // SAFETY: It's safe to perform a deferred put in any context.
+ unsafe { bindings::drm_gpuvm_bo_put_deferred(self.as_raw()) };
+ }
+}
--
2.53.0.959.g497ff81fa9-goog
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH v5 4/6] rust: gpuvm: add GpuVa struct
2026-03-20 16:08 [PATCH v5 0/6] Rust GPUVM immediate mode Alice Ryhl
` (2 preceding siblings ...)
2026-03-20 16:08 ` [PATCH v5 3/6] rust: gpuvm: add GpuVm::obtain() Alice Ryhl
@ 2026-03-20 16:08 ` Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 5/6] rust: gpuvm: add GpuVmCore::sm_unmap() Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 6/6] rust: gpuvm: add GpuVmCore::sm_map() Alice Ryhl
5 siblings, 0 replies; 7+ messages in thread
From: Alice Ryhl @ 2026-03-20 16:08 UTC (permalink / raw)
To: Danilo Krummrich, Daniel Almeida
Cc: Boris Brezillon, Janne Grunau, Matthew Brost,
Thomas Hellström, Lyude Paul, Asahi Lina, Sumit Semwal,
Christian König, dri-devel, linux-kernel, rust-for-linux,
linux-media, Alice Ryhl
This struct will be used to keep track of individual mapped ranges in
the GPU's virtual memory.
Co-developed-by: Asahi Lina <lina+kernel@asahilina.net>
Signed-off-by: Asahi Lina <lina+kernel@asahilina.net>
Co-developed-by: Daniel Almeida <daniel.almeida@collabora.com>
Signed-off-by: Daniel Almeida <daniel.almeida@collabora.com>
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
rust/kernel/drm/gpuvm/mod.rs | 17 ++++-
rust/kernel/drm/gpuvm/va.rs | 151 +++++++++++++++++++++++++++++++++++++++++
rust/kernel/drm/gpuvm/vm_bo.rs | 1 -
3 files changed, 166 insertions(+), 3 deletions(-)
diff --git a/rust/kernel/drm/gpuvm/mod.rs b/rust/kernel/drm/gpuvm/mod.rs
index 40446e679844..2eb4fb0f9839 100644
--- a/rust/kernel/drm/gpuvm/mod.rs
+++ b/rust/kernel/drm/gpuvm/mod.rs
@@ -11,7 +11,10 @@
//! C header: [`include/drm/drm_gpuvm.h`](srctree/include/drm/drm_gpuvm.h)
use kernel::{
- alloc::AllocError,
+ alloc::{
+ AllocError,
+ Flags as AllocFlags, //
+ },
bindings,
drm,
drm::gem::IntoGEMObject,
@@ -25,9 +28,13 @@
use core::{
cell::UnsafeCell,
- mem::ManuallyDrop,
+ mem::{
+ ManuallyDrop,
+ MaybeUninit, //
+ },
ops::{
Deref,
+ DerefMut,
Range, //
},
ptr::{
@@ -36,6 +43,9 @@
}, //
};
+mod va;
+pub use self::va::*;
+
mod vm_bo;
pub use self::vm_bo::*;
@@ -231,6 +241,9 @@ pub trait DriverGpuVm: Sized {
/// The kind of GEM object stored in this GPUVM.
type Object: IntoGEMObject;
+ /// Data stored with each `struct drm_gpuva`.
+ type VaData;
+
/// Data stored with each `struct drm_gpuvm_bo`.
type VmBoData;
}
diff --git a/rust/kernel/drm/gpuvm/va.rs b/rust/kernel/drm/gpuvm/va.rs
new file mode 100644
index 000000000000..540f525bb56d
--- /dev/null
+++ b/rust/kernel/drm/gpuvm/va.rs
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+#![expect(dead_code)]
+use super::*;
+
+/// Represents that a range of a GEM object is mapped in this [`GpuVm`] instance.
+///
+/// Does not assume that GEM lock is held.
+///
+/// # Invariants
+///
+/// This is a valid `drm_gpuva` that is resident in the [`GpuVm`] instance. The associated
+/// [`GpuVmBo`] is part of the GEM list.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVa<T: DriverGpuVm> {
+ #[pin]
+ inner: Opaque<bindings::drm_gpuva>,
+ #[pin]
+ data: T::VaData,
+}
+
+impl<T: DriverGpuVm> GpuVa<T> {
+ /// Access this [`GpuVa`] from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// For the duration of `'a`, the pointer must reference a valid `drm_gpuva` associated with a
+ /// [`GpuVm<T>`]. Its parent [`GpuVmBo`] must be in the GEM list.
+ #[inline]
+ pub unsafe fn from_raw<'a>(ptr: *mut bindings::drm_gpuva) -> &'a Self {
+ // SAFETY: `drm_gpuva` is first field and `repr(C)`.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Returns a raw pointer to underlying C value.
+ #[inline]
+ pub fn as_raw(&self) -> *mut bindings::drm_gpuva {
+ self.inner.get()
+ }
+
+ /// Returns the address of this mapping in the GPU virtual address space.
+ #[inline]
+ pub fn addr(&self) -> u64 {
+ // SAFETY: The `va.addr` field of `drm_gpuva` is immutable.
+ unsafe { (*self.as_raw()).va.addr }
+ }
+
+ /// Returns the length of this mapping.
+ #[inline]
+ pub fn length(&self) -> u64 {
+ // SAFETY: The `va.range` field of `drm_gpuva` is immutable.
+ unsafe { (*self.as_raw()).va.range }
+ }
+
+ /// Returns `addr..addr+length`.
+ #[inline]
+ pub fn range(&self) -> Range<u64> {
+ let addr = self.addr();
+ addr..addr + self.length()
+ }
+
+ /// Returns the offset within the GEM object.
+ #[inline]
+ pub fn gem_offset(&self) -> u64 {
+ // SAFETY: The `gem.offset` field of `drm_gpuva` is immutable.
+ unsafe { (*self.as_raw()).gem.offset }
+ }
+
+ /// Returns the GEM object.
+ #[inline]
+ pub fn obj(&self) -> &T::Object {
+ // SAFETY: The `gem.obj` field of `drm_gpuva` is immutable.
+ unsafe { <T::Object as IntoGEMObject>::from_raw((*self.as_raw()).gem.obj) }
+ }
+
+ /// Returns the underlying [`GpuVmBo`] object that backs this [`GpuVa`].
+ #[inline]
+ pub fn vm_bo(&self) -> &GpuVmBo<T> {
+ // SAFETY: The `vm_bo` field has been set and is immutable for the duration in which this
+ // `drm_gpuva` is resident in the VM. By the type invariants, the BO is part of the GEM
+ // list.
+ unsafe { GpuVmBo::from_raw((*self.as_raw()).vm_bo) }
+ }
+}
+
+/// A pre-allocated [`GpuVa`] object.
+///
+/// # Invariants
+///
+/// The memory is zeroed.
+pub struct GpuVaAlloc<T: DriverGpuVm>(KBox<MaybeUninit<GpuVa<T>>>);
+
+impl<T: DriverGpuVm> GpuVaAlloc<T> {
+ /// Pre-allocate a [`GpuVa`] object.
+ pub fn new(flags: AllocFlags) -> Result<GpuVaAlloc<T>, AllocError> {
+ // INVARIANTS: Memory allocated with __GFP_ZERO.
+ Ok(GpuVaAlloc(KBox::new_uninit(flags | __GFP_ZERO)?))
+ }
+
+ /// Prepare this `drm_gpuva` for insertion into the GPUVM.
+ pub(super) fn prepare(mut self, va_data: impl PinInit<T::VaData>) -> *mut bindings::drm_gpuva {
+ let va_ptr = MaybeUninit::as_mut_ptr(&mut self.0);
+ // SAFETY: The `data` field is pinned.
+ let Ok(()) = unsafe { va_data.__pinned_init(&raw mut (*va_ptr).data) };
+ KBox::into_raw(self.0).cast()
+ }
+}
+
+/// A [`GpuVa`] object that has been removed.
+///
+/// # Invariants
+///
+/// The `drm_gpuva` is not resident in the [`GpuVm`].
+pub struct GpuVaRemoved<T: DriverGpuVm>(KBox<GpuVa<T>>);
+
+impl<T: DriverGpuVm> GpuVaRemoved<T> {
+ /// Convert a raw pointer into a [`GpuVaRemoved`].
+ ///
+ /// # Safety
+ ///
+ /// Must have been removed from a [`GpuVm<T>`].
+ pub(super) unsafe fn from_raw(ptr: *mut bindings::drm_gpuva) -> Self {
+ // SAFETY: Since it has been removed we can take ownership of allocation.
+ GpuVaRemoved(unsafe { KBox::from_raw(ptr.cast()) })
+ }
+
+ /// Take ownership of the VA data.
+ pub fn into_inner(self) -> T::VaData
+ where
+ T::VaData: Unpin,
+ {
+ KBox::into_inner(self.0).data
+ }
+}
+
+impl<T: DriverGpuVm> Deref for GpuVaRemoved<T> {
+ type Target = T::VaData;
+ fn deref(&self) -> &T::VaData {
+ &self.0.data
+ }
+}
+
+impl<T: DriverGpuVm> DerefMut for GpuVaRemoved<T>
+where
+ T::VaData: Unpin,
+{
+ fn deref_mut(&mut self) -> &mut T::VaData {
+ &mut self.0.data
+ }
+}
diff --git a/rust/kernel/drm/gpuvm/vm_bo.rs b/rust/kernel/drm/gpuvm/vm_bo.rs
index b51c6f106185..7fd1d301fd41 100644
--- a/rust/kernel/drm/gpuvm/vm_bo.rs
+++ b/rust/kernel/drm/gpuvm/vm_bo.rs
@@ -106,7 +106,6 @@ impl<T: DriverGpuVm> GpuVmBo<T> {
/// For the duration of `'a`, the pointer must reference a valid `drm_gpuvm_bo` associated with
/// a [`GpuVm<T>`]. The BO must also be present in the GEM list.
#[inline]
- #[expect(dead_code)]
pub(crate) unsafe fn from_raw<'a>(ptr: *mut bindings::drm_gpuvm_bo) -> &'a Self {
// SAFETY: `drm_gpuvm_bo` is first field and `repr(C)`.
unsafe { &*ptr.cast() }
--
2.53.0.959.g497ff81fa9-goog
^ permalink raw reply related [flat|nested] 7+ messages in thread

* [PATCH v5 5/6] rust: gpuvm: add GpuVmCore::sm_unmap()
2026-03-20 16:08 [PATCH v5 0/6] Rust GPUVM immediate mode Alice Ryhl
` (3 preceding siblings ...)
2026-03-20 16:08 ` [PATCH v5 4/6] rust: gpuvm: add GpuVa struct Alice Ryhl
@ 2026-03-20 16:08 ` Alice Ryhl
2026-03-20 16:08 ` [PATCH v5 6/6] rust: gpuvm: add GpuVmCore::sm_map() Alice Ryhl
5 siblings, 0 replies; 7+ messages in thread
From: Alice Ryhl @ 2026-03-20 16:08 UTC (permalink / raw)
To: Danilo Krummrich, Daniel Almeida
Cc: Boris Brezillon, Janne Grunau, Matthew Brost,
Thomas Hellström, Lyude Paul, Asahi Lina, Sumit Semwal,
Christian König, dri-devel, linux-kernel, rust-for-linux,
linux-media, Alice Ryhl
Add the entrypoint for unmapping ranges in the GPUVM, and provide
callbacks and VA types for the implementation.
Co-developed-by: Asahi Lina <lina+kernel@asahilina.net>
Signed-off-by: Asahi Lina <lina+kernel@asahilina.net>
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
rust/kernel/drm/gpuvm/mod.rs | 30 ++++-
rust/kernel/drm/gpuvm/sm_ops.rs | 270 ++++++++++++++++++++++++++++++++++++++++
rust/kernel/drm/gpuvm/va.rs | 1 -
rust/kernel/drm/gpuvm/vm_bo.rs | 8 ++
4 files changed, 304 insertions(+), 5 deletions(-)
diff --git a/rust/kernel/drm/gpuvm/mod.rs b/rust/kernel/drm/gpuvm/mod.rs
index 2eb4fb0f9839..dca72e7b2b89 100644
--- a/rust/kernel/drm/gpuvm/mod.rs
+++ b/rust/kernel/drm/gpuvm/mod.rs
@@ -18,6 +18,7 @@
bindings,
drm,
drm::gem::IntoGEMObject,
+ error::to_result,
prelude::*,
sync::aref::{
ARef,
@@ -28,6 +29,7 @@
use core::{
cell::UnsafeCell,
+ marker::PhantomData,
mem::{
ManuallyDrop,
MaybeUninit, //
@@ -43,12 +45,15 @@
}, //
};
-mod va;
-pub use self::va::*;
+mod sm_ops;
+pub use self::sm_ops::*;
mod vm_bo;
pub use self::vm_bo::*;
+mod va;
+pub use self::va::*;
+
/// A DRM GPU VA manager.
///
/// This object is refcounted, but the locations of mapped ranges may only be accessed or changed
@@ -94,8 +99,8 @@ const fn vtable() -> &'static bindings::drm_gpuvm_ops {
vm_bo_free: GpuVmBo::<T>::FREE_FN,
vm_bo_validate: None,
sm_step_map: None,
- sm_step_unmap: None,
- sm_step_remap: None,
+ sm_step_unmap: Some(Self::sm_step_unmap),
+ sm_step_remap: Some(Self::sm_step_remap),
}
}
@@ -246,6 +251,23 @@ pub trait DriverGpuVm: Sized {
/// Data stored with each `struct drm_gpuvm_bo`.
type VmBoData;
+
+ /// The private data passed to callbacks.
+ type SmContext<'ctx>;
+
+ /// Indicates that an existing mapping should be removed.
+ fn sm_step_unmap<'op, 'ctx>(
+ &mut self,
+ op: OpUnmap<'op, Self>,
+ context: &mut Self::SmContext<'ctx>,
+ ) -> Result<OpUnmapped<'op, Self>, Error>;
+
+ /// Indicates that an existing mapping should be split up.
+ fn sm_step_remap<'op, 'ctx>(
+ &mut self,
+ op: OpRemap<'op, Self>,
+ context: &mut Self::SmContext<'ctx>,
+ ) -> Result<OpRemapped<'op, Self>, Error>;
}
/// The core of the DRM GPU VA manager.
diff --git a/rust/kernel/drm/gpuvm/sm_ops.rs b/rust/kernel/drm/gpuvm/sm_ops.rs
new file mode 100644
index 000000000000..ab802a07e4be
--- /dev/null
+++ b/rust/kernel/drm/gpuvm/sm_ops.rs
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+use super::*;
+
+/// The actual data that gets threaded through the callbacks.
+struct SmData<'a, 'ctx, T: DriverGpuVm> {
+ gpuvm: &'a mut UniqueRefGpuVm<T>,
+ user_context: &'a mut T::SmContext<'ctx>,
+}
+
+/// Represents an `sm_step_unmap` operation that has not yet been completed.
+pub struct OpUnmap<'op, T: DriverGpuVm> {
+ op: &'op bindings::drm_gpuva_op_unmap,
+ // This ensures that 'op is invariant, so that `OpUnmap<'long, T>` does not
+ // coerce to `OpUnmap<'short, T>`. This ensures that the user can't return the
+ // wrong `OpUnmapped` value.
+ _invariant: PhantomData<*mut &'op mut T>,
+}
+
+impl<'op, T: DriverGpuVm> OpUnmap<'op, T> {
+ /// Indicates whether this [`GpuVa`] is physically contiguous with the
+ /// original mapping request.
+ ///
+ /// Optionally, if `keep` is set, drivers may keep the actual page table
+ /// mappings for this `drm_gpuva`, adding the missing page table entries
+ /// only and update the `drm_gpuvm` accordingly.
+ pub fn keep(&self) -> bool {
+ self.op.keep
+ }
+
+ /// The range being unmapped.
+ pub fn va(&self) -> &GpuVa<T> {
+ // SAFETY: This is a valid va.
+ unsafe { GpuVa::<T>::from_raw(self.op.va) }
+ }
+
+ /// Remove the VA.
+ pub fn remove(self) -> (OpUnmapped<'op, T>, GpuVaRemoved<T>) {
+ // SAFETY: The op references a valid drm_gpuva in the GPUVM.
+ unsafe { bindings::drm_gpuva_unmap(self.op) };
+ // SAFETY: The va is no longer in the interval tree so we may unlink it.
+ unsafe { bindings::drm_gpuva_unlink_defer(self.op.va) };
+
+ // SAFETY: We just removed this va from the `GpuVm<T>`.
+ let va = unsafe { GpuVaRemoved::from_raw(self.op.va) };
+
+ (
+ OpUnmapped {
+ _invariant: self._invariant,
+ },
+ va,
+ )
+ }
+}
+
+/// Represents a completed [`OpUnmap`] operation.
+pub struct OpUnmapped<'op, T> {
+ _invariant: PhantomData<*mut &'op mut T>,
+}
+
+/// Represents an `sm_step_remap` operation that has not yet been completed.
+pub struct OpRemap<'op, T: DriverGpuVm> {
+ op: &'op bindings::drm_gpuva_op_remap,
+ // This ensures that 'op is invariant, so that `OpRemap<'long, T>` does not
+ // coerce to `OpRemap<'short, T>`. This ensures that the user can't return the
+ // wrong `OpRemapped` value.
+ _invariant: PhantomData<*mut &'op mut T>,
+}
+
+impl<'op, T: DriverGpuVm> OpRemap<'op, T> {
+ /// The preceding part of a split mapping.
+ #[inline]
+ pub fn prev(&self) -> Option<&OpRemapMapData> {
+ // SAFETY: We checked for null, so the pointer must be valid.
+ NonNull::new(self.op.prev).map(|ptr| unsafe { OpRemapMapData::from_raw(ptr) })
+ }
+
+ /// The subsequent part of a split mapping.
+ #[inline]
+ pub fn next(&self) -> Option<&OpRemapMapData> {
+ // SAFETY: We checked for null, so the pointer must be valid.
+ NonNull::new(self.op.next).map(|ptr| unsafe { OpRemapMapData::from_raw(ptr) })
+ }
+
+ /// Indicates whether the `drm_gpuva` being removed is physically contiguous with the original
+ /// mapping request.
+ ///
+ /// Optionally, if `keep` is set, drivers may keep the actual page table mappings for this
+ /// `drm_gpuva`, adding the missing page table entries only and update the `drm_gpuvm`
+ /// accordingly.
+ #[inline]
+ pub fn keep(&self) -> bool {
+ // SAFETY: The unmap pointer is always valid.
+ unsafe { (*self.op.unmap).keep }
+ }
+
+ /// The range being unmapped.
+ #[inline]
+ pub fn va_to_unmap(&self) -> &GpuVa<T> {
+ // SAFETY: This is a valid va.
+ unsafe { GpuVa::<T>::from_raw((*self.op.unmap).va) }
+ }
+
+ /// The [`drm_gem_object`](crate::drm::gem::Object) whose VA is being remapped.
+ #[inline]
+ pub fn obj(&self) -> &T::Object {
+ self.va_to_unmap().obj()
+ }
+
+ /// The [`GpuVmBo`] that is being remapped.
+ #[inline]
+ pub fn vm_bo(&self) -> &GpuVmBo<T> {
+ self.va_to_unmap().vm_bo()
+ }
+
+ /// Update the GPUVM to perform the remapping.
+ pub fn remap(
+ self,
+ va_alloc: [GpuVaAlloc<T>; 2],
+ prev_data: impl PinInit<T::VaData>,
+ next_data: impl PinInit<T::VaData>,
+ ) -> (OpRemapped<'op, T>, OpRemapRet<T>) {
+ let [va1, va2] = va_alloc;
+
+ let mut unused_va = None;
+ let mut prev_ptr = ptr::null_mut();
+ let mut next_ptr = ptr::null_mut();
+ if self.prev().is_some() {
+ prev_ptr = va1.prepare(prev_data);
+ } else {
+ unused_va = Some(va1);
+ }
+ if self.next().is_some() {
+ next_ptr = va2.prepare(next_data);
+ } else {
+ unused_va = Some(va2);
+ }
+
+ // SAFETY: the pointers are non-null when required
+ unsafe { bindings::drm_gpuva_remap(prev_ptr, next_ptr, self.op) };
+
+ let gpuva_guard = self.vm_bo().lock_gpuva();
+ if !prev_ptr.is_null() {
+ // SAFETY: The prev_ptr is a valid drm_gpuva prepared for insertion. The vm_bo is still
+ // valid as the not-yet-unlinked gpuva holds a refcount on the vm_bo.
+ unsafe { bindings::drm_gpuva_link(prev_ptr, self.vm_bo().as_raw()) };
+ }
+ if !next_ptr.is_null() {
+ // SAFETY: The next_ptr is a valid drm_gpuva prepared for insertion. The vm_bo is still
+ // valid as the not-yet-unlinked gpuva holds a refcount on the vm_bo.
+ unsafe { bindings::drm_gpuva_link(next_ptr, self.vm_bo().as_raw()) };
+ }
+ drop(gpuva_guard);
+
+ // SAFETY: The va is no longer in the interval tree so we may unlink it.
+ unsafe { bindings::drm_gpuva_unlink_defer((*self.op.unmap).va) };
+
+ (
+ OpRemapped {
+ _invariant: self._invariant,
+ },
+ OpRemapRet {
+ // SAFETY: We just removed this va from the `GpuVm<T>`.
+ unmapped_va: unsafe { GpuVaRemoved::from_raw((*self.op.unmap).va) },
+ unused_va,
+ },
+ )
+ }
+}
+
+/// Part of an [`OpRemap`] that represents a new mapping.
+#[repr(transparent)]
+pub struct OpRemapMapData(bindings::drm_gpuva_op_map);
+
+impl OpRemapMapData {
+ /// # Safety
+ /// Must reference a valid `drm_gpuva_op_map` for duration of `'a`.
+ unsafe fn from_raw<'a>(ptr: NonNull<bindings::drm_gpuva_op_map>) -> &'a Self {
+ // SAFETY: ok per safety requirements
+ unsafe { ptr.cast().as_ref() }
+ }
+
+ /// The base address of the new mapping.
+ pub fn addr(&self) -> u64 {
+ self.0.va.addr
+ }
+
+ /// The length of the new mapping.
+ pub fn length(&self) -> u64 {
+ self.0.va.range
+ }
+
+ /// The offset within the [`drm_gem_object`](crate::drm::gem::Object).
+ pub fn gem_offset(&self) -> u64 {
+ self.0.gem.offset
+ }
+}
+
+/// Struct containing objects removed or not used by [`OpRemap::remap`].
+pub struct OpRemapRet<T: DriverGpuVm> {
+ /// The `drm_gpuva` that was removed.
+ pub unmapped_va: GpuVaRemoved<T>,
+ /// If the remap did not split the region into two pieces, then the unused `drm_gpuva` is
+ /// returned here.
+ pub unused_va: Option<GpuVaAlloc<T>>,
+}
+
+/// Represents a completed [`OpRemap`] operation.
+pub struct OpRemapped<'op, T> {
+ _invariant: PhantomData<*mut &'op mut T>,
+}
+
+impl<T: DriverGpuVm> UniqueRefGpuVm<T> {
+ /// Remove any mappings in the given region.
+ ///
+ /// Internally calls [`DriverGpuVm::sm_step_unmap`] for ranges entirely contained within the
+ /// given range, and [`DriverGpuVm::sm_step_remap`] for ranges that overlap with the range.
+ #[inline]
+ pub fn sm_unmap(&mut self, addr: u64, length: u64, context: &mut T::SmContext<'_>) -> Result {
+ let gpuvm = self.as_raw();
+ let mut p = SmData {
+ gpuvm: self,
+ user_context: context,
+ };
+ // SAFETY:
+ // * `gpuvm` is a valid pointer to this `drm_gpuvm`, obtained via `as_raw()`.
+ // * The private data is valid to be interpreted as SmData.
+ to_result(unsafe { bindings::drm_gpuvm_sm_unmap(gpuvm, (&raw mut p).cast(), addr, length) })
+ }
+}
+
+impl<T: DriverGpuVm> GpuVm<T> {
+ /// # Safety
+ /// Must be called from `sm_unmap` with a pointer to `SmData`.
+ pub(super) unsafe extern "C" fn sm_step_unmap(
+ op: *mut bindings::drm_gpuva_op,
+ p: *mut c_void,
+ ) -> c_int {
+ // SAFETY: The caller provides a pointer to `SmData`.
+ let p = unsafe { &mut *p.cast::<SmData<'_, '_, T>>() };
+ let op = OpUnmap {
+ // SAFETY: sm_step_unmap is called with an unmap operation.
+ op: unsafe { &(*op).__bindgen_anon_1.unmap },
+ _invariant: PhantomData,
+ };
+ match p.gpuvm.data().sm_step_unmap(op, p.user_context) {
+ Ok(OpUnmapped { .. }) => 0,
+ Err(err) => err.to_errno(),
+ }
+ }
+
+ /// # Safety
+ /// Must be called from `sm_unmap` with a pointer to `SmData`.
+ pub(super) unsafe extern "C" fn sm_step_remap(
+ op: *mut bindings::drm_gpuva_op,
+ p: *mut c_void,
+ ) -> c_int {
+ // SAFETY: The caller provides a pointer to `SmData`.
+ let p = unsafe { &mut *p.cast::<SmData<'_, '_, T>>() };
+ let op = OpRemap {
+ // SAFETY: sm_step_remap is called with a remap operation.
+ op: unsafe { &(*op).__bindgen_anon_1.remap },
+ _invariant: PhantomData,
+ };
+ match p.gpuvm.data().sm_step_remap(op, p.user_context) {
+ Ok(OpRemapped { .. }) => 0,
+ Err(err) => err.to_errno(),
+ }
+ }
+}
diff --git a/rust/kernel/drm/gpuvm/va.rs b/rust/kernel/drm/gpuvm/va.rs
index 540f525bb56d..654e5ca09bcb 100644
--- a/rust/kernel/drm/gpuvm/va.rs
+++ b/rust/kernel/drm/gpuvm/va.rs
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
-#![expect(dead_code)]
use super::*;
/// Represents that a range of a GEM object is mapped in this [`GpuVm`] instance.
diff --git a/rust/kernel/drm/gpuvm/vm_bo.rs b/rust/kernel/drm/gpuvm/vm_bo.rs
index 7fd1d301fd41..098dbd7f9aa4 100644
--- a/rust/kernel/drm/gpuvm/vm_bo.rs
+++ b/rust/kernel/drm/gpuvm/vm_bo.rs
@@ -136,6 +136,14 @@ pub fn obj(&self) -> &T::Object {
pub fn data(&self) -> &T::VmBoData {
&self.data
}
+
+ pub(super) fn lock_gpuva(&self) -> crate::sync::MutexGuard<'_, ()> {
+ // SAFETY: The GEM object is valid.
+ let ptr = unsafe { &raw mut (*self.obj().as_raw()).gpuva.lock };
+ // SAFETY: The GEM object is valid, so the mutex is properly initialized.
+ let mutex = unsafe { crate::sync::Mutex::from_raw(ptr) };
+ mutex.lock()
+ }
}
/// A pre-allocated [`GpuVmBo`] object.
--
2.53.0.959.g497ff81fa9-goog
^ permalink raw reply related [flat|nested] 7+ messages in thread

* [PATCH v5 6/6] rust: gpuvm: add GpuVmCore::sm_map()
2026-03-20 16:08 [PATCH v5 0/6] Rust GPUVM immediate mode Alice Ryhl
` (4 preceding siblings ...)
2026-03-20 16:08 ` [PATCH v5 5/6] rust: gpuvm: add GpuVmCore::sm_unmap() Alice Ryhl
@ 2026-03-20 16:08 ` Alice Ryhl
5 siblings, 0 replies; 7+ messages in thread
From: Alice Ryhl @ 2026-03-20 16:08 UTC (permalink / raw)
To: Danilo Krummrich, Daniel Almeida
Cc: Boris Brezillon, Janne Grunau, Matthew Brost,
Thomas Hellström, Lyude Paul, Asahi Lina, Sumit Semwal,
Christian König, dri-devel, linux-kernel, rust-for-linux,
linux-media, Alice Ryhl
Finally also add the operation for creating new mappings. Mapping
operations need extra data in the context since they involve a vm_bo
coming from the outside.
Co-developed-by: Asahi Lina <lina+kernel@asahilina.net>
Signed-off-by: Asahi Lina <lina+kernel@asahilina.net>
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
rust/kernel/drm/gpuvm/mod.rs | 9 ++-
rust/kernel/drm/gpuvm/sm_ops.rs | 157 ++++++++++++++++++++++++++++++++++++++--
2 files changed, 160 insertions(+), 6 deletions(-)
diff --git a/rust/kernel/drm/gpuvm/mod.rs b/rust/kernel/drm/gpuvm/mod.rs
index dca72e7b2b89..64a614d14f09 100644
--- a/rust/kernel/drm/gpuvm/mod.rs
+++ b/rust/kernel/drm/gpuvm/mod.rs
@@ -98,7 +98,7 @@ const fn vtable() -> &'static bindings::drm_gpuvm_ops {
vm_bo_alloc: GpuVmBo::<T>::ALLOC_FN,
vm_bo_free: GpuVmBo::<T>::FREE_FN,
vm_bo_validate: None,
- sm_step_map: None,
+ sm_step_map: Some(Self::sm_step_map),
sm_step_unmap: Some(Self::sm_step_unmap),
sm_step_remap: Some(Self::sm_step_remap),
}
@@ -255,6 +255,13 @@ pub trait DriverGpuVm: Sized {
/// The private data passed to callbacks.
type SmContext<'ctx>;
+ /// Indicates that a new mapping should be created.
+ fn sm_step_map<'op, 'ctx>(
+ &mut self,
+ op: OpMap<'op, Self>,
+ context: &mut Self::SmContext<'ctx>,
+ ) -> Result<OpMapped<'op, Self>, Error>;
+
/// Indicates that an existing mapping should be removed.
fn sm_step_unmap<'op, 'ctx>(
&mut self,
diff --git a/rust/kernel/drm/gpuvm/sm_ops.rs b/rust/kernel/drm/gpuvm/sm_ops.rs
index ab802a07e4be..1d07e84a8062 100644
--- a/rust/kernel/drm/gpuvm/sm_ops.rs
+++ b/rust/kernel/drm/gpuvm/sm_ops.rs
@@ -8,6 +8,103 @@ struct SmData<'a, 'ctx, T: DriverGpuVm> {
user_context: &'a mut T::SmContext<'ctx>,
}
+#[repr(C)]
+struct SmMapData<'a, 'ctx, T: DriverGpuVm> {
+ sm_data: SmData<'a, 'ctx, T>,
+ vm_bo: ARef<GpuVmBo<T>>,
+}
+
+/// The argument for [`UniqueRefGpuVm::sm_map`].
+pub struct OpMapRequest<'a, 'ctx, T: DriverGpuVm> {
+ /// Address in GPU virtual address space.
+ pub addr: u64,
+ /// Length of mapping to create.
+ pub range: u64,
+ /// Offset in GEM object.
+ pub gem_offset: u64,
+ /// The GEM object to map.
+ pub vm_bo: ARef<GpuVmBo<T>>,
+ /// The user-provided context type.
+ pub context: &'a mut T::SmContext<'ctx>,
+}
+
+impl<'a, 'ctx, T: DriverGpuVm> OpMapRequest<'a, 'ctx, T> {
+ fn raw_request(&self) -> bindings::drm_gpuvm_map_req {
+ bindings::drm_gpuvm_map_req {
+ map: bindings::drm_gpuva_op_map {
+ va: bindings::drm_gpuva_op_map__bindgen_ty_1 {
+ addr: self.addr,
+ range: self.range,
+ },
+ gem: bindings::drm_gpuva_op_map__bindgen_ty_2 {
+ offset: self.gem_offset,
+ obj: self.vm_bo.obj().as_raw(),
+ },
+ },
+ }
+ }
+}
+
+/// Represents an `sm_step_map` operation that has not yet been completed.
+pub struct OpMap<'op, T: DriverGpuVm> {
+ op: &'op bindings::drm_gpuva_op_map,
+ // Since these abstractions are designed for immediate mode, the VM BO needs to be
+ // pre-allocated, so we always have it available when we reach this point.
+ vm_bo: &'op GpuVmBo<T>,
+ // This ensures that 'op is invariant, so that `OpMap<'long, T>` does not
+ // coerce to `OpMap<'short, T>`. This ensures that the user can't return
+ // the wrong `OpMapped` value.
+ _invariant: PhantomData<*mut &'op mut T>,
+}
+
+impl<'op, T: DriverGpuVm> OpMap<'op, T> {
+ /// The base address of the new mapping.
+ pub fn addr(&self) -> u64 {
+ self.op.va.addr
+ }
+
+ /// The length of the new mapping.
+ pub fn length(&self) -> u64 {
+ self.op.va.range
+ }
+
+ /// The offset within the [`drm_gem_object`](crate::drm::gem::Object).
+ pub fn gem_offset(&self) -> u64 {
+ self.op.gem.offset
+ }
+
+ /// The [`drm_gem_object`](crate::drm::gem::Object) to map.
+ pub fn obj(&self) -> &T::Object {
+ // SAFETY: The `obj` pointer is guaranteed to be valid.
+ unsafe { <T::Object as IntoGEMObject>::from_raw(self.op.gem.obj) }
+ }
+
+ /// The [`GpuVmBo`] that the new VA will be associated with.
+ pub fn vm_bo(&self) -> &GpuVmBo<T> {
+ self.vm_bo
+ }
+
+ /// Use the pre-allocated VA to carry out this map operation.
+ pub fn insert(self, va: GpuVaAlloc<T>, va_data: impl PinInit<T::VaData>) -> OpMapped<'op, T> {
+ let va = va.prepare(va_data);
+ // SAFETY: By the type invariants we may access the interval tree.
+ unsafe { bindings::drm_gpuva_map(self.vm_bo.gpuvm().as_raw(), va, self.op) };
+
+ let _gpuva_guard = self.vm_bo().lock_gpuva();
+ // SAFETY: The va is prepared for insertion, and we hold the GEM lock.
+ unsafe { bindings::drm_gpuva_link(va, self.vm_bo.as_raw()) };
+
+ OpMapped {
+ _invariant: self._invariant,
+ }
+ }
+}
+
+/// Represents a completed [`OpMap`] operation.
+pub struct OpMapped<'op, T> {
+ _invariant: PhantomData<*mut &'op mut T>,
+}
+
/// Represents an `sm_step_unmap` operation that has not yet been completed.
pub struct OpUnmap<'op, T: DriverGpuVm> {
op: &'op bindings::drm_gpuva_op_unmap,
@@ -211,6 +308,30 @@ pub struct OpRemapped<'op, T> {
}
impl<T: DriverGpuVm> UniqueRefGpuVm<T> {
+ /// Create a mapping, removing or remapping anything that overlaps.
+ ///
+ /// Internally calls the [`DriverGpuVm`] callbacks similar to [`Self::sm_unmap`], except that
+ /// the [`DriverGpuVm::sm_step_map`] is called once to create the requested mapping.
+ #[inline]
+ pub fn sm_map(&mut self, req: OpMapRequest<'_, '_, T>) -> Result {
+ let gpuvm = self.as_raw();
+ let raw_req = req.raw_request();
+ let mut p = SmMapData {
+ sm_data: SmData {
+ gpuvm: self,
+ user_context: req.context,
+ },
+ vm_bo: req.vm_bo,
+ };
+ // SAFETY:
+ // * raw_request() creates a valid request.
+ // * The private data is valid to be interpreted as both SmData and SmMapData since the
+ // first field of SmMapData is SmData.
+ to_result(unsafe {
+ bindings::drm_gpuvm_sm_map(gpuvm, (&raw mut p).cast(), &raw const raw_req)
+ })
+ }
+
/// Remove any mappings in the given region.
///
/// Internally calls [`DriverGpuVm::sm_step_unmap`] for ranges entirely contained within the
@@ -224,19 +345,45 @@ pub fn sm_unmap(&mut self, addr: u64, length: u64, context: &mut T::SmContext<'_
};
// SAFETY:
// * raw_request() creates a valid request.
- // * The private data is valid to be interpreted as SmData.
+ // * The private data is a valid SmData.
to_result(unsafe { bindings::drm_gpuvm_sm_unmap(gpuvm, (&raw mut p).cast(), addr, length) })
}
}
impl<T: DriverGpuVm> GpuVm<T> {
/// # Safety
- /// Must be called from `sm_unmap` with a pointer to `SmData`.
+ /// Must be called from `sm_map` with a pointer to `SmMapData`.
+ pub(super) unsafe extern "C" fn sm_step_map(
+ op: *mut bindings::drm_gpuva_op,
+ p: *mut c_void,
+ ) -> c_int {
+ // SAFETY: If we reach `sm_step_map` then we were called from `sm_map` which always passes
+ // an `SmMapData` as private data.
+ let p = unsafe { &mut *p.cast::<SmMapData<'_, '_, T>>() };
+ let op = OpMap {
+ // SAFETY: sm_step_map is called with a map operation.
+ op: unsafe { &(*op).__bindgen_anon_1.map },
+ vm_bo: &p.vm_bo,
+ _invariant: PhantomData,
+ };
+ match p
+ .sm_data
+ .gpuvm
+ .data()
+ .sm_step_map(op, p.sm_data.user_context)
+ {
+ Ok(OpMapped { .. }) => 0,
+ Err(err) => err.to_errno(),
+ }
+ }
+
+ /// # Safety
+ /// Must be called from `sm_map` or `sm_unmap` with a pointer to `SmMapData` or `SmData`.
pub(super) unsafe extern "C" fn sm_step_unmap(
op: *mut bindings::drm_gpuva_op,
p: *mut c_void,
) -> c_int {
- // SAFETY: The caller provides a pointer to `SmData`.
+ // SAFETY: The caller provides a pointer that can be treated as `SmData`.
let p = unsafe { &mut *p.cast::<SmData<'_, '_, T>>() };
let op = OpUnmap {
// SAFETY: sm_step_unmap is called with an unmap operation.
@@ -250,12 +397,12 @@ impl<T: DriverGpuVm> GpuVm<T> {
}
/// # Safety
- /// Must be called from `sm_unmap` with a pointer to `SmData`.
+ /// Must be called from `sm_map` or `sm_unmap` with a pointer to `SmMapData` or `SmData`.
pub(super) unsafe extern "C" fn sm_step_remap(
op: *mut bindings::drm_gpuva_op,
p: *mut c_void,
) -> c_int {
- // SAFETY: The caller provides a pointer to `SmData`.
+ // SAFETY: The caller provides a pointer that can be treated as `SmData`.
let p = unsafe { &mut *p.cast::<SmData<'_, '_, T>>() };
let op = OpRemap {
// SAFETY: sm_step_remap is called with a remap operation.
--
2.53.0.959.g497ff81fa9-goog
^ permalink raw reply related [flat|nested] 7+ messages in thread