From: Joel Fernandes <joelagnelf@nvidia.com>
To: linux-kernel@vger.kernel.org
Cc: "Miguel Ojeda" <ojeda@kernel.org>,
"Boqun Feng" <boqun@kernel.org>, "Gary Guo" <gary@garyguo.net>,
"Björn Roy Baron" <bjorn3_gh@protonmail.com>,
"Benno Lossin" <lossin@kernel.org>,
"Andreas Hindborg" <a.hindborg@kernel.org>,
"Alice Ryhl" <aliceryhl@google.com>,
"Trevor Gross" <tmgross@umich.edu>,
"Danilo Krummrich" <dakr@kernel.org>,
"Dave Airlie" <airlied@redhat.com>,
"Daniel Almeida" <daniel.almeida@collabora.com>,
"Koen Koning" <koen.koning@linux.intel.com>,
dri-devel@lists.freedesktop.org, nouveau@lists.freedesktop.org,
rust-for-linux@vger.kernel.org,
"Nikola Djukic" <ndjukic@nvidia.com>,
"Maarten Lankhorst" <maarten.lankhorst@linux.intel.com>,
"Maxime Ripard" <mripard@kernel.org>,
"Thomas Zimmermann" <tzimmermann@suse.de>,
"David Airlie" <airlied@gmail.com>,
"Simona Vetter" <simona@ffwll.ch>,
"Jonathan Corbet" <corbet@lwn.net>,
"Alex Deucher" <alexander.deucher@amd.com>,
"Christian König" <christian.koenig@amd.com>,
"Jani Nikula" <jani.nikula@linux.intel.com>,
"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>,
"Rodrigo Vivi" <rodrigo.vivi@intel.com>,
"Tvrtko Ursulin" <tursulin@ursulin.net>,
"Huang Rui" <ray.huang@amd.com>,
"Matthew Auld" <matthew.auld@intel.com>,
"Matthew Brost" <matthew.brost@intel.com>,
"Lucas De Marchi" <lucas.demarchi@intel.com>,
"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Helge Deller" <deller@gmx.de>,
"Alex Gaynor" <alex.gaynor@gmail.com>,
"Boqun Feng" <boqun.feng@gmail.com>,
"John Hubbard" <jhubbard@nvidia.com>,
"Alistair Popple" <apopple@nvidia.com>,
"Timur Tabi" <ttabi@nvidia.com>, "Edwin Peer" <epeer@nvidia.com>,
"Alexandre Courbot" <acourbot@nvidia.com>,
"Andrea Righi" <arighi@nvidia.com>,
"Andy Ritger" <aritger@nvidia.com>, "Zhi Wang" <zhiw@nvidia.com>,
"Balbir Singh" <balbirs@nvidia.com>,
"Philipp Stanner" <phasta@kernel.org>,
"Elle Rhumsaa" <elle@weathered-steel.dev>,
alexeyi@nvidia.com, "Eliot Courtney" <ecourtney@nvidia.com>,
joel@joelfernandes.org, linux-doc@vger.kernel.org,
amd-gfx@lists.freedesktop.org, intel-gfx@lists.freedesktop.org,
intel-xe@lists.freedesktop.org, linux-fbdev@vger.kernel.org,
"Joel Fernandes" <joelagnelf@nvidia.com>
Subject: [PATCH v9 18/23] gpu: nova-core: mm: Add multi-page mapping API to VMM
Date: Tue, 10 Mar 2026 20:40:03 -0400
Message-ID: <20260311004008.2208806-19-joelagnelf@nvidia.com>
In-Reply-To: <20260311004008.2208806-1-joelagnelf@nvidia.com>
Add the page table mapping and unmapping API to the Virtual Memory
Manager, implementing a two-phase prepare/execute model suitable for
use both inside and outside the DMA fence signalling critical path.
prepare_map() performs all memory allocation up front (the VA range and
any missing page table pages), so that execute_map() can install PDEs
and PTEs without allocating and is therefore safe to call from the
fence signalling critical path. unmap_pages() invalidates the PTEs of a
mapped range and frees its VA with a single TLB flush.
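For illustration, a typical caller might look like this (a sketch only;
it assumes a `vmm: &mut Vmm`, an `mm: &GpuMm` and a `pfns: &[Pfn]` in
scope, with error handling elided):

  // Outside the fence signalling critical path: reserve a VA range and
  // pre-allocate/zero any missing page table pages (may allocate).
  let prepared = vmm.prepare_map(mm, pfns.len(), None)?;

  // Inside the critical path: install the pending PDEs, write the PTEs
  // and do a single TLB flush. No memory is allocated here.
  let mapped = vmm.execute_map(mm, prepared, pfns, /* writable */ true)?;

  // ... use the mapping ...

  // Invalidate the PTEs and free the VA range with a single TLB flush.
  vmm.unmap_pages(mm, mapped)?;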
Cc: Nikola Djukic <ndjukic@nvidia.com>
Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
---
drivers/gpu/nova-core/mm/vmm.rs | 366 +++++++++++++++++++++++++++++++-
1 file changed, 363 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/nova-core/mm/vmm.rs b/drivers/gpu/nova-core/mm/vmm.rs
index 78e614d8829d..95ee3496e0a6 100644
--- a/drivers/gpu/nova-core/mm/vmm.rs
+++ b/drivers/gpu/nova-core/mm/vmm.rs
@@ -11,21 +11,34 @@
AllocatedBlocks,
GpuBuddy,
GpuBuddyAllocFlag,
+ GpuBuddyAllocFlags,
GpuBuddyAllocMode,
GpuBuddyParams, //
},
prelude::*,
ptr::Alignment,
+ rbtree::{RBTree, RBTreeNode},
sizes::SZ_4K, //
};
-use core::ops::Range;
+use core::{
+ cell::Cell,
+ ops::Range, //
+};
use crate::{
mm::{
pagetable::{
- walk::{PtWalk, WalkResult},
- MmuVersion, //
+ walk::{
+ PtWalk,
+ WalkPdeResult,
+ WalkResult, //
+ },
+ DualPde,
+ MmuVersion,
+ PageTableLevel,
+ Pde,
+ Pte, //
},
GpuMm,
Pfn,
@@ -50,6 +63,74 @@ pub(crate) struct Vmm {
page_table_allocs: KVec<Pin<KBox<AllocatedBlocks>>>,
/// Buddy allocator for virtual address range tracking.
virt_buddy: GpuBuddy,
+ /// Prepared PT pages pending PDE installation, keyed by `install_addr`.
+ ///
+ /// Populated by the `Vmm` mapping prepare phase and drained in the execute phase.
+ /// Shared by all pending maps in the `Vmm`, preventing races where two maps
+ /// try to install the same page table/directory entry pointer.
+ pt_pages: RBTree<VramAddress, PreparedPtPage>,
+}
+
+/// A pre-allocated and zeroed page table page.
+///
+/// Created during the mapping prepare phase and consumed during the mapping execute phase.
+/// Stored in an [`RBTree`] keyed by the PDE slot address (`install_addr`).
+struct PreparedPtPage {
+ /// The allocated and zeroed page table page.
+ alloc: Pin<KBox<AllocatedBlocks>>,
+ /// Page table level -- needed to determine if this PT page is for a dual PDE.
+ level: PageTableLevel,
+}
+
+/// A prepared multi-page mapping -- VA range allocated, ready for the execute phase.
+///
+/// Produced by [`Vmm::prepare_map()`], consumed by [`Vmm::execute_map()`].
+/// The struct owns the VA space allocation between prepare and execute phases.
+pub(crate) struct PreparedMapping {
+ vfn_start: Vfn,
+ num_pages: usize,
+ vfn_alloc: Pin<KBox<AllocatedBlocks>>,
+}
+
+/// Result of a mapping operation -- tracks the active mapped range.
+///
+/// Returned by [`Vmm::execute_map()`] and [`Vmm::map_pages()`].
+/// Owns the VA allocation; the VA range is freed when this is dropped.
+/// Callers must call [`Vmm::unmap_pages()`] before dropping to invalidate
+ /// the PTEs (dropping only frees the VA range, leaving the PTEs in place).
+pub(crate) struct MappedRange {
+ pub(crate) vfn_start: Vfn,
+ pub(crate) num_pages: usize,
+ /// VA allocation -- freed when [`MappedRange`] is dropped.
+ _vfn_alloc: Pin<KBox<AllocatedBlocks>>,
+ /// Logs a warning if dropped without unmapping.
+ _drop_guard: MustUnmapGuard,
+}
+
+/// Guard that logs a warning once if a [`MappedRange`] is dropped without
+/// calling [`Vmm::unmap_pages()`].
+struct MustUnmapGuard {
+ armed: Cell<bool>,
+}
+
+impl MustUnmapGuard {
+ const fn new() -> Self {
+ Self {
+ armed: Cell::new(true),
+ }
+ }
+
+ fn disarm(&self) {
+ self.armed.set(false);
+ }
+}
+
+impl Drop for MustUnmapGuard {
+ fn drop(&mut self) {
+ if self.armed.get() {
+ kernel::pr_warn!("MappedRange dropped without calling unmap_pages()\n");
+ }
+ }
}
impl Vmm {
@@ -77,6 +158,7 @@ pub(crate) fn new(
mmu_version,
page_table_allocs: KVec::new(),
virt_buddy,
+ pt_pages: RBTree::new(),
})
}
@@ -135,4 +217,282 @@ pub(crate) fn read_mapping(&self, mm: &GpuMm, vfn: Vfn) -> Result<Option<Pfn>> {
WalkResult::Unmapped { .. } | WalkResult::PageTableMissing => Ok(None),
}
}
+
+ /// Allocate and zero a physical page table page for a specific PDE slot.
+ /// Called during the map prepare phase.
+ fn alloc_and_zero_page_table(
+ &mut self,
+ mm: &GpuMm,
+ level: PageTableLevel,
+ ) -> Result<PreparedPtPage> {
+ let blocks = KBox::pin_init(
+ mm.buddy().alloc_blocks(
+ GpuBuddyAllocMode::Simple,
+ SZ_4K,
+ Alignment::new::<SZ_4K>(),
+ GpuBuddyAllocFlags::default(),
+ ),
+ GFP_KERNEL,
+ )?;
+
+ // Get page's VRAM address from the allocation.
+ let page_vram = VramAddress::new(blocks.iter().next().ok_or(ENOMEM)?.offset());
+
+ // Zero via PRAMIN.
+ let mut window = mm.pramin().window()?;
+ let base = page_vram.raw();
+ for off in (0..PAGE_SIZE).step_by(8) {
+ let off_u64: u64 = off.into_safe_cast();
+ window.try_write64(base + off_u64, 0)?;
+ }
+
+ Ok(PreparedPtPage {
+ alloc: blocks,
+ level,
+ })
+ }
+
+ /// Ensure all intermediate page table pages are prepared for a [`Vfn`]. This
+ /// only determines which PDE pages are missing and allocates pages for them;
+ /// installation is deferred to the execute phase.
+ ///
+ /// The PRAMIN window is released before each allocation and re-acquired
+ /// afterwards, so memory allocations never happen while the lock is held;
+ /// this prevents deadlocks with the fence signalling critical path.
+ fn ensure_pte_path(&mut self, mm: &GpuMm, vfn: Vfn) -> Result {
+ let walker = PtWalk::new(self.pdb_addr, self.mmu_version);
+ let max_iter = 2 * self.mmu_version.pde_level_count();
+
+ // Keep looping until all PDE levels are resolved.
+ for _ in 0..max_iter {
+ let mut window = mm.pramin().window()?;
+
+ // Walk PDE levels. The closure checks self.pt_pages for prepared-but-uninstalled
+ // pages, letting the walker continue through them as if they were already
+ // installed in HW.
+ let result = walker.walk_pde_levels(&mut window, vfn, |install_addr| {
+ self.pt_pages
+ .get(&install_addr)
+ .and_then(|p| p.alloc.iter().next())
+ .map(|block| VramAddress::new(block.offset()))
+ })?;
+
+ match result {
+ WalkPdeResult::Complete { .. } => {
+ // All PDE levels resolved.
+ return Ok(());
+ }
+ WalkPdeResult::Missing {
+ install_addr,
+ level,
+ } => {
+ // Drop PRAMIN before allocation.
+ drop(window);
+ let page = self.alloc_and_zero_page_table(mm, level)?;
+ let node = RBTreeNode::new(install_addr, page, GFP_KERNEL)?;
+ let old = self.pt_pages.insert(node);
+ if old.is_some() {
+ kernel::pr_warn_once!(
+ "VMM: duplicate install_addr in pt_pages (internal consistency error)\n"
+ );
+ return Err(EIO);
+ }
+
+ // Loop: re-acquire PRAMIN and re-walk from root.
+ }
+ }
+ }
+
+ kernel::pr_warn!(
+ "VMM: ensure_pte_path: loop exhausted after {} iters (VFN {:?})\n",
+ max_iter,
+ vfn
+ );
+ Err(EIO)
+ }
+
+ /// Prepare resources for mapping `num_pages` pages.
+ ///
+ /// Allocates a contiguous VA range, then walks the hierarchy per-VFN to prepare pages
+ /// for all missing PDEs. Returns a [`PreparedMapping`] with the VA allocation.
+ ///
+ /// If `va_range` is not `None`, the VA allocation is constrained to the given
+ /// range. This function allocates memory, so it must be called outside the
+ /// fence signalling critical path.
+ pub(crate) fn prepare_map(
+ &mut self,
+ mm: &GpuMm,
+ num_pages: usize,
+ va_range: Option<Range<u64>>,
+ ) -> Result<PreparedMapping> {
+ if num_pages == 0 {
+ return Err(EINVAL);
+ }
+
+ // Pre-reserve so execute_map() can use push_within_capacity (no alloc in
+ // fence signalling critical path).
+ // Upper bound on page table pages needed for the full tree (PTE pages + PDE
+ // pages at all levels).
+ let pt_upper_bound = self.mmu_version.pt_pages_upper_bound(num_pages);
+ self.page_table_allocs.reserve(pt_upper_bound, GFP_KERNEL)?;
+
+ // Allocate contiguous VA range.
+ let (vfn_start, vfn_alloc) = self.alloc_vfn_range(num_pages, va_range)?;
+
+ // Walk the hierarchy per-VFN to prepare pages for all missing PDEs.
+ for i in 0..num_pages {
+ let i_u64: u64 = i.into_safe_cast();
+ let vfn = Vfn::new(vfn_start.raw() + i_u64);
+ self.ensure_pte_path(mm, vfn)?;
+ }
+
+ Ok(PreparedMapping {
+ vfn_start,
+ num_pages,
+ vfn_alloc,
+ })
+ }
+
+ /// Execute a prepared multi-page mapping.
+ ///
+ /// Drains the prepared PT pages and installs the pending PDEs, then writes
+ /// the PTEs, finishing with a single TLB flush.
+ pub(crate) fn execute_map(
+ &mut self,
+ mm: &GpuMm,
+ prepared: PreparedMapping,
+ pfns: &[Pfn],
+ writable: bool,
+ ) -> Result<MappedRange> {
+ if pfns.len() != prepared.num_pages {
+ return Err(EINVAL);
+ }
+
+ let PreparedMapping {
+ vfn_start,
+ num_pages,
+ vfn_alloc,
+ } = prepared;
+
+ let walker = PtWalk::new(self.pdb_addr, self.mmu_version);
+ let mut window = mm.pramin().window()?;
+
+ // First, drain self.pt_pages and install all pending PDEs.
+ let mut cursor = self.pt_pages.cursor_front_mut();
+ while let Some(c) = cursor {
+ let (next, node) = c.remove_current();
+ let (install_addr, page) = node.to_key_value();
+ let page_vram = VramAddress::new(page.alloc.iter().next().ok_or(ENOMEM)?.offset());
+
+ if page.level == self.mmu_version.dual_pde_level() {
+ let new_dpde = DualPde::new_small(self.mmu_version, Pfn::from(page_vram));
+ new_dpde.write(&mut window, install_addr)?;
+ } else {
+ let new_pde = Pde::new_vram(self.mmu_version, Pfn::from(page_vram));
+ new_pde.write(&mut window, install_addr)?;
+ }
+
+ // Track the allocated pages in the `Vmm`.
+ self.page_table_allocs
+ .push_within_capacity(page.alloc)
+ .map_err(|_| ENOMEM)?;
+
+ cursor = next;
+ }
+
+ // Next, write PTEs (all PDEs now installed in HW).
+ for (i, &pfn) in pfns.iter().enumerate() {
+ let i_u64: u64 = i.into_safe_cast();
+ let vfn = Vfn::new(vfn_start.raw() + i_u64);
+ let result = walker.walk_to_pte_lookup_with_window(&mut window, vfn)?;
+
+ match result {
+ WalkResult::Unmapped { pte_addr } | WalkResult::Mapped { pte_addr, .. } => {
+ let pte = Pte::new_vram(self.mmu_version, pfn, writable);
+ pte.write(&mut window, pte_addr)?;
+ }
+ WalkResult::PageTableMissing => {
+ kernel::pr_warn_once!("VMM: page table missing for VFN {vfn:?}\n");
+ return Err(EIO);
+ }
+ }
+ }
+
+ drop(window);
+
+ // Finally, flush the TLB.
+ mm.tlb().flush(self.pdb_addr)?;
+
+ Ok(MappedRange {
+ vfn_start,
+ num_pages,
+ _vfn_alloc: vfn_alloc,
+ _drop_guard: MustUnmapGuard::new(),
+ })
+ }
+
+ /// Map pages, performing the prepare and execute phases in a single call.
+ ///
+ /// This is a convenience wrapper for callers outside the fence signalling critical
+ /// path (e.g., BAR mappings). DRM use cases call [`Vmm::prepare_map()`] and
+ /// [`Vmm::execute_map()`] separately.
+ pub(crate) fn map_pages(
+ &mut self,
+ mm: &GpuMm,
+ pfns: &[Pfn],
+ va_range: Option<Range<u64>>,
+ writable: bool,
+ ) -> Result<MappedRange> {
+ if pfns.is_empty() {
+ return Err(EINVAL);
+ }
+
+ // Check that the provided VA range is large enough for the requested pages.
+ if let Some(ref range) = va_range {
+ let required: u64 = pfns
+ .len()
+ .checked_mul(PAGE_SIZE)
+ .ok_or(EOVERFLOW)?
+ .into_safe_cast();
+ let available = range.end.checked_sub(range.start).ok_or(EINVAL)?;
+ if available < required {
+ return Err(EINVAL);
+ }
+ }
+
+ let prepared = self.prepare_map(mm, pfns.len(), va_range)?;
+ self.execute_map(mm, prepared, pfns, writable)
+ }
+
+ /// Unmap all pages in a [`MappedRange`] with a single TLB flush.
+ ///
+ /// Takes the range by value (consuming it), invalidates the PTEs for the range
+ /// while holding the PRAMIN lock, flushes the TLB, then drops the range (freeing the VA).
+ pub(crate) fn unmap_pages(&mut self, mm: &GpuMm, range: MappedRange) -> Result {
+ let walker = PtWalk::new(self.pdb_addr, self.mmu_version);
+ let invalid_pte = Pte::invalid(self.mmu_version);
+
+ let mut window = mm.pramin().window()?;
+ for i in 0..range.num_pages {
+ let i_u64: u64 = i.into_safe_cast();
+ let vfn = Vfn::new(range.vfn_start.raw() + i_u64);
+ let result = walker.walk_to_pte_lookup_with_window(&mut window, vfn)?;
+
+ match result {
+ WalkResult::Mapped { pte_addr, .. } | WalkResult::Unmapped { pte_addr } => {
+ invalid_pte.write(&mut window, pte_addr)?;
+ }
+ WalkResult::PageTableMissing => {
+ continue;
+ }
+ }
+ }
+ drop(window);
+
+ mm.tlb().flush(self.pdb_addr)?;
+
+ // Internal page table pages (PDE/PTE pages) are intentionally kept around
+ // so that repeated maps/unmaps stay fast. As a future TODO, a reclaimer
+ // could free them when VRAM runs short. For now, the PT pages are dropped
+ // only when the `Vmm` is dropped.
+
+ range._drop_guard.disarm(); // Unmap complete; OK to drop the MappedRange.
+ Ok(())
+ }
}
--
2.34.1