public inbox for linux-doc@vger.kernel.org
 help / color / mirror / Atom feed
From: Joel Fernandes <joelagnelf@nvidia.com>
To: linux-kernel@vger.kernel.org
Cc: Miguel Ojeda <ojeda@kernel.org>, Boqun Feng <boqun@kernel.org>,
	Gary Guo <gary@garyguo.net>,
	Bjorn Roy Baron <bjorn3_gh@protonmail.com>,
	Benno Lossin <lossin@kernel.org>,
	Andreas Hindborg <a.hindborg@kernel.org>,
	Alice Ryhl <aliceryhl@google.com>,
	Trevor Gross <tmgross@umich.edu>,
	Danilo Krummrich <dakr@kernel.org>,
	Dave Airlie <airlied@redhat.com>,
	Daniel Almeida <daniel.almeida@collabora.com>,
	dri-devel@lists.freedesktop.org, rust-for-linux@vger.kernel.org,
	nova-gpu@lists.linux.dev, Nikola Djukic <ndjukic@nvidia.com>,
	David Airlie <airlied@gmail.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	John Hubbard <jhubbard@nvidia.com>,
	Alistair Popple <apopple@nvidia.com>,
	Timur Tabi <ttabi@nvidia.com>, Edwin Peer <epeer@nvidia.com>,
	Alexandre Courbot <acourbot@nvidia.com>,
	Andrea Righi <arighi@nvidia.com>,
	Andy Ritger <aritger@nvidia.com>, Zhi Wang <zhiw@nvidia.com>,
	Balbir Singh <balbirs@nvidia.com>,
	Philipp Stanner <phasta@kernel.org>,
	alexeyi@nvidia.com, Eliot Courtney <ecourtney@nvidia.com>,
	joel@joelfernandes.org, linux-doc@vger.kernel.org,
	Joel Fernandes <joelagnelf@nvidia.com>
Subject: [PATCH v12 08/22] gpu: nova-core: mm: Add GpuMm centralized memory manager
Date: Sat, 25 Apr 2026 17:14:40 -0400	[thread overview]
Message-ID: <20260425211454.174696-9-joelagnelf@nvidia.com> (raw)
In-Reply-To: <20260425211454.174696-1-joelagnelf@nvidia.com>

Introduce GpuMm as the centralized GPU memory manager that owns:
- Buddy allocator for VRAM allocation.
- PRAMIN window for direct VRAM access.
- TLB manager for translation buffer operations.

This establishes a clean ownership model in which GpuMm exposes
accessor methods for its components, which can then be used for
memory management operations.

Cc: Nikola Djukic <ndjukic@nvidia.com>
Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
---
 drivers/gpu/nova-core/Kconfig         |  1 +
 drivers/gpu/nova-core/gpu.rs          | 28 +++++++++++
 drivers/gpu/nova-core/gsp/commands.rs |  1 -
 drivers/gpu/nova-core/mm.rs           | 69 ++++++++++++++++++++++++++-
 drivers/gpu/nova-core/mm/pramin.rs    | 20 ++++----
 5 files changed, 108 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
index a4f2380654e2..6513007bf66f 100644
--- a/drivers/gpu/nova-core/Kconfig
+++ b/drivers/gpu/nova-core/Kconfig
@@ -4,6 +4,7 @@ config NOVA_CORE
 	depends on PCI
 	depends on RUST
 	select AUXILIARY_BUS
+	select GPU_BUDDY
 	select RUST_FW_LOADER_ABSTRACTIONS
 	default n
 	help
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index 675a0676f032..fce05e9d1a89 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -4,10 +4,13 @@
     device,
     devres::Devres,
     fmt,
+    gpu::buddy::GpuBuddyParams,
     io::Io,
     num::Bounded,
     pci,
     prelude::*,
+    ptr::Alignment,
+    sizes::SZ_4K,
     sync::Arc, //
 };
 
@@ -25,6 +28,7 @@
         commands::GetGspStaticInfoReply,
         Gsp, //
     },
+    mm::GpuMm,
     regs,
 };
 
@@ -238,6 +242,9 @@ pub(crate) struct Gpu {
     gsp_falcon: Falcon<GspFalcon>,
     /// SEC2 falcon instance, used for GSP boot up and cleanup.
     sec2_falcon: Falcon<Sec2Falcon>,
+    /// GPU memory manager owning memory management resources.
+    #[pin]
+    mm: GpuMm,
     /// GSP runtime data. Temporarily an empty placeholder.
     #[pin]
     gsp: Gsp,
@@ -276,6 +283,13 @@ pub(crate) fn new<'a>(
             gsp_static_info: {
                 let info = gsp.boot(pdev, bar, spec.chipset, gsp_falcon, sec2_falcon)?;
 
+                dev_info!(
+                    pdev.as_ref(),
+                    "Using FB region: {:#x}..{:#x}\n",
+                    info.usable_fb_region.start,
+                    info.usable_fb_region.end
+                );
+
                 dev_info!(
                     pdev.as_ref(),
                     "Total physical VRAM: {} MiB\n",
@@ -285,6 +299,20 @@ pub(crate) fn new<'a>(
                 info
             },
 
+            // Create GPU memory manager owning memory management resources.
+            mm <- {
+                let usable_vram = &gsp_static_info.usable_fb_region;
+
+                // PRAMIN covers all physical VRAM (including GSP-reserved areas
+                // above the usable region, e.g. the BAR1 page directory).
+                let pramin_vram_region = 0..gsp_static_info.total_fb_end;
+                GpuMm::new(devres_bar.clone(), pdev.as_ref(), GpuBuddyParams {
+                    base_offset: usable_vram.start,
+                    size: usable_vram.end - usable_vram.start,
+                    chunk_size: Alignment::new::<SZ_4K>(),
+                }, pramin_vram_region)?
+            },
+
             bar: devres_bar,
         })
     }
diff --git a/drivers/gpu/nova-core/gsp/commands.rs b/drivers/gpu/nova-core/gsp/commands.rs
index 40b4ef72169d..eeecf81a0ffd 100644
--- a/drivers/gpu/nova-core/gsp/commands.rs
+++ b/drivers/gpu/nova-core/gsp/commands.rs
@@ -194,7 +194,6 @@ fn init(&self) -> impl Init<Self::Command, Self::InitError> {
 pub(crate) struct GetGspStaticInfoReply {
     gpu_name: [u8; 64],
     /// Usable FB (VRAM) region for driver memory allocation.
-    #[expect(dead_code)]
     pub(crate) usable_fb_region: Range<u64>,
     /// End of VRAM.
     pub(crate) total_fb_end: u64,
diff --git a/drivers/gpu/nova-core/mm.rs b/drivers/gpu/nova-core/mm.rs
index 2a8b228b1916..8b8a86980bb6 100644
--- a/drivers/gpu/nova-core/mm.rs
+++ b/drivers/gpu/nova-core/mm.rs
@@ -37,12 +37,77 @@ macro_rules! impl_pfn_bounded {
 
 use kernel::{
     bitfield,
+    device,
+    devres::Devres,
+    gpu::buddy::{
+        GpuBuddy,
+        GpuBuddyParams, //
+    },
     num::Bounded,
     prelude::*,
-    sizes::SZ_4K, //
+    sizes::SZ_4K,
+    sync::Arc, //
 };
 
-use crate::num::u64_as_usize;
+use crate::{
+    driver::Bar0,
+    num::u64_as_usize, //
+};
+
+pub(crate) use tlb::Tlb;
+
+/// GPU Memory Manager - owns all core MM components.
+///
+/// Provides centralized ownership of memory management resources:
+/// - [`GpuBuddy`] allocator for VRAM page table allocation.
+/// - [`pramin::Pramin`] for direct VRAM access.
+/// - [`Tlb`] manager for translation buffer flush operations.
+#[pin_data]
+pub(crate) struct GpuMm {
+    buddy: GpuBuddy,
+    #[pin]
+    pramin: pramin::Pramin,
+    #[pin]
+    tlb: Tlb,
+}
+
+impl GpuMm {
+    /// Create a pin-initializer for `GpuMm`.
+    ///
+    /// `pramin_vram_region` is the full physical VRAM range (including GSP-reserved
+    /// areas). PRAMIN window accesses are validated against this range.
+    pub(crate) fn new(
+        bar: Arc<Devres<Bar0>>,
+        dev: &device::Device<device::Bound>,
+        buddy_params: GpuBuddyParams,
+        pramin_vram_region: core::ops::Range<u64>,
+    ) -> Result<impl PinInit<Self>> {
+        let buddy = GpuBuddy::new(buddy_params)?;
+        let tlb_init = Tlb::new(bar.clone());
+        let pramin_init = pramin::Pramin::new(bar, dev, pramin_vram_region)?;
+
+        Ok(pin_init!(Self {
+            buddy,
+            pramin <- pramin_init,
+            tlb <- tlb_init,
+        }))
+    }
+
+    /// Access the [`GpuBuddy`] allocator.
+    pub(crate) fn buddy(&self) -> &GpuBuddy {
+        &self.buddy
+    }
+
+    /// Access the [`pramin::Pramin`].
+    pub(crate) fn pramin(&self) -> &pramin::Pramin {
+        &self.pramin
+    }
+
+    /// Access the [`Tlb`] manager.
+    pub(crate) fn tlb(&self) -> &Tlb {
+        &self.tlb
+    }
+}
 
 /// Page size in bytes (4 KiB).
 pub(crate) const PAGE_SIZE: usize = SZ_4K;
diff --git a/drivers/gpu/nova-core/mm/pramin.rs b/drivers/gpu/nova-core/mm/pramin.rs
index 57b560ae1e85..cc393f1509d7 100644
--- a/drivers/gpu/nova-core/mm/pramin.rs
+++ b/drivers/gpu/nova-core/mm/pramin.rs
@@ -94,12 +94,12 @@
 };
 
 use kernel::{
+    device,
     devres::Devres,
     io::Io,
     new_mutex,
     num::Bounded,
     prelude::*,
-    revocable::RevocableGuard,
     sizes::{
         SZ_1M,
         SZ_64K, //
@@ -137,7 +137,7 @@ pub(crate) fn $name(&mut self, vram_offset: usize) -> Result<$ty> {
                 self.compute_window(vram_offset, ::core::mem::size_of::<$ty>())?;
 
             if let Some(base) = new_base {
-                Self::write_window_base(&self.bar, base)?;
+                Self::write_window_base(self.bar, base)?;
                 *self.state = base;
             }
             self.bar.$name(bar_offset)
@@ -154,7 +154,7 @@ pub(crate) fn $name(&mut self, vram_offset: usize, value: $ty) -> Result {
                 self.compute_window(vram_offset, ::core::mem::size_of::<$ty>())?;
 
             if let Some(base) = new_base {
-                Self::write_window_base(&self.bar, base)?;
+                Self::write_window_base(self.bar, base)?;
                 *self.state = base;
             }
             self.bar.$name(value, bar_offset)
@@ -188,10 +188,11 @@ impl Pramin {
     /// `vram_region` specifies the valid VRAM address range.
     pub(crate) fn new(
         bar: Arc<Devres<Bar0>>,
+        dev: &device::Device<device::Bound>,
         vram_region: Range<u64>,
     ) -> Result<impl PinInit<Self>> {
-        let bar_access = bar.try_access().ok_or(ENODEV)?;
-        let current_base = Self::read_window_base(&bar_access);
+        let bar_access = bar.access(dev)?;
+        let current_base = Self::read_window_base(bar_access);
 
         Ok(pin_init!(Self {
             bar,
@@ -204,8 +205,11 @@ pub(crate) fn new(
     ///
     /// Returns a [`PraminWindow`] guard that provides VRAM read/write accessors.
     /// The [`PraminWindow`] is exclusive and only one can exist at a time.
-    pub(crate) fn get_window(&self) -> Result<PraminWindow<'_>> {
-        let bar = self.bar.try_access().ok_or(ENODEV)?;
+    pub(crate) fn get_window<'a>(
+        &'a self,
+        dev: &'a device::Device<device::Bound>,
+    ) -> Result<PraminWindow<'a>> {
+        let bar = self.bar.access(dev)?;
         let state = self.state.lock();
         Ok(PraminWindow {
             bar,
@@ -231,7 +235,7 @@ fn read_window_base(bar: &Bar0) -> u64 {
 /// Only one [`PraminWindow`] can exist at a time per [`Pramin`] instance (enforced by the
 /// internal `MutexGuard`).
 pub(crate) struct PraminWindow<'a> {
-    bar: RevocableGuard<'a, Bar0>,
+    bar: &'a Bar0,
     vram_region: Range<u64>,
     state: MutexGuard<'a, u64>,
 }
-- 
2.34.1


  parent reply	other threads:[~2026-04-25 21:15 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-25 21:14 [PATCH v12 00/22] gpu: nova-core: Add memory management support Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 01/22] gpu: nova-core: gsp: Return GspStaticInfo from boot() Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 02/22] gpu: nova-core: gsp: Extract usable FB region from GSP Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 03/22] gpu: nova-core: gsp: Expose total physical VRAM end from FB region info Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 04/22] gpu: nova-core: mm: Add support to use PRAMIN windows to write to VRAM Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 05/22] docs: gpu: nova-core: Document the PRAMIN aperture mechanism Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 06/22] gpu: nova-core: mm: Add common memory management types Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 07/22] gpu: nova-core: mm: Add TLB flush support Joel Fernandes
2026-04-25 21:14 ` Joel Fernandes [this message]
2026-04-25 21:14 ` [PATCH v12 09/22] gpu: nova-core: mm: Add common types for all page table formats Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 10/22] gpu: nova-core: mm: Add MMU v2 page table types Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 11/22] gpu: nova-core: mm: Add MMU v3 " Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 12/22] gpu: nova-core: mm: Add page table entry operation traits Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 13/22] gpu: nova-core: mm: Add page table walker for MMU v2/v3 Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 14/22] gpu: nova-core: mm: Add Virtual Memory Manager Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 15/22] gpu: nova-core: mm: Add virtual address range tracking to VMM Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 16/22] gpu: nova-core: mm: Add multi-page mapping API " Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 17/22] gpu: nova-core: Add BAR1 aperture type and size constant Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 18/22] gpu: nova-core: mm: Add BAR1 user interface Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 19/22] gpu: nova-core: mm: Add BAR1 memory management self-tests Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 20/22] gpu: nova-core: mm: Add PRAMIN aperture self-tests Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 21/22] gpu: nova-core: mm: pramin: drop useless as_ref() in run_self_test Joel Fernandes
2026-04-25 21:14 ` [PATCH v12 22/22] rust: maple_tree: implement Send and Sync for MapleTree Joel Fernandes

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260425211454.174696-9-joelagnelf@nvidia.com \
    --to=joelagnelf@nvidia.com \
    --cc=a.hindborg@kernel.org \
    --cc=acourbot@nvidia.com \
    --cc=airlied@gmail.com \
    --cc=airlied@redhat.com \
    --cc=alexeyi@nvidia.com \
    --cc=aliceryhl@google.com \
    --cc=apopple@nvidia.com \
    --cc=arighi@nvidia.com \
    --cc=aritger@nvidia.com \
    --cc=balbirs@nvidia.com \
    --cc=bjorn3_gh@protonmail.com \
    --cc=boqun.feng@gmail.com \
    --cc=boqun@kernel.org \
    --cc=dakr@kernel.org \
    --cc=daniel.almeida@collabora.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=ecourtney@nvidia.com \
    --cc=epeer@nvidia.com \
    --cc=gary@garyguo.net \
    --cc=jhubbard@nvidia.com \
    --cc=joel@joelfernandes.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=lossin@kernel.org \
    --cc=ndjukic@nvidia.com \
    --cc=nova-gpu@lists.linux.dev \
    --cc=ojeda@kernel.org \
    --cc=phasta@kernel.org \
    --cc=rust-for-linux@vger.kernel.org \
    --cc=tmgross@umich.edu \
    --cc=ttabi@nvidia.com \
    --cc=zhiw@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox