Add the Virtual Memory Manager (VMM) for GPU address space management.
The VMM provides high-level page mapping and unmapping operations for
the BAR1 and BAR2 address spaces.

The VMM supports mapping, unmapping, mapping lookup, and page table
allocation. It uses GpuMm for access to the buddy allocator, PRAMIN,
and the TLB, and extends the page table walker with
walk_to_pte_allocate() for on-demand page table creation.
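
A minimal usage sketch, assuming the caller already holds a GpuMm and
a PDB address from its address-space setup (mm and pdb_addr below are
illustrative names, not part of this patch):

    let mut vmm = Vmm::new(pdb_addr, MmuVersion::V2)?;
    // Map VFN 0x100 to PFN 0x200 as writable; missing intermediate
    // page tables are allocated on demand and the TLB is flushed.
    vmm.map_page(mm, Vfn::new(0x100), Pfn::new(0x200), true)?;
    // Look up the mapping; returns Some(pfn) while the page is mapped.
    let _pfn = vmm.read_mapping(mm, Vfn::new(0x100))?;
    // Invalidate the PTE again; a no-op if the page is not mapped.
    vmm.unmap_page(mm, Vfn::new(0x100))?;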

Signed-off-by: Joel Fernandes <[email protected]>
---
 drivers/gpu/nova-core/mm/mod.rs |   1 +
 drivers/gpu/nova-core/mm/vmm.rs | 203 ++++++++++++++++++++++++++++++++
 2 files changed, 204 insertions(+)
 create mode 100644 drivers/gpu/nova-core/mm/vmm.rs

diff --git a/drivers/gpu/nova-core/mm/mod.rs b/drivers/gpu/nova-core/mm/mod.rs
index 56c72bf51431..53d726eb7296 100644
--- a/drivers/gpu/nova-core/mm/mod.rs
+++ b/drivers/gpu/nova-core/mm/mod.rs
@@ -7,6 +7,7 @@
 pub(crate) mod pagetable;
 pub(crate) mod pramin;
 pub(crate) mod tlb;
+pub(crate) mod vmm;
 
 use kernel::{
     devres::Devres,
diff --git a/drivers/gpu/nova-core/mm/vmm.rs b/drivers/gpu/nova-core/mm/vmm.rs
new file mode 100644
index 000000000000..a5b4af9053a0
--- /dev/null
+++ b/drivers/gpu/nova-core/mm/vmm.rs
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Virtual Memory Manager for NVIDIA GPU page table management.
+//!
+//! The [`Vmm`] provides high-level page mapping and unmapping operations for GPU
+//! virtual address spaces (Channels, BAR1, BAR2). It wraps the page table walker
+//! and handles TLB flushing after modifications.
+//!
+//! # Example
+//!
+//! ```ignore
+//! use crate::mm::vmm::Vmm;
+//! use crate::mm::{GpuMm, Pfn, Vfn, VramAddress};
+//! use crate::mm::pagetable::MmuVersion;
+//!
+//! fn map_example(mm: &mut GpuMm, pdb_addr: VramAddress) -> Result<()> {
+//!     let mut vmm = Vmm::new(pdb_addr, MmuVersion::V2)?;
+//!
+//!     // Map virtual frame 0x100 to physical frame 0x200.
+//!     let vfn = Vfn::new(0x100);
+//!     let pfn = Pfn::new(0x200);
+//!     vmm.map_page(mm, vfn, pfn, true /* writable */)?;
+//!
+//!     Ok(())
+//! }
+//! ```
+
+#![allow(dead_code)]
+
+use kernel::{
+    gpu::buddy::{
+        AllocatedBlocks,
+        BuddyFlags,
+        GpuBuddyAllocParams, //
+    },
+    prelude::*,
+    sizes::SZ_4K,
+    sync::Arc, //
+};
+
+use crate::mm::{
+    pagetable::{
+        walk::{
+            write_pte,
+            PtWalk,
+            WalkResult, //
+        },
+        MmuVersion,
+        PageTableAllocator,
+        Pte, //
+    },
+    GpuMm,
+    Pfn,
+    Vfn,
+    VramAddress,
+    PAGE_SIZE, //
+};
+
+/// Virtual Memory Manager for a GPU address space.
+///
+/// Each [`Vmm`] instance manages a single address space identified by its Page
+/// Directory Base (`PDB`) address. The [`Vmm`] is used for BAR1 and BAR2 mappings.
+///
+/// The [`Vmm`] tracks all page table allocations made during mapping operations
+/// to ensure they remain valid for the lifetime of the address space.
+pub(crate) struct Vmm {
+    pdb_addr: VramAddress,
+    mmu_version: MmuVersion,
+    /// Page table allocations that must persist for the lifetime of mappings.
+    page_table_allocs: KVec<Arc<AllocatedBlocks>>,
+}
+
+impl Vmm {
+    /// Create a new [`Vmm`] for the given Page Directory Base address.
+    pub(crate) fn new(pdb_addr: VramAddress, mmu_version: MmuVersion) -> Result<Self> {
+        // Only MMU v2 is supported for now.
+        if mmu_version != MmuVersion::V2 {
+            return Err(ENOTSUPP);
+        }
+
+        Ok(Self {
+            pdb_addr,
+            mmu_version,
+            page_table_allocs: KVec::new(),
+        })
+    }
+
+    /// Get the Page Directory Base address.
+    pub(crate) fn pdb_addr(&self) -> VramAddress {
+        self.pdb_addr
+    }
+
+    /// Get the MMU version.
+    pub(crate) fn mmu_version(&self) -> MmuVersion {
+        self.mmu_version
+    }
+
+    /// Allocate a new page table, zero it, and track the allocation.
+    ///
+    /// This method ensures page table allocations persist for the lifetime of
+    /// the [`Vmm`].
+    pub(crate) fn alloc_page_table(&mut self, mm: &mut GpuMm) -> Result<VramAddress> {
+        let params = GpuBuddyAllocParams {
+            start_range_address: 0,
+            end_range_address: 0,
+            size_bytes: SZ_4K as u64,
+            min_block_size_bytes: SZ_4K as u64,
+            buddy_flags: BuddyFlags::try_new(0)?,
+        };
+
+        // Use buddy first, then pramin (sequential to avoid overlapping borrows).
+        let blocks = mm.buddy().alloc_blocks(params)?;
+        let offset = blocks.iter().next().ok_or(ENOMEM)?.offset();
+        let addr = VramAddress::new(offset);
+
+        // Zero the page table using pramin.
+        let base = addr.raw();
+        for offset in (0..PAGE_SIZE).step_by(8) {
+            mm.pramin().try_write64(base + offset, 0)?;
+        }
+
+        // Track the page table allocation.
+        self.page_table_allocs.push(blocks, GFP_KERNEL)?;
+
+        Ok(addr)
+    }
+
+    /// Map a 4KB page with on-demand page table allocation.
+    ///
+    /// Walks the page table hierarchy and allocates any missing intermediate
+    /// tables using the buddy allocator from [`GpuMm`].
+    pub(crate) fn map_page(
+        &mut self,
+        mm: &mut GpuMm,
+        vfn: Vfn,
+        pfn: Pfn,
+        writable: bool,
+    ) -> Result {
+        // Create page table walker.
+        let walker = PtWalk::new(self.pdb_addr, self.mmu_version);
+
+        // Walk to PTE address, allocating tables as needed.
+        let pte_addr = match walker.walk_to_pte_allocate(mm, self, vfn)? {
+            WalkResult::Unmapped { pte_addr } | WalkResult::Mapped { pte_addr, .. } => pte_addr,
+            WalkResult::PageTableMissing => {
+                // Should not happen with allocate mode.
+                return Err(EINVAL);
+            }
+        };
+
+        // Create and write PTE.
+        let pte = Pte::new_vram(self.mmu_version, pfn, writable);
+        write_pte(mm.pramin(), pte_addr, pte)?;
+
+        // Flush the TLB.
+        mm.tlb().flush(self.pdb_addr)?;
+
+        Ok(())
+    }
+
+    /// Unmap a 4KB page.
+    ///
+    /// Invalidates the [`Pte`] at the given virtual frame number. Does nothing if
+    /// the page is not currently mapped.
+    pub(crate) fn unmap_page(&self, mm: &mut GpuMm, vfn: Vfn) -> Result {
+        // Create page table walker.
+        let walker = PtWalk::new(self.pdb_addr, self.mmu_version);
+
+        // Walk to PTE address.
+        let pte_addr = match walker.walk_to_pte_lookup(mm, vfn)? {
+            WalkResult::Unmapped { pte_addr } | WalkResult::Mapped { pte_addr, .. } => pte_addr,
+            WalkResult::PageTableMissing => return Ok(()), // Nothing to unmap.
+        };
+
+        // Invalidate PTE.
+        let invalid_pte = Pte::invalid(self.mmu_version);
+        write_pte(mm.pramin(), pte_addr, invalid_pte)?;
+
+        // Flush the TLB.
+        mm.tlb().flush(self.pdb_addr)?;
+
+        Ok(())
+    }
+
+    /// Read the [`Pfn`] for a mapped virtual frame number.
+    ///
+    /// Returns `Some(pfn)` if the [`Vfn`] is mapped, `None` otherwise.
+    pub(crate) fn read_mapping(&self, mm: &mut GpuMm, vfn: Vfn) -> Result<Option<Pfn>> {
+        // Create page table walker.
+        let walker = PtWalk::new(self.pdb_addr, self.mmu_version);
+
+        match walker.walk_to_pte_lookup(mm, vfn)? {
+            WalkResult::Mapped { pfn, .. } => Ok(Some(pfn)),
+            WalkResult::Unmapped { .. } | WalkResult::PageTableMissing => Ok(None),
+        }
+    }
+}
+
+impl PageTableAllocator for Vmm {
+    fn alloc_page_table(&mut self, mm: &mut GpuMm) -> Result<VramAddress> {
+        Vmm::alloc_page_table(self, mm)
+    }
+}
-- 
2.34.1
