The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=2cfd6342ac84c6915106fbe141795ca23a96cbba

commit 2cfd6342ac84c6915106fbe141795ca23a96cbba
Author:     Alan Cox <a...@freebsd.org>
AuthorDate: 2025-07-08 23:31:26 +0000
Commit:     Alan Cox <a...@freebsd.org>
CommitDate: 2025-07-13 16:55:47 +0000

    arm64 pmap: do not panic when unable to insert PTP into trie
    
    When pmap_enter_l2() needs to destroy an existing kernel superpage
    mapping, do not remove the saved page table page (PTP) from the pm_root
    trie and remap it into the page table.  Instead, simply zero it.  As a
    result, the PTP does not later need to be unmapped and reinserted into
    the trie.
    
    If the kernel region is not mapped by a superpage, then try to insert
    the PTP into the pm_root trie before clearing the PTEs.  If the PTP
    insertion fails, then we can return failure with the old mappings still
    in place.
    
    Convert an assertion from a panic to a KASSERT.
    
    Reviewed by:    kib, markj (an earlier version)
    MFC after:      1 week
    Differential Revision:  https://reviews.freebsd.org/D51220
---
 sys/arm64/arm64/pmap.c | 84 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 54 insertions(+), 30 deletions(-)

diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index dbd1ed7b316b..a09da794e77d 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -497,7 +497,8 @@ static bool pmap_pv_insert_l3c(pmap_t pmap, vm_offset_t va, vm_page_t m,
     struct rwlock **lockp);
 static void pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
-    pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
+    pd_entry_t l1e, bool demote_kl2e, struct spglist *free,
+    struct rwlock **lockp);
 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
 static bool pmap_remove_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
@@ -3847,8 +3848,7 @@ pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
        ml3 = pmap_remove_pt_page(pmap, va);
-       if (ml3 == NULL)
-               panic("pmap_remove_kernel_l2: Missing pt page");
+       KASSERT(ml3 != NULL, ("pmap_remove_kernel_l2: missing pt page"));
 
        ml3pa = VM_PAGE_TO_PHYS(ml3);
        newl2 = PHYS_TO_PTE(ml3pa) | L2_TABLE;
@@ -3873,8 +3873,8 @@ pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
  * pmap_remove_l2: Do the things to unmap a level 2 superpage.
  */
 static int
-pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
-    pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
+pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pd_entry_t l1e,
+    bool demote_kl2e, struct spglist *free, struct rwlock **lockp)
 {
        struct md_page *pvh;
        pt_entry_t old_l2;
@@ -3910,9 +3910,7 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
                                vm_page_aflag_clear(mt, PGA_WRITEABLE);
                }
        }
-       if (pmap == kernel_pmap) {
-               pmap_remove_kernel_l2(pmap, l2, sva);
-       } else {
+       if (pmap != kernel_pmap) {
                ml3 = pmap_remove_pt_page(pmap, sva);
                if (ml3 != NULL) {
                        KASSERT(vm_page_any_valid(ml3),
@@ -3923,6 +3921,14 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
                        ml3->ref_count = 0;
                        pmap_add_delayed_free_list(ml3, free, false);
                }
+       } else if (demote_kl2e) {
+               pmap_remove_kernel_l2(pmap, l2, sva);
+       } else {
+               ml3 = vm_radix_lookup(&pmap->pm_root, pmap_l2_pindex(sva));
+               if (vm_page_any_valid(ml3)) {
+                       ml3->valid = 0;
+                       pmap_zero_page(ml3);
+               }
        }
        return (pmap_unuse_pt(pmap, sva, l1e, free));
 }
@@ -4232,7 +4238,7 @@ pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete)
                if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
                        if (sva + L2_SIZE == va_next && eva >= va_next) {
                                pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
-                                   &free, &lock);
+                                   true, &free, &lock);
                                continue;
                        } else if (pmap_demote_l2_locked(pmap, l2, sva,
                            &lock) == NULL)
@@ -5747,33 +5753,51 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
                        }
                }
                SLIST_INIT(&free);
-               if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
+               if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
                        (void)pmap_remove_l2(pmap, l2, va,
-                           pmap_load(pmap_l1(pmap, va)), &free, lockp);
-               else
+                           pmap_load(pmap_l1(pmap, va)), false, &free, lockp);
+               } else {
+                       if (ADDR_IS_KERNEL(va)) {
+                               /*
+                                * Try to save the ptp in the trie
+                                * before any changes to mappings are
+                                * made.  Abort on failure.
+                                */
+                               mt = PTE_TO_VM_PAGE(old_l2);
+                               if (pmap_insert_pt_page(pmap, mt, false,
+                                   false)) {
+                                       CTR1(KTR_PMAP,
+                           "pmap_enter_l2: cannot ins kern ptp va %#lx",
+                                           va);
+                                       return (KERN_RESOURCE_SHORTAGE);
+                               }
+                               /*
+                                * Both pmap_remove_l2() and
+                                * pmap_remove_l3_range() will zero fill
+                                * the L3 kernel page table page.
+                                */
+                       }
                        pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
                            &free, lockp);
+                       if (ADDR_IS_KERNEL(va)) {
+                               /*
+                                * The TLB could have an intermediate
+                                * entry for the L3 kernel page table
+                                * page, so request an invalidation at
+                                * all levels after clearing the
+                                * L2_TABLE entry.
+                                */
+                               pmap_clear(l2);
+                               pmap_s1_invalidate_page(pmap, va, false);
+                       }
+               }
+               KASSERT(pmap_load(l2) == 0,
+                   ("pmap_enter_l2: non-zero L2 entry %p", l2));
                if (!ADDR_IS_KERNEL(va)) {
                        vm_page_free_pages_toq(&free, true);
-                       KASSERT(pmap_load(l2) == 0,
-                           ("pmap_enter_l2: non-zero L2 entry %p", l2));
                } else {
                        KASSERT(SLIST_EMPTY(&free),
                            ("pmap_enter_l2: freed kernel page table page"));
-
-                       /*
-                        * Both pmap_remove_l2() and pmap_remove_l3_range()
-                        * will leave the kernel page table page zero filled.
-                        * Nonetheless, the TLB could have an intermediate
-                        * entry for the kernel page table page, so request
-                        * an invalidation at all levels after clearing
-                        * the L2_TABLE entry.
-                        */
-                       mt = PTE_TO_VM_PAGE(pmap_load(l2));
-                       if (pmap_insert_pt_page(pmap, mt, false, false))
-                               panic("pmap_enter_l2: trie insert failed");
-                       pmap_clear(l2);
-                       pmap_s1_invalidate_page(pmap, va, false);
                }
        }
 
@@ -8426,8 +8450,8 @@ pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
        struct spglist free;
 
        SLIST_INIT(&free);
-       (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
-           lockp);
+       (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), true,
+           &free, lockp);
        vm_page_free_pages_toq(&free, true);
 }
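
For readers skimming the patch, the new kernel-pmap branch in pmap_remove_l2()
boils down to the control flow below.  This is a condensed paraphrase of the
hunks above rather than standalone, compilable code; every identifier is taken
from the diff, and unrelated details are elided.

    static int
    pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pd_entry_t l1e,
        bool demote_kl2e, struct spglist *free, struct rwlock **lockp)
    {
            ...
            if (pmap != kernel_pmap) {
                    /* User pmap: hand the saved PTP to the free list, as before. */
                    ...
            } else if (demote_kl2e) {
                    /*
                     * Callers such as pmap_remove1() still want the kernel L2
                     * range demoted, i.e., the saved PTP pulled out of pm_root
                     * and remapped via pmap_remove_kernel_l2().
                     */
                    pmap_remove_kernel_l2(pmap, l2, sva);
            } else {
                    /*
                     * pmap_enter_l2() passes demote_kl2e == false: leave the
                     * saved PTP in the pm_root trie and just invalidate and
                     * zero it, so it never has to be unmapped and reinserted.
                     */
                    ml3 = vm_radix_lookup(&pmap->pm_root, pmap_l2_pindex(sva));
                    if (vm_page_any_valid(ml3)) {
                            ml3->valid = 0;
                            pmap_zero_page(ml3);
                    }
            }
            return (pmap_unuse_pt(pmap, sva, l1e, free));
    }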
 
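The reordered replacement path in pmap_enter_l2() looks roughly like this;
again a paraphrase of the hunk above, with the KASSERTs, KTR tracing, and the
user-pmap cleanup left out.

    if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
            /* Old 2MB block mapping: pmap_remove_l2() zeroes the saved PTP. */
            (void)pmap_remove_l2(pmap, l2, va,
                pmap_load(pmap_l1(pmap, va)), false, &free, lockp);
    } else {
            if (ADDR_IS_KERNEL(va)) {
                    /*
                     * Reserve the trie slot for the L3 PTP before any mapping
                     * is touched; on failure nothing has changed yet and the
                     * caller sees KERN_RESOURCE_SHORTAGE instead of the old
                     * panic.
                     */
                    mt = PTE_TO_VM_PAGE(old_l2);
                    if (pmap_insert_pt_page(pmap, mt, false, false))
                            return (KERN_RESOURCE_SHORTAGE);
            }
            /* Zero-fills the L3 kernel page table page as a side effect. */
            pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE, &free, lockp);
            if (ADDR_IS_KERNEL(va)) {
                    /*
                     * Clear the L2_TABLE entry and flush at all levels, since
                     * the TLB may hold an intermediate entry for the PTP.
                     */
                    pmap_clear(l2);
                    pmap_s1_invalidate_page(pmap, va, false);
            }
    }

The key ordering point is that the only step that can fail, the trie
insertion, now happens before any PTE is cleared, so pmap_enter_l2() can back
out with the old mappings still intact.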
