The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=41dfea24eec242e1e083e2a879483a7c05c7e2ff

commit 41dfea24eec242e1e083e2a879483a7c05c7e2ff
Author:     Alan Cox <a...@freebsd.org>
AuthorDate: 2024-06-01 18:17:52 +0000
Commit:     Alan Cox <a...@freebsd.org>
CommitDate: 2024-06-05 04:25:51 +0000

    arm64 pmap: Enable L3C promotions by pmap_enter_quick()
    
    More precisely, implement L3C (64KB/2MB, depending on base page size)
    promotion in pmap_enter_quick()'s helper function,
    pmap_enter_quick_locked().  At the same time, use the recently
    introduced flag VM_PROT_NO_PROMOTE from pmap_enter_object() to
    pmap_enter_quick_locked() to avoid L3C promotion attempts that will
    fail.
    
    Reviewed by:    kib
    Differential Revision:  https://reviews.freebsd.org/D45445
---
 sys/arm64/arm64/pmap.c | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 03d0a1cc6676..8ac7b8f6a135 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -5883,9 +5883,19 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
                    ((rv = pmap_enter_l3c_rx(pmap, va, m, &mpte, prot,
                    &lock)) == KERN_SUCCESS || rv == KERN_NO_SPACE))
                        m = &m[L3C_ENTRIES - 1];
-               else
-                       mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
-                           &lock);
+               else {
+                       /*
+                        * In general, if a superpage mapping were possible,
+                        * it would have been created above.  That said, if
+                        * start and end are not superpage aligned, then
+                        * promotion might be possible at the ends of [start,
+                        * end).  However, in practice, those promotion
+                        * attempts are so unlikely to succeed that they are
+                        * not worth trying.
+                        */
+                       mpte = pmap_enter_quick_locked(pmap, va, m, prot |
+                           VM_PROT_NO_PROMOTE, mpte, &lock);
+               }
                m = TAILQ_NEXT(m, listq);
        }
        if (lock != NULL)
@@ -6048,12 +6058,19 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 
 #if VM_NRESERVLEVEL > 0
        /*
-        * If both the PTP and the reservation are fully populated, then
-        * attempt promotion.
+        * First, attempt L3C promotion, if the virtual and physical addresses
+        * are aligned with each other and an underlying reservation has the
+        * neighboring L3 pages allocated.  The first condition is simply an
+        * optimization that recognizes some eventual promotion failures early
+        * at a lower run-time cost.  Then, attempt L2 promotion, if both the
+        * PTP and the reservation are fully populated.
         */
        if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
-           (mpte == NULL || mpte->ref_count == NL3PG) &&
+           (va & L3C_OFFSET) == (pa & L3C_OFFSET) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
+           vm_reserv_is_populated(m, L3C_ENTRIES) &&
+           pmap_promote_l3c(pmap, l3, va) &&
+           (mpte == NULL || mpte->ref_count == NL3PG) &&
            vm_reserv_level_iffullpop(m) == 0) {
                if (l2 == NULL)
                        l2 = pmap_pde(pmap, va, &lvl);

Reply via email to