The branch stable/13 has been updated by scottph:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=89166c063a9a136ba3780bb069e03e6049c53fd5

commit 89166c063a9a136ba3780bb069e03e6049c53fd5
Author:     Andrew Turner <[email protected]>
AuthorDate: 2022-03-10 14:39:03 +0000
Commit:     D Scott Phillips <[email protected]>
CommitDate: 2022-03-29 15:47:43 +0000

    Fix arm64 TLB invalidation with non-4k pages
    
    When using 16k or 64k pages, atop() will shift the address by more
    than the amount needed for a tlbi instruction. Replace this with a new
    macro to shift the address by 12, and use PAGE_SIZE in the for loop so
    the code works with any page size.
    
    Reviewed by:    alc, markj
    Sponsored by:   The FreeBSD Foundation
    Differential Revision: https://reviews.freebsd.org/D34516
    
    (cherry picked from commit 813738fabaaea43503724b8371faf5bab73a3047)
---
 sys/arm64/arm64/pmap.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index c105e7cae16b..26e62c9e3cbb 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -360,6 +360,10 @@ void (*pmap_invalidate_vpipt_icache)(void);
 #define        COOKIE_TO_ASID(cookie)          ((int)(cookie))
 #define        COOKIE_TO_EPOCH(cookie)         ((int)((u_long)(cookie) >> 32))
 
+#define        TLBI_VA_SHIFT                   12
+#define        TLBI_VA(addr)                   ((addr) >> TLBI_VA_SHIFT)
+#define        TLBI_VA_L3_INCR                 (L3_SIZE >> TLBI_VA_SHIFT)
+
 static int superpages_enabled = 1;
 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
@@ -1186,11 +1190,11 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
        PMAP_ASSERT_STAGE1(pmap);
 
        dsb(ishst);
+       r = TLBI_VA(va);
        if (pmap == kernel_pmap) {
-               r = atop(va);
                __asm __volatile("tlbi vaae1is, %0" : : "r" (r));
        } else {
-               r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va);
+               r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
                __asm __volatile("tlbi vae1is, %0" : : "r" (r));
        }
        dsb(ish);
@@ -1206,15 +1210,15 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, 
vm_offset_t eva)
 
        dsb(ishst);
        if (pmap == kernel_pmap) {
-               start = atop(sva);
-               end = atop(eva);
-               for (r = start; r < end; r++)
+               start = TLBI_VA(sva);
+               end = TLBI_VA(eva);
+               for (r = start; r < end; r += TLBI_VA_L3_INCR)
                        __asm __volatile("tlbi vaae1is, %0" : : "r" (r));
        } else {
                start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
-               start |= atop(sva);
-               end |= atop(eva);
-               for (r = start; r < end; r++)
+               start |= TLBI_VA(sva);
+               end |= TLBI_VA(eva);
+               for (r = start; r < end; r += TLBI_VA_L3_INCR)
                        __asm __volatile("tlbi vae1is, %0" : : "r" (r));
        }
        dsb(ish);

Reply via email to