Currently we mark page table entries as invalid to do reference
emulation.  This is a bit complicated as the XN bit for "small"
(normal 4K) pages has been stuffed into the two-bit type field.
That's what all those

        /* XXX use of L2_V7_S_XN */

comments in the code were for.  But ARMv7 defines a proper Access Flag
bit.  The idea is that if you want to track page references, you clear
the bit.  At that point accessing the page will fault.  When that
happens, we update the referenced flag and set the Access Flag in the
page table to prevent further faults.

The diff below implements that.  It also rationalizes the use of the
page table entry type bits and recognizes that AP[2] is now a bit that
indicates whether a page is write-protected and that AP[1] is now a
bit that indicates that a page is accessible from userland.

Makes life simpler, and paves the way for supporting the XN bit, which
will be addressed in the next diff.

ok?


Index: arch/arm/arm/fault.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/arm/fault.c,v
retrieving revision 1.20
diff -u -p -r1.20 fault.c
--- arch/arm/arm/fault.c        27 Feb 2016 13:08:06 -0000      1.20
+++ arch/arm/arm/fault.c        18 Aug 2016 19:25:28 -0000
@@ -154,10 +154,10 @@ static const struct data_abort data_abor
        {dab_fatal,     "V7 fault 00000"},
        {dab_align,     "Alignment Fault 1"},
        {dab_fatal,     "Debug Event"},
-       {dab_fatal,     "Access Flag Fault (S)"},
+       {NULL,          "Access Flag Fault (S)"},
        {dab_buserr,    "External Linefetch Abort (S)"},
        {NULL,          "Translation Fault (S)"},
-       {dab_fatal,     "Access Flag Fault (P)"},
+       {NULL,          "Access Flag Fault (P)"},
        {NULL,          "Translation Fault (P)"},
        {dab_buserr,    "External Non-Linefetch Abort (S)"},
        {NULL,          "Domain Fault (S)"},
Index: arch/arm/arm/pmap7.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/arm/pmap7.c,v
retrieving revision 1.39
diff -u -p -r1.39 pmap7.c
--- arch/arm/arm/pmap7.c        18 Aug 2016 09:28:22 -0000      1.39
+++ arch/arm/arm/pmap7.c        18 Aug 2016 19:25:28 -0000
@@ -921,27 +921,24 @@ pmap_clearbit(struct vm_page *pg, u_int 
                    pv, pv->pv_pmap, pv->pv_va, oflags));
 
                if (maskbits & (PVF_WRITE|PVF_MOD)) {
-                       /* make the pte read only */
-                       npte = (npte & ~L2_S_PROT_MASK) |
-                           L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
-                             npte & L2_V7_S_XN ? PROT_READ : PROT_READ | PROT_EXEC);
+                       /* Disable write access. */
+                       npte |= L2_V7_AP(0x4);
                }
 
                if (maskbits & PVF_REF) {
                        /*
-                        * Make the PTE invalid so that we will take a
-                        * page fault the next time the mapping is
-                        * referenced.
+                        * Clear the Access Flag such that we will
+                        * take a page fault the next time the mapping
+                        * is referenced.
                         */
-                       npte = (npte & ~L2_TYPE_MASK) | L2_TYPE_INV |
-                           (npte & L2_V7_S_XN);
+                       npte &= ~L2_V7_AF;
                }
 
                if (npte != opte) {
                        *ptep = npte;
                        PTE_SYNC(ptep);
                        /* Flush the TLB entry if a current pmap. */
-                       if (l2pte_valid(opte))
+                       if (opte & L2_V7_AF)
                                pmap_tlb_flushID_SE(pm, pv->pv_va);
                }
 
@@ -997,7 +994,7 @@ pmap_page_remove(struct vm_page *pg)
        struct l2_bucket *l2b;
        struct pv_entry *pv, *npv;
        pmap_t pm, curpm;
-       pt_entry_t *ptep;
+       pt_entry_t *ptep, opte;
        boolean_t flush;
 
        NPDEBUG(PDB_FOLLOW,
@@ -1020,9 +1017,10 @@ pmap_page_remove(struct vm_page *pg)
                KDASSERT(l2b != NULL);
 
                ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-               if (*ptep != 0) {
+               opte = *ptep;
+               if (opte != L2_TYPE_INV) {
                        /* inline pmap_is_current(pm) */
-                       if (l2pte_valid(*ptep) &&
+                       if ((opte & L2_V7_AF) &&
                            (pm == curpm || pm == pmap_kernel())) {
                                if (PV_BEEN_EXECD(pv->pv_flags))
                                        cpu_icache_sync_range(pv->pv_va, PAGE_SIZE);
@@ -1136,9 +1134,9 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
        }
        ptep = &l2b->l2b_kva[l2pte_index(va)];
        opte = *ptep;
-       npte = pa | L2_V7_AF;
+       npte = L2_S_PROTO | pa;
 
-       if (opte != 0) {        /* not l2pte_valid!!! MIOD */
+       if (opte != L2_TYPE_INV) {
                /*
                 * There is already a mapping at this address.
                 * If the physical address is different, lookup the
@@ -1164,9 +1162,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
                         * - The physical page has already been referenced
                         *   so no need to re-do referenced emulation here.
                         */
-                       npte |= L2_S_PROTO;
-
                        nflags |= PVF_REF;
+                       npte |= L2_V7_AF;
 
                        if ((prot & PROT_WRITE) != 0 &&
                            ((flags & PROT_WRITE) != 0 ||
@@ -1183,8 +1180,6 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
                        /*
                         * Need to do page referenced emulation.
                         */
-                       npte &= ~L2_TYPE_MASK;
-                       npte |= L2_TYPE_INV;
                        prot &= ~PROT_WRITE;
                        mapped = 0;
                }
@@ -1231,7 +1226,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
                 * These are always readable, and possibly writable, from
                 * the get go as we don't need to track ref/mod status.
                 */
-               npte |= L2_S_PROTO;
+               npte |= L2_V7_AF;
 
                if (opg) {
                        /*
@@ -1252,7 +1247,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
        /*
         * Keep the stats up to date
         */
-       if (opte == 0) {        /* !! not l2pte_valid MIOD */
+       if (opte == L2_TYPE_INV) {
                l2b->l2b_occupancy++;
                pm->pm_stats.resident_count++;
        } 
@@ -1271,7 +1266,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
                 * is current
                 */
                PTE_SYNC(ptep);
-               if (/* va != vector_page && */ l2pte_valid(npte)) {
+               if (npte & L2_V7_AF) {
                        /*
                         * This mapping is likely to be accessed as
                         * soon as we return to userland. Fix up the
@@ -1289,7 +1284,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
                        }
                }
 
-               if (l2pte_valid(opte))
+               if (opte & L2_V7_AF)
                        pmap_tlb_flushID_SE(pm, va);
        }
 
@@ -1345,7 +1340,7 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 
                        pte = *ptep;
 
-                       if (pte == 0) { /* !!! not l2pte_valid */
+                       if (pte == L2_TYPE_INV) {
                                /*
                                 * Nothing here, move along
                                 */
@@ -1380,7 +1375,7 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 
                        *ptep = L2_TYPE_INV;
                        PTE_SYNC(ptep);
-                       if (l2pte_valid(pte))
+                       if (pte & L2_V7_AF)
                                pmap_tlb_flushID_SE(pm, sva);
 
                        sva += PAGE_SIZE;
@@ -1422,7 +1417,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
        ptep = &l2b->l2b_kva[l2pte_index(va)];
        opte = *ptep;
 
-       if (opte == 0)
+       if (opte == L2_TYPE_INV)
                l2b->l2b_occupancy++;
 
        if (pa & PMAP_DEVICE)
@@ -1434,7 +1429,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
            L2_S_PROT(PTE_KERNEL, prot) | cache_mode;
        *ptep = npte;
        PTE_SYNC(ptep);
-       if (l2pte_valid(opte)) {
+       if (opte & L2_V7_AF) {
                cpu_tlb_flushD_SE(va);
                cpu_cpwait();
        }
@@ -1479,12 +1474,12 @@ pmap_kremove(vaddr_t va, vsize_t len)
 
                while (va < next_bucket) {
                        opte = *ptep;
-                       if (opte != 0) {        /* !! not l2pte_valid */
+                       if (opte != L2_TYPE_INV) {
                                *ptep = L2_TYPE_INV;
                                PTE_SYNC(ptep);
                                mappings++;
                        }
-                       if (l2pte_valid(opte))
+                       if (opte & L2_V7_AF)
                                cpu_tlb_flushD_SE(va);
                        va += PAGE_SIZE;
                        ptep++;
@@ -1531,7 +1526,7 @@ pmap_extract(pmap_t pm, vaddr_t va, padd
                ptep = &ptep[l2pte_index(va)];
                pte = *ptep;
 
-               if (pte == 0)   /* !!! not l2pte_valid */
+               if (pte == L2_TYPE_INV)
                        return (FALSE);
 
                switch (pte & L2_TYPE_MASK) {
@@ -1558,7 +1553,7 @@ void
 pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 {
        struct l2_bucket *l2b;
-       pt_entry_t *ptep, pte, opte;
+       pt_entry_t *ptep, opte;
        vaddr_t next_bucket;
        u_int flags;
        int flush;
@@ -1609,17 +1604,12 @@ NPDEBUG(PDB_PROTECT, printf("\n"));
 
                while (sva < next_bucket) {
                        opte = *ptep;
-                       /* !!! not l2pte_valid */
-/* XXX actually would only matter if really valid ??? */
-                       if (opte != 0 && l2pte_is_writeable(opte, pm)) {
+                       if (opte != L2_TYPE_INV && l2pte_is_writeable(opte, pm)) {
                                struct vm_page *pg;
                                u_int f;
 
                                pg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
-                               pte = (opte & ~L2_S_PROT_MASK) |
-                                   L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
-                                     opte & L2_V7_S_XN ? PROT_READ : PROT_READ | PROT_EXEC);
-                               *ptep = pte;
+                               *ptep = opte | L2_V7_AP(0x4);
                                PTE_SYNC(ptep);
 
                                if (pg != NULL) {
@@ -1630,7 +1620,7 @@ NPDEBUG(PDB_PROTECT, printf("\n"));
 
                                if (flush >= 0) {
                                        flush++;
-                                       if (l2pte_valid(opte))
+                                       if (opte & L2_V7_AF)
                                                cpu_tlb_flushID_SE(sva);
                                } else
                                        flags |= f;
@@ -1759,7 +1749,7 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, 
         */
        ptep = &l2b->l2b_kva[l2pte_index(va)];
        pte = *ptep;
-       if (pte == 0)
+       if (pte == L2_TYPE_INV)
                goto out;
 
        /* only if vectors are low ?? */
@@ -1767,9 +1757,7 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, 
         * Catch a userland access to the vector page mapped at 0x0
         */
        if (user) {
-               /* XXX use of L2_V7_S_XN */
-               if ((pte & L2_S_PROT_MASK & ~L2_V7_S_XN) != L2_S_PROT(PTE_USER, PROT_READ) &&
-                   (pte & L2_S_PROT_MASK & ~L2_V7_S_XN) != L2_S_PROT(PTE_USER, PROT_WRITE))
+               if ((pte & L2_V7_AP(0x2)) == 0)
                        goto out;
        }
 
@@ -1823,14 +1811,10 @@ Debugger();
                 * We've already set the cacheable bits based on
                 * the assumption that we can write to this page.
                 */
-               *ptep = (pte & ~(L2_TYPE_MASK|L2_S_PROT_MASK)) | L2_S_PROTO |
-                   L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
-                     pte & L2_V7_S_XN ? PROT_WRITE : PROT_WRITE | PROT_EXEC);
+               *ptep = (pte & ~L2_V7_AP(0x4));
                PTE_SYNC(ptep);
                rv = 1;
-       } else
-       /* XXX use of L2_V7_S_XN */
-       if ((pte & L2_TYPE_MASK & ~L2_V7_S_XN) == L2_TYPE_INV) {
+       } else if ((pte & L2_V7_AF) == 0) {
                /*
                 * This looks like a good candidate for "page referenced"
                 * emulation.
@@ -1849,13 +1833,13 @@ Debugger();
 
                pg->mdpage.pvh_attrs |= PVF_REF;
                pv->pv_flags |= PVF_REF;
+               pte |= L2_V7_AF;
 
                NPDEBUG(PDB_FOLLOW,
                    printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
                    pm, va, pg->phys_addr));
 
-               /* XXX use of L2_V7_S_XN */
-               *ptep = (pte & ~(L2_TYPE_MASK & ~L2_V7_S_XN)) | L2_S_PROTO;
+               *ptep = pte;
                PTE_SYNC(ptep);
                rv = 1;
        } else {
@@ -2508,7 +2492,7 @@ pmap_bootstrap(pd_entry_t *kernel_l1pt, 
                for (l2idx = 0;
                    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
                    l2idx++) {
-                       if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV)
+                       if (ptep[l2idx] != L2_TYPE_INV)
                                l2b->l2b_occupancy++;
                }
        }
Index: arch/arm/include/pmap.h
===================================================================
RCS file: /cvs/src/sys/arch/arm/include/pmap.h,v
retrieving revision 1.42
diff -u -p -r1.42 pmap.h
--- arch/arm/include/pmap.h     10 Aug 2016 21:22:43 -0000      1.42
+++ arch/arm/include/pmap.h     18 Aug 2016 19:25:28 -0000
@@ -758,10 +758,7 @@ L2_S_PROT(int ku, vm_prot_t pr)
 static __inline boolean_t
 l2pte_is_writeable(pt_entry_t pte, struct pmap *pm)
 {
-       /* XXX use of L2_V7_S_XN */
-       return (pte & L2_S_PROT_MASK & ~L2_V7_S_XN) ==
-           L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
-                     PROT_WRITE);
+       return (pte & L2_V7_AP(0x4)) == 0;
 }
 #endif
 

Reply via email to