Module Name:	src
Committed By:	ryo
Date:		Mon Jan 31 09:16:09 UTC 2022
Modified Files:
	src/sys/arch/aarch64/aarch64: aarch64_machdep.c cpufunc.c fault.c
	    locore.S pmap.c
	src/sys/arch/aarch64/conf: files.aarch64
	src/sys/arch/aarch64/include: cpufunc.h
	src/sys/arch/evbarm/conf: GENERIC64

Log Message:
Add support for Hardware updates to Access flag and Dirty state (FEAT_HAFDBS).

- The DBM bit of the PTE is now used to determine whether the page is
  writable, and the AF bit is treated entirely as a reference bit.
  A valid PTE is always treated as readable; there can be no valid PTE
  that is not readable.
- LX_BLKPAG_OS_{READ,WRITE} are used only for debugging purposes and have
  been superseded by LX_BLKPAG_AF and LX_BLKPAG_DBM.
- Improve comments.

The need for reference/modified emulation has been eliminated and
access/permission faults have been reduced; however, there has been little
change in overall performance.

To generate a diff of this commit:
cvs rdiff -u -r1.63 -r1.64 src/sys/arch/aarch64/aarch64/aarch64_machdep.c
cvs rdiff -u -r1.32 -r1.33 src/sys/arch/aarch64/aarch64/cpufunc.c
cvs rdiff -u -r1.21 -r1.22 src/sys/arch/aarch64/aarch64/fault.c
cvs rdiff -u -r1.84 -r1.85 src/sys/arch/aarch64/aarch64/locore.S
cvs rdiff -u -r1.126 -r1.127 src/sys/arch/aarch64/aarch64/pmap.c
cvs rdiff -u -r1.36 -r1.37 src/sys/arch/aarch64/conf/files.aarch64
cvs rdiff -u -r1.22 -r1.23 src/sys/arch/aarch64/include/cpufunc.h
cvs rdiff -u -r1.193 -r1.194 src/sys/arch/evbarm/conf/GENERIC64

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
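[Editor's note] For readers who want the net effect of the PTE changes without
reading the whole diff below, here is a minimal, self-contained C sketch of how
protection and referenced/modified state are derived under the new scheme.  The
bit positions below are stand-ins chosen for illustration (the authoritative
definitions live in sys/arch/aarch64/include/pte.h), and the helper functions
are hypothetical, not part of the commit.

	/* Stand-in definitions, for illustration only. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pt_entry_t;

	#define LX_BLKPAG_AP		((pt_entry_t)1 << 7)  /* AP[2]: 1 = read-only */
	#define LX_BLKPAG_AP_RW		((pt_entry_t)0 << 7)
	#define LX_BLKPAG_AP_RO		((pt_entry_t)1 << 7)
	#define LX_BLKPAG_AF		((pt_entry_t)1 << 10) /* access flag */
	#define LX_BLKPAG_DBM		((pt_entry_t)1 << 51) /* dirty bit modifier */
	#define LX_BLKPAG_OS_WRITE	((pt_entry_t)1 << 56) /* debugging-only sw bit */

	#define VM_PROT_READ		0x01
	#define VM_PROT_WRITE		0x02

	/*
	 * Protection granted by pmap_enter(): a valid PTE is always readable,
	 * and it is writable iff DBM (or the debugging-only OS_WRITE) is set.
	 */
	static int
	pte_prot(pt_entry_t pte)
	{
		int prot = VM_PROT_READ;

		if ((pte & (LX_BLKPAG_OS_WRITE | LX_BLKPAG_DBM)) != 0)
			prot |= VM_PROT_WRITE;
		return prot;
	}

	/*
	 * AF is now purely a reference bit: set by hardware (HAFDBS) or by
	 * pmap_fault_fixup() on the first access.
	 */
	static bool
	pte_referenced(pt_entry_t pte)
	{
		return (pte & LX_BLKPAG_AF) != 0;
	}

	/*
	 * With hardware dirty-state management, a write to a DBM page flips
	 * the AP field from RO to RW, so "currently RW" means modified.
	 */
	static bool
	pte_modified(pt_entry_t pte)
	{
		return (pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW;
	}

	int
	main(void)
	{
		/* A writable page that has been both accessed and written. */
		pt_entry_t pte = LX_BLKPAG_DBM | LX_BLKPAG_AF | LX_BLKPAG_AP_RW;

		printf("prot=%d referenced=%d modified=%d\n",
		    pte_prot(pte), pte_referenced(pte), pte_modified(pte));
		return 0;
	}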
Modified files: Index: src/sys/arch/aarch64/aarch64/aarch64_machdep.c diff -u src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.63 src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.64 --- src/sys/arch/aarch64/aarch64/aarch64_machdep.c:1.63 Sun Oct 31 16:23:47 2021 +++ src/sys/arch/aarch64/aarch64/aarch64_machdep.c Mon Jan 31 09:16:09 2022 @@ -1,4 +1,4 @@ -/* $NetBSD: aarch64_machdep.c,v 1.63 2021/10/31 16:23:47 skrll Exp $ */ +/* $NetBSD: aarch64_machdep.c,v 1.64 2022/01/31 09:16:09 ryo Exp $ */ /*- * Copyright (c) 2014 The NetBSD Foundation, Inc. @@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.63 2021/10/31 16:23:47 skrll Exp $"); +__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.64 2022/01/31 09:16:09 ryo Exp $"); #include "opt_arm_debug.h" #include "opt_cpuoptions.h" @@ -533,6 +533,14 @@ SYSCTL_SETUP(sysctl_machdep_setup, "sysc NULL, 0, &aarch64_bti_enabled, 0, CTL_MACHDEP, CTL_CREATE, CTL_EOL); + + sysctl_createv(clog, 0, NULL, NULL, + CTLFLAG_PERMANENT, + CTLTYPE_INT, "hafdbs", + SYSCTL_DESCR("Whether Hardware updates to Access flag and Dirty state is enabled"), + NULL, 0, + &aarch64_hafdbs_enabled, 0, + CTL_MACHDEP, CTL_CREATE, CTL_EOL); } void Index: src/sys/arch/aarch64/aarch64/cpufunc.c diff -u src/sys/arch/aarch64/aarch64/cpufunc.c:1.32 src/sys/arch/aarch64/aarch64/cpufunc.c:1.33 --- src/sys/arch/aarch64/aarch64/cpufunc.c:1.32 Sun Oct 31 16:23:47 2021 +++ src/sys/arch/aarch64/aarch64/cpufunc.c Mon Jan 31 09:16:09 2022 @@ -1,4 +1,4 @@ -/* $NetBSD: cpufunc.c,v 1.32 2021/10/31 16:23:47 skrll Exp $ */ +/* $NetBSD: cpufunc.c,v 1.33 2022/01/31 09:16:09 ryo Exp $ */ /* * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org> @@ -30,7 +30,7 @@ #include "opt_multiprocessor.h" #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.32 2021/10/31 16:23:47 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.33 2022/01/31 09:16:09 ryo Exp $"); #include <sys/param.h> #include <sys/types.h> @@ -50,6 +50,7 @@ u_int arm_dcache_maxline; u_int aarch64_cache_vindexsize; u_int aarch64_cache_prefer_mask; +int aarch64_hafdbs_enabled __read_mostly; int aarch64_pan_enabled __read_mostly; int aarch64_pac_enabled __read_mostly; @@ -464,6 +465,71 @@ aarch64_setcpufuncs(struct cpu_info *ci) } void +aarch64_hafdbs_init(int primary) +{ +#ifdef ARMV81_HAFDBS + uint64_t tcr; + int hafdbs; + + hafdbs = __SHIFTOUT(reg_id_aa64mmfr1_el1_read(), + ID_AA64MMFR1_EL1_HAFDBS); + + /* + * hafdbs + * 0:HAFDBS_NONE - no support for any hardware flags + * 1:HAFDBS_A - only hardware access flag supported + * 2:HAFDBS_AD - hardware access and modified flags supported. + */ + + if (primary) { + /* CPU0 does the detection. */ + switch (hafdbs) { + case ID_AA64MMFR1_EL1_HAFDBS_NONE: + default: + aarch64_hafdbs_enabled = 0; + break; + case ID_AA64MMFR1_EL1_HAFDBS_A: + case ID_AA64MMFR1_EL1_HAFDBS_AD: + aarch64_hafdbs_enabled = hafdbs; + break; + } + } else { + /* + * The support status of HAFDBS on the primary CPU is different + * from that of the application processor. + * + * XXX: + * The correct way to do this is to disable it on all cores, + * or call pmap_fault_fixup() only on the unsupported cores, + * but for now, do panic(). 
+ */ + if (aarch64_hafdbs_enabled != hafdbs) + panic("HAFDBS is supported (%d) on primary cpu, " + "but isn't equal (%d) on secondary cpu", + aarch64_hafdbs_enabled, hafdbs); + } + + /* enable Hardware updates to Access flag and Dirty state */ + tcr = reg_tcr_el1_read(); + switch (hafdbs) { + case ID_AA64MMFR1_EL1_HAFDBS_NONE: + default: + break; + case ID_AA64MMFR1_EL1_HAFDBS_A: + /* enable only access */ + reg_tcr_el1_write(tcr | TCR_HA); + isb(); + break; + case ID_AA64MMFR1_EL1_HAFDBS_AD: + /* enable both access and dirty */ + reg_tcr_el1_write(tcr | TCR_HD | TCR_HA); + isb(); + break; + } +#endif +} + +void aarch64_pan_init(int primary) { #ifdef ARMV81_PAN Index: src/sys/arch/aarch64/aarch64/fault.c diff -u src/sys/arch/aarch64/aarch64/fault.c:1.21 src/sys/arch/aarch64/aarch64/fault.c:1.22 --- src/sys/arch/aarch64/aarch64/fault.c:1.21 Fri Dec 11 18:03:33 2020 +++ src/sys/arch/aarch64/aarch64/fault.c Mon Jan 31 09:16:09 2022 @@ -1,4 +1,4 @@ -/* $NetBSD: fault.c,v 1.21 2020/12/11 18:03:33 skrll Exp $ */ +/* $NetBSD: fault.c,v 1.22 2022/01/31 09:16:09 ryo Exp $ */ /* * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org> @@ -27,9 +27,10 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.21 2020/12/11 18:03:33 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.22 2022/01/31 09:16:09 ryo Exp $"); #include "opt_compat_netbsd32.h" +#include "opt_cpuoptions.h" #include "opt_ddb.h" #include "opt_uvmhist.h" @@ -199,9 +200,16 @@ data_abort_handler(struct trapframe *tf, } /* reference/modified emulation */ - if (pmap_fault_fixup(map->pmap, va, ftype, user)) { - UVMHIST_LOG(pmaphist, "fixed: va=%016llx", tf->tf_far, 0, 0, 0); - return; +#ifdef ARMV81_HAFDBS + if (aarch64_hafdbs_enabled == ID_AA64MMFR1_EL1_HAFDBS_NONE || + (aarch64_hafdbs_enabled == ID_AA64MMFR1_EL1_HAFDBS_A && + ftype == VM_PROT_WRITE)) +#endif + { + if (pmap_fault_fixup(map->pmap, va, ftype, user)) { + UVMHIST_LOG(pmaphist, "fixed: va=%016llx", tf->tf_far, 0, 0, 0); + return; + } } fb = cpu_disable_onfault(); Index: src/sys/arch/aarch64/aarch64/locore.S diff -u src/sys/arch/aarch64/aarch64/locore.S:1.84 src/sys/arch/aarch64/aarch64/locore.S:1.85 --- src/sys/arch/aarch64/aarch64/locore.S:1.84 Fri Dec 10 20:36:02 2021 +++ src/sys/arch/aarch64/aarch64/locore.S Mon Jan 31 09:16:09 2022 @@ -1,4 +1,4 @@ -/* $NetBSD: locore.S,v 1.84 2021/12/10 20:36:02 andvar Exp $ */ +/* $NetBSD: locore.S,v 1.85 2022/01/31 09:16:09 ryo Exp $ */ /* * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org> @@ -38,7 +38,7 @@ #include <aarch64/hypervisor.h> #include "assym.h" -RCSID("$NetBSD: locore.S,v 1.84 2021/12/10 20:36:02 andvar Exp $") +RCSID("$NetBSD: locore.S,v 1.85 2022/01/31 09:16:09 ryo Exp $") #ifdef AARCH64_DEVICE_MEM_STRONGLY_ORDERED #define MAIR_DEVICE_MEM MAIR_DEVICE_nGnRnE @@ -180,6 +180,10 @@ vstart: msr tpidr_el1, x0 DPRINTREG("curlwp = ", x0); + /* init HAFDBS if supported */ + mov x0, #1 + bl aarch64_hafdbs_init + /* init PAN if supported */ mov x0, #1 bl aarch64_pan_init @@ -545,6 +549,10 @@ mp_vstart: add x2, x2, #(UPAGES * PAGE_SIZE) sub sp, x2, #TF_SIZE /* sp = pcb + USPACE - TF_SIZE */ + /* init HAFDBS if supported */ + mov x0, #0 + bl aarch64_hafdbs_init + /* init PAN if supported */ mov x0, #0 bl aarch64_pan_init Index: src/sys/arch/aarch64/aarch64/pmap.c diff -u src/sys/arch/aarch64/aarch64/pmap.c:1.126 src/sys/arch/aarch64/aarch64/pmap.c:1.127 --- src/sys/arch/aarch64/aarch64/pmap.c:1.126 Mon Jan 31 08:43:05 2022 +++ src/sys/arch/aarch64/aarch64/pmap.c Mon Jan 31 09:16:09 2022 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.c,v 
1.126 2022/01/31 08:43:05 ryo Exp $ */ +/* $NetBSD: pmap.c,v 1.127 2022/01/31 09:16:09 ryo Exp $ */ /* * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org> @@ -27,9 +27,10 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.126 2022/01/31 08:43:05 ryo Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.127 2022/01/31 09:16:09 ryo Exp $"); #include "opt_arm_debug.h" +#include "opt_cpuoptions.h" #include "opt_ddb.h" #include "opt_modular.h" #include "opt_multiprocessor.h" @@ -1018,43 +1019,43 @@ pmap_procwr(struct proc *p, vaddr_t sva, } static pt_entry_t -_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask, +_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t refmod, bool user) { vm_prot_t masked; pt_entry_t xn; - masked = prot & protmask; - pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP); + masked = prot & refmod; + pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_DBM|LX_BLKPAG_AP); - /* keep prot for ref/mod emulation */ - switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) { - case 0: - default: - break; - case VM_PROT_READ: - pte |= LX_BLKPAG_OS_READ; - break; - case VM_PROT_WRITE: - case VM_PROT_READ|VM_PROT_WRITE: - pte |= (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE); - break; - } + /* + * keep actual prot in the pte as OS_{READ|WRITE} for ref/mod emulation, + * and set the DBM bit for HAFDBS if it has write permission. + */ + pte |= LX_BLKPAG_OS_READ; /* a valid pte can always be readable */ + if (prot & VM_PROT_WRITE) + pte |= LX_BLKPAG_OS_WRITE|LX_BLKPAG_DBM; switch (masked & (VM_PROT_READ|VM_PROT_WRITE)) { case 0: default: - /* cannot access due to No LX_BLKPAG_AF */ + /* + * it cannot be accessed because there is no AF bit, + * but the AF bit will be added by fixup() or HAFDBS. + */ pte |= LX_BLKPAG_AP_RO; break; case VM_PROT_READ: - /* actual permission of pte */ + /* + * as it is RO, it cannot be written as is, + * but it may be changed to RW by fixup() or HAFDBS. + */ pte |= LX_BLKPAG_AF; pte |= LX_BLKPAG_AP_RO; break; case VM_PROT_WRITE: case VM_PROT_READ|VM_PROT_WRITE: - /* actual permission of pte */ + /* fully readable and writable */ pte |= LX_BLKPAG_AF; pte |= LX_BLKPAG_AP_RW; break; @@ -1098,6 +1099,24 @@ _pmap_pte_adjust_cacheflags(pt_entry_t p return pte; } +#ifdef ARMV81_HAFDBS +static inline void +_pmap_reflect_refmod_in_pp(pt_entry_t pte, struct pmap_page *pp) +{ + if (!lxpde_valid(pte)) + return; + + /* + * In order to retain referenced/modified information, + * it should be reflected from pte in the pmap_page. 
+ */ + if (pte & LX_BLKPAG_AF) + pp->pp_pv.pv_va |= VM_PROT_READ; + if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW) + pp->pp_pv.pv_va |= VM_PROT_WRITE; +} +#endif + static struct pv_entry * _pmap_remove_pv(struct pmap_page *pp, struct pmap *pm, vaddr_t va, pt_entry_t pte) @@ -1111,6 +1130,11 @@ _pmap_remove_pv(struct pmap_page *pp, st KASSERT(mutex_owned(&pm->pm_lock)); /* for pv_proc */ KASSERT(mutex_owned(&pp->pp_pvlock)); +#ifdef ARMV81_HAFDBS + if (aarch64_hafdbs_enabled != ID_AA64MMFR1_EL1_HAFDBS_NONE) + _pmap_reflect_refmod_in_pp(pte, pp); +#endif + for (ppv = NULL, pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) { if (pv->pv_pmap == pm && trunc_page(pv->pv_va) == va) { break; @@ -1281,20 +1305,23 @@ _pmap_protect_pv(struct pmap_page *pp, s KASSERT(mutex_owned(&pv->pv_pmap->pm_lock)); - /* get prot mask from referenced/modified */ - mdattr = pp->pp_pv.pv_va & (VM_PROT_READ | VM_PROT_WRITE); ptep = pv->pv_ptep; pte = *ptep; /* get prot mask from pte */ - pteprot = 0; - if (pte & LX_BLKPAG_AF) - pteprot |= VM_PROT_READ; - if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW) + pteprot = VM_PROT_READ; /* a valid pte can always be readable */ + if ((pte & (LX_BLKPAG_OS_WRITE|LX_BLKPAG_DBM)) != 0) pteprot |= VM_PROT_WRITE; if (l3pte_executable(pte, user)) pteprot |= VM_PROT_EXECUTE; +#ifdef ARMV81_HAFDBS + if (aarch64_hafdbs_enabled != ID_AA64MMFR1_EL1_HAFDBS_NONE) + _pmap_reflect_refmod_in_pp(pte, pp); +#endif + /* get prot mask from referenced/modified */ + mdattr = pp->pp_pv.pv_va & (VM_PROT_READ | VM_PROT_WRITE); + /* new prot = prot & pteprot & mdattr */ pte = _pmap_pte_adjust_prot(pte, prot & pteprot, mdattr, user); atomic_swap_64(ptep, pte); @@ -1381,6 +1408,10 @@ pmap_protect(struct pmap *pm, vaddr_t sv } if (pp != NULL) { +#ifdef ARMV81_HAFDBS + if (aarch64_hafdbs_enabled != ID_AA64MMFR1_EL1_HAFDBS_NONE) + _pmap_reflect_refmod_in_pp(pte, pp); +#endif /* get prot mask from referenced/modified */ mdattr = pp->pp_pv.pv_va & (VM_PROT_READ | VM_PROT_WRITE); @@ -1764,6 +1795,7 @@ _pmap_enter(struct pmap *pm, vaddr_t va, KASSERT_PM_ADDR(pm, va); KASSERT(!IN_DIRECTMAP_ADDR(va)); + KASSERT(prot & VM_PROT_READ); #ifdef PMAPCOUNTERS PMAP_COUNT(mappings); @@ -1845,8 +1877,7 @@ _pmap_enter(struct pmap *pm, vaddr_t va, idx = l3pte_index(va); ptep = &l3[idx]; /* as PTE */ - - opte = atomic_swap_64(ptep, 0); + opte = *ptep; need_sync_icache = (prot & VM_PROT_EXECUTE); /* for lock ordering for old page and new page */ @@ -1907,6 +1938,7 @@ _pmap_enter(struct pmap *pm, vaddr_t va, if (pp != NULL) pmap_pv_lock(pp); } + opte = atomic_swap_64(ptep, 0); } else { if (pp != NULL) pmap_pv_lock(pp); @@ -2391,20 +2423,15 @@ pmap_fault_fixup(struct pmap *pm, vaddr_ goto done; } - /* get prot by pmap_enter() (stored in software use bit in pte) */ - switch (pte & (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE)) { - case 0: - default: - pmap_prot = 0; - break; - case LX_BLKPAG_OS_READ: - pmap_prot = VM_PROT_READ; - break; - case LX_BLKPAG_OS_WRITE: - case LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE: - pmap_prot = (VM_PROT_READ|VM_PROT_WRITE); - break; - } + /* + * Get the prot specified by pmap_enter(). + * A valid pte is considered a readable page. + * If DBM is 1, it is considered a writable page. 
+ */ + pmap_prot = VM_PROT_READ; + if ((pte & (LX_BLKPAG_OS_WRITE|LX_BLKPAG_DBM)) != 0) + pmap_prot |= VM_PROT_WRITE; + if (l3pte_executable(pte, pm != pmap_kernel())) pmap_prot |= VM_PROT_EXECUTE; @@ -2473,6 +2500,9 @@ pmap_clear_modify(struct vm_page *pg) struct pmap_page * const pp = VM_PAGE_TO_PP(pg); pt_entry_t *ptep, pte, opte; vaddr_t va; +#ifdef ARMV81_HAFDBS + bool modified; +#endif UVMHIST_FUNC(__func__); UVMHIST_CALLARGS(pmaphist, "pg=%p, flags=%08x", @@ -2493,11 +2523,17 @@ pmap_clear_modify(struct vm_page *pg) pmap_pv_lock(pp); - if ((pp->pp_pv.pv_va & VM_PROT_WRITE) == 0) { + if ( +#ifdef ARMV81_HAFDBS + aarch64_hafdbs_enabled != ID_AA64MMFR1_EL1_HAFDBS_AD && +#endif + (pp->pp_pv.pv_va & VM_PROT_WRITE) == 0) { pmap_pv_unlock(pp); return false; } - +#ifdef ARMV81_HAFDBS + modified = ((pp->pp_pv.pv_va & VM_PROT_WRITE) != 0); +#endif pp->pp_pv.pv_va &= ~(vaddr_t)VM_PROT_WRITE; for (pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) { @@ -2517,7 +2553,9 @@ pmap_clear_modify(struct vm_page *pg) continue; if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO) continue; - +#ifdef ARMV81_HAFDBS + modified = true; +#endif /* clear write permission */ pte &= ~LX_BLKPAG_AP; pte |= LX_BLKPAG_AP_RO; @@ -2539,7 +2577,11 @@ pmap_clear_modify(struct vm_page *pg) pmap_pv_unlock(pp); +#ifdef ARMV81_HAFDBS + return modified; +#else return true; +#endif } bool @@ -2549,6 +2591,9 @@ pmap_clear_reference(struct vm_page *pg) struct pmap_page * const pp = VM_PAGE_TO_PP(pg); pt_entry_t *ptep, pte, opte; vaddr_t va; +#ifdef ARMV81_HAFDBS + bool referenced; +#endif UVMHIST_FUNC(__func__); UVMHIST_CALLARGS(pmaphist, "pg=%p, pp=%p, flags=%08x", @@ -2556,10 +2601,17 @@ pmap_clear_reference(struct vm_page *pg) pmap_pv_lock(pp); - if ((pp->pp_pv.pv_va & VM_PROT_READ) == 0) { + if ( +#ifdef ARMV81_HAFDBS + aarch64_hafdbs_enabled == ID_AA64MMFR1_EL1_HAFDBS_NONE && +#endif + (pp->pp_pv.pv_va & VM_PROT_READ) == 0) { pmap_pv_unlock(pp); return false; } +#ifdef ARMV81_HAFDBS + referenced = ((pp->pp_pv.pv_va & VM_PROT_READ) != 0); +#endif pp->pp_pv.pv_va &= ~(vaddr_t)VM_PROT_READ; PMAP_COUNT(clear_reference); @@ -2580,7 +2632,9 @@ pmap_clear_reference(struct vm_page *pg) continue; if ((pte & LX_BLKPAG_AF) == 0) continue; - +#ifdef ARMV81_HAFDBS + referenced = true; +#endif /* clear access permission */ pte &= ~LX_BLKPAG_AF; @@ -2600,7 +2654,11 @@ pmap_clear_reference(struct vm_page *pg) pmap_pv_unlock(pp); +#ifdef ARMV81_HAFDBS + return referenced; +#else return true; +#endif } bool @@ -2608,7 +2666,38 @@ pmap_is_modified(struct vm_page *pg) { struct pmap_page * const pp = VM_PAGE_TO_PP(pg); - return (pp->pp_pv.pv_va & VM_PROT_WRITE); + if (pp->pp_pv.pv_va & VM_PROT_WRITE) + return true; + +#ifdef ARMV81_HAFDBS + /* check hardware dirty flag on each pte */ + if (aarch64_hafdbs_enabled == ID_AA64MMFR1_EL1_HAFDBS_AD) { + struct pv_entry *pv; + pt_entry_t *ptep, pte; + + pmap_pv_lock(pp); + for (pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) { + if (pv->pv_pmap == NULL) { + KASSERT(pv == &pp->pp_pv); + continue; + } + + ptep = pv->pv_ptep; + pte = *ptep; + if (!l3pte_valid(pte)) + continue; + + if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW) { + pp->pp_pv.pv_va |= VM_PROT_WRITE; + pmap_pv_unlock(pp); + return true; + } + } + pmap_pv_unlock(pp); + } +#endif + + return false; } bool @@ -2616,7 +2705,38 @@ pmap_is_referenced(struct vm_page *pg) { struct pmap_page * const pp = VM_PAGE_TO_PP(pg); - return (pp->pp_pv.pv_va & VM_PROT_READ); + if (pp->pp_pv.pv_va & VM_PROT_READ) + return true; + +#ifdef ARMV81_HAFDBS + /* check hardware 
access flag on each pte */ + if (aarch64_hafdbs_enabled != ID_AA64MMFR1_EL1_HAFDBS_NONE) { + struct pv_entry *pv; + pt_entry_t *ptep, pte; + + pmap_pv_lock(pp); + for (pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) { + if (pv->pv_pmap == NULL) { + KASSERT(pv == &pp->pp_pv); + continue; + } + + ptep = pv->pv_ptep; + pte = *ptep; + if (!l3pte_valid(pte)) + continue; + + if (pte & LX_BLKPAG_AF) { + pp->pp_pv.pv_va |= VM_PROT_READ; + pmap_pv_unlock(pp); + return true; + } + } + pmap_pv_unlock(pp); + } +#endif + + return false; } /* get pointer to kernel segment L2 or L3 table entry */ Index: src/sys/arch/aarch64/conf/files.aarch64 diff -u src/sys/arch/aarch64/conf/files.aarch64:1.36 src/sys/arch/aarch64/conf/files.aarch64:1.37 --- src/sys/arch/aarch64/conf/files.aarch64:1.36 Thu Nov 25 03:08:04 2021 +++ src/sys/arch/aarch64/conf/files.aarch64 Mon Jan 31 09:16:09 2022 @@ -1,10 +1,11 @@ -# $NetBSD: files.aarch64,v 1.36 2021/11/25 03:08:04 ryo Exp $ +# $NetBSD: files.aarch64,v 1.37 2022/01/31 09:16:09 ryo Exp $ defflag opt_cpuoptions.h AARCH64_ALIGNMENT_CHECK defflag opt_cpuoptions.h AARCH64_EL0_STACK_ALIGNMENT_CHECK defflag opt_cpuoptions.h AARCH64_EL1_STACK_ALIGNMENT_CHECK defflag opt_cpuoptions.h AARCH64_HAVE_L2CTLR defflag opt_cpuoptions.h AARCH64_DEVICE_MEM_STRONGLY_ORDERED +defflag opt_cpuoptions.h ARMV81_HAFDBS defflag opt_cputypes.h CPU_ARMV8 defflag opt_cputypes.h CPU_CORTEX: CPU_ARMV8 Index: src/sys/arch/aarch64/include/cpufunc.h diff -u src/sys/arch/aarch64/include/cpufunc.h:1.22 src/sys/arch/aarch64/include/cpufunc.h:1.23 --- src/sys/arch/aarch64/include/cpufunc.h:1.22 Sun Oct 31 16:23:47 2021 +++ src/sys/arch/aarch64/include/cpufunc.h Mon Jan 31 09:16:09 2022 @@ -1,4 +1,4 @@ -/* $NetBSD: cpufunc.h,v 1.22 2021/10/31 16:23:47 skrll Exp $ */ +/* $NetBSD: cpufunc.h,v 1.23 2022/01/31 09:16:09 ryo Exp $ */ /* * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org> @@ -40,9 +40,11 @@ extern u_int aarch64_cache_prefer_mask; extern u_int cputype; /* compat arm */ extern int aarch64_bti_enabled; +extern int aarch64_hafdbs_enabled; extern int aarch64_pan_enabled; extern int aarch64_pac_enabled; +void aarch64_hafdbs_init(int); void aarch64_pan_init(int); int aarch64_pac_init(int); Index: src/sys/arch/evbarm/conf/GENERIC64 diff -u src/sys/arch/evbarm/conf/GENERIC64:1.193 src/sys/arch/evbarm/conf/GENERIC64:1.194 --- src/sys/arch/evbarm/conf/GENERIC64:1.193 Fri Jan 28 16:38:56 2022 +++ src/sys/arch/evbarm/conf/GENERIC64 Mon Jan 31 09:16:09 2022 @@ -1,5 +1,5 @@ # -# $NetBSD: GENERIC64,v 1.193 2022/01/28 16:38:56 jakllsch Exp $ +# $NetBSD: GENERIC64,v 1.194 2022/01/31 09:16:09 ryo Exp $ # # GENERIC ARM (aarch64) kernel # @@ -48,6 +48,9 @@ options INCLUDE_CONFIG_FILE #options EARLYCONS=thunderx, CONSADDR=0x87e024000000 #options EARLYCONS=virt, CONSADDR=0x09000000 +# Hardware management of the Access flag and dirty state (HAFDBS). +options ARMV81_HAFDBS + # Privileged Access Never (PAN). options ARMV81_PAN
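[Editor's note] As a usage note, the commit also exposes the detected HAFDBS
level through a new machdep.hafdbs sysctl node (see the sysctl_createv() call
in aarch64_machdep.c above).  A small userland sketch to read it might look
like the following; this is an assumption-laden illustration rather than
shipped tooling, and the 0/1/2 value mapping mirrors the comment in
aarch64_hafdbs_init().

	#include <sys/sysctl.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		int hafdbs;
		size_t len = sizeof(hafdbs);

		if (sysctlbyname("machdep.hafdbs", &hafdbs, &len, NULL, 0) == -1) {
			perror("sysctlbyname");
			return EXIT_FAILURE;
		}
		/* 0: not supported, 1: access flag only, 2: access and dirty */
		printf("machdep.hafdbs = %d\n", hafdbs);
		return EXIT_SUCCESS;
	}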