Author: cognet
Date: Wed Dec 19 00:24:31 2012
New Revision: 244414
URL: http://svnweb.freebsd.org/changeset/base/244414
Log:
  Properly implement pmap_[get|set]_memattr

  Submitted by:	Ian Lepore <free...@damnhippie.dyndns.org>

Modified:
  head/sys/arm/arm/pmap-v6.c
  head/sys/arm/arm/pmap.c
  head/sys/arm/include/pmap.h
  head/sys/arm/include/vm.h

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c	Tue Dec 18 23:27:18 2012	(r244413)
+++ head/sys/arm/arm/pmap-v6.c	Wed Dec 19 00:24:31 2012	(r244414)
@@ -1131,6 +1131,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
+	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
 }
 
 /*
@@ -2662,7 +2663,8 @@ do_l2b_alloc:
 	if (!(prot & VM_PROT_EXECUTE) && m)
 		npte |= L2_XN;
 
-	npte |= pte_l2_s_cache_mode;
+	if (!(m->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
+		npte |= pte_l2_s_cache_mode;
 
 	if (m && m == opg) {
 		/*
@@ -3817,3 +3819,22 @@ pmap_dmap_iscurrent(pmap_t pmap)
 	return(pmap_is_current(pmap));
 }
 
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+	/*
+	 * Remember the memattr in a field that gets used to set the appropriate
+	 * bits in the PTEs as mappings are established.
+	 */
+	m->md.pv_memattr = ma;
+
+	/*
+	 * It appears that this function can only be called before any mappings
+	 * for the page are established on ARM.  If this ever changes, this code
+	 * will need to walk the pv_list and make each of the existing mappings
+	 * uncacheable, being careful to sync caches and PTEs (and maybe
+	 * invalidate TLB?) for any current mapping it modifies.
+	 */
+	if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL)
+		panic("Can't change memattr on page with existing mappings");
+}

Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c	Tue Dec 18 23:27:18 2012	(r244413)
+++ head/sys/arm/arm/pmap.c	Wed Dec 19 00:24:31 2012	(r244414)
@@ -1366,7 +1366,8 @@ pmap_fix_cache(struct vm_page *pg, pmap_
 		    (pv->pv_flags & PVF_NC)) {
 			pv->pv_flags &= ~PVF_NC;
-			pmap_set_cache_entry(pv, pm, va, 1);
+			if (!(pg->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
+				pmap_set_cache_entry(pv, pm, va, 1);
 			continue;
 		}
 		/* user is no longer sharable and writable */
@@ -1375,7 +1376,8 @@ pmap_fix_cache(struct vm_page *pg, pmap_
 		    !pmwc && (pv->pv_flags & PVF_NC)) {
 			pv->pv_flags &= ~(PVF_NC | PVF_MWC);
-			pmap_set_cache_entry(pv, pm, va, 1);
+			if (!(pg->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
+				pmap_set_cache_entry(pv, pm, va, 1);
 		}
 	}
 
@@ -1426,15 +1428,16 @@ pmap_clearbit(struct vm_page *pg, u_int
 		if (!(oflags & maskbits)) {
 			if ((maskbits & PVF_WRITE) &&
 			    (pv->pv_flags & PVF_NC)) {
-				/* It is safe to re-enable cacheing here. */
-				PMAP_LOCK(pm);
-				l2b = pmap_get_l2_bucket(pm, va);
-				ptep = &l2b->l2b_kva[l2pte_index(va)];
-				*ptep |= pte_l2_s_cache_mode;
-				PTE_SYNC(ptep);
-				PMAP_UNLOCK(pm);
+				if (!(pg->md.pv_memattr &
+				    VM_MEMATTR_UNCACHEABLE)) {
+					PMAP_LOCK(pm);
+					l2b = pmap_get_l2_bucket(pm, va);
+					ptep = &l2b->l2b_kva[l2pte_index(va)];
+					*ptep |= pte_l2_s_cache_mode;
+					PTE_SYNC(ptep);
+					PMAP_UNLOCK(pm);
+				}
 				pv->pv_flags &= ~(PVF_NC | PVF_MWC);
-
 			}
 			continue;
 		}
@@ -1463,7 +1466,9 @@ pmap_clearbit(struct vm_page *pg, u_int
 				 * permission.
 				 */
 				if (maskbits & PVF_WRITE) {
-					npte |= pte_l2_s_cache_mode;
+					if (!(pg->md.pv_memattr &
+					    VM_MEMATTR_UNCACHEABLE))
+						npte |= pte_l2_s_cache_mode;
 					pv->pv_flags &= ~(PVF_NC | PVF_MWC);
 				}
 			} else
@@ -1794,6 +1799,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
+	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
 }
 
 /*
@@ -3393,7 +3399,8 @@ do_l2b_alloc:
 		    (m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
-	npte |= pte_l2_s_cache_mode;
+	if (!(m->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
+		npte |= pte_l2_s_cache_mode;
 
 	if (m && m == opg) {
 		/*
 		 * We're changing the attrs of an existing mapping.
@@ -4929,3 +4936,24 @@ pmap_devmap_find_va(vm_offset_t va, vm_s
 	return (NULL);
 }
 
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+	/*
+	 * Remember the memattr in a field that gets used to set the appropriate
+	 * bits in the PTEs as mappings are established.
+	 */
+	m->md.pv_memattr = ma;
+
+	/*
+	 * It appears that this function can only be called before any mappings
+	 * for the page are established on ARM.  If this ever changes, this code
+	 * will need to walk the pv_list and make each of the existing mappings
+	 * uncacheable, being careful to sync caches and PTEs (and maybe
+	 * invalidate TLB?) for any current mapping it modifies.
+	 */
+	if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL)
+		panic("Can't change memattr on page with existing mappings");
+}
+
+

Modified: head/sys/arm/include/pmap.h
==============================================================================
--- head/sys/arm/include/pmap.h	Tue Dec 18 23:27:18 2012	(r244413)
+++ head/sys/arm/include/pmap.h	Wed Dec 19 00:24:31 2012	(r244414)
@@ -96,10 +96,10 @@ enum mem_type {
 #endif
 
-#define pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
+#define pmap_page_get_memattr(m)	((m)->md.pv_memattr)
 #define pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 #define pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
-#define pmap_page_set_memattr(m, ma)	(void)0
+void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
 
 /*
  * Pmap stuff
@@ -119,6 +119,7 @@ struct pv_entry;
 
 struct md_page {
 	int pvh_attrs;
+	vm_memattr_t	 pv_memattr;
 	vm_offset_t pv_kva;		/* first kernel VA mapping */
 	TAILQ_HEAD(,pv_entry)	pv_list;
 };

Modified: head/sys/arm/include/vm.h
==============================================================================
--- head/sys/arm/include/vm.h	Tue Dec 18 23:27:18 2012	(r244413)
+++ head/sys/arm/include/vm.h	Wed Dec 19 00:24:31 2012	(r244414)
@@ -29,7 +29,8 @@
 #ifndef	_MACHINE_VM_H_
 #define	_MACHINE_VM_H_
 
-/* Memory attribute configuration is not (yet) implemented. */
+/* Memory attribute configuration. */
 #define	VM_MEMATTR_DEFAULT	0
+#define	VM_MEMATTR_UNCACHEABLE	1
 
 #endif /* !_MACHINE_VM_H_ */
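For readers following along, here is a minimal, hypothetical sketch (not part of this commit) of how a consumer would exercise the new interface. Only pmap_page_set_memattr(), the pmap_page_get_memattr() macro and VM_MEMATTR_UNCACHEABLE come from this change; the helper name mark_page_uncacheable() and the assertion message are made up for illustration. The ordering constraint is the one visible in the diff above: the attribute must be recorded before any mapping of the page exists, otherwise the new pmap_page_set_memattr() panics, and later pmap_enter() calls then omit pte_l2_s_cache_mode for that page.

/*
 * Hypothetical illustration only: record the uncacheable attribute on a
 * page that has not been mapped yet, so that subsequent pmap_enter()
 * calls build PTEs for it without pte_l2_s_cache_mode.
 */
#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

static void
mark_page_uncacheable(vm_page_t m)
{
	/* Panics if the page already has a kernel VA or pv-list mappings. */
	pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);

	/* The attribute can be read back through the MD macro. */
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_UNCACHEABLE,
	    ("mark_page_uncacheable: attribute not recorded"));
}

The panic in pmap_page_set_memattr() is what makes that ordering requirement explicit; the comment in the new function describes what a later change would have to do (walk the pv_list, sync caches and PTEs) if changing the attribute of an already-mapped page ever needs to be supported.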