Module Name:    src
Committed By:   matt
Date:           Thu Feb  7 06:52:54 UTC 2013

Modified Files:
        src/sys/arch/arm/arm32 [matt-nb6-plus]: pmap.c
        src/sys/arch/arm/include/arm32 [matt-nb6-plus]: pmap.h

Log Message:
Pull up pmap changes from HEAD:
- guard PVF_WRITE bookkeeping with arm_cache_prefer_mask and add the
  PMAP_VALIDATE_MD_PAGE() consistency assertion
- honor ARM32_MMAP_WRITECOMBINE in pmap_enter()
- use the direct map in pmap_zero_page_generic() and
  pmap_copy_page_generic() when a page is at its natural cache color
- make the DDB Debugger() call in pmap_fault_fixup() conditional on
  kernel_debug
- let arm_pmap_alloc_poolpage() fall back to any freelist instead of
  failing when the preferred one is exhausted
- move the bzero_page()/bcopy_page() prototypes into pmap.h


To generate a diff of this commit:
cvs rdiff -u -r1.228.2.1.2.1 -r1.228.2.1.2.2 src/sys/arch/arm/arm32/pmap.c
cvs rdiff -u -r1.101.4.1 -r1.101.4.2 src/sys/arch/arm/include/arm32/pmap.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
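
Background for the PMAP_VALIDATE_MD_PAGE() macro introduced in the
pmap.c diff below: when the VIPT cache requires page coloring
(arm_cache_prefer_mask != 0), PVF_WRITE must be set on a page exactly
when it has at least one kernel or user read/write mapping.  Here is a
minimal standalone restatement of that invariant, not kernel code: the
struct fields mirror vm_page_md, but the PVF_WRITE value and all
numbers are illustrative stand-ins.

#include <assert.h>
#include <stdint.h>

#define PVF_WRITE 0x0008u		/* assumed flag value, for illustration */

struct md {				/* stand-in for struct vm_page_md */
	uint32_t pvh_attrs;
	unsigned urw_mappings, krw_mappings;
};

static void
validate_md_page(const struct md *md, uint32_t arm_cache_prefer_mask)
{
	/* Mirrors PMAP_VALIDATE_MD_PAGE(): with coloring active,
	 * PVF_WRITE is set iff at least one rw mapping exists. */
	assert(arm_cache_prefer_mask == 0 ||
	    ((md->pvh_attrs & PVF_WRITE) == 0) ==
	    (md->urw_mappings + md->krw_mappings == 0));
}

int
main(void)
{
	struct md m = { .pvh_attrs = PVF_WRITE, .urw_mappings = 1 };

	validate_md_page(&m, 0x3000u);	/* holds: writable, one rw mapping */
	m.urw_mappings = 0;
	m.pvh_attrs &= ~PVF_WRITE;
	validate_md_page(&m, 0x3000u);	/* holds after the last rw unmap */
	return 0;
}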

Modified files:

Index: src/sys/arch/arm/arm32/pmap.c
diff -u src/sys/arch/arm/arm32/pmap.c:1.228.2.1.2.1 src/sys/arch/arm/arm32/pmap.c:1.228.2.1.2.2
--- src/sys/arch/arm/arm32/pmap.c:1.228.2.1.2.1	Wed Nov 28 22:40:19 2012
+++ src/sys/arch/arm/arm32/pmap.c	Thu Feb  7 06:52:53 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.228.2.1.2.1 2012/11/28 22:40:19 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.228.2.1.2.2 2013/02/07 06:52:53 matt Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
 #include <arm/cpuconf.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.228.2.1.2.1 2012/11/28 22:40:19 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.228.2.1.2.2 2013/02/07 06:52:53 matt Exp $");
 
 #ifdef PMAP_DEBUG
 
@@ -668,12 +668,6 @@ static vaddr_t		kernel_pt_lookup(paddr_t
 
 
 /*
- * External function prototypes
- */
-extern void bzero_page(vaddr_t);
-extern void bcopy_page(vaddr_t, vaddr_t);
-
-/*
  * Misc variables
  */
 vaddr_t virtual_avail;
@@ -699,6 +693,12 @@ pmap_debug(int level)
 }
 #endif	/* PMAP_DEBUG */
 
+#ifdef PMAP_CACHE_VIPT
+#define PMAP_VALIDATE_MD_PAGE(md)	\
+	KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \
+	    "(md) %p: attrs=%#x urw=%u krw=%u", (md), \
+	    (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings);
+#endif /* PMAP_CACHE_VIPT */
 /*
  * A bunch of routines to conditionally flush the caches/TLB depending
  * on whether the specified pmap actually needs to be flushed at any
@@ -829,10 +829,10 @@ do {					\
 /*
  * main pv_entry manipulation functions:
  *   pmap_enter_pv: enter a mapping onto a vm_page list
- *   pmap_remove_pv: remove a mappiing from a vm_page list
+ *   pmap_remove_pv: remove a mapping from a vm_page list
  *
  * NOTE: pmap_enter_pv expects to lock the pvh itself
- *       pmap_remove_pv expects te caller to lock the pvh before calling
+ *       pmap_remove_pv expects the caller to lock the pvh before calling
  */
 
 /*
@@ -896,6 +896,13 @@ pmap_enter_pv(struct vm_page_md *md, pad
 
 #ifdef PMAP_CACHE_VIPT
 	/*
+	 * Even though pmap_vac_me_harder will set PVF_WRITE for us,
+	 * do it here as well to keep the mappings & KVF_WRITE consistent.
+	 */
+	if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) {
+		md->pvh_attrs |= PVF_WRITE;
+	}
+	/*
 	 * If this is an exec mapping and its the first exec mapping
 	 * for this page, make sure to sync the I-cache.
 	 */
@@ -1014,8 +1021,11 @@ pmap_remove_pv(struct vm_page_md *md, pa
 	 * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
 	 * the contents to memory.
 	 */
-	if (md->krw_mappings + md->urw_mappings == 0)
-		md->pvh_attrs &= ~PVF_WRITE;
+	if (arm_cache_prefer_mask != 0) {
+		if (md->krw_mappings + md->urw_mappings == 0)
+			md->pvh_attrs &= ~PVF_WRITE;
+		PMAP_VALIDATE_MD_PAGE(md);
+	}
 	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif /* PMAP_CACHE_VIPT */
 
@@ -1093,8 +1103,13 @@ pmap_modify_pv(struct vm_page_md *md, pa
 		}
 	}
 #ifdef PMAP_CACHE_VIPT
-	if (md->urw_mappings + md->krw_mappings == 0)
-		md->pvh_attrs &= ~PVF_WRITE;
+	if (arm_cache_prefer_mask != 0) {
+		if (md->urw_mappings + md->krw_mappings == 0) {
+			md->pvh_attrs &= ~PVF_WRITE;
+		} else {
+			md->pvh_attrs |= PVF_WRITE;
+		}
+	}
 	/*
 	 * We have two cases here: the first is from enter_pv (new exec
 	 * page), the second is a combined pmap_remove_pv/pmap_enter_pv.
@@ -1850,7 +1865,7 @@ pmap_vac_me_harder(struct vm_page_md *md
 		 * Only check for a bad alias if we have writable mappings.
 		 */
 		tst_mask &= arm_cache_prefer_mask;
-		if (rw_mappings > 0 && arm_cache_prefer_mask) {
+		if (rw_mappings > 0) {
 			for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
 				/* if there's a bad alias, stop checking. */
 				if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
@@ -1906,7 +1921,7 @@ pmap_vac_me_harder(struct vm_page_md *md
 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
 		KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
 	} else if (!va) {
-		KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
+		KASSERT(pmap_is_page_colored_p(md));
 		KASSERT(!(md->pvh_attrs & PVF_WRITE)
 		    || (md->pvh_attrs & PVF_DIRTY));
 		if (rw_mappings == 0) {
@@ -2230,8 +2245,13 @@ pmap_clearbit(struct vm_page_md *md, pad
 					md->uro_mappings++;
 				}
 #ifdef PMAP_CACHE_VIPT
-				if (md->urw_mappings + md->krw_mappings == 0)
-					md->pvh_attrs &= ~PVF_WRITE;
+				if (arm_cache_prefer_mask != 0) {
+					if (md->urw_mappings + md->krw_mappings == 0) {
+						md->pvh_attrs &= ~PVF_WRITE;
+					} else {
+						PMAP_VALIDATE_MD_PAGE(md);
+					}
+				}
 				if (want_syncicache)
 					need_syncicache = true;
 				need_vac_me_harder = true;
@@ -2564,7 +2584,7 @@ pmap_page_remove(struct vm_page_md *md, 
 		if (PV_IS_EXEC_P(md->pvh_attrs))
 			PMAPCOUNT(exec_discarded_page_protect);
 		md->pvh_attrs &= ~PVF_EXEC;
-		KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
+		PMAP_VALIDATE_MD_PAGE(md);
 #endif
 		return;
 	}
@@ -2663,9 +2683,11 @@ pmap_page_remove(struct vm_page_md *md, 
 	md->pvh_attrs &= ~PVF_EXEC;
 	KASSERT(md->urw_mappings == 0);
 	KASSERT(md->uro_mappings == 0);
-	if (md->krw_mappings == 0)
-		md->pvh_attrs &= ~PVF_WRITE;
-	KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
+	if (arm_cache_prefer_mask != 0) {
+		if (md->krw_mappings == 0)
+			md->pvh_attrs &= ~PVF_WRITE;
+		PMAP_VALIDATE_MD_PAGE(md);
+	}
 #endif
 
 	if (flush) {
@@ -2809,8 +2831,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 		/*
 		 * This is to be a managed mapping.
 		 */
-		if ((flags & VM_PROT_ALL) ||
-		    (md->pvh_attrs & PVF_REF)) {
+		if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) {
 			/*
 			 * - The access type indicates that we don't need
 			 *   to do referenced emulation.
@@ -2841,7 +2862,10 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 			npte |= L2_TYPE_INV;
 		}
 
-		npte |= pte_l2_s_cache_mode;
+		if (flags & ARM32_MMAP_WRITECOMBINE) {
+			npte |= pte_l2_s_wc_mode;
+		} else
+			npte |= pte_l2_s_cache_mode;
 
 		if (pg == opg) {
 			/*
@@ -3050,7 +3074,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 		KASSERT(uvm_page_locked_p(pg));
 #endif
 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
-		KASSERT(arm_cache_prefer_mask == 0 || ((md->pvh_attrs & PVF_WRITE) == 0) == (md->urw_mappings + md->krw_mappings == 0));
+		PMAP_VALIDATE_MD_PAGE(md);
 	}
 #endif
 
@@ -3468,7 +3492,9 @@ pmap_kremove(vaddr_t va, vsize_t len)
 					KASSERT(omd->kro_mappings == 0);
 					omd->pvh_attrs &= ~PVF_KMPAGE;
 #ifdef PMAP_CACHE_VIPT
-					omd->pvh_attrs &= ~PVF_WRITE;
+					if (arm_cache_prefer_mask != 0) {
+						omd->pvh_attrs &= ~PVF_WRITE;
+					}
 #endif
 					pmap_kmpages--;
 #ifdef PMAP_CACHE_VIPT
@@ -4047,6 +4073,7 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, 
 	 */
 	if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
 		extern int last_fault_code;
+		extern int kernel_debug;
 		printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
 		    pm, va, ftype);
 		printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
@@ -4054,7 +4081,8 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, 
 		printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
 		    pte, l1pd, last_fault_code);
 #ifdef DDB
-		Debugger();
+		if (kernel_debug & 2)
+			Debugger();
 #endif
 	}
 #endif
@@ -4440,14 +4468,26 @@ pmap_zero_page_generic(paddr_t phys)
 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
 #endif
-#ifdef PMAP_CACHE_VIPT
+#if defined(PMAP_CACHE_VIPT)
 	/* Choose the last page color it had, if any */
 	const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
 #else
 	const vsize_t va_offset = 0;
 #endif
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+	/*
+	 * Is this page mapped at its natural color?
+	 * If we have all of memory mapped, then just convert PA to VA.
+	 */
+	const bool okcolor = va_offset == (phys & arm_cache_prefer_mask);
+	const vaddr_t vdstp = KERNEL_BASE + (phys - physical_start);
+#else
+	const bool okcolor = false;
+	const vaddr_t vdstp = cdstp + va_offset;
+#endif
 	pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
 
+
 #ifdef DEBUG
 	if (!SLIST_EMPTY(&md->pvh_list))
 		panic("pmap_zero_page: page has mappings");
@@ -4455,25 +4495,39 @@ pmap_zero_page_generic(paddr_t phys)
 
 	KDASSERT((phys & PGOFSET) == 0);
 
-	/*
-	 * Hook in the page, zero it, and purge the cache for that
-	 * zeroed page. Invalidate the TLB as needed.
-	 */
-	*ptep = L2_S_PROTO | phys |
-	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
-	PTE_SYNC(ptep);
-	cpu_tlb_flushD_SE(cdstp + va_offset);
-	cpu_cpwait();
-	bzero_page(cdstp + va_offset);
-	/*
-	 * Unmap the page.
-	 */
-	*ptep = 0;
-	PTE_SYNC(ptep);
-	cpu_tlb_flushD_SE(cdstp + va_offset);
+	if (!okcolor) {
+		/*
+		 * Hook in the page, zero it, and purge the cache for that
+		 * zeroed page. Invalidate the TLB as needed.
+		 */
+		*ptep = L2_S_PROTO | phys |
+		    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+		PTE_SYNC(ptep);
+		cpu_tlb_flushD_SE(cdstp + va_offset);
+		cpu_cpwait();
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
+		/*
+		 * If we are direct-mapped and our color isn't ok, then before
+		 * we bzero the page invalidate its contents from the cache and
+		 * reset the color to its natural color.
+		 */
+		cpu_dcache_inv_range(cdstp + va_offset, PAGE_SIZE);
+		md->pvh_attrs &= ~arm_cache_prefer_mask;
+		md->pvh_attrs |= (phys & arm_cache_prefer_mask);
+#endif
+	}
+	bzero_page(vdstp);
+	if (!okcolor) {
+		/*
+		 * Unmap the page.
+		 */
+		*ptep = 0;
+		PTE_SYNC(ptep);
+		cpu_tlb_flushD_SE(cdstp + va_offset);
 #ifdef PMAP_CACHE_VIVT
-	cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
+		cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
 #endif
+	}
 #ifdef PMAP_CACHE_VIPT
 	/*
 	 * This page is now cache resident so it now has a page color.
@@ -4633,6 +4687,23 @@ pmap_copy_page_generic(paddr_t src, padd
 	const vsize_t src_va_offset = 0;
 	const vsize_t dst_va_offset = 0;
 #endif
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+	/*
+	 * Is this page mapped at its natural color?
+	 * If we have all of memory mapped, then just convert PA to VA.
+	 */
+	const bool src_okcolor = src_va_offset == (src & arm_cache_prefer_mask);
+	const bool dst_okcolor = dst_va_offset == (dst & arm_cache_prefer_mask);
+	const vaddr_t vsrcp = src_okcolor
+	    ? KERNEL_BASE + (src - physical_start)
+	    : csrcp + src_va_offset;
+	const vaddr_t vdstp = KERNEL_BASE + (dst - physical_start);
+#else
+	const bool src_okcolor = false;
+	const bool dst_okcolor = false;
+	const vaddr_t vsrcp = csrcp + src_va_offset;
+	const vaddr_t vdstp = cdstp + dst_va_offset;
+#endif
 	pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT];
 	pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT];
 
@@ -4664,38 +4735,57 @@ pmap_copy_page_generic(paddr_t src, padd
 	 * the cache for the appropriate page. Invalidate the TLB
 	 * as required.
 	 */
-	*src_ptep = L2_S_PROTO
-	    | src
+	if (!src_okcolor) {
+		*src_ptep = L2_S_PROTO
+		    | src
 #ifdef PMAP_CACHE_VIPT
-	    | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
+		    | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
 #endif
 #ifdef PMAP_CACHE_VIVT
-	    | pte_l2_s_cache_mode
+		    | pte_l2_s_cache_mode
 #endif
-	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
-	*dst_ptep = L2_S_PROTO | dst |
-	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
-	PTE_SYNC(src_ptep);
-	PTE_SYNC(dst_ptep);
-	cpu_tlb_flushD_SE(csrcp + src_va_offset);
-	cpu_tlb_flushD_SE(cdstp + dst_va_offset);
-	cpu_cpwait();
-	bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset);
-#ifdef PMAP_CACHE_VIVT
-	cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE);
+		    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
+		PTE_SYNC(src_ptep);
+		cpu_tlb_flushD_SE(csrcp + src_va_offset);
+		cpu_cpwait();
+	}
+	if (!dst_okcolor) {
+		*dst_ptep = L2_S_PROTO | dst |
+		    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+		PTE_SYNC(dst_ptep);
+		cpu_tlb_flushD_SE(cdstp + dst_va_offset);
+		cpu_cpwait();
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
+		/*
+		 * If we are direct-mapped and our color isn't ok, then before
+		 * we bcopy to the new page invalidate its contents from the
+		 * cache and reset its color to its natural color.
+		 */
+		cpu_dcache_inv_range(cdstp + dst_va_offset, PAGE_SIZE);
+		dst_md->pvh_attrs &= ~arm_cache_prefer_mask;
+		dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask);
 #endif
+	}
+	bcopy_page(vsrcp, vdstp);
 #ifdef PMAP_CACHE_VIVT
-	cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE);
+	cpu_dcache_inv_range(vsrcp, PAGE_SIZE);
+	cpu_dcache_wbinv_range(vdstp, PAGE_SIZE);
 #endif
 	/*
 	 * Unmap the pages.
 	 */
-	*src_ptep = 0;
-	*dst_ptep = 0;
-	PTE_SYNC(src_ptep);
-	PTE_SYNC(dst_ptep);
-	cpu_tlb_flushD_SE(csrcp + src_va_offset);
-	cpu_tlb_flushD_SE(cdstp + dst_va_offset);
+	if (!src_okcolor) {
+		*src_ptep = 0;
+		PTE_SYNC(src_ptep);
+		cpu_tlb_flushD_SE(csrcp + src_va_offset);
+		cpu_cpwait();
+	}
+	if (!dst_okcolor) {
+		*dst_ptep = 0;
+		PTE_SYNC(dst_ptep);
+		cpu_tlb_flushD_SE(cdstp + dst_va_offset);
+		cpu_cpwait();
+	}
 #ifdef PMAP_CACHE_VIPT
 	/*
 	 * Now that the destination page is in the cache, mark it as colored.
@@ -6863,11 +6953,13 @@ arm_pmap_alloc_poolpage(int flags)
 {
 	/*
 	 * On some systems, only some pages may be "coherent" for dma and we
-	 * want to use those for pool pages (think mbufs).
+	 * want to prefer those for pool pages (think mbufs) but fallback to
+	 * any page if none is available.
 	 */
-	if (arm_poolpage_vmfreelist != VM_FREELIST_DEFAULT)
+	if (arm_poolpage_vmfreelist != VM_FREELIST_DEFAULT) {
 		return uvm_pagealloc_strat(NULL, 0, NULL, flags,
-		    UVM_PGA_STRAT_ONLY, arm_poolpage_vmfreelist);
+		    UVM_PGA_STRAT_FALLBACK, arm_poolpage_vmfreelist);
+	}
 
 	return uvm_pagealloc(NULL, 0, NULL, flags);
 }
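
The pmap_zero_page_generic()/pmap_copy_page_generic() hunks above hinge
on one test: with all of RAM direct-mapped, a page can be zeroed or
copied through the direct map only if the direct-map VA carries the
page's last cache color; otherwise a temporary mapping at the right
color is still required.  A minimal userland sketch of that test
follows; the mask and base addresses are assumed values for the
example, not the kernel's.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARM_CACHE_PREFER_MASK	0x3000u		/* assumed: 16KiB ways, 4KiB pages */
#define KERNEL_BASE		0x80000000u	/* assumed direct-map base VA */
#define PHYSICAL_START		0x00000000u	/* assumed start of RAM */

static bool
ok_color(uint32_t last_color, uint32_t phys)
{
	/* A page's natural color is (phys & mask); the direct map is
	 * usable when the page was last mapped at that same color. */
	return last_color == (phys & ARM_CACHE_PREFER_MASK);
}

int
main(void)
{
	uint32_t phys = 0x00125000u;		/* natural color 0x1000 */

	printf("%d\n", ok_color(0x1000u, phys));	/* 1: direct map OK */
	printf("%d\n", ok_color(0x2000u, phys));	/* 0: temp mapping needed */
	/* okcolor case: the VA is plain arithmetic, no PTE fiddling. */
	printf("direct-map VA 0x%08" PRIx32 "\n",
	    KERNEL_BASE + (phys - PHYSICAL_START));
	return 0;
}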

Index: src/sys/arch/arm/include/arm32/pmap.h
diff -u src/sys/arch/arm/include/arm32/pmap.h:1.101.4.1 src/sys/arch/arm/include/arm32/pmap.h:1.101.4.2
--- src/sys/arch/arm/include/arm32/pmap.h:1.101.4.1	Wed Nov 28 22:40:32 2012
+++ src/sys/arch/arm/include/arm32/pmap.h	Thu Feb  7 06:52:54 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.101.4.1 2012/11/28 22:40:32 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.101.4.2 2013/02/07 06:52:54 matt Exp $	*/
 
 /*
  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
@@ -463,6 +463,9 @@ pmap_ptesync(pt_entry_t *ptep, size_t cn
 #define KERNEL_PD_SIZE	\
 	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
 
+void	bzero_page(vaddr_t);
+void	bcopy_page(vaddr_t, vaddr_t);
+
 /************************* ARM MMU configuration *****************************/
 
 #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
@@ -882,22 +885,23 @@ extern void (*pmap_zero_page_func)(paddr
 #define	L2_L_MAPPABLE_P(va, pa, size)					\
 	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
 
+#ifndef _LOCORE
 /*
  * Hooks for the pool allocator.
  */
 #define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
+extern paddr_t physical_start, physical_end;
 #ifdef PMAP_NEED_ALLOC_POOLPAGE
-extern paddr_t physical_start;
 struct vm_page *arm_pmap_alloc_poolpage(int);
 #define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
+#endif
+#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
 #define	PMAP_MAP_POOLPAGE(pa) \
         ((vaddr_t)((paddr_t)(pa) - physical_start + KERNEL_BASE))
 #define PMAP_UNMAP_POOLPAGE(va) \
         ((paddr_t)((vaddr_t)(va) - KERNEL_BASE + physical_start))
 #endif
 
-#ifndef _LOCORE
-
 /*
  * pmap-specific data store in the vm_page structure.
  */
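
The PMAP_MAP_POOLPAGE()/PMAP_UNMAP_POOLPAGE() macros the header change
exposes rely on the direct map being a constant-offset window over RAM,
so both directions are pure arithmetic and invert each other.  A
standalone sketch of that round trip, with assumed base addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define KERNEL_BASE	0x80000000u	/* assumed direct-map base VA */
#define PHYSICAL_START	0x40000000u	/* assumed start of RAM */

static uint32_t
map_poolpage(uint32_t pa)		/* mirrors PMAP_MAP_POOLPAGE() */
{
	return pa - PHYSICAL_START + KERNEL_BASE;
}

static uint32_t
unmap_poolpage(uint32_t va)		/* mirrors PMAP_UNMAP_POOLPAGE() */
{
	return va - KERNEL_BASE + PHYSICAL_START;
}

int
main(void)
{
	uint32_t pa = 0x40123000u;
	uint32_t va = map_poolpage(pa);

	/* The translation is its own inverse. */
	assert(unmap_poolpage(va) == pa);
	printf("pa 0x%08x <-> va 0x%08x\n", pa, va);
	return 0;
}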
