Module Name:    src
Committed By:   matt
Date:           Fri Apr 11 04:19:48 UTC 2014

Modified Files:
        src/sys/arch/arm/arm32: arm32_kvminit.c arm32_machdep.c pmap.c
        src/sys/arch/arm/include/arm32: pmap.h
        src/sys/arch/evbarm/cubie: cubie_machdep.c cubie_start.S
Added Files:
        src/sys/arch/evbarm/conf: CUBIETRUCK CUBIETRUCK_INSTALL

Log Message:
Add a kernel for the CUBIETRUCK (CUBIEBOARD3).  Allow direct mapping of all
memory (but for now allow the memory mapped above KERNEL_BASE to be used for
poolpages).


To generate a diff of this commit:
cvs rdiff -u -r1.27 -r1.28 src/sys/arch/arm/arm32/arm32_kvminit.c
cvs rdiff -u -r1.103 -r1.104 src/sys/arch/arm/arm32/arm32_machdep.c
cvs rdiff -u -r1.283 -r1.284 src/sys/arch/arm/arm32/pmap.c
cvs rdiff -u -r1.130 -r1.131 src/sys/arch/arm/include/arm32/pmap.h
cvs rdiff -u -r0 -r1.1 src/sys/arch/evbarm/conf/CUBIETRUCK \
    src/sys/arch/evbarm/conf/CUBIETRUCK_INSTALL
cvs rdiff -u -r1.15 -r1.16 src/sys/arch/evbarm/cubie/cubie_machdep.c
cvs rdiff -u -r1.7 -r1.8 src/sys/arch/evbarm/cubie/cubie_start.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/arm32/arm32_kvminit.c
diff -u src/sys/arch/arm/arm32/arm32_kvminit.c:1.27 src/sys/arch/arm/arm32/arm32_kvminit.c:1.28
--- src/sys/arch/arm/arm32/arm32_kvminit.c:1.27	Sat Apr  5 22:36:18 2014
+++ src/sys/arch/arm/arm32/arm32_kvminit.c	Fri Apr 11 04:19:47 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: arm32_kvminit.c,v 1.27 2014/04/05 22:36:18 matt Exp $	*/
+/*	$NetBSD: arm32_kvminit.c,v 1.28 2014/04/11 04:19:47 matt Exp $	*/
 
 /*
  * Copyright (c) 2002, 2003, 2005  Genetec Corporation.  All rights reserved.
@@ -122,7 +122,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.27 2014/04/05 22:36:18 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.28 2014/04/11 04:19:47 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/device.h>
@@ -161,10 +161,22 @@ extern char _end[];
  * Macros to translate between physical and virtual for a subset of the
  * kernel address space.  *Not* for general use.
  */
+#if defined(KERNEL_BASE_VOFFSET)
+#define KERN_VTOPHYS(bmi, va) \
+	((paddr_t)((vaddr_t)(va) - KERNEL_BASE_VOFFSET))
+#define KERN_PHYSTOV(bmi, pa) \
+	((vaddr_t)((paddr_t)(pa) + KERNEL_BASE_VOFFSET))
+#elif defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+#define KERN_VTOPHYS(bmi, va) \
+	((paddr_t)((vaddr_t)(va) - pmap_directbase + (bmi)->bmi_start))
+#define KERN_PHYSTOV(bmi, pa) \
+	((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + pmap_directbase))
+#else
 #define KERN_VTOPHYS(bmi, va) \
 	((paddr_t)((vaddr_t)(va) - KERNEL_BASE + (bmi)->bmi_start))
 #define KERN_PHYSTOV(bmi, pa) \
 	((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + KERNEL_BASE))
+#endif
 
 void
 arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
@@ -210,7 +222,11 @@ arm32_bootmem_init(paddr_t memstart, psi
 	 */
 	if (bmi->bmi_start < bmi->bmi_kernelstart) {
 		pv->pv_pa = bmi->bmi_start;
+#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+		pv->pv_va = pmap_directbase;
+#else
 		pv->pv_va = KERNEL_BASE;
+#endif
 		pv->pv_size = bmi->bmi_kernelstart - bmi->bmi_start;
 		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
 #ifdef VERBOSE_INIT_ARM
@@ -387,7 +403,27 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 
 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
 	KASSERT(mapallmem_p);
-#endif
+#ifdef ARM_MMU_EXTENDED
+	/*
+	 * We can only use addresses beneath kernel_vm_base to map physical
+	 * memory.
+	 */
+	KASSERT(kernel_vm_base >= physical_end - physical_start);
+	/*
+	 * If we don't have enough memory via TTBR1, we have to use addresses
+	 * from TTBR0 to map some of the physical memory.  But try to use as
+	 * much high memory space as possible.
+	 */
+	if (kernel_vm_base - KERNEL_BASE < physical_end - physical_start) {
+		pmap_directbase = kernel_vm_base
+		    - (physical_end - physical_start);
+		printf("%s: changing pmap_directbase to %#lx\n", __func__,
+		    pmap_directbase);
+	}
+#else
+	KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
+#endif /* ARM_MMU_EXTENDED */
+#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */
 
 	/*
 	 * Calculate the number of L2 pages needed for mapping the
@@ -700,7 +736,11 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 		cur_pv = *pv;
 		pv = SLIST_NEXT(pv, pv_list);
 	} else {
+#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+		cur_pv.pv_va = pmap_directbase;
+#else
 		cur_pv.pv_va = KERNEL_BASE;
+#endif
 		cur_pv.pv_pa = bmi->bmi_start;
 		cur_pv.pv_size = pv->pv_pa - bmi->bmi_start;
 		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;

Index: src/sys/arch/arm/arm32/arm32_machdep.c
diff -u src/sys/arch/arm/arm32/arm32_machdep.c:1.103 src/sys/arch/arm/arm32/arm32_machdep.c:1.104
--- src/sys/arch/arm/arm32/arm32_machdep.c:1.103	Sat Apr  5 22:36:18 2014
+++ src/sys/arch/arm/arm32/arm32_machdep.c	Fri Apr 11 04:19:47 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: arm32_machdep.c,v 1.103 2014/04/05 22:36:18 matt Exp $	*/
+/*	$NetBSD: arm32_machdep.c,v 1.104 2014/04/11 04:19:47 matt Exp $	*/
 
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.103 2014/04/05 22:36:18 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.104 2014/04/11 04:19:47 matt Exp $");
 
 #include "opt_modular.h"
 #include "opt_md.h"
@@ -712,11 +712,11 @@ xc_send_ipi(struct cpu_info *ci)
 bool
 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
 {
-	if (physical_start <= pa && pa < physical_end) {
-		*vap = KERNEL_BASE + (pa - physical_start);
-		return true;
+	bool rv;
+	vaddr_t va = pmap_direct_mapped_phys(pa, &rv, 0);
+	if (rv) {
+		*vap = va;
 	}
-
-	return false;
+	return rv;
 }
 #endif

Index: src/sys/arch/arm/arm32/pmap.c
diff -u src/sys/arch/arm/arm32/pmap.c:1.283 src/sys/arch/arm/arm32/pmap.c:1.284
--- src/sys/arch/arm/arm32/pmap.c:1.283	Thu Apr 10 02:45:55 2014
+++ src/sys/arch/arm/arm32/pmap.c	Fri Apr 11 04:19:47 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.283 2014/04/10 02:45:55 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.284 2014/04/11 04:19:47 matt Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -216,7 +216,7 @@
 #include <arm/locore.h>
 //#include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.283 2014/04/10 02:45:55 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.284 2014/04/11 04:19:47 matt Exp $");
 
 //#define PMAP_DEBUG
 #ifdef PMAP_DEBUG
@@ -512,6 +512,13 @@ int pmap_kmpages;
  */
 bool pmap_initialized;
 
+#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+/*
+ * Start of direct-mapped memory
+ */
+vaddr_t pmap_directbase = KERNEL_BASE;
+#endif
+
 /*
  * Misc. locking data structures
  */
@@ -1294,9 +1301,12 @@ pmap_alloc_l1(pmap_t pm)
 #else
 	struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
 #endif
+	bool ok __diagused;
 	KASSERT(pg != NULL);
 	pm->pm_l1_pa = VM_PAGE_TO_PHYS(pg);
-	vaddr_t va = KERNEL_BASE + (pm->pm_l1_pa - physical_start);
+	vaddr_t va = pmap_direct_mapped_phys(pm->pm_l1_pa, &ok, 0xdeadbeef);
+	KASSERT(ok);
+	KASSERT(va >= KERNEL_BASE);
 
 #else
 	KASSERTMSG(kernel_map != NULL, "pm %p", pm);
@@ -2632,11 +2642,16 @@ pmap_syncicache_page(struct vm_page_md *
 	KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED);
 #endif
 
+	pt_entry_t * const ptep = cpu_cdst_pte(0);
+	const vaddr_t dstp = cpu_cdstp(0);
 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
-	if (way_size == PAGE_SIZE) {
-		vaddr_t vdstp = KERNEL_BASE + (pa - physical_start);
-		cpu_icache_sync_range(vdstp, way_size);
-		return;
+	if (way_size <= PAGE_SIZE) {
+		bool ok = false;
+		vaddr_t vdstp = pmap_direct_mapped_phys(pa, &ok, dstp);
+		if (ok) {
+			cpu_icache_sync_range(vdstp, way_size);
+			return;
+		}
 	}
 #endif
 
@@ -2645,8 +2660,6 @@ pmap_syncicache_page(struct vm_page_md *
 	 * same page to pages in the way and then do the icache_sync on
 	 * the entire way making sure we are cleaned.
 	 */
-	pt_entry_t * const ptep = cpu_cdst_pte(0);
-	const vaddr_t dstp = cpu_cdstp(0);
 	const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode
 	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
 
@@ -5112,10 +5125,10 @@ pmap_zero_page_generic(paddr_t pa)
 	 * Is this page mapped at its natural color?
 	 * If we have all of memory mapped, then just convert PA to VA.
 	 */
-	const bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
+	bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
 	   || va_offset == (pa & arm_cache_prefer_mask);
 	const vaddr_t vdstp = okcolor
-	    ? KERNEL_BASE + (pa - physical_start)
+	    ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset))
 	    : cpu_cdstp(va_offset);
 #else
 	const bool okcolor = false;
@@ -5142,7 +5155,8 @@ pmap_zero_page_generic(paddr_t pa)
 		PTE_SYNC(ptep);
 		cpu_tlb_flushD_SE(vdstp);
 		cpu_cpwait();
-#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) \
+    && !defined(ARM_MMU_EXTENDED)
 		/*
 		 * If we are direct-mapped and our color isn't ok, then before
 		 * we bzero the page invalidate its contents from the cache and
@@ -5239,10 +5253,10 @@ pmap_pageidlezero(paddr_t pa)
 	const vsize_t va_offset = 0;
 #endif
 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
-	const bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
+	bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
 	   || va_offset == (pa & arm_cache_prefer_mask);
 	const vaddr_t vdstp = okcolor
-	    ? KERNEL_BASE + (pa - physical_start)
+	    ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset))
 	    : cpu_cdstp(va_offset);
 #else
 	const bool okcolor = false;
@@ -5350,14 +5364,16 @@ pmap_copy_page_generic(paddr_t src, padd
 	 * Is this page mapped at its natural color?
 	 * If we have all of memory mapped, then just convert PA to VA.
 	 */
-	const bool src_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
+	bool src_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
 	    || src_va_offset == (src & arm_cache_prefer_mask);
-	const bool dst_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
+	bool dst_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
 	    || dst_va_offset == (dst & arm_cache_prefer_mask);
 	const vaddr_t vsrcp = src_okcolor
-	    ? KERNEL_BASE + (src - physical_start)
+	    ? pmap_direct_mapped_phys(src, &src_okcolor,
+		cpu_csrcp(src_va_offset))
 	    : cpu_csrcp(src_va_offset);
-	const vaddr_t vdstp = KERNEL_BASE + (dst - physical_start);
+	const vaddr_t vdstp = pmap_direct_mapped_phys(dst, &dst_okcolor,
+	    cpu_cdstp(dst_va_offset));
 #else
 	const bool src_okcolor = false;
 	const bool dst_okcolor = false;
@@ -6602,6 +6618,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 #ifdef ARM_MMU_EXTENDED_XXX
 			    | ((prot & VM_PROT_EXECUTE) ? 0 : L1_S_V6_XN)
 #endif
+#ifdef ARM_MMU_EXTENDED
+			    | (va & 0x80000000 ? 0 : L1_S_V6_nG)
+#endif
 			    | L1_S_PROT(PTE_KERNEL, prot) | f1;
 #ifdef VERBOSE_INIT_ARM
 			printf("sS");
@@ -6620,6 +6639,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 #ifdef ARM_MMU_EXTENDED_XXX
 			    | ((prot & VM_PROT_EXECUTE) ? 0 : L1_S_V6_XN)
 #endif
+#ifdef ARM_MMU_EXTENDED
+			    | (va & 0x80000000 ? 0 : L1_S_V6_nG)
+#endif
 			    | L1_S_PROT(PTE_KERNEL, prot) | f1
 			    | L1_S_DOM(PMAP_DOMAIN_KERNEL);
 #ifdef VERBOSE_INIT_ARM
@@ -6654,6 +6676,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 #ifdef ARM_MMU_EXTENDED_XXX
 			    | ((prot & VM_PROT_EXECUTE) ? 0 : L2_XS_L_XN)
 #endif
+#ifdef ARM_MMU_EXTENDED
+			    | (va & 0x80000000 ? 0 : L2_XS_nG)
+#endif
 			    | L2_L_PROT(PTE_KERNEL, prot) | f2l;
 #ifdef VERBOSE_INIT_ARM
 			printf("L");
@@ -6674,6 +6699,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 #ifdef ARM_MMU_EXTENDED_XXX
 		    | ((prot & VM_PROT_EXECUTE) ? 0 : L2_XS_XN)
 #endif
+#ifdef ARM_MMU_EXTENDED
+		    | (va & 0x80000000 ? 0 : L2_XS_nG)
+#endif
 		    | L2_S_PROT(PTE_KERNEL, prot) | f2s;
 		l2pte_set(ptep, npte, 0);
 		PTE_SYNC(ptep);
@@ -7331,16 +7359,17 @@ pmap_pte_init_armv7(void)
 		pte_l1_s_cache_mode |= L1_S_V6_S;
 		pte_l2_l_cache_mode |= L2_XS_S;
 		pte_l2_s_cache_mode |= L2_XS_S;
-
-		/*
-		 * write-back, no write-allocate, shareable for page tables.
-		 */
-		pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode;
-		pte_l2_l_cache_mode_pt = pte_l2_l_cache_mode;
-		pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;
 	}
 
 	/*
+	 * Page tables are just all other memory.  We can use write-back since
+	 * pmap_needs_pte_sync is 1 (or the MMU can read out of cache).
+	 */
+	pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode;
+	pte_l2_l_cache_mode_pt = pte_l2_l_cache_mode;
+	pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;
+
+	/*
 	 * Check the Memory Model Features to see if this CPU supports
 	 * the TLBIASID coproc op.
 	 */
@@ -7714,11 +7743,21 @@ arm_pmap_alloc_poolpage(int flags)
 	/*
 	 * On some systems, only some pages may be "coherent" for dma and we
 	 * want to prefer those for pool pages (think mbufs) but fallback to
-	 * any page if none is available.
+	 * any page if none is available.  But we can only fallback if we
+	 * aren't direct mapping memory or all of memory can be direct-mapped.
+	 * If that isn't true, pool changes can only come from direct-mapped
+	 * memory.
 	 */
 	if (arm_poolpage_vmfreelist != VM_FREELIST_DEFAULT) {
 		return uvm_pagealloc_strat(NULL, 0, NULL, flags,
-		    UVM_PGA_STRAT_FALLBACK, arm_poolpage_vmfreelist);
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(ARM_MMU_EXTENDED)
+		    (pmap_directbase < KERNEL_BASE
+			? UVM_PGA_STRAT_ONLY
+			: UVM_PGA_STRAT_FALLBACK),
+#else
+		    UVM_PGA_STRAT_FALLBACK,
+#endif
+		    arm_poolpage_vmfreelist);
 	}
 
 	return uvm_pagealloc(NULL, 0, NULL, flags);
@@ -7741,3 +7780,59 @@ pic_ipi_shootdown(void *arg)
 	return 1;
 }
 #endif /* ARM_MMU_EXTENDED && MULTIPROCESSOR */
+
+
+#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+vaddr_t
+pmap_direct_mapped_phys(paddr_t pa, bool *ok_p, vaddr_t va)
+{
+	bool ok = false;
+	if (physical_start <= pa && pa < physical_end) {
+#ifdef ARM_MMU_EXTENDED
+		const vaddr_t newva = pmap_directbase + pa - physical_start;
+		if (newva >= KERNEL_BASE) {
+			va = newva;
+			ok = true;
+		}
+#else
+		va = KERNEL_BASE + pa - physical_start;
+		ok = true;
+#endif
+	}
+	KASSERT(ok_p);
+	*ok_p = ok;
+	return va;
+}
+
+vaddr_t
+pmap_map_poolpage(paddr_t pa)
+{
+	bool ok __diagused;
+	vaddr_t va = pmap_direct_mapped_phys(pa, &ok, 0);
+	KASSERT(ok);
+#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
+	if (arm_cache_prefer_mask != 0) {
+		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		pmap_acquire_page_lock(md);
+		pmap_vac_me_harder(md, pa, pmap_kernel(), va);
+		pmap_release_page_lock(md);
+	}
+#endif
+	return va;
+}
+
+paddr_t
+pmap_unmap_poolpage(vaddr_t va)
+{
+	KASSERT(va >= KERNEL_BASE);
+#if defined(ARM_MMU_EXTENDED)
+	return va - pmap_directbase + physical_start;
+#else
+#ifdef PMAP_CACHE_VIVT
+	cpu_idcache_wbinv_range(va, PAGE_SIZE);
+#endif
+        return va - KERNEL_BASE + physical_start;
+#endif
+}
+#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */

Index: src/sys/arch/arm/include/arm32/pmap.h
diff -u src/sys/arch/arm/include/arm32/pmap.h:1.130 src/sys/arch/arm/include/arm32/pmap.h:1.131
--- src/sys/arch/arm/include/arm32/pmap.h:1.130	Fri Apr  4 16:12:28 2014
+++ src/sys/arch/arm/include/arm32/pmap.h	Fri Apr 11 04:19:47 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.130 2014/04/04 16:12:28 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.131 2014/04/11 04:19:47 matt Exp $	*/
 
 /*
  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
@@ -406,6 +406,15 @@ void	pmap_devmap_register(const struct p
 bool	pmap_pageidlezero(paddr_t);
 #define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))
 
+#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+/*
+ * For the pmap, this is a more useful way to map a direct mapped page.
+ * It returns either the direct-mapped VA or the VA supplied if it can't
+ * be direct mapped.
+ */
+vaddr_t	pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
+#endif
+
 /*
  * used by dumpsys to record the PA of the L1 table
  */
@@ -415,6 +424,13 @@ uint32_t pmap_kernel_L1_addr(void);
  */
 extern vaddr_t	pmap_curmaxkvaddr;
 
+#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+/*
+ * Starting VA of direct mapped memory (usually KERNEL_BASE).
+ */
+extern vaddr_t pmap_directbase;
+#endif
+
 /*
  * Useful macros and constants 
  */
@@ -1030,10 +1046,10 @@ struct vm_page *arm_pmap_alloc_poolpage(
 #define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
 #endif
 #if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
-#define	PMAP_MAP_POOLPAGE(pa) \
-        ((vaddr_t)((paddr_t)(pa) - physical_start + KERNEL_BASE))
-#define PMAP_UNMAP_POOLPAGE(va) \
-        ((paddr_t)((vaddr_t)(va) - KERNEL_BASE + physical_start))
+vaddr_t	pmap_map_poolpage(paddr_t);
+paddr_t	pmap_unmap_poolpage(vaddr_t);
+#define	PMAP_MAP_POOLPAGE(pa)	pmap_map_poolpage(pa)
+#define PMAP_UNMAP_POOLPAGE(va)	pmap_unmap_poolpage(va)
 #endif
 
 /*

Index: src/sys/arch/evbarm/cubie/cubie_machdep.c
diff -u src/sys/arch/evbarm/cubie/cubie_machdep.c:1.15 src/sys/arch/evbarm/cubie/cubie_machdep.c:1.16
--- src/sys/arch/evbarm/cubie/cubie_machdep.c:1.15	Sat Mar 29 14:00:30 2014
+++ src/sys/arch/evbarm/cubie/cubie_machdep.c	Fri Apr 11 04:19:48 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: cubie_machdep.c,v 1.15 2014/03/29 14:00:30 matt Exp $ */
+/*	$NetBSD: cubie_machdep.c,v 1.16 2014/04/11 04:19:48 matt Exp $ */
 
 /*
  * Machine dependent functions for kernel setup for TI OSK5912 board.
@@ -125,7 +125,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cubie_machdep.c,v 1.15 2014/03/29 14:00:30 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cubie_machdep.c,v 1.16 2014/04/11 04:19:48 matt Exp $");
 
 #include "opt_machdep.h"
 #include "opt_ddb.h"
@@ -216,7 +216,11 @@ int use_fb_console = true;
  * kernel address space.  *Not* for general use.
  */
 #define KERNEL_BASE_PHYS	((paddr_t)KERNEL_BASE_phys)
+#ifdef KERNEL_BASES_EQUAL
+#define KERNEL_PHYS_VOFFSET	0
+#else
 #define KERNEL_PHYS_VOFFSET	(KERNEL_BASE - AWIN_SDRAM_PBASE)
+#endif
 #define AWIN_CORE_VOFFSET	(AWIN_CORE_VBASE - AWIN_CORE_PBASE)
 
 /* Prototypes */
@@ -279,6 +283,15 @@ static const struct pmap_devmap devmap[]
 #undef	_A
 #undef	_S
 
+#ifdef PMAP_NEED_ALLOC_POOLPAGE
+static struct boot_physmem bp_highgig = {
+	.bp_start = AWIN_SDRAM_PBASE / NBPG,
+	.bp_pages = (KERNEL_VM_BASE - KERNEL_BASE) / NBPG,
+	.bp_freelist = VM_FREELIST_ISADMA,
+	.bp_flags = 0,
+};
+#endif
+
 /*
  * u_int initarm(...)
  *
@@ -366,12 +379,14 @@ initarm(void *arg)
 
 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
 	const bool mapallmem_p = true;
+#ifndef PMAP_NEED_ALLOC_POOLPAGE
 	if (ram_size > KERNEL_VM_BASE - KERNEL_BASE) {
 		printf("%s: dropping RAM size from %luMB to %uMB\n",
 		   __func__, (unsigned long) (ram_size >> 20),
 		   (KERNEL_VM_BASE - KERNEL_BASE) >> 20);
 		ram_size = KERNEL_VM_BASE - KERNEL_BASE;
 	}
+#endif
 #else
 	const bool mapallmem_p = false;
 #endif
@@ -408,7 +423,19 @@ initarm(void *arg)
 		use_fb_console = true;
 	}
 #endif
-	
+	/*
+	 * If we couldn't map all of memory via TTBR1, limit the memory the
+	 * kernel can allocate from to be from the highest available 1GB.
+	 */
+#ifdef PMAP_NEED_ALLOC_POOLPAGE
+	if (atop(ram_size) > bp_highgig.bp_pages) {
+		bp_highgig.bp_start += atop(ram_size) - bp_highgig.bp_pages;
+		arm_poolpage_vmfreelist = bp_highgig.bp_freelist;
+		return initarm_common(KERNEL_VM_BASE, KERNEL_VM_SIZE,
+		    &bp_highgig, 1);
+	}
+#endif
+
 	return initarm_common(KERNEL_VM_BASE, KERNEL_VM_SIZE, NULL, 0);
 
 }

Index: src/sys/arch/evbarm/cubie/cubie_start.S
diff -u src/sys/arch/evbarm/cubie/cubie_start.S:1.7 src/sys/arch/evbarm/cubie/cubie_start.S:1.8
--- src/sys/arch/evbarm/cubie/cubie_start.S:1.7	Sat Mar 29 14:00:30 2014
+++ src/sys/arch/evbarm/cubie/cubie_start.S	Fri Apr 11 04:19:48 2014
@@ -40,10 +40,15 @@
 #include <arm/allwinner/awin_reg.h>
 #include <evbarm/cubie/platform.h>  
 
-RCSID("$NetBSD: cubie_start.S,v 1.7 2014/03/29 14:00:30 matt Exp $")
+RCSID("$NetBSD: cubie_start.S,v 1.8 2014/04/11 04:19:48 matt Exp $")
 
 #if defined(VERBOSE_INIT_ARM)
 #define	XPUTC(n)	mov r0, n; bl xputc
+#ifdef KERNEL_BASES_EQUAL
+#define	XPUTC2(n)	mov r0, n; bl xputc
+#else
+#define XPUTC2(n)	mov r0, n; blx r11
+#endif
 #ifdef __ARMEB__
 #define COM_BSWAP
 #endif
@@ -51,6 +56,7 @@ RCSID("$NetBSD: cubie_start.S,v 1.7 2014
 #define XPUTC_COM	1
 #else
 #define	XPUTC(n)
+#define	XPUTC2(n)
 #endif
 
 #define INIT_MEMSIZE	128
@@ -59,11 +65,15 @@ RCSID("$NetBSD: cubie_start.S,v 1.7 2014
 #define	MD_CPU_HATCH	_C_LABEL(awin_cpu_hatch)
 
 /*
- * Kernel start routine for BEAGLEBOARD boards.
+ * Kernel start routine for CUBIE (Allwinner) boards.
  * At this point, this code has been loaded into SDRAM
- * and the MMU is off
+ * and the MMU may be on or may be off.
  */
+#ifdef KERNEL_BASES_EQUAL
+	.text
+#else
 	.section .start,"ax",%progbits
+#endif
 
 	.global	_C_LABEL(cubie_start)
 _C_LABEL(cubie_start):
@@ -76,17 +86,19 @@ _C_LABEL(cubie_start):
 	cpsid	if, #PSR_SVC32_MODE
 
 	/*
-	 * Save any arguments passed to us.  But since .start is at 0x40000000
-	 * and .text is at 0x8000000, we can't directly use the address that
-	 * the linker gave us directly.  We have to replace the upper 4 bits
-	 * of the address the linker gave us and replace it with the upper 4
-	 * bits of our pc.  Or replace the lower 28 bits of our PC with the
-	 * lower 28 bits of what the linker gave us.
-	 */
-	adr	r4, _C_LABEL(cubie_start)
-	movw	r5, #:lower16:uboot_args
-	movt	r5, #:upper16:uboot_args
-	bfi	r4, r5, #0, #28
+	 * Save any arguments passed to us.
+	 */
+	movw	r4, #:lower16:uboot_args
+	movt	r4, #:upper16:uboot_args
+#ifndef KERNEL_BASES_EQUAL
+	/*
+	 * But since .start is at 0x40000000 and .text is at 0x8000000, we
+	 * can't directly use the address that the linker gave us directly.
+	 * We have to adjust the address the linker gave us to get to
+	 * the physical address.
+	 */
+	sub	r4, r4, #KERNEL_BASE_VOFFSET
+#endif
 
 	stmia	r4, {r0-r3}		// Save the arguments
 
@@ -107,39 +119,59 @@ _C_LABEL(cubie_start):
 	XPUTC(#68)
 
 	/*
-	 * Turn on the MMU, Caches, etc.
+	 * Turn on the MMU, Caches, etc.  Return to new enabled address space.
 	 */
 	movw	r0, #:lower16:TEMP_L1_TABLE
 	movt	r0, #:upper16:TEMP_L1_TABLE
+#ifdef KERNEL_BASES_EQUAL
 	bl	arm_cpuinit
-
-	XPUTC(#90)
+#else
+	/*
+	 * After the MMU is on, we can execute in the normal .text segment
+	 * so setup the lr to be in .text.  Cache the address for xputc
+	 * before we go.
+	 */
+#ifdef VERBOSE_INIT_ARM
+	adr	r11, xputc		@ for XPUTC2
+#endif
+	movw	lr, #:lower16:1f
+	movt	lr, #:upper16:1f
+	b	arm_cpuinit
+	.pushsection .text,"ax",%progbits
+1:
+#endif
+	XPUTC2(#90)
 
 #if defined(MULTIPROCESSOR)
 	// Now spin up the second processors into the same state we are now.
-	XPUTC(#77)
-	XPUTC(#80)
-	XPUTC(#60)
+	XPUTC2(#77)
+	XPUTC2(#80)
+	XPUTC2(#60)
 	// Make sure the cache is flushed out to RAM for the other CPUs
 	bl	_C_LABEL(armv7_dcache_wbinv_all)
 	bl	a20_mpinit
-	XPUTC(#62)
+	XPUTC2(#62)
 #endif /* MULTIPROCESSOR */
-	XPUTC(#13)
-	XPUTC(#10)
+	XPUTC2(#13)
+	XPUTC2(#10)
 
 	/*
 	 * Jump to start in locore.S, which in turn will call initarm and main.
 	 */
-	movw	ip, #:lower16:start
-	movt	ip, #:upper16:start
-	bx	ip		/* Jump to start (flushes pipeline). */
+	b	start
 
 	/* NOTREACHED */
 
+#ifndef KERNEL_BASES_EQUAL
+	.popsection
+#endif
+
 #include <arm/cortex/a9_mpsubr.S>
 
 #if defined(MULTIPROCESSOR)
+#ifndef KERNEL_BASES_EQUAL
+	.pushsection .text,"ax",%progbits
+#endif
 a20_mpinit:
 	mov	r4, lr			// because we call gtmr_bootdelay
 	movw	r5, #:lower16:(AWIN_CORE_PBASE+AWIN_CPUCFG_OFFSET)
@@ -214,16 +246,30 @@ a20_mpinit:
 	bl	_C_LABEL(gtmr_bootdelay)
 	b	1b
 ASEND(a20_mpinit)
+#ifndef KERNEL_BASES_EQUAL
+	.popsection
+#endif
 #endif /* MULTIPROCESSOR */
 
 .Lmmu_init_table:
+#ifdef KERNEL_BASES_EQUAL
+	/* Map memory 1:1 VA to PA, write-back cacheable, shareable */
+	MMU_INIT(KERNEL_BASE, KERNEL_BASE, INIT_MEMSIZE,
+		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_CACHEABLE)
+
+	/* Map temp L1 table 1:1 VA to PA, write-back cacheable, shareable */
+	MMU_INIT(TEMP_L1_TABLE, TEMP_L1_TABLE, INIT_MEMSIZE,
+		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_CACHEABLE)
+#else
 	/* Map KERNEL_BASE VA to SDRAM PA, write-back cacheable, shareable */
-	MMU_INIT(KERNEL_BASE, AWIN_SDRAM_PBASE, INIT_MEMSIZE,
+	MMU_INIT(KERNEL_BASE, KERNEL_BASE - KERNEL_BASE_VOFFSET, INIT_MEMSIZE,
 		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_CACHEABLE)
 
 	/* Map memory 1:1 VA to PA, write-back cacheable, shareable */
-	MMU_INIT(AWIN_SDRAM_PBASE, AWIN_SDRAM_PBASE, INIT_MEMSIZE,
+	MMU_INIT(KERNEL_BASE - KERNEL_BASE_VOFFSET,
+		KERNEL_BASE - KERNEL_BASE_VOFFSET, INIT_MEMSIZE,
 		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_CACHEABLE)
+#endif
 
 	/* Map AWIN CORE (so console will work) */
 	MMU_INIT(AWIN_CORE_VBASE, AWIN_CORE_PBASE,

Added files:

Index: src/sys/arch/evbarm/conf/CUBIETRUCK
diff -u /dev/null src/sys/arch/evbarm/conf/CUBIETRUCK:1.1
--- /dev/null	Fri Apr 11 04:19:48 2014
+++ src/sys/arch/evbarm/conf/CUBIETRUCK	Fri Apr 11 04:19:47 2014
@@ -0,0 +1,19 @@
+#	$NetBSD: CUBIETRUCK,v 1.1 2014/04/11 04:19:47 matt Exp $
+#
+#	CUBIETRUCK - Cubieboard3 (mostly cubieboard2 with 2GB)
+#
+
+include "arch/evbarm/conf/CUBIEBOARD"
+
+#
+# Relocate loaded kernel to second GB of memory.
+#
+no makeoptions	KERNEL_BASE_PHYS
+no makeoptions	KERNEL_BASE_VIRT
+makeoptions	KERNEL_BASE_PHYS="0x80000000"
+makeoptions	KERNEL_BASE_VIRT="0x80000000"
+
+#
+# We need this too.
+#
+options 	PMAP_NEED_ALLOC_POOLPAGE
Index: src/sys/arch/evbarm/conf/CUBIETRUCK_INSTALL
diff -u /dev/null src/sys/arch/evbarm/conf/CUBIETRUCK_INSTALL:1.1
--- /dev/null	Fri Apr 11 04:19:48 2014
+++ src/sys/arch/evbarm/conf/CUBIETRUCK_INSTALL	Fri Apr 11 04:19:47 2014
@@ -0,0 +1,10 @@
+#	$NetBSD: CUBIETRUCK_INSTALL,v 1.1 2014/04/11 04:19:47 matt Exp $
+#
+#	CUBIETRUCK_INSTALL -- CUBIETRUCK kernel with installation-sized
+#	ramdisk
+#
+
+include "arch/evbarm/conf/CUBIETRUCK"
+include "arch/evbarm/conf/INSTALL"
+
+options BOOTHOWTO=RB_SINGLE

Reply via email to