Module Name:    src
Committed By:   matt
Date:           Mon Apr 14 20:50:47 UTC 2014

Modified Files:
        src/sys/arch/arm/arm: cpufunc.c cpufunc_asm_sheeva.S
        src/sys/arch/arm/conf: files.arm
        src/sys/arch/arm/include: armreg.h cpuconf.h cpufunc.h
Added Files:
        src/sys/arch/arm/include: cpufunc_proto.h

Log Message:
Support (untested) SHEEVA_L2_CACHE and SHEEVA_L2_CACHE_WT options.
Move prototypes out of <arm/cpufunc.h> into their own file.
Add sdcache routines to cpufunc_asm_sheeva.S
Add code to sheeva_setup to init the sdcache and sdcache info.


To generate a diff of this commit:
cvs rdiff -u -r1.145 -r1.146 src/sys/arch/arm/arm/cpufunc.c
cvs rdiff -u -r1.5 -r1.6 src/sys/arch/arm/arm/cpufunc_asm_sheeva.S
cvs rdiff -u -r1.126 -r1.127 src/sys/arch/arm/conf/files.arm
cvs rdiff -u -r1.96 -r1.97 src/sys/arch/arm/include/armreg.h
cvs rdiff -u -r1.23 -r1.24 src/sys/arch/arm/include/cpuconf.h
cvs rdiff -u -r1.71 -r1.72 src/sys/arch/arm/include/cpufunc.h
cvs rdiff -u -r0 -r1.1 src/sys/arch/arm/include/cpufunc_proto.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/arm/cpufunc.c
diff -u src/sys/arch/arm/arm/cpufunc.c:1.145 src/sys/arch/arm/arm/cpufunc.c:1.146
--- src/sys/arch/arm/arm/cpufunc.c:1.145	Thu Apr 10 02:49:42 2014
+++ src/sys/arch/arm/arm/cpufunc.c	Mon Apr 14 20:50:46 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.c,v 1.145 2014/04/10 02:49:42 matt Exp $	*/
+/*	$NetBSD: cpufunc.c,v 1.146 2014/04/14 20:50:46 matt Exp $	*/
 
 /*
  * arm7tdmi support code Copyright (c) 2001 John Fremlin
@@ -49,7 +49,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.145 2014/04/10 02:49:42 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.146 2014/04/14 20:50:46 matt Exp $");
 
 #include "opt_compat_netbsd.h"
 #include "opt_cpuoptions.h"
@@ -66,8 +66,8 @@ __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 
 #include <uvm/uvm.h>
 
 #include <arm/cpuconf.h>
-#include <arm/cpufunc.h>
 #include <arm/locore.h>
+#include <arm/cpufunc_proto.h>
 
 #ifdef CPU_XSCALE_80200
 #include <arm/xscale/i80200reg.h>
@@ -1212,8 +1212,7 @@ struct cpu_functions ixp12x0_cpufuncs = 
 };
 #endif	/* CPU_IXP12X0 */
 
-#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
-    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
+#if defined(CPU_XSCALE)
 struct cpu_functions xscale_cpufuncs = {
 	/* CPU functions */
 
@@ -1272,8 +1271,7 @@ struct cpu_functions xscale_cpufuncs = {
 
 	.cf_setup		= xscale_setup
 };
-#endif
-/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
+#endif /* CPU_XSCALE */
 
 #if defined(CPU_ARMV7)
 struct cpu_functions armv7_cpufuncs = {
@@ -3465,8 +3463,7 @@ ixp12x0_setup(char *args)
 }
 #endif /* CPU_IXP12X0 */
 
-#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
-    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
+#if defined(CPU_XSCALE)
 struct cpu_option xscale_options[] = {
 #ifdef COMPAT_12
 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
@@ -3547,7 +3544,7 @@ xscale_setup(char *args)
 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
 		: : "r" (auxctl));
 }
-#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
+#endif	/* CPU_XSCALE */
 
 #if defined(CPU_SHEEVA)
 struct cpu_option sheeva_options[] = {
@@ -3565,8 +3562,6 @@ struct cpu_option sheeva_options[] = {
 void
 sheeva_setup(char *args)
 {
-	uint32_t sheeva_ext;
-
 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
@@ -3586,18 +3581,36 @@ sheeva_setup(char *args)
 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
 
 	/* Enable DCache Streaming Switch and Write Allocate */
-	__asm volatile("mrc p15, 1, %0, c15, c1, 0"
-	    : "=r" (sheeva_ext));
+	uint32_t sheeva_ext = armreg_sheeva_xctrl_read();
 
 	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
-
-	__asm volatile("mcr p15, 1, %0, c15, c1, 0"
-	    :: "r" (sheeva_ext));
-
-	/*
-	 * Sheeva has L2 Cache.  Enable/Disable it here.
-	 * Really not support yet...
-	 */
+#ifdef SHEEVA_L2_CACHE
+	sheeva_ext |= FC_L2CACHE_EN;
+	sheeva_ext &= ~FC_L2_PREF_DIS;
+#endif
+
+	armreg_sheeva_xctrl_write(sheeva_ext);
+
+#ifdef SHEEVA_L2_CACHE
+#ifndef SHEEVA_L2_CACHE_WT
+	arm_scache.cache_type = CPU_CT_CTYPE_WB2;
+#elif CPU_CT_CTYPE_WT != 0
+	arm_scache.cache_type = CPU_CT_CTYPE_WT;
+#endif
+	arm_scache.cache_unified = 1;
+	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
+	arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
+	arm_scache.dcache_ways = arm_scache.icache_ways = 4;
+	arm_scache.dcache_way_size = arm_scache.icache_way_size =
+	    arm_scache.dcache_size / arm_scache.dcache_ways;
+	arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
+	arm_scache.dcache_sets = arm_scache.icache_sets =
+	    arm_scache.dcache_way_size / arm_scache.dcache_line_size;
+
+	cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
+	cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
+	cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
+#endif /* SHEEVA_L2_CACHE */
 
 #ifdef __ARMEB__
 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
@@ -3620,5 +3633,8 @@ sheeva_setup(char *args)
 
 	/* And again. */
 	cpu_idcache_wbinv_all();
+#ifdef SHEEVA_L2_CACHE
+	sheeva_sdcache_wbinv_all();
+#endif
 }
 #endif	/* CPU_SHEEVA */

Index: src/sys/arch/arm/arm/cpufunc_asm_sheeva.S
diff -u src/sys/arch/arm/arm/cpufunc_asm_sheeva.S:1.5 src/sys/arch/arm/arm/cpufunc_asm_sheeva.S:1.6
--- src/sys/arch/arm/arm/cpufunc_asm_sheeva.S:1.5	Sun Mar 30 01:15:03 2014
+++ src/sys/arch/arm/arm/cpufunc_asm_sheeva.S	Mon Apr 14 20:50:46 2014
@@ -39,17 +39,19 @@
 	.word	_C_LABEL(PAGE_MASK)
 
 ENTRY(sheeva_dcache_wbinv_range)
-	str	lr, [sp, #-4]!
-	mrs	lr, cpsr
+	push	{r4,r5}
+	mrs	r4, cpsr
+	orr	r5, r4, #I32_bit | F32_bit
+
 	/* Start with cache line aligned address */
 	ldr	ip, .Lsheeva_cache_line_size
-	ldr	ip, [ip]
-	sub	ip, ip, #1
-	and	r2, r0, ip
+	ldr	r3, [ip]
+	sub	r3, r3, #1
+	and	r2, r0, r3
 	add	r1, r1, r2
-	add	r1, r1, ip
-	bics	r1, r1, ip
-	bics	r0, r0, ip
+	add	r1, r1, r3
+	bic	r1, r1, r3
+	bic	r0, r0, r3
 
 	ldr	ip, .Lsheeva_asm_page_mask
 	and	r2, r0, ip
@@ -57,16 +59,13 @@ ENTRY(sheeva_dcache_wbinv_range)
 	cmp	r1, r2
 	movcc	ip, r1
 	movcs	ip, r2
+	sub	r2, r0, #1
 1:
-	add	r3, r0, ip
-	sub	r2, r3, #1
-	/* Disable irqs */
-	orr	r3, lr, #I32_bit | F32_bit
-	msr	cpsr_c, r3
+	add	r2, r2, ip
+	msr	cpsr_c, r5		/* Disable irqs */
 	mcr	p15, 5, r0, c15, c15, 0	/* Clean and inv zone start address */
 	mcr	p15, 5, r2, c15, c15, 1	/* Clean and inv zone end address */
-	/* Enable irqs */
-	msr	cpsr_c, lr
+	msr	cpsr_c, r4		/* Enable irqs */
 
 	add	r0, r0, ip
 	sub	r1, r1, ip
@@ -77,21 +76,24 @@ ENTRY(sheeva_dcache_wbinv_range)
 	bne	1b
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	ldr	lr, [sp], #4
+	pop	{r4, r5}
 	RET
+END(sheeva_dcache_wbinv_range)
 
 ENTRY(sheeva_dcache_inv_range)
-	str	lr, [sp, #-4]!
-	mrs	lr, cpsr
+	push	{r4,r5}
+	mrs	r4, cpsr
+	orr	r5, r4, #I32_bit | F32_bit
+
 	/* Start with cache line aligned address */
 	ldr	ip, .Lsheeva_cache_line_size
-	ldr	ip, [ip]
-	sub	ip, ip, #1
-	and	r2, r0, ip
+	ldr	r3, [ip]
+	sub	r3, r3, #1
+	and	r2, r0, r3
 	add	r1, r1, r2
-	add	r1, r1, ip
-	bics	r1, r1, ip
-	bics	r0, r0, ip
+	add	r1, r1, r3
+	bic	r1, r1, r3
+	bic	r0, r0, r3
 
 	ldr	ip, .Lsheeva_asm_page_mask
 	and	r2, r0, ip
@@ -99,16 +101,13 @@ ENTRY(sheeva_dcache_inv_range)
 	cmp	r1, r2
 	movcc	ip, r1
 	movcs	ip, r2
+	sub	r2, r0, #1
 1:
-	add	r3, r0, ip
-	sub	r2, r3, #1
-	/* Disable irqs */
-	orr	r3, lr, #I32_bit | F32_bit
-	msr	cpsr_c, r3
+	add	r2, r2, ip
+	msr	cpsr_c, r5		/* Disable irqs */
 	mcr	p15, 5, r0, c15, c14, 0	/* Inv zone start address */
 	mcr	p15, 5, r2, c15, c14, 1	/* Inv zone end address */
-	/* Enable irqs */
-	msr	cpsr_c, lr
+	msr	cpsr_c, r4		/* Enable irqs */
 
 	add	r0, r0, ip
 	sub	r1, r1, ip
@@ -119,21 +118,24 @@ ENTRY(sheeva_dcache_inv_range)
 	bne	1b
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	ldr	lr, [sp], #4
+	pop	{r4, r5}
 	RET
+END(sheeva_dcache_inv_range)
 
 ENTRY(sheeva_dcache_wb_range)
-	str	lr, [sp, #-4]!
-	mrs	lr, cpsr
+	push	{r4,r5}
+	mrs	r4, cpsr
+	orr	r5, r4, #I32_bit | F32_bit
+
 	/* Start with cache line aligned address */
 	ldr	ip, .Lsheeva_cache_line_size
-	ldr	ip, [ip]
-	sub	ip, ip, #1
-	and	r2, r0, ip
+	ldr	r3, [ip]
+	sub	r3, r3, #1
+	and	r2, r0, r3
 	add	r1, r1, r2
-	add	r1, r1, ip
-	bics	r1, r1, ip
-	bics	r0, r0, ip
+	add	r1, r1, r3
+	bic	r1, r1, r3
+	bic	r0, r0, r3
 
 	ldr	ip, .Lsheeva_asm_page_mask
 	and	r2, r0, ip
@@ -141,16 +143,13 @@ ENTRY(sheeva_dcache_wb_range)
 	cmp	r1, r2
 	movcc	ip, r1
 	movcs	ip, r2
+	sub	r2, r0, #1
 1:
-	add	r3, r0, ip
-	sub	r2, r3, #1
-	/* Disable irqs */
-	orr	r3, lr, #I32_bit | F32_bit
-	msr	cpsr_c, r3
+	add	r2, r2, ip
+	msr	cpsr_c, r5		/* Disable irqs */
 	mcr	p15, 5, r0, c15, c13, 0	/* Clean zone start address */
 	mcr	p15, 5, r2, c15, c13, 1	/* Clean zone end address */
-	/* Enable irqs */
-	msr	cpsr_c, lr
+	msr	cpsr_c, r4		/* Enable irqs */
 
 	add	r0, r0, ip
 	sub	r1, r1, ip
@@ -161,21 +160,24 @@ ENTRY(sheeva_dcache_wb_range)
 	bne	1b
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	ldr	lr, [sp], #4
+	pop	{r4, r5}
 	RET
+END(sheeva_dcache_wb_range)
 
 ENTRY(sheeva_idcache_wbinv_range)
-	str	lr, [sp, #-4]!
-	mrs	lr, cpsr
+	push	{r4,r5}
+	mrs	r4, cpsr
+	orr	r5, r4, #I32_bit | F32_bit
+
 	/* Start with cache line aligned address */
 	ldr	ip, .Lsheeva_cache_line_size
-	ldr	ip, [ip]
-	sub	ip, ip, #1
-	and	r2, r0, ip
+	ldr	r3, [ip]
+	sub	r3, r3, #1
+	and	r2, r0, r3
 	add	r1, r1, r2
-	add	r1, r1, ip
-	bics	r1, r1, ip
-	bics	r0, r0, ip
+	add	r1, r1, r3
+	bic	r1, r1, r3
+	bic	r0, r0, r3
 
 	ldr	ip, .Lsheeva_asm_page_mask
 	and	r2, r0, ip
@@ -183,27 +185,68 @@ ENTRY(sheeva_idcache_wbinv_range)
 	cmp	r1, r2
 	movcc	ip, r1
 	movcs	ip, r2
+	sub	r2, r0, #1
 1:
-	add	r3, r0, ip
-	sub	r2, r3, #1
-	/* Disable irqs */
-	orr	r3, lr, #I32_bit | F32_bit
-	msr	cpsr_c, r3
+	add	r2, r2, ip
+	msr	cpsr_c, r5		/* Disable irqs */
 	mcr	p15, 5, r0, c15, c15, 0	/* Clean and inv zone start address */
 	mcr	p15, 5, r2, c15, c15, 1	/* Clean and inv zone end address */
-	/* Enable irqs */
-	msr	cpsr_c, lr
+	msr	cpsr_c, r4		/* Enable irqs */
 
 	/* Invalidate and clean icache line by line */
-	ldr	r3, .Lsheeva_cache_line_size
-	ldr	r3, [r3]
 2:
 	mcr	p15, 0, r0, c7, c5, 1
 	add	r0, r0, r3
 	cmp	r2, r0
 	bhi	2b
 
-	add	r0, r2, #1
+	add	r0, r0, ip
+	sub	r1, r1, ip
+	cmp	r1, #PAGE_SIZE
+	movcc	ip, r1
+	movcs	ip, #PAGE_SIZE
+	cmp	r1, #0
+	bne	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	pop	{r4, r5}
+	RET
+END(sheeva_idcache_wbinv_range)
+
+ENTRY(sheeva_sdcache_wbinv_range)
+	push	{r4,r5}
+	mrs	r4, cpsr
+	orr	r5, r4, #I32_bit | F32_bit
+
+	mov	r1, r2		/* ignore paddr_t argument */
+
+	/* Start with cache line aligned address */
+	ldr	ip, .Lsheeva_cache_line_size
+	ldr	ip, [ip]
+	sub	ip, ip, #1
+	and	r2, r0, ip
+	add	r1, r1, r2
+	add	r1, r1, ip
+	bic	r1, r1, ip
+	bic	r0, r0, ip
+
+	ldr	ip, .Lsheeva_asm_page_mask
+	and	r2, r0, ip
+	rsb	r2, r2, #PAGE_SIZE
+	cmp	r1, r2
+	movcc	ip, r1
+	movcs	ip, r2
+	sub	r2, r0, #1
+1:
+	add	r2, r2, ip
+	msr	cpsr_c, r5		/* Disable irqs */
+	mcr	p15, 1, r0, c15, c9, 4	/* Clean L2 zone start address */
+	mcr	p15, 1, r2, c15, c9, 5	/* Clean L2 zone end address */
+	mcr	p15, 1, r0, c15, c11, 4	/* Inv L2 zone start address */
+	mcr	p15, 1, r2, c15, c11, 5	/* Inv L2 zone end address */
+	msr	cpsr_c, r4		/* Enable irqs */
+
+	add	r0, r0, ip
 	sub	r1, r1, ip
 	cmp	r1, #PAGE_SIZE
 	movcc	ip, r1
@@ -212,8 +255,105 @@ ENTRY(sheeva_idcache_wbinv_range)
 	bne	1b
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
-	ldr	lr, [sp], #4
+	pop	{r4, r5}
+	RET
+END(sheeva_sdcache_wbinv_range)
+
+ENTRY(sheeva_sdcache_inv_range)
+	push	{r4,r5}
+	mrs	r4, cpsr
+	orr	r5, r4, #I32_bit | F32_bit
+
+	mov	r1, r2		/* ignore paddr_t argument */
+
+	/* Start with cache line aligned address */
+	ldr	ip, .Lsheeva_cache_line_size
+	ldr	r3, [ip]
+	sub	r3, r3, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	add	r1, r1, r3
+	bic	r1, r1, r3
+	bic	r0, r0, r3
+
+	ldr	ip, .Lsheeva_asm_page_mask
+	and	r2, r0, ip
+	rsb	r2, r2, #PAGE_SIZE
+	cmp	r1, r2
+	movcc	ip, r1
+	movcs	ip, r2
+	sub	r2, r2, #1
+1:
+	add	r2, r2, ip
+	msr	cpsr_c, r5		/* Disable irqs */
+	mcr	p15, 1, r0, c15, c11, 4	/* Inv L2 zone start address */
+	mcr	p15, 1, r2, c15, c11, 5	/* Inv L2 zone end address */
+	msr	cpsr_c, r4		/* Enable irqs */
+
+	add	r0, r0, ip
+	sub	r1, r1, ip
+	cmp	r1, #PAGE_SIZE
+	movcc	ip, r1
+	movcs	ip, #PAGE_SIZE
+	cmp	r1, #0
+	bne	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	pop	{r4, r5}
+	RET
+END(sheeva_sdcache_inv_range)
+
+ENTRY(sheeva_sdcache_wb_range)
+	push	{r4,r5}
+	mrs	r4, cpsr
+	orr	r5, r4, #I32_bit | F32_bit
+
+	mov	r1, r2		/* ignore paddr_t argument */
+
+	/* Start with cache line aligned address */
+	ldr	ip, .Lsheeva_cache_line_size
+	ldr	r3, [ip]
+	sub	r3, r3, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	add	r1, r1, r3
+	bic	r1, r1, r3
+	bic	r0, r0, r3
+
+	ldr	ip, .Lsheeva_asm_page_mask
+	and	r2, r0,	ip
+	rsb	r2, r2, #PAGE_SIZE
+	cmp	r1, r2
+	movcc	ip, r1
+	movcs	ip, r2
+	sub	r2, r0, #1
+1:
+	add	r2, r2, ip
+	msr	cpsr_c, r5		/* Disable irqs */
+	mcr	p15, 1, r0, c15, c9, 4	/* Clean L2 zone start address */
+	mcr	p15, 1, r2, c15, c9, 5	/* Clean L2 zone end address */
+	msr	cpsr_c, r4		/* Enable irqs */
+
+	add	r0, r0, ip
+	sub	r1, r1, ip
+	cmp	r1, #PAGE_SIZE
+	movcc	ip, r1
+	movcs	ip, #PAGE_SIZE
+	cmp	r1, #0
+	bne	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
+	pop	{r4, r5}
+	RET
+END(sheeva_sdcache_wb_range)
+
+ENTRY(sheeva_sdcache_wbinv_all)
+	mov	r0, #0
+	mcr	p15, 1, r0, c15, c9, 0	/* Clean L2 */
+	mcr	p15, 1, r0, c15, c11, 0	/* Invalidate L2 */
+	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
+END(sheeva_sdcache_wbinv_all)
 
 /*
  * CPU sleep
@@ -222,3 +362,4 @@ ENTRY_NP(sheeva_cpu_sleep)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c0, 4	/* wait for interrupt */
 	RET
+END(sheeva_cpu_sleep)

Index: src/sys/arch/arm/conf/files.arm
diff -u src/sys/arch/arm/conf/files.arm:1.126 src/sys/arch/arm/conf/files.arm:1.127
--- src/sys/arch/arm/conf/files.arm:1.126	Sun Mar 30 15:50:51 2014
+++ src/sys/arch/arm/conf/files.arm	Mon Apr 14 20:50:47 2014
@@ -1,4 +1,4 @@
-#	$NetBSD: files.arm,v 1.126 2014/03/30 15:50:51 matt Exp $
+#	$NetBSD: files.arm,v 1.127 2014/04/14 20:50:47 matt Exp $
 
 # temporary define to allow easy moving to ../arch/arm/arm32
 defflag				ARM32
@@ -64,7 +64,8 @@ defflag  opt_cpuoptions.h	ARM11_CACHE_WR
 defflag	 opt_cpuoptions.h	ARM11_COMPAT_MMU
 defflag	 opt_cpuoptions.h	ARM_HAS_VBAR
 # use extended small page in compatible MMU mode for ARMv6
-defflag  opt_cpuoptions.h	ARMV6_EXTENDED_SMALL_PAGE
+defflag  opt_cpuoptions.h	SHEEVA_L2_CACHE
+defflag  opt_cpuoptions.h	SHEEVA_L2_CACHE_WT: SHEEVA_L2_CACHE
 
 # Interrupt implementation header definition.
 defparam opt_arm_intr_impl.h	ARM_INTR_IMPL

Index: src/sys/arch/arm/include/armreg.h
diff -u src/sys/arch/arm/include/armreg.h:1.96 src/sys/arch/arm/include/armreg.h:1.97
--- src/sys/arch/arm/include/armreg.h:1.96	Sun Apr 13 02:23:00 2014
+++ src/sys/arch/arm/include/armreg.h	Mon Apr 14 20:50:47 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: armreg.h,v 1.96 2014/04/13 02:23:00 matt Exp $	*/
+/*	$NetBSD: armreg.h,v 1.97 2014/04/14 20:50:47 matt Exp $	*/
 
 /*
  * Copyright (c) 1998, 2001 Ben Harris
@@ -1021,6 +1021,9 @@ ARMREG_READ_INLINE(tlbdata1, "p15,3,%0,c
 ARMREG_READ_INLINE(tlbdata2, "p15,3,%0,c15,c0,2") /* TLB Data Register 2 (cortex) */
 ARMREG_WRITE_INLINE(tlbdataop, "p15,3,%0,c15,c4,2") /* TLB Data Read Operation (cortex) */
 
+ARMREG_READ_INLINE(sheeva_xctrl, "p15,1,%0,c15,c1,0") /* Sheeva eXtra Control register */
+ARMREG_WRITE_INLINE(sheeva_xctrl, "p15,1,%0,c15,c1,0") /* Sheeva eXtra Control register */
+
 #endif /* !__ASSEMBLER__ */
 
 

Index: src/sys/arch/arm/include/cpuconf.h
diff -u src/sys/arch/arm/include/cpuconf.h:1.23 src/sys/arch/arm/include/cpuconf.h:1.24
--- src/sys/arch/arm/include/cpuconf.h:1.23	Sun Mar 30 15:50:51 2014
+++ src/sys/arch/arm/include/cpuconf.h	Mon Apr 14 20:50:47 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpuconf.h,v 1.23 2014/03/30 15:50:51 matt Exp $	*/
+/*	$NetBSD: cpuconf.h,v 1.24 2014/04/14 20:50:47 matt Exp $	*/
 
 /*
  * Copyright (c) 2002 Wasabi Systems, Inc.
@@ -80,10 +80,7 @@
 			 defined(CPU_SA1110) +				\
 			 defined(CPU_FA526) +				\
 			 defined(CPU_IXP12X0) +				\
-			 defined(CPU_XSCALE_80200) +			\
-			 defined(CPU_XSCALE_80321) +			\
-			 defined(__CPU_XSCALE_PXA2XX) +			\
-			 defined(CPU_XSCALE_IXP425)) +			\
+			 defined(CPU_XSCALE) +				\
 			 defined(CPU_SHEEVA))
 #else
 #define	CPU_NTYPES	2
@@ -118,9 +115,7 @@
 
 #if !defined(_KERNEL_OPT) ||						\
     (defined(CPU_ARM9E) || defined(CPU_ARM10) ||			\
-     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
-     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)) ||	\
-     defined(CPU_SHEEVA)
+     defined(CPU_XSCALE) || defined(CPU_SHEEVA))
 #define	ARM_ARCH_5	1
 #else
 #define	ARM_ARCH_5	0
@@ -205,8 +200,7 @@
 #endif
 
 #if !defined(_KERNEL_OPT) ||						\
-    (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
-     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425))
+    defined(CPU_XSCALE)
 #define	ARM_MMU_XSCALE		1
 #else
 #define	ARM_MMU_XSCALE		0

Index: src/sys/arch/arm/include/cpufunc.h
diff -u src/sys/arch/arm/include/cpufunc.h:1.71 src/sys/arch/arm/include/cpufunc.h:1.72
--- src/sys/arch/arm/include/cpufunc.h:1.71	Mon Apr  7 20:38:52 2014
+++ src/sys/arch/arm/include/cpufunc.h	Mon Apr 14 20:50:47 2014
@@ -229,396 +229,6 @@ void	cpufunc_domains		(u_int);
 u_int	cpufunc_faultstatus	(void);
 u_int	cpufunc_faultaddress	(void);
 
-#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3)
-void	arm3_cache_flush	(void);
-#endif	/* CPU_ARM2 || CPU_ARM250 || CPU_ARM3 */
-
-#ifdef CPU_ARM2
-u_int	arm2_id			(void);
-#endif /* CPU_ARM2 */
-
-#ifdef CPU_ARM250
-u_int	arm250_id		(void);
-#endif
-
-#ifdef CPU_ARM3
-u_int	arm3_control		(u_int, u_int);
-#endif	/* CPU_ARM3 */
-
-#if defined(CPU_ARM6) || defined(CPU_ARM7)
-void	arm67_setttb		(u_int, bool);
-void	arm67_tlb_flush		(void);
-void	arm67_tlb_purge		(vaddr_t);
-void	arm67_cache_flush	(void);
-void	arm67_context_switch	(u_int);
-#endif	/* CPU_ARM6 || CPU_ARM7 */
-
-#ifdef CPU_ARM6
-void	arm6_setup		(char *);
-#endif	/* CPU_ARM6 */
-
-#ifdef CPU_ARM7
-void	arm7_setup		(char *);
-#endif	/* CPU_ARM7 */
-
-#ifdef CPU_ARM7TDMI
-int	arm7_dataabt_fixup	(void *);
-void	arm7tdmi_setup		(char *);
-void	arm7tdmi_setttb		(u_int, bool);
-void	arm7tdmi_tlb_flushID	(void);
-void	arm7tdmi_tlb_flushID_SE	(vaddr_t);
-void	arm7tdmi_cache_flushID	(void);
-void	arm7tdmi_context_switch	(u_int);
-#endif /* CPU_ARM7TDMI */
-
-#ifdef CPU_ARM8
-void	arm8_setttb		(u_int, bool);
-void	arm8_tlb_flushID	(void);
-void	arm8_tlb_flushID_SE	(vaddr_t);
-void	arm8_cache_flushID	(void);
-void	arm8_cache_flushID_E	(u_int);
-void	arm8_cache_cleanID	(void);
-void	arm8_cache_cleanID_E	(u_int);
-void	arm8_cache_purgeID	(void);
-void	arm8_cache_purgeID_E	(u_int entry);
-
-void	arm8_cache_syncI	(void);
-void	arm8_cache_cleanID_rng	(vaddr_t, vsize_t);
-void	arm8_cache_cleanD_rng	(vaddr_t, vsize_t);
-void	arm8_cache_purgeID_rng	(vaddr_t, vsize_t);
-void	arm8_cache_purgeD_rng	(vaddr_t, vsize_t);
-void	arm8_cache_syncI_rng	(vaddr_t, vsize_t);
-
-void	arm8_context_switch	(u_int);
-
-void	arm8_setup		(char *);
-
-u_int	arm8_clock_config	(u_int, u_int);
-#endif
-
-#ifdef CPU_FA526
-void	fa526_setup		(char *);
-void	fa526_setttb		(u_int, bool);
-void	fa526_context_switch	(u_int);
-void	fa526_cpu_sleep		(int);
-void	fa526_tlb_flushI_SE	(vaddr_t);
-void	fa526_tlb_flushID_SE	(vaddr_t);
-void	fa526_flush_prefetchbuf	(void);
-void	fa526_flush_brnchtgt_E	(u_int);
-
-void	fa526_icache_sync_all	(void);
-void	fa526_icache_sync_range(vaddr_t, vsize_t);
-void	fa526_dcache_wbinv_all	(void);
-void	fa526_dcache_wbinv_range(vaddr_t, vsize_t);
-void	fa526_dcache_inv_range	(vaddr_t, vsize_t);
-void	fa526_dcache_wb_range	(vaddr_t, vsize_t);
-void	fa526_idcache_wbinv_all(void);
-void	fa526_idcache_wbinv_range(vaddr_t, vsize_t);
-#endif
-
-#ifdef CPU_SA110
-void	sa110_setup		(char *);
-void	sa110_context_switch	(u_int);
-#endif	/* CPU_SA110 */
-
-#if defined(CPU_SA1100) || defined(CPU_SA1110)
-void	sa11x0_drain_readbuf	(void);
-
-void	sa11x0_context_switch	(u_int);
-void	sa11x0_cpu_sleep	(int);
-
-void	sa11x0_setup		(char *);
-#endif
-
-#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
-void	sa1_setttb		(u_int, bool);
-
-void	sa1_tlb_flushID_SE	(vaddr_t);
-
-void	sa1_cache_flushID	(void);
-void	sa1_cache_flushI	(void);
-void	sa1_cache_flushD	(void);
-void	sa1_cache_flushD_SE	(vaddr_t);
-
-void	sa1_cache_cleanID	(void);
-void	sa1_cache_cleanD	(void);
-void	sa1_cache_cleanD_E	(u_int);
-
-void	sa1_cache_purgeID	(void);
-void	sa1_cache_purgeID_E	(u_int);
-void	sa1_cache_purgeD	(void);
-void	sa1_cache_purgeD_E	(u_int);
-
-void	sa1_cache_syncI		(void);
-void	sa1_cache_cleanID_rng	(vaddr_t, vsize_t);
-void	sa1_cache_cleanD_rng	(vaddr_t, vsize_t);
-void	sa1_cache_purgeID_rng	(vaddr_t, vsize_t);
-void	sa1_cache_purgeD_rng	(vaddr_t, vsize_t);
-void	sa1_cache_syncI_rng	(vaddr_t, vsize_t);
-
-#endif
-
-#ifdef CPU_ARM9
-void	arm9_setttb		(u_int, bool);
-
-void	arm9_tlb_flushID_SE	(vaddr_t);
-
-void	arm9_icache_sync_all	(void);
-void	arm9_icache_sync_range	(vaddr_t, vsize_t);
-
-void	arm9_dcache_wbinv_all	(void);
-void	arm9_dcache_wbinv_range (vaddr_t, vsize_t);
-void	arm9_dcache_inv_range	(vaddr_t, vsize_t);
-void	arm9_dcache_wb_range	(vaddr_t, vsize_t);
-
-void	arm9_idcache_wbinv_all	(void);
-void	arm9_idcache_wbinv_range (vaddr_t, vsize_t);
-
-void	arm9_context_switch	(u_int);
-
-void	arm9_setup		(char *);
-
-extern unsigned arm9_dcache_sets_max;
-extern unsigned arm9_dcache_sets_inc;
-extern unsigned arm9_dcache_index_max;
-extern unsigned arm9_dcache_index_inc;
-#endif
-
-#if defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_SHEEVA)
-void	arm10_tlb_flushID_SE	(vaddr_t);
-void	arm10_tlb_flushI_SE	(vaddr_t);
-
-void	arm10_context_switch	(u_int);
-
-void	arm10_setup		(char *);
-#endif
-
-#if defined(CPU_ARM9E) || defined (CPU_ARM10) || defined(CPU_SHEEVA)
-void	armv5_ec_setttb			(u_int, bool);
-
-void	armv5_ec_icache_sync_all	(void);
-void	armv5_ec_icache_sync_range	(vaddr_t, vsize_t);
-
-void	armv5_ec_dcache_wbinv_all	(void);
-void	armv5_ec_dcache_wbinv_range	(vaddr_t, vsize_t);
-void	armv5_ec_dcache_inv_range	(vaddr_t, vsize_t);
-void	armv5_ec_dcache_wb_range	(vaddr_t, vsize_t);
-
-void	armv5_ec_idcache_wbinv_all	(void);
-void	armv5_ec_idcache_wbinv_range	(vaddr_t, vsize_t);
-#endif
-
-#if defined (CPU_ARM10) || defined (CPU_ARM11MPCORE)
-void	armv5_setttb		(u_int, bool);
-
-void	armv5_icache_sync_all	(void);
-void	armv5_icache_sync_range	(vaddr_t, vsize_t);
-
-void	armv5_dcache_wbinv_all	(void);
-void	armv5_dcache_wbinv_range (vaddr_t, vsize_t);
-void	armv5_dcache_inv_range	(vaddr_t, vsize_t);
-void	armv5_dcache_wb_range	(vaddr_t, vsize_t);
-
-void	armv5_idcache_wbinv_all	(void);
-void	armv5_idcache_wbinv_range (vaddr_t, vsize_t);
-
-extern unsigned armv5_dcache_sets_max;
-extern unsigned armv5_dcache_sets_inc;
-extern unsigned armv5_dcache_index_max;
-extern unsigned armv5_dcache_index_inc;
-#endif
-
-#if defined(CPU_ARM11MPCORE)
-void	arm11mpcore_setup		(char *);
-#endif
-
-#if defined(CPU_ARM11)
-#if defined(ARM_MMU_EXTENDED)
-void	arm11_setttb		(u_int, tlb_asid_t);
-void	arm11_context_switch	(u_int, tlb_asid_t);
-#else
-void	arm11_setttb		(u_int, bool);
-void	arm11_context_switch	(u_int);
-#endif
-
-void	arm11_cpu_sleep		(int);
-void	arm11_setup		(char *string);
-void	arm11_tlb_flushID	(void);
-void	arm11_tlb_flushI	(void);
-void	arm11_tlb_flushD	(void);
-void	arm11_tlb_flushID_SE	(vaddr_t);
-void	arm11_tlb_flushI_SE	(vaddr_t);
-void	arm11_tlb_flushD_SE	(vaddr_t);
-
-void	armv11_dcache_wbinv_all (void);
-void	armv11_idcache_wbinv_all(void);
-
-void	arm11_drain_writebuf	(void);
-void	arm11_sleep		(int);
-
-void	armv6_setttb		(u_int, bool);
-
-void	armv6_icache_sync_all	(void);
-void	armv6_icache_sync_range	(vaddr_t, vsize_t);
-
-void	armv6_dcache_wbinv_all	(void);
-void	armv6_dcache_wbinv_range (vaddr_t, vsize_t);
-void	armv6_dcache_inv_range	(vaddr_t, vsize_t);
-void	armv6_dcache_wb_range	(vaddr_t, vsize_t);
-
-void	armv6_idcache_wbinv_all	(void);
-void	armv6_idcache_wbinv_range (vaddr_t, vsize_t);
-#endif
-
-#if defined(CPU_ARMV7)
-#if defined(ARM_MMU_EXTENDED)
-void	armv7_setttb(u_int, tlb_asid_t);
-void	armv7_context_switch(u_int, tlb_asid_t);
-#else
-void	armv7_setttb(u_int, bool);
-void	armv7_context_switch(u_int);
-#endif
-
-void	armv7_icache_sync_range(vaddr_t, vsize_t);
-void	armv7_icache_sync_all(void);
-
-void	armv7_dcache_inv_range(vaddr_t, vsize_t);
-void	armv7_dcache_wb_range(vaddr_t, vsize_t);
-void	armv7_dcache_wbinv_range(vaddr_t, vsize_t);
-void 	armv7_dcache_wbinv_all(void);
-
-void	armv7_idcache_wbinv_range(vaddr_t, vsize_t);
-void	armv7_idcache_wbinv_all(void);
-
-void	armv7_tlb_flushID(void);
-void	armv7_tlb_flushI(void);
-void	armv7_tlb_flushD(void);
-
-void	armv7_tlb_flushID_SE(vaddr_t);
-void	armv7_tlb_flushI_SE(vaddr_t);
-void	armv7_tlb_flushD_SE(vaddr_t);
-
-void	armv7_cpu_sleep(int);
-void	armv7_drain_writebuf(void);
-void	armv7_setup(char *string);
-#endif /* CPU_ARMV7 */
-
-#if defined(CPU_PJ4B)
-#if defined(ARM_MMU_EXTENDED)
-void	pj4b_setttb(u_int, tlb_asid_t);
-void	pj4b_context_switch(u_int, tlb_asid_t);
-#else
-void	pj4b_setttb(u_int, bool);
-void	pj4b_context_switch(u_int);
-#endif
-void	pj4b_tlb_flushID(void);
-void	pj4b_tlb_flushID_SE(vaddr_t);
-
-void	pj4b_icache_sync_range(vm_offset_t, vm_size_t);
-void	pj4b_idcache_wbinv_range(vm_offset_t, vm_size_t);
-void	pj4b_dcache_wbinv_range(vm_offset_t, vm_size_t);
-void	pj4b_dcache_inv_range(vm_offset_t, vm_size_t);
-void	pj4b_dcache_wb_range(vm_offset_t, vm_size_t);
-
-void	pj4b_drain_writebuf(void);
-void	pj4b_drain_readbuf(void);
-void	pj4b_flush_brnchtgt_all(void);
-void	pj4b_flush_brnchtgt_va(u_int);
-void	pj4b_sleep(int);
-
-void	pj4bv7_setup(char *string);
-void	pj4b_config(void);
-
-#endif /* CPU_PJ4B */
-
-#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
-void	arm11x6_idcache_wbinv_all	(void);
-void	arm11x6_dcache_wbinv_all	(void);
-void	arm11x6_icache_sync_all		(void);
-void	arm11x6_flush_prefetchbuf	(void);
-void	arm11x6_icache_sync_range	(vaddr_t, vsize_t);
-void	arm11x6_idcache_wbinv_range	(vaddr_t, vsize_t);
-void	arm11x6_setup			(char *string);
-void	arm11x6_sleep			(int);	/* no ref. for errata */
-#endif
-#if defined(CPU_ARM1136)
-void	arm1136_sleep_rev0		(int);	/* for errata 336501 */
-#endif
-
-
-#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
-    defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
-    defined(CPU_FA526) || \
-    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
-    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
-    defined(CPU_SHEEVA)
-
-void	armv4_tlb_flushID	(void);
-void	armv4_tlb_flushI	(void);
-void	armv4_tlb_flushD	(void);
-void	armv4_tlb_flushD_SE	(vaddr_t);
-
-void	armv4_drain_writebuf	(void);
-#endif
-
-#if defined(CPU_IXP12X0)
-void	ixp12x0_drain_readbuf	(void);
-void	ixp12x0_context_switch	(u_int);
-void	ixp12x0_setup		(char *);
-#endif
-
-#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
-    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
-
-void	xscale_cpwait		(void);
-#define	cpu_cpwait()		cpufuncs.cf_cpwait()
-
-void	xscale_cpu_sleep	(int);
-
-u_int	xscale_control		(u_int, u_int);
-
-void	xscale_setttb		(u_int, bool);
-
-void	xscale_tlb_flushID_SE	(vaddr_t);
-
-void	xscale_cache_flushID	(void);
-void	xscale_cache_flushI	(void);
-void	xscale_cache_flushD	(void);
-void	xscale_cache_flushD_SE	(vaddr_t);
-
-void	xscale_cache_cleanID	(void);
-void	xscale_cache_cleanD	(void);
-void	xscale_cache_cleanD_E	(u_int);
-
-void	xscale_cache_clean_minidata (void);
-
-void	xscale_cache_purgeID	(void);
-void	xscale_cache_purgeID_E	(u_int);
-void	xscale_cache_purgeD	(void);
-void	xscale_cache_purgeD_E	(u_int);
-
-void	xscale_cache_syncI	(void);
-void	xscale_cache_cleanID_rng (vaddr_t, vsize_t);
-void	xscale_cache_cleanD_rng	(vaddr_t, vsize_t);
-void	xscale_cache_purgeID_rng (vaddr_t, vsize_t);
-void	xscale_cache_purgeD_rng	(vaddr_t, vsize_t);
-void	xscale_cache_syncI_rng	(vaddr_t, vsize_t);
-void	xscale_cache_flushD_rng	(vaddr_t, vsize_t);
-
-void	xscale_context_switch	(u_int);
-
-void	xscale_setup		(char *);
-#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
-
-#if defined(CPU_SHEEVA)
-void	sheeva_dcache_wbinv_range (vaddr_t, vsize_t);
-void	sheeva_dcache_inv_range	(vaddr_t, vsize_t);
-void	sheeva_dcache_wb_range	(vaddr_t, vsize_t);
-void	sheeva_idcache_wbinv_range (vaddr_t, vsize_t);
-void	sheeva_setup(char *);
-void	sheeva_cpu_sleep(int);
-#endif
-
 #define tlb_flush	cpu_tlb_flushID
 #define setttb		cpu_setttb
 #define drain_writebuf	cpu_drain_writebuf

Added files:

Index: src/sys/arch/arm/include/cpufunc_proto.h
diff -u /dev/null src/sys/arch/arm/include/cpufunc_proto.h:1.1
--- /dev/null	Mon Apr 14 20:50:47 2014
+++ src/sys/arch/arm/include/cpufunc_proto.h	Mon Apr 14 20:50:47 2014
@@ -0,0 +1,443 @@
+/*	cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp	*/
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufunc_proto.h
+ *
+ * Prototypes for cpu, mmu and tlb related functions.
+ */
+
+#ifndef _ARM_CPUFUNC_PROTO_H_
+#define _ARM_CPUFUNC_PROTO_H_
+
+#ifdef _KERNEL
+
+#include <sys/types.h>
+#include <arm/armreg.h>
+#include <arm/cpuconf.h>
+
+#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3)
+void	arm3_cache_flush	(void);
+#endif	/* CPU_ARM2 || CPU_ARM250 || CPU_ARM3 */
+
+#ifdef CPU_ARM2
+u_int	arm2_id			(void);
+#endif /* CPU_ARM2 */
+
+#ifdef CPU_ARM250
+u_int	arm250_id		(void);
+#endif
+
+#ifdef CPU_ARM3
+u_int	arm3_control		(u_int, u_int);
+#endif	/* CPU_ARM3 */
+
+#if defined(CPU_ARM6) || defined(CPU_ARM7)
+void	arm67_setttb		(u_int, bool);
+void	arm67_tlb_flush		(void);
+void	arm67_tlb_purge		(vaddr_t);
+void	arm67_cache_flush	(void);
+void	arm67_context_switch	(u_int);
+#endif	/* CPU_ARM6 || CPU_ARM7 */
+
+#ifdef CPU_ARM6
+void	arm6_setup		(char *);
+#endif	/* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+void	arm7_setup		(char *);
+#endif	/* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+int	arm7_dataabt_fixup	(void *);
+void	arm7tdmi_setup		(char *);
+void	arm7tdmi_setttb		(u_int, bool);
+void	arm7tdmi_tlb_flushID	(void);
+void	arm7tdmi_tlb_flushID_SE	(vaddr_t);
+void	arm7tdmi_cache_flushID	(void);
+void	arm7tdmi_context_switch	(u_int);
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+void	arm8_setttb		(u_int, bool);
+void	arm8_tlb_flushID	(void);
+void	arm8_tlb_flushID_SE	(vaddr_t);
+void	arm8_cache_flushID	(void);
+void	arm8_cache_flushID_E	(u_int);
+void	arm8_cache_cleanID	(void);
+void	arm8_cache_cleanID_E	(u_int);
+void	arm8_cache_purgeID	(void);
+void	arm8_cache_purgeID_E	(u_int entry);
+
+void	arm8_cache_syncI	(void);
+void	arm8_cache_cleanID_rng	(vaddr_t, vsize_t);
+void	arm8_cache_cleanD_rng	(vaddr_t, vsize_t);
+void	arm8_cache_purgeID_rng	(vaddr_t, vsize_t);
+void	arm8_cache_purgeD_rng	(vaddr_t, vsize_t);
+void	arm8_cache_syncI_rng	(vaddr_t, vsize_t);
+
+void	arm8_context_switch	(u_int);
+
+void	arm8_setup		(char *);
+
+u_int	arm8_clock_config	(u_int, u_int);
+#endif
+
+#ifdef CPU_FA526
+void	fa526_setup		(char *);
+void	fa526_setttb		(u_int, bool);
+void	fa526_context_switch	(u_int);
+void	fa526_cpu_sleep		(int);
+void	fa526_tlb_flushI_SE	(vaddr_t);
+void	fa526_tlb_flushID_SE	(vaddr_t);
+void	fa526_flush_prefetchbuf	(void);
+void	fa526_flush_brnchtgt_E	(u_int);
+
+void	fa526_icache_sync_all	(void);
+void	fa526_icache_sync_range(vaddr_t, vsize_t);
+void	fa526_dcache_wbinv_all	(void);
+void	fa526_dcache_wbinv_range(vaddr_t, vsize_t);
+void	fa526_dcache_inv_range	(vaddr_t, vsize_t);
+void	fa526_dcache_wb_range	(vaddr_t, vsize_t);
+void	fa526_idcache_wbinv_all(void);
+void	fa526_idcache_wbinv_range(vaddr_t, vsize_t);
+#endif
+
+#ifdef CPU_SA110
+void	sa110_setup		(char *);
+void	sa110_context_switch	(u_int);
+#endif	/* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+void	sa11x0_drain_readbuf	(void);
+
+void	sa11x0_context_switch	(u_int);
+void	sa11x0_cpu_sleep	(int);
+
+void	sa11x0_setup		(char *);
+#endif
+
+#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
+void	sa1_setttb		(u_int, bool);
+
+void	sa1_tlb_flushID_SE	(vaddr_t);
+
+void	sa1_cache_flushID	(void);
+void	sa1_cache_flushI	(void);
+void	sa1_cache_flushD	(void);
+void	sa1_cache_flushD_SE	(vaddr_t);
+
+void	sa1_cache_cleanID	(void);
+void	sa1_cache_cleanD	(void);
+void	sa1_cache_cleanD_E	(u_int);
+
+void	sa1_cache_purgeID	(void);
+void	sa1_cache_purgeID_E	(u_int);
+void	sa1_cache_purgeD	(void);
+void	sa1_cache_purgeD_E	(u_int);
+
+void	sa1_cache_syncI		(void);
+void	sa1_cache_cleanID_rng	(vaddr_t, vsize_t);
+void	sa1_cache_cleanD_rng	(vaddr_t, vsize_t);
+void	sa1_cache_purgeID_rng	(vaddr_t, vsize_t);
+void	sa1_cache_purgeD_rng	(vaddr_t, vsize_t);
+void	sa1_cache_syncI_rng	(vaddr_t, vsize_t);
+
+#endif
+
+#ifdef CPU_ARM9
+void	arm9_setttb		(u_int, bool);
+
+void	arm9_tlb_flushID_SE	(vaddr_t);
+
+void	arm9_icache_sync_all	(void);
+void	arm9_icache_sync_range	(vaddr_t, vsize_t);
+
+void	arm9_dcache_wbinv_all	(void);
+void	arm9_dcache_wbinv_range (vaddr_t, vsize_t);
+void	arm9_dcache_inv_range	(vaddr_t, vsize_t);
+void	arm9_dcache_wb_range	(vaddr_t, vsize_t);
+
+void	arm9_idcache_wbinv_all	(void);
+void	arm9_idcache_wbinv_range (vaddr_t, vsize_t);
+
+void	arm9_context_switch	(u_int);
+
+void	arm9_setup		(char *);
+
+extern unsigned arm9_dcache_sets_max;
+extern unsigned arm9_dcache_sets_inc;
+extern unsigned arm9_dcache_index_max;
+extern unsigned arm9_dcache_index_inc;
+#endif
+
+#if defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_SHEEVA)
+void	arm10_tlb_flushID_SE	(vaddr_t);
+void	arm10_tlb_flushI_SE	(vaddr_t);
+
+void	arm10_context_switch	(u_int);
+
+void	arm10_setup		(char *);
+#endif
+
+#if defined(CPU_ARM9E) || defined (CPU_ARM10) || defined(CPU_SHEEVA)
+void	armv5_ec_setttb			(u_int, bool);
+
+void	armv5_ec_icache_sync_all	(void);
+void	armv5_ec_icache_sync_range	(vaddr_t, vsize_t);
+
+void	armv5_ec_dcache_wbinv_all	(void);
+void	armv5_ec_dcache_wbinv_range	(vaddr_t, vsize_t);
+void	armv5_ec_dcache_inv_range	(vaddr_t, vsize_t);
+void	armv5_ec_dcache_wb_range	(vaddr_t, vsize_t);
+
+void	armv5_ec_idcache_wbinv_all	(void);
+void	armv5_ec_idcache_wbinv_range	(vaddr_t, vsize_t);
+#endif
+
+#if defined (CPU_ARM10) || defined (CPU_ARM11MPCORE)
+void	armv5_setttb		(u_int, bool);
+
+void	armv5_icache_sync_all	(void);
+void	armv5_icache_sync_range	(vaddr_t, vsize_t);
+
+void	armv5_dcache_wbinv_all	(void);
+void	armv5_dcache_wbinv_range (vaddr_t, vsize_t);
+void	armv5_dcache_inv_range	(vaddr_t, vsize_t);
+void	armv5_dcache_wb_range	(vaddr_t, vsize_t);
+
+void	armv5_idcache_wbinv_all	(void);
+void	armv5_idcache_wbinv_range (vaddr_t, vsize_t);
+
+extern unsigned armv5_dcache_sets_max;
+extern unsigned armv5_dcache_sets_inc;
+extern unsigned armv5_dcache_index_max;
+extern unsigned armv5_dcache_index_inc;
+#endif
+
+#if defined(CPU_ARM11MPCORE)
+void	arm11mpcore_setup		(char *);
+#endif
+
+#if defined(CPU_ARM11)
+#if defined(ARM_MMU_EXTENDED)
+void	arm11_setttb		(u_int, tlb_asid_t);
+void	arm11_context_switch	(u_int, tlb_asid_t);
+#else
+void	arm11_setttb		(u_int, bool);
+void	arm11_context_switch	(u_int);
+#endif
+
+void	arm11_cpu_sleep		(int);
+void	arm11_setup		(char *string);
+void	arm11_tlb_flushID	(void);
+void	arm11_tlb_flushI	(void);
+void	arm11_tlb_flushD	(void);
+void	arm11_tlb_flushID_SE	(vaddr_t);
+void	arm11_tlb_flushI_SE	(vaddr_t);
+void	arm11_tlb_flushD_SE	(vaddr_t);
+
+void	armv11_dcache_wbinv_all (void);
+void	armv11_idcache_wbinv_all(void);
+
+void	arm11_drain_writebuf	(void);
+void	arm11_sleep		(int);
+
+void	armv6_setttb		(u_int, bool);
+
+void	armv6_icache_sync_all	(void);
+void	armv6_icache_sync_range	(vaddr_t, vsize_t);
+
+void	armv6_dcache_wbinv_all	(void);
+void	armv6_dcache_wbinv_range (vaddr_t, vsize_t);
+void	armv6_dcache_inv_range	(vaddr_t, vsize_t);
+void	armv6_dcache_wb_range	(vaddr_t, vsize_t);
+
+void	armv6_idcache_wbinv_all	(void);
+void	armv6_idcache_wbinv_range (vaddr_t, vsize_t);
+#endif
+
+#if defined(CPU_ARMV7)
+#if defined(ARM_MMU_EXTENDED)
+void	armv7_setttb(u_int, tlb_asid_t);
+void	armv7_context_switch(u_int, tlb_asid_t);
+#else
+void	armv7_setttb(u_int, bool);
+void	armv7_context_switch(u_int);
+#endif
+
+void	armv7_icache_sync_range(vaddr_t, vsize_t);
+void	armv7_icache_sync_all(void);
+
+void	armv7_dcache_inv_range(vaddr_t, vsize_t);
+void	armv7_dcache_wb_range(vaddr_t, vsize_t);
+void	armv7_dcache_wbinv_range(vaddr_t, vsize_t);
+void 	armv7_dcache_wbinv_all(void);
+
+void	armv7_idcache_wbinv_range(vaddr_t, vsize_t);
+void	armv7_idcache_wbinv_all(void);
+
+void	armv7_tlb_flushID(void);
+void	armv7_tlb_flushI(void);
+void	armv7_tlb_flushD(void);
+
+void	armv7_tlb_flushID_SE(vaddr_t);
+void	armv7_tlb_flushI_SE(vaddr_t);
+void	armv7_tlb_flushD_SE(vaddr_t);
+
+void	armv7_cpu_sleep(int);
+void	armv7_drain_writebuf(void);
+void	armv7_setup(char *string);
+#endif /* CPU_ARMV7 */
+
+#if defined(CPU_PJ4B)
+#if defined(ARM_MMU_EXTENDED)
+void	pj4b_setttb(u_int, tlb_asid_t);
+void	pj4b_context_switch(u_int, tlb_asid_t);
+#else
+void	pj4b_setttb(u_int, bool);
+void	pj4b_context_switch(u_int);
+#endif
+void	pj4b_tlb_flushID(void);
+void	pj4b_tlb_flushID_SE(vaddr_t);
+
+void	pj4b_icache_sync_range(vm_offset_t, vm_size_t);
+void	pj4b_idcache_wbinv_range(vm_offset_t, vm_size_t);
+void	pj4b_dcache_wbinv_range(vm_offset_t, vm_size_t);
+void	pj4b_dcache_inv_range(vm_offset_t, vm_size_t);
+void	pj4b_dcache_wb_range(vm_offset_t, vm_size_t);
+
+void	pj4b_drain_writebuf(void);
+void	pj4b_drain_readbuf(void);
+void	pj4b_flush_brnchtgt_all(void);
+void	pj4b_flush_brnchtgt_va(u_int);
+void	pj4b_sleep(int);
+
+void	pj4bv7_setup(char *string);
+void	pj4b_config(void);
+
+#endif /* CPU_PJ4B */
+
+#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
+void	arm11x6_idcache_wbinv_all	(void);
+void	arm11x6_dcache_wbinv_all	(void);
+void	arm11x6_icache_sync_all		(void);
+void	arm11x6_flush_prefetchbuf	(void);
+void	arm11x6_icache_sync_range	(vaddr_t, vsize_t);
+void	arm11x6_idcache_wbinv_range	(vaddr_t, vsize_t);
+void	arm11x6_setup			(char *string);
+void	arm11x6_sleep			(int);	/* no ref. for errata */
+#endif
+#if defined(CPU_ARM1136)
+void	arm1136_sleep_rev0		(int);	/* for errata 336501 */
+#endif
+
+
+#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
+    defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
+    defined(CPU_FA526) || defined(CPU_XSCALE) || defined(CPU_SHEEVA)
+
+void	armv4_tlb_flushID	(void);
+void	armv4_tlb_flushI	(void);
+void	armv4_tlb_flushD	(void);
+void	armv4_tlb_flushD_SE	(vaddr_t);
+
+void	armv4_drain_writebuf	(void);
+#endif
+
+#if defined(CPU_IXP12X0)
+void	ixp12x0_drain_readbuf	(void);
+void	ixp12x0_context_switch	(u_int);
+void	ixp12x0_setup		(char *);
+#endif
+
+#if defined(CPU_XSCALE)
+void	xscale_cpwait		(void);
+#define	cpu_cpwait()		cpufuncs.cf_cpwait()
+
+void	xscale_cpu_sleep	(int);
+
+u_int	xscale_control		(u_int, u_int);
+
+void	xscale_setttb		(u_int, bool);
+
+void	xscale_tlb_flushID_SE	(vaddr_t);
+
+void	xscale_cache_flushID	(void);
+void	xscale_cache_flushI	(void);
+void	xscale_cache_flushD	(void);
+void	xscale_cache_flushD_SE	(vaddr_t);
+
+void	xscale_cache_cleanID	(void);
+void	xscale_cache_cleanD	(void);
+void	xscale_cache_cleanD_E	(u_int);
+
+void	xscale_cache_clean_minidata (void);
+
+void	xscale_cache_purgeID	(void);
+void	xscale_cache_purgeID_E	(u_int);
+void	xscale_cache_purgeD	(void);
+void	xscale_cache_purgeD_E	(u_int);
+
+void	xscale_cache_syncI	(void);
+void	xscale_cache_cleanID_rng (vaddr_t, vsize_t);
+void	xscale_cache_cleanD_rng	(vaddr_t, vsize_t);
+void	xscale_cache_purgeID_rng (vaddr_t, vsize_t);
+void	xscale_cache_purgeD_rng	(vaddr_t, vsize_t);
+void	xscale_cache_syncI_rng	(vaddr_t, vsize_t);
+void	xscale_cache_flushD_rng	(vaddr_t, vsize_t);
+
+void	xscale_context_switch	(u_int);
+
+void	xscale_setup		(char *);
+#endif	/* CPU_XSCALE */
+
+#if defined(CPU_SHEEVA)
+void	sheeva_dcache_wbinv_range (vaddr_t, vsize_t);
+void	sheeva_dcache_inv_range	(vaddr_t, vsize_t);
+void	sheeva_dcache_wb_range	(vaddr_t, vsize_t);
+void	sheeva_idcache_wbinv_range (vaddr_t, vsize_t);
+void	sheeva_setup(char *);
+void	sheeva_cpu_sleep(int);
+
+void	sheeva_sdcache_inv_range(vaddr_t, paddr_t, vsize_t);
+void	sheeva_sdcache_wb_range(vaddr_t, paddr_t, vsize_t);
+void	sheeva_sdcache_wbinv_range(vaddr_t, paddr_t, vsize_t);
+void	sheeva_sdcache_wbinv_all(void);
+#endif
+
+#endif /* _KERNEL */
+
+#endif	/* _ARM_CPUFUNC_PROTO_H_ */

Reply via email to