Module Name:    src
Committed By:   jmcneill
Date:           Thu Dec 11 23:35:11 UTC 2014

Modified Files:
        src/sys/arch/arm/allwinner: awin_reg.h
        src/sys/arch/evbarm/awin: awin_machdep.c awin_start.S platform.h

Log Message:
A80: Start up 3 additional Cortex-A7 cores in cluster 0.


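For readers following the assembly, the per-core power-up sequence performed by the new a80_mpinit routine (see the awin_start.S hunk below) can be summarized in C roughly as follows. This is an illustrative sketch only, not code from this commit: the read32/write32 accessors, the base-address and delay parameters, and the function name are assumptions made for readability. The register offsets are the ones added to awin_reg.h below, and the clamp-status poll is read off the RCPUCFG base, exactly as the assembly does.

/* Illustrative sketch only -- not part of this commit. */
#include <stdint.h>

/* Register offsets as added to awin_reg.h in this commit. */
#define AWIN_A80_RPRCM_CLUSTER0_RST_REG			0x0004
#define AWIN_A80_RPRCM_CLUSTER0_PWR_GATING_REG		0x0100
#define AWIN_A80_RPRCM_CLUSTER0_PRW_CLAMP_REG		0x0140
#define AWIN_A80_RPRCM_CLUSTER0_PRW_CLAMP_STATUS_REG	0x0064
#define AWIN_A80_RPRCM_PRIVATE_REG			0x0164
#define AWIN_A80_RCPUCFG_CLUSTER0_RST_REG		0x0080

/* Assumed MMIO helpers standing in for the ldr/str instructions. */
static inline uint32_t read32(uintptr_t a) { return *(volatile uint32_t *)a; }
static inline void write32(uintptr_t a, uint32_t v) { *(volatile uint32_t *)a = v; }

/*
 * rprcm   = AWIN_A80_RCPUS_PBASE + AWIN_A80_RPRCM_OFFSET  (r6 in the assembly)
 * rcpucfg = AWIN_A80_RCPUCFG_PBASE                        (r5 in the assembly)
 * entry   = physical address of cortex_mpstart
 * delay_10ms stands in for the gtmr_bootdelay(0x3b000) calls
 */
static void
a80_start_cpu(uintptr_t rprcm, uintptr_t rcpucfg, unsigned cpu,
    uint32_t entry, void (*delay_10ms)(void))
{
	const uint32_t bit = 1U << cpu;

	/* Tell the secondary core where to start executing. */
	write32(rprcm + AWIN_A80_RPRCM_PRIVATE_REG, entry);

	/* Assert CPU power-on reset and CPU core reset. */
	write32(rprcm + AWIN_A80_RPRCM_CLUSTER0_RST_REG,
	    read32(rprcm + AWIN_A80_RPRCM_CLUSTER0_RST_REG) & ~bit);
	write32(rcpucfg + AWIN_A80_RCPUCFG_CLUSTER0_RST_REG,
	    read32(rcpucfg + AWIN_A80_RCPUCFG_CLUSTER0_RST_REG) & ~bit);

	/* Release the per-core power clamp and wait for its status to clear. */
	write32(rprcm + AWIN_A80_RPRCM_CLUSTER0_PRW_CLAMP_REG + 4 * cpu, 0);
	while (read32(rcpucfg +
	    AWIN_A80_RPRCM_CLUSTER0_PRW_CLAMP_STATUS_REG + 0x40 * cpu) != 0)
		continue;
	delay_10ms();

	/* Clear power-off gating for this core. */
	write32(rprcm + AWIN_A80_RPRCM_CLUSTER0_PWR_GATING_REG,
	    read32(rprcm + AWIN_A80_RPRCM_CLUSTER0_PWR_GATING_REG) & ~bit);
	delay_10ms();

	/* Bring the core, then its power-on logic, out of reset. */
	write32(rcpucfg + AWIN_A80_RCPUCFG_CLUSTER0_RST_REG,
	    read32(rcpucfg + AWIN_A80_RCPUCFG_CLUSTER0_RST_REG) | bit);
	write32(rprcm + AWIN_A80_RPRCM_CLUSTER0_RST_REG,
	    read32(rprcm + AWIN_A80_RPRCM_CLUSTER0_RST_REG) | bit);
}

The new a80_mpinit runs this sequence for CPUs 1 through 3 of cluster 0 and then spins for up to about a second until arm_cpu_hatched shows the extra cores (see the end of the awin_start.S hunk).
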
To generate a diff of this commit:
cvs rdiff -u -r1.72 -r1.73 src/sys/arch/arm/allwinner/awin_reg.h
cvs rdiff -u -r1.35 -r1.36 src/sys/arch/evbarm/awin/awin_machdep.c
cvs rdiff -u -r1.7 -r1.8 src/sys/arch/evbarm/awin/awin_start.S
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/evbarm/awin/platform.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/allwinner/awin_reg.h
diff -u src/sys/arch/arm/allwinner/awin_reg.h:1.72 src/sys/arch/arm/allwinner/awin_reg.h:1.73
--- src/sys/arch/arm/allwinner/awin_reg.h:1.72	Mon Dec  8 10:48:22 2014
+++ src/sys/arch/arm/allwinner/awin_reg.h	Thu Dec 11 23:35:11 2014
@@ -54,6 +54,8 @@
 #define AWIN_CORE_PBASE			0x01C00000
 #if defined(ALLWINNER_A80)
 #define AWIN_CORE_SIZE			0x06400000	/* XXX */
+#define AWIN_A80_RCPUCFG_PBASE		0x01700000
+#define AWIN_A80_RCPUCFG_SIZE		0x00100000
 #define AWIN_A80_CORE2_PBASE		0x00800000
 #define AWIN_A80_CORE2_SIZE		0x00100000
 #define AWIN_A80_USB_PBASE		0x00a00000
@@ -2821,9 +2823,26 @@ struct awin_a31_dma_desc {
 #define AWIN_A80_RPRCM_CIR_CLK_REG		0x0054
 #define AWIN_A80_RPRCM_APB0_RST_REG		0x00b0
 
+#define AWIN_A80_RPRCM_CLUSTER0_RST_REG		0x0004
+
+#define AWIN_A80_RPRCM_CLUSTER0_RST_REG		0x0004
+#define AWIN_A80_RPRCM_CLUSTER1_RST_REG		0x0008
+
+#define AWIN_A80_RPRCM_CLUSTER0_PWR_GATING_REG	0x0100
+#define AWIN_A80_RPRCM_CLUSTER1_PWR_GATING_REG	0x0104
+
+#define AWIN_A80_RPRCM_CLUSTER0_PRW_CLAMP_REG	0x0140
+
+#define AWIN_A80_RPRCM_CLUSTER0_PRW_CLAMP_STATUS_REG	0x0064
+
+#define AWIN_A80_RPRCM_PRIVATE_REG		0x0164
+
 #define AWIN_A80_RPRCM_APB0_GATING_CIR		__BIT(1)
 #define AWIN_A80_RPRCM_APB0_RST_CIR		__BIT(1)
 
+#define AWIN_A80_RCPUCFG_CLUSTER0_RST_REG	0x0080
+#define AWIN_A80_RCPUCFG_CLUSTER1_RST_REG	0x0084
+
 #define AWIN_A80_RSB_CMD_REG			0x002c
 #define AWIN_A80_RSB_DAR_REG			0x0030
 

Index: src/sys/arch/evbarm/awin/awin_machdep.c
diff -u src/sys/arch/evbarm/awin/awin_machdep.c:1.35 src/sys/arch/evbarm/awin/awin_machdep.c:1.36
--- src/sys/arch/evbarm/awin/awin_machdep.c:1.35	Wed Dec 10 17:45:53 2014
+++ src/sys/arch/evbarm/awin/awin_machdep.c	Thu Dec 11 23:35:11 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: awin_machdep.c,v 1.35 2014/12/10 17:45:53 jmcneill Exp $ */
+/*	$NetBSD: awin_machdep.c,v 1.36 2014/12/11 23:35:11 jmcneill Exp $ */
 
 /*
  * Machine dependent functions for kernel setup for TI OSK5912 board.
@@ -125,7 +125,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: awin_machdep.c,v 1.35 2014/12/10 17:45:53 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: awin_machdep.c,v 1.36 2014/12/11 23:35:11 jmcneill Exp $");
 
 #include "opt_machdep.h"
 #include "opt_ddb.h"
@@ -318,6 +318,16 @@ static const struct pmap_devmap devmap[]
 		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,
 		.pd_cache = PTE_NOCACHE
 	},
+	{
+		/*
+		 * A80 CPUCFG
+		 */
+		.pd_va = _A(AWIN_A80_RCPUCFG_VBASE),
+		.pd_pa = _A(AWIN_A80_RCPUCFG_PBASE),
+		.pd_size = _S(AWIN_A80_RCPUCFG_SIZE),
+		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,
+		.pd_cache = PTE_NOCACHE
+	},
 #endif
 	{
 		/*

Index: src/sys/arch/evbarm/awin/awin_start.S
diff -u src/sys/arch/evbarm/awin/awin_start.S:1.7 src/sys/arch/evbarm/awin/awin_start.S:1.8
--- src/sys/arch/evbarm/awin/awin_start.S:1.7	Sun Dec  7 18:32:13 2014
+++ src/sys/arch/evbarm/awin/awin_start.S	Thu Dec 11 23:35:11 2014
@@ -41,7 +41,7 @@
 #include <arm/allwinner/awin_reg.h>
 #include <evbarm/awin/platform.h>  
 
-RCSID("$NetBSD: awin_start.S,v 1.7 2014/12/07 18:32:13 jmcneill Exp $")
+RCSID("$NetBSD: awin_start.S,v 1.8 2014/12/11 23:35:11 jmcneill Exp $")
 
 #if defined(VERBOSE_INIT_ARM)
 #define	XPUTC(n)	mov r0, n; bl xputc
@@ -151,7 +151,7 @@ _C_LABEL(awin_start):
 	// Make sure the cache is flushed out to RAM for the other CPUs
 	bl	_C_LABEL(armv7_dcache_wbinv_all)
 
-#if defined(ALLWINNER_A20) + defined(ALLWINNER_A31) + defined(ALLWINNER_A80) > 1
+#if defined(ALLWINNER_A20) + defined(ALLWINNER_A31) > 1
 	// Read SoC ID
 	movw	r5, #:lower16:(AWIN_CORE_PBASE+AWIN_SRAM_OFFSET)
 	movt	r5, #:upper16:(AWIN_CORE_PBASE+AWIN_SRAM_OFFSET)
@@ -168,11 +168,10 @@ _C_LABEL(awin_start):
 	rev	r1, r1
 #endif
 	lsr	r1, r1, #16
-#endif /* ALLWINNER_A20 + ALLWINNER_A31 + ALLWINNER_A80 > 1 */
 
 	// MP init based on SoC ID
 #if defined(ALLWINNER_A20)
-# if defined(ALLWINNER_A31) || defined(ALLWINNER_A80)
+# if defined(ALLWINNER_A31)
 	movw	r0, #AWIN_SRAM_VER_KEY_A20
 	cmp	r1, r0
 	bleq	a20_mpinit
@@ -181,7 +180,7 @@ _C_LABEL(awin_start):
 # endif
 #endif
 #if defined(ALLWINNER_A31)
-# if defined(ALLWINNER_A20) || defined(ALLWINNER_A80)
+# if defined(ALLWINNER_A20)
 	movw	r0, #AWIN_SRAM_VER_KEY_A31
 	cmp	r1, r0
 	bleq	a31_mpinit
@@ -189,6 +188,9 @@ _C_LABEL(awin_start):
 	bl	a31_mpinit
 # endif
 #endif
+#elif defined(ALLWINNER_A80)
+	bl	a80_mpinit
+#endif
 
 	XPUTC2(#62)
 #endif /* MULTIPROCESSOR */
@@ -439,6 +441,130 @@ ASEND(a31_mpinit)
 	.popsection
 #endif
 
+#if defined(ALLWINNER_A80)
+#ifndef KERNEL_BASES_EQUAL
+	.pushsection .text,"ax",%progbits
+#endif
+a80_mpinit:
+	mov	r4, lr			// because we call gtmr_bootdelay
+	movw	r5, #:lower16:(AWIN_A80_RCPUCFG_PBASE)
+	movt	r5, #:upper16:(AWIN_A80_RCPUCFG_PBASE)
+	movw	r6, #:lower16:(AWIN_A80_RCPUS_PBASE+AWIN_A80_RPRCM_OFFSET)
+	movt	r6, #:upper16:(AWIN_A80_RCPUS_PBASE+AWIN_A80_RPRCM_OFFSET)
+
+	XPUTC2(#65)
+	XPUTC2(#51)
+	XPUTC2(#49)
+
+#ifdef __ARMEB__
+	setend	le			// everything here is little-endian
+#endif
+
+	mov	r12, #1			// CPU number
+
+a80_mpinit_cpu:
+
+	XPUTC2(r12)
+
+	/* Set where the other CPU(s) are going to execute */
+	movw	r1, #:lower16:cortex_mpstart
+	movt	r1, #:upper16:cortex_mpstart
+	str	r1, [r6, #AWIN_A80_RPRCM_PRIVATE_REG]
+	dsb
+
+	/* Assert CPU power on reset */
+	ldr	r1, [r6, #AWIN_A80_RPRCM_CLUSTER0_RST_REG]
+	mov	r0, #1
+	lsl	r0, r0, r12
+	bic	r1, r1, r0
+	str	r1, [r6, #AWIN_A80_RPRCM_CLUSTER0_RST_REG]
+
+	/* Assert CPU core reset */
+	ldr	r1, [r5, #AWIN_A80_RCPUCFG_CLUSTER0_RST_REG]
+	mov	r0, #1
+	lsl	r0, r0, r12
+	bic	r1, r1, r0
+	str	r1, [r5, #AWIN_A80_RCPUCFG_CLUSTER0_RST_REG]
+
+	/* Release power clamp */
+	mov	r1, #0x00
+	mov	r2, #0x4
+	mul	r7, r12, r2
+	add	r7, r7, #AWIN_A80_RPRCM_CLUSTER0_PRW_CLAMP_REG
+	str	r1, [r6, r7]
+	dsb
+
+	mov	r2, #0x40
+	mul	r7, r12, r2
+	add	r7, r7, #AWIN_A80_RPRCM_CLUSTER0_PRW_CLAMP_STATUS_REG
+1:
+	ldr	r1, [r5, r7]
+	cmp	r1, #0x00
+	bne	1b
+
+	/* We need to wait (at least) 10ms */
+	mov	r0, #0x3b000			// 10.06ms
+	bl	_C_LABEL(gtmr_bootdelay)	// endian-neutral
+
+	/* Clear power-off gating */
+	ldr	r1, [r6, #AWIN_A80_RPRCM_CLUSTER0_PWR_GATING_REG] 
+	mov	r0, #1
+	lsl	r0, r0, r12
+	bic	r1, r1, r0
+	str	r1, [r6, #AWIN_A80_RPRCM_CLUSTER0_PWR_GATING_REG]
+	dsb
+
+	/* We need to wait (at least) 10ms */
+	mov	r0, #0x3b000			// 10.06ms
+	bl	_C_LABEL(gtmr_bootdelay)	// endian-neutral
+
+	/* Bring core out of reset */
+	ldr	r1, [r5, #AWIN_A80_RCPUCFG_CLUSTER0_RST_REG]
+	mov	r0, #1
+	lsl	r0, r0, r12
+	orr	r1, r1, r0
+	str	r1, [r5, #AWIN_A80_RCPUCFG_CLUSTER0_RST_REG]
+
+	/* Bring cpu power-on out of reset */
+	ldr	r1, [r6, #AWIN_A80_RPRCM_CLUSTER0_RST_REG]
+	mov	r0, #1
+	lsl	r0, r0, r12
+	orr	r1, r1, r0
+	str	r1, [r6, #AWIN_A80_RPRCM_CLUSTER0_RST_REG]
+	dsb
+
+	/* If there is another CPU, start it */
+	add	r12, r12, #1
+	cmp	r12, #3
+	ble	a80_mpinit_cpu
+
+#ifdef __ARMEB__
+	setend	be				// we're done with little endian
+#endif
+
+	//
+	// Wait up a second for CPU1-3 to hatch. 
+	//
+	movw	r6, #:lower16:arm_cpu_hatched
+	movt	r6, #:upper16:arm_cpu_hatched
+	mov	r5, #200			// 200 x 5ms
+
+1:	dmb					// memory barrier
+	ldr	r0, [r6]			// load hatched
+	cmp	r0, #0xe			// our bits set yet?
+	bxge	r4				//   yes, return
+	subs	r5, r5, #1			// decrement count
+	bxeq	r4				//   0? return
+	mov	r0, #0x1d800			// 5.03ms
+	bl	_C_LABEL(gtmr_bootdelay)
+	b	1b
+
+ASEND(a80_mpinit)
+#ifndef KERNEL_BASES_EQUAL
+	.popsection
+#endif
+#endif /* ALLWINNER_A80 */
+
 #endif /* MULTIPROCESSOR */
 
 .Lmmu_init_table:
@@ -464,15 +590,25 @@ ASEND(a31_mpinit)
 		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_V6_XN)
 
 #if defined(ALLWINNER_A80)
-	/* Map AWIN RCPUS (for PIO L-N) */
+	/* Map AWIN RCPUS (for PIO L-N, PRCM) */
 	MMU_INIT(AWIN_A80_RCPUS_VBASE, AWIN_A80_RCPUS_PBASE,
 		(AWIN_A80_RCPUS_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
 		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_V6_XN)
 
-	/* Map AWIN RCPUS (for PIO L-N) */
+	/* Map AWIN RCPUS (for PIO L-N, PRCM) */
 	MMU_INIT(AWIN_A80_RCPUS_PBASE, AWIN_A80_RCPUS_PBASE,
 		(AWIN_A80_RCPUS_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
 		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_V6_XN)
+
+	/* Map AWIN RCPUCFG */
+	MMU_INIT(AWIN_A80_RCPUCFG_VBASE, AWIN_A80_RCPUCFG_PBASE,
+		(AWIN_A80_RCPUCFG_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
+		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_V6_XN)
+
+	/* Map AWIN RCPUCFG */
+	MMU_INIT(AWIN_A80_RCPUCFG_PBASE, AWIN_A80_RCPUCFG_PBASE,
+		(AWIN_A80_RCPUCFG_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
+		L1_S_PROTO_armv7 | L1_S_APv7_KRW | L1_S_V6_XN)
 #endif
 
 	/* end of table */

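As a companion to the sketch near the top of this message, the wait loop at the end of a80_mpinit above could be written in C roughly as below. This is illustrative only: arm_cpu_hatched is the hatch word referenced by the assembly, while the delay_5ms callback and the function name are assumptions. The assembly returns as soon as the loaded value compares greater than or equal to 0xe (the bits for CPU1-CPU3), or after roughly 200 x 5 ms.

/* Illustrative sketch only -- not part of this commit. */
#include <stdint.h>

static void
a80_wait_hatched(volatile uint32_t *arm_cpu_hatched, void (*delay_5ms)(void))
{
	for (int tries = 200; tries > 0; tries--) {
		if (*arm_cpu_hatched >= 0xe)	/* CPU1-CPU3 reported in */
			return;
		delay_5ms();			/* gtmr_bootdelay(0x1d800) in the assembly */
	}
	/* Give up after ~1 second and continue booting, as the assembly does. */
}
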
Index: src/sys/arch/evbarm/awin/platform.h
diff -u src/sys/arch/evbarm/awin/platform.h:1.4 src/sys/arch/evbarm/awin/platform.h:1.5
--- src/sys/arch/evbarm/awin/platform.h:1.4	Sun Dec  7 00:36:26 2014
+++ src/sys/arch/evbarm/awin/platform.h	Thu Dec 11 23:35:11 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: platform.h,v 1.4 2014/12/07 00:36:26 jmcneill Exp $	*/
+/*	$NetBSD: platform.h,v 1.5 2014/12/11 23:35:11 jmcneill Exp $	*/
 /*
  * Copyright (c) 2007 Microsoft
  * All rights reserved.
@@ -56,7 +56,8 @@
 #define AWIN_A80_CORE2_VBASE	(AWIN_SRAM_VBASE + AWIN_SRAM_SIZE)
 #define AWIN_A80_USB_VBASE	(AWIN_A80_CORE2_VBASE + AWIN_A80_CORE2_SIZE)
 #define AWIN_A80_RCPUS_VBASE	(AWIN_A80_USB_VBASE + AWIN_A80_USB_SIZE)
-#define AWIN_KERNEL_IO_VEND	(AWIN_A80_RCPUS_VBASE + AWIN_A80_RCPUS_SIZE)
+#define AWIN_A80_RCPUCFG_VBASE	(AWIN_A80_RCPUS_VBASE + AWIN_A80_RCPUS_SIZE)
+#define AWIN_KERNEL_IO_VEND	(AWIN_A80_RCPUCFG_VBASE + AWIN_A80_RCPUCFG_SIZE)
 #else
 #define AWIN_KERNEL_IO_VEND	(AWIN_SRAM_VBASE + AWIN_SRAM_SIZE)
 #endif
