Module Name:	src
Committed By:	matt
Date:		Sat May 30 21:44:38 UTC 2015
Modified Files:
	src/sys/arch/arm/cortex: a9_mpsubr.S

Log Message:
Call armv7_dcache_l1inv_all in mpstart
Make sure ACTRL<31> is set on A15 with 3+ cores.


To generate a diff of this commit:
cvs rdiff -u -r1.39 -r1.40 src/sys/arch/arm/cortex/a9_mpsubr.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
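For orientation, the new "already configured?" test that the log message
describes can be restated in C.  The sketch below is an illustration only,
not code from the tree: the function actlr_already_configured() is a
hypothetical stand-in for the assembly in the diff, the register values are
passed in as plain integers instead of being read with MRC, and the position
of the SMP bit (the kernel's CORTEXA9_AUXCTL_SMP) is an assumption here.

/*
 * Illustration only -- not code from the tree.  It restates what the new
 * assembly check computes: a secondary CPU may return early only if ACTLR
 * already has the SMP bit set and, on a Cortex-A15 cluster with 3 or more
 * CPUs (L2CTLR bit 25), ACTLR bit 31 as well.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AUXCTL_SMP	(1u << 6)	/* assumed position of ACTLR.SMP */

static bool
actlr_already_configured(uint32_t actlr, uint32_t l2ctlr, bool is_a15)
{
	uint32_t want = actlr | AUXCTL_SMP;	/* SMP must be on */

	if (is_a15) {
		uint32_t three_plus = (l2ctlr >> 25) & 1;  /* 3+ CPUs? */
		/* mirror into bit 31: snoop-delayed exclusive handling */
		want = (want & ~(1u << 31)) | (three_plus << 31);
	}
	return actlr == want;	/* corresponds to the cmp/bxeq pair */
}

int
main(void)
{
	/* A15, 3+ CPUs, firmware set SMP but not bit 31: not good enough */
	printf("%d\n", actlr_already_configured(AUXCTL_SMP, 1u << 25, true));
	/* same, but bit 31 also set: the early return is taken */
	printf("%d\n", actlr_already_configured(AUXCTL_SMP | (1u << 31),
	    1u << 25, true));
	return 0;
}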
Modified files:

Index: src/sys/arch/arm/cortex/a9_mpsubr.S
diff -u src/sys/arch/arm/cortex/a9_mpsubr.S:1.39 src/sys/arch/arm/cortex/a9_mpsubr.S:1.40
--- src/sys/arch/arm/cortex/a9_mpsubr.S:1.39	Thu May 28 02:23:18 2015
+++ src/sys/arch/arm/cortex/a9_mpsubr.S	Sat May 30 21:44:38 2015
@@ -1,4 +1,4 @@
-/*	$NetBSD: a9_mpsubr.S,v 1.39 2015/05/28 02:23:18 matt Exp $	*/
+/*	$NetBSD: a9_mpsubr.S,v 1.40 2015/05/30 21:44:38 matt Exp $	*/
 /*-
  * Copyright (c) 2012 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -339,14 +339,30 @@ cortex_init:
 	mov	r0, #0
 	msr	spsr_sxc, r0		// set SPSR[23:8] to known value
 
+#if 0
+	mrc	p14, 0, r0, c0, c0, 0	// MIDR read
+	ufbx	r0, r0, #4, #4		// extract cortex part.
+	mov	r5, #1
+	lsl	r5, r5, r0
+#endif
+
 	XPUTC(#'@')
 #if defined(CPU_CORTEXA7) || defined(CPU_CORTEXA15) || defined(CPU_CORTEXA17)
 	//
-	// If SMP is already enabled, don't do anything.
+	// If SMP is already enabled, don't do anything (maybe).
 	//
 	mrc	p15, 0, r0, c1, c0, 1	// ACTLR read
-	tst	r0, #CORTEXA9_AUXCTL_SMP	// test SMP
-	bxne	r10			// return if set
+	orr	r1, r0, #CORTEXA9_AUXCTL_SMP	// test SMP
+#if defined(CPU_CORTEXA15)
+	// The A15 requires snoop-delayed exclusive handling to be set
+	// if there are 3 or more CPUs.
+	mrc	p15, 1, r2, c9, c0, 2	// L2CTRL read
+	ubfx	r2, r2, #25, #1		// bit 25 is set when 3+ CPUs
+	bfi	r1, r2, #31, #1		// copy it to bit 31 in ACTRL
+#endif
+
+	cmp	r0, r1			// ACTLR have SMP+<31> set?
+	bxeq	r10			// return if set
 #endif
 
 	mrc	p15, 0, r4, c1, c0, 0	// SCTLR read
@@ -547,17 +563,21 @@ cortex_mpstart:
 #endif
 
 	// We haven't used anything from memory yet so we can invalidate the
-	// cache without fear of losing valuable data.  Note that the A5/A9
-	// L2 cache are not flushed via this call so this affects only this
-	// core's L1 cache.  The A7/A15/A17 L2 cache is write-through so
-	// invalidating it doesn't cause any data loss either.
+	// L1 cache without fear of losing valuable data.  Afterwards, we can
+	// flush icache without worrying about anything getting written back
+	// to memory.
#if defined(KERNEL_BASES_EQUAL)
-	bl	_C_LABEL(armv7_dcache_inv_all)
+	bl	_C_LABEL(armv7_dcache_l1inv_all)// toss-dcache
+	bl	_C_LABEL(armv7_icache_inv_all)	// toss i-cache after d-cache
 #else
-	movw	ip, #:lower16:_C_LABEL(armv7_dcache_inv_all)
-	movt	ip, #:upper16:_C_LABEL(armv7_dcache_inv_all)
+	movw	ip, #:lower16:_C_LABEL(armv7_dcache_l1inv_all)
+	movt	ip, #:upper16:_C_LABEL(armv7_dcache_l1inv_all)
 	sub	ip, ip, #KERNEL_BASE_VOFFSET
 	blx	ip			// toss d-cache
+	movw	ip, #:lower16:_C_LABEL(armv7_icache_inv_all)
+	movt	ip, #:upper16:_C_LABEL(armv7_icache_inv_all)
+	sub	ip, ip, #KERNEL_BASE_VOFFSET
+	blx	ip			// toss i-cache after d-cache
 #endif
 
 #if 0
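The ubfx/bfi pair in the A15 block above is just a one-bit move between
registers.  As a reading aid, the standalone C program below (not part of
the change; the helper functions ubfx() and bfi() are illustrative stand-ins
for the instructions) mimics the two operations with the operands used in
the diff:

/* Reading aid only: C equivalents of the ubfx/bfi operands used above. */
#include <stdint.h>
#include <stdio.h>

/* ubfx rd, rn, #lsb, #width -- extract <width> bits starting at <lsb> */
static uint32_t
ubfx(uint32_t rn, unsigned lsb, unsigned width)
{
	return (rn >> lsb) & ((1u << width) - 1);
}

/* bfi rd, rn, #lsb, #width -- insert the low <width> bits of rn at <lsb> */
static uint32_t
bfi(uint32_t rd, uint32_t rn, unsigned lsb, unsigned width)
{
	uint32_t mask = ((1u << width) - 1) << lsb;
	return (rd & ~mask) | ((rn << lsb) & mask);
}

int
main(void)
{
	uint32_t l2ctlr = 1u << 25;	/* pretend L2CTLR reports 3+ CPUs */
	uint32_t actlr  = 0;		/* pretend value destined for ACTLR */

	uint32_t r2 = ubfx(l2ctlr, 25, 1);	/* ubfx r2, r2, #25, #1 */
	actlr = bfi(actlr, r2, 31, 1);		/* bfi  r1, r2, #31, #1 */

	printf("ACTLR<31> = %u\n", actlr >> 31);	/* prints 1 */
	return 0;
}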