Module Name:    src
Committed By:   macallan
Date:           Thu Mar 22 15:18:06 UTC 2018

Modified Files:
        src/sys/arch/powerpc/include: cpu.h spr.h
        src/sys/arch/powerpc/oea: cpu_subr.c

Log Message:
first step towards G5 SMP:
- only save/restore BATs on CPUs that have them
- treat HID0 as 64bit on 64bit CPUs


To generate a diff of this commit:
cvs rdiff -u -r1.103 -r1.104 src/sys/arch/powerpc/include/cpu.h
cvs rdiff -u -r1.50 -r1.51 src/sys/arch/powerpc/include/spr.h
cvs rdiff -u -r1.90 -r1.91 src/sys/arch/powerpc/oea/cpu_subr.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/powerpc/include/cpu.h
diff -u src/sys/arch/powerpc/include/cpu.h:1.103 src/sys/arch/powerpc/include/cpu.h:1.104
--- src/sys/arch/powerpc/include/cpu.h:1.103	Sun Dec 17 17:18:34 2017
+++ src/sys/arch/powerpc/include/cpu.h	Thu Mar 22 15:18:05 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.103 2017/12/17 17:18:34 chs Exp $	*/
+/*	$NetBSD: cpu.h,v 1.104 2018/03/22 15:18:05 macallan Exp $	*/
 
 /*
  * Copyright (C) 1999 Wolfgang Solfrank.
@@ -162,7 +162,11 @@ struct cpu_hatch_data {
 	struct cpu_info *hatch_ci;
 	uint32_t hatch_tbu;
 	uint32_t hatch_tbl;
+#if defined(PPC_OEA64_BRIDGE) || defined (_ARCH_PPC64)
+	uint64_t hatch_hid0;
+#else
 	uint32_t hatch_hid0;
+#endif
 	uint32_t hatch_pir;
 #if defined(PPC_OEA) || defined(PPC_OEA64_BRIDGE)
 	uintptr_t hatch_asr;

Index: src/sys/arch/powerpc/include/spr.h
diff -u src/sys/arch/powerpc/include/spr.h:1.50 src/sys/arch/powerpc/include/spr.h:1.51
--- src/sys/arch/powerpc/include/spr.h:1.50	Sun Jan 21 09:25:45 2018
+++ src/sys/arch/powerpc/include/spr.h	Thu Mar 22 15:18:05 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: spr.h,v 1.50 2018/01/21 09:25:45 mrg Exp $	*/
+/*	$NetBSD: spr.h,v 1.51 2018/03/22 15:18:05 macallan Exp $	*/
 
 /*
  * Copyright (c) 2001, The NetBSD Foundation, Inc.
@@ -37,22 +37,22 @@ static inline uint64_t
 mfspr64(int reg)
 {
 	uint64_t ret;
-	register_t h, l;
+	register_t hi, l;
 
 	__asm volatile( "mfspr %0,%2;"
 			"srdi %1,%0,32;"
-			 : "=r"(l), "=r"(h) : "K"(reg));
-	ret = ((uint64_t)h << 32) | l;
+			 : "=r"(l), "=r"(hi) : "K"(reg));
+	ret = ((uint64_t)hi << 32) | l;
 	return ret;
 }
 
 /* This as an inline breaks as 'reg' ends up not being an immediate */
 #define mtspr64(reg, v)						\
 ( {								\
-	volatile register_t h, l;				\
+	volatile register_t hi, l;				\
 								\
 	uint64_t val = v;					\
-	h = (val >> 32);					\
+	hi = (val >> 32);					\
 	l = val & 0xffffffff;					\
 	__asm volatile(	"sldi %2,%2,32;"			\
 			"or %2,%2,%1;"				\
@@ -64,7 +64,7 @@ mfspr64(int reg)
 			"mfspr %2,%0;"				\
 			"mfspr %2,%0;"				\
 			"mfspr %2,%0;"				\
-			 : : "K"(reg), "r"(l), "r"(h));		\
+			 : : "K"(reg), "r"(l), "r"(hi));		\
 } )
 #endif /* PPC_OEA64_BRIDGE || _ARCH_PPC64 */
 

Index: src/sys/arch/powerpc/oea/cpu_subr.c
diff -u src/sys/arch/powerpc/oea/cpu_subr.c:1.90 src/sys/arch/powerpc/oea/cpu_subr.c:1.91
--- src/sys/arch/powerpc/oea/cpu_subr.c:1.90	Sun Mar  4 21:51:44 2018
+++ src/sys/arch/powerpc/oea/cpu_subr.c	Thu Mar 22 15:18:06 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu_subr.c,v 1.90 2018/03/04 21:51:44 mrg Exp $	*/
+/*	$NetBSD: cpu_subr.c,v 1.91 2018/03/22 15:18:06 macallan Exp $	*/
 
 /*-
  * Copyright (c) 2001 Matt Thomas.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.90 2018/03/04 21:51:44 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.91 2018/03/22 15:18:06 macallan Exp $");
 
 #include "opt_ppcparam.h"
 #include "opt_ppccache.h"
@@ -593,7 +593,7 @@ cpu_setup(device_t self, struct cpu_info
 		KASSERT((oeacpufeat & OEACPU_64_BRIDGE) != 0);
 #endif
 		hid64_0 &= ~(HID0_64_DOZE | HID0_64_NAP | HID0_64_DEEPNAP);
-		hid64_0 |= HID0_64_DOZE | HID0_64_DPM | HID0_64_EX_TBEN |
+		hid64_0 |= HID0_64_NAP | HID0_64_DPM | HID0_64_EX_TBEN |
 			   HID0_64_TB_CTRL | HID0_64_EN_MCHK;
 		powersave = 1;
 		break;
@@ -1326,24 +1326,26 @@ cpu_spinup(device_t self, struct cpu_inf
 	else
 		h->hatch_asr = 0;
 
-	/* copy the bat regs */
-	__asm volatile ("mfibatu %0,0" : "=r"(h->hatch_ibatu[0]));
-	__asm volatile ("mfibatl %0,0" : "=r"(h->hatch_ibatl[0]));
-	__asm volatile ("mfibatu %0,1" : "=r"(h->hatch_ibatu[1]));
-	__asm volatile ("mfibatl %0,1" : "=r"(h->hatch_ibatl[1]));
-	__asm volatile ("mfibatu %0,2" : "=r"(h->hatch_ibatu[2]));
-	__asm volatile ("mfibatl %0,2" : "=r"(h->hatch_ibatl[2]));
-	__asm volatile ("mfibatu %0,3" : "=r"(h->hatch_ibatu[3]));
-	__asm volatile ("mfibatl %0,3" : "=r"(h->hatch_ibatl[3]));
-	__asm volatile ("mfdbatu %0,0" : "=r"(h->hatch_dbatu[0]));
-	__asm volatile ("mfdbatl %0,0" : "=r"(h->hatch_dbatl[0]));
-	__asm volatile ("mfdbatu %0,1" : "=r"(h->hatch_dbatu[1]));
-	__asm volatile ("mfdbatl %0,1" : "=r"(h->hatch_dbatl[1]));
-	__asm volatile ("mfdbatu %0,2" : "=r"(h->hatch_dbatu[2]));
-	__asm volatile ("mfdbatl %0,2" : "=r"(h->hatch_dbatl[2]));
-	__asm volatile ("mfdbatu %0,3" : "=r"(h->hatch_dbatu[3]));
-	__asm volatile ("mfdbatl %0,3" : "=r"(h->hatch_dbatl[3]));
-	__asm volatile ("sync; isync");
+	if ((oeacpufeat & OEACPU_NOBAT) == 0) {
+		/* copy the bat regs */
+		__asm volatile ("mfibatu %0,0" : "=r"(h->hatch_ibatu[0]));
+		__asm volatile ("mfibatl %0,0" : "=r"(h->hatch_ibatl[0]));
+		__asm volatile ("mfibatu %0,1" : "=r"(h->hatch_ibatu[1]));
+		__asm volatile ("mfibatl %0,1" : "=r"(h->hatch_ibatl[1]));
+		__asm volatile ("mfibatu %0,2" : "=r"(h->hatch_ibatu[2]));
+		__asm volatile ("mfibatl %0,2" : "=r"(h->hatch_ibatl[2]));
+		__asm volatile ("mfibatu %0,3" : "=r"(h->hatch_ibatu[3]));
+		__asm volatile ("mfibatl %0,3" : "=r"(h->hatch_ibatl[3]));
+		__asm volatile ("mfdbatu %0,0" : "=r"(h->hatch_dbatu[0]));
+		__asm volatile ("mfdbatl %0,0" : "=r"(h->hatch_dbatl[0]));
+		__asm volatile ("mfdbatu %0,1" : "=r"(h->hatch_dbatu[1]));
+		__asm volatile ("mfdbatl %0,1" : "=r"(h->hatch_dbatl[1]));
+		__asm volatile ("mfdbatu %0,2" : "=r"(h->hatch_dbatu[2]));
+		__asm volatile ("mfdbatl %0,2" : "=r"(h->hatch_dbatl[2]));
+		__asm volatile ("mfdbatu %0,3" : "=r"(h->hatch_dbatu[3]));
+		__asm volatile ("mfdbatl %0,3" : "=r"(h->hatch_dbatl[3]));
+		__asm volatile ("sync; isync");
+	}
 
 	if (md_setup_trampoline(h, ci) == -1)
 		return -1;
@@ -1408,28 +1410,35 @@ cpu_hatch(void)
 	curlwp = ci->ci_curlwp;
 	cpu_spinstart_ack = 0;
 
-	/* Initialize MMU. */
-	__asm ("mtibatu 0,%0" :: "r"(h->hatch_ibatu[0]));
-	__asm ("mtibatl 0,%0" :: "r"(h->hatch_ibatl[0]));
-	__asm ("mtibatu 1,%0" :: "r"(h->hatch_ibatu[1]));
-	__asm ("mtibatl 1,%0" :: "r"(h->hatch_ibatl[1]));
-	__asm ("mtibatu 2,%0" :: "r"(h->hatch_ibatu[2]));
-	__asm ("mtibatl 2,%0" :: "r"(h->hatch_ibatl[2]));
-	__asm ("mtibatu 3,%0" :: "r"(h->hatch_ibatu[3]));
-	__asm ("mtibatl 3,%0" :: "r"(h->hatch_ibatl[3]));
-	__asm ("mtdbatu 0,%0" :: "r"(h->hatch_dbatu[0]));
-	__asm ("mtdbatl 0,%0" :: "r"(h->hatch_dbatl[0]));
-	__asm ("mtdbatu 1,%0" :: "r"(h->hatch_dbatu[1]));
-	__asm ("mtdbatl 1,%0" :: "r"(h->hatch_dbatl[1]));
-	__asm ("mtdbatu 2,%0" :: "r"(h->hatch_dbatu[2]));
-	__asm ("mtdbatl 2,%0" :: "r"(h->hatch_dbatl[2]));
-	__asm ("mtdbatu 3,%0" :: "r"(h->hatch_dbatu[3]));
-	__asm ("mtdbatl 3,%0" :: "r"(h->hatch_dbatl[3]));
+	if ((oeacpufeat & OEACPU_NOBAT) == 0) {
+		/* Initialize MMU. */
+		__asm ("mtibatu 0,%0" :: "r"(h->hatch_ibatu[0]));
+		__asm ("mtibatl 0,%0" :: "r"(h->hatch_ibatl[0]));
+		__asm ("mtibatu 1,%0" :: "r"(h->hatch_ibatu[1]));
+		__asm ("mtibatl 1,%0" :: "r"(h->hatch_ibatl[1]));
+		__asm ("mtibatu 2,%0" :: "r"(h->hatch_ibatu[2]));
+		__asm ("mtibatl 2,%0" :: "r"(h->hatch_ibatl[2]));
+		__asm ("mtibatu 3,%0" :: "r"(h->hatch_ibatu[3]));
+		__asm ("mtibatl 3,%0" :: "r"(h->hatch_ibatl[3]));
+		__asm ("mtdbatu 0,%0" :: "r"(h->hatch_dbatu[0]));
+		__asm ("mtdbatl 0,%0" :: "r"(h->hatch_dbatl[0]));
+		__asm ("mtdbatu 1,%0" :: "r"(h->hatch_dbatu[1]));
+		__asm ("mtdbatl 1,%0" :: "r"(h->hatch_dbatl[1]));
+		__asm ("mtdbatu 2,%0" :: "r"(h->hatch_dbatu[2]));
+		__asm ("mtdbatl 2,%0" :: "r"(h->hatch_dbatl[2]));
+		__asm ("mtdbatu 3,%0" :: "r"(h->hatch_dbatu[3]));
+		__asm ("mtdbatl 3,%0" :: "r"(h->hatch_dbatl[3]));
+	}
 
-	mtspr(SPR_HID0, h->hatch_hid0);
+	if ((oeacpufeat & OEACPU_64_BRIDGE) != 0) {
+		mtspr64(SPR_HID0, h->hatch_hid0);
+	} else
+		mtspr(SPR_HID0, h->hatch_hid0);
 
-	__asm ("mtibatl 0,%0; mtibatu 0,%1; mtdbatl 0,%0; mtdbatu 0,%1;"
-	    :: "r"(battable[0].batl), "r"(battable[0].batu));
+	if ((oeacpufeat & OEACPU_NOBAT) == 0) {
+		__asm ("mtibatl 0,%0; mtibatu 0,%1; mtdbatl 0,%0; mtdbatu 0,%1;"
+		    :: "r"(battable[0].batl), "r"(battable[0].batu));
+	}
 
 	__asm volatile ("sync");
 	for (i = 0; i < 16; i++)

[Reply via email to — mailing-list footer truncated in extraction]