Module Name:    src
Committed By:   skrll
Date:           Fri Aug 14 16:18:37 UTC 2020

Modified Files:
        src/sys/arch/arm/arm: arm_machdep.c
        src/sys/arch/arm/arm32: cpuswitch.S db_machdep.c genassym.cf
        src/sys/arch/arm/include: cpu.h locore.h proc.h
        src/sys/arch/arm/include/arm32: frame.h
        src/sys/arch/evbarm/conf: std.generic

Log Message:
Mirror the changes to aarch64 and

- Switch to TPIDRPRW_IS_CURLWP, because curlwp is accessed much more often
  by MI code.  It also makes curlwp preemption safe.

- Make ASTs operate per-LWP rather than per-CPU, otherwise sometimes LWPs
  can see spurious ASTs (which doesn't cause a problem; it just means some
  time may be wasted).

- Make sure ASTs are always set on the same CPU as the target LWP, and
  delivered via IPI if posted from a remote CPU so that they are resolved
  quickly.

- Add some cache line padding to struct cpu_info.

- Add a memory barrier in a couple of places where ci_curlwp is set.  This
  is needed whenever an LWP that is resuming on the CPU could hold an
  adaptive mutex.  The barrier needs to drain the CPU's store buffer, so
  that the update to ci_curlwp becomes globally visible before the LWP can
  resume and call mutex_exit().


To generate a diff of this commit:
cvs rdiff -u -r1.63 -r1.64 src/sys/arch/arm/arm/arm_machdep.c
cvs rdiff -u -r1.101 -r1.102 src/sys/arch/arm/arm32/cpuswitch.S
cvs rdiff -u -r1.34 -r1.35 src/sys/arch/arm/arm32/db_machdep.c
cvs rdiff -u -r1.93 -r1.94 src/sys/arch/arm/arm32/genassym.cf
cvs rdiff -u -r1.111 -r1.112 src/sys/arch/arm/include/cpu.h
cvs rdiff -u -r1.32 -r1.33 src/sys/arch/arm/include/locore.h
cvs rdiff -u -r1.18 -r1.19 src/sys/arch/arm/include/proc.h
cvs rdiff -u -r1.47 -r1.48 src/sys/arch/arm/include/arm32/frame.h
cvs rdiff -u -r1.7 -r1.8 src/sys/arch/evbarm/conf/std.generic

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/arm/arm_machdep.c
diff -u src/sys/arch/arm/arm/arm_machdep.c:1.63 src/sys/arch/arm/arm/arm_machdep.c:1.64
--- src/sys/arch/arm/arm/arm_machdep.c:1.63	Sat Feb 15 08:16:10 2020
+++ src/sys/arch/arm/arm/arm_machdep.c	Fri Aug 14 16:18:36 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: arm_machdep.c,v 1.63 2020/02/15 08:16:10 skrll Exp $	*/
+/*	$NetBSD: arm_machdep.c,v 1.64 2020/08/14 16:18:36 skrll Exp $	*/
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -80,7 +80,7 @@
 
 #include <sys/param.h>
 
-__KERNEL_RCSID(0, "$NetBSD: arm_machdep.c,v 1.63 2020/02/15 08:16:10 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm_machdep.c,v 1.64 2020/08/14 16:18:36 skrll Exp $");
 
 #include <sys/atomic.h>
 #include <sys/cpu.h>
@@ -241,17 +241,39 @@ cpu_need_resched(struct cpu_info *ci, st
 		if (flags & RESCHED_REMOTE) {
 			intr_ipi_send(ci->ci_kcpuset, IPI_KPREEMPT);
 		} else {
-			atomic_or_uint(&ci->ci_astpending, __BIT(1));
+			l->l_md.md_astpending |= __BIT(1);
 		}
 #endif /* __HAVE_PREEMPTION */
 		return;
 	}
+
+	KASSERT((flags & RESCHED_UPREEMPT) != 0);
 	if (flags & RESCHED_REMOTE) {
 #ifdef MULTIPROCESSOR
 		intr_ipi_send(ci->ci_kcpuset, IPI_AST);
 #endif /* MULTIPROCESSOR */
 	} else {
-		setsoftast(ci);
+		l->l_md.md_astpending |= __BIT(0);
+	}
+}
+
+
+/*
+ * Notify the current lwp (l) that it has a signal pending,
+ * process as soon as possible.
+ */
+void
+cpu_signotify(struct lwp *l)
+{
+
+	KASSERT(kpreempt_disabled());
+
+	if (l->l_cpu != curcpu()) {
+#ifdef MULTIPROCESSOR
+		intr_ipi_send(l->l_cpu->ci_kcpuset, IPI_AST);
+#endif
+	} else {
+		l->l_md.md_astpending |= __BIT(0);
 	}
 }
 

Index: src/sys/arch/arm/arm32/cpuswitch.S
diff -u src/sys/arch/arm/arm32/cpuswitch.S:1.101 src/sys/arch/arm/arm32/cpuswitch.S:1.102
--- src/sys/arch/arm/arm32/cpuswitch.S:1.101	Fri Jul 10 12:25:09 2020
+++ src/sys/arch/arm/arm32/cpuswitch.S	Fri Aug 14 16:18:36 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpuswitch.S,v 1.101 2020/07/10 12:25:09 skrll Exp $	*/
+/*	$NetBSD: cpuswitch.S,v 1.102 2020/08/14 16:18:36 skrll Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -87,7 +87,7 @@
 #include <arm/asm.h>
 #include <arm/locore.h>
 
-	RCSID("$NetBSD: cpuswitch.S,v 1.101 2020/07/10 12:25:09 skrll Exp $")
+	RCSID("$NetBSD: cpuswitch.S,v 1.102 2020/08/14 16:18:36 skrll Exp $")
 
 /* LINTSTUB: include <sys/param.h> */
 
@@ -191,6 +191,7 @@ ENTRY(cpu_switchto)
 
 	/* We have a new curlwp now so make a note of it */
 	str	r6, [r5, #(CI_CURLWP)]
+	dmb					/* see comments in kern_mutex.c */
 
 	/* Get the new pcb */
 	ldr	r7, [r6, #(L_PCB)]
@@ -388,6 +389,7 @@ ENTRY_NP(softint_switch)
 	mcr	p15, 0, r5, c13, c0, 4	/* save new lwp */
 #endif
 	str	r5, [r7, #(CI_CURLWP)]	/* save new lwp */
+	dmb				/* see comments in kern_mutex.c */
 
 #ifdef KASAN
 	mov	r0, r5

Index: src/sys/arch/arm/arm32/db_machdep.c
diff -u src/sys/arch/arm/arm32/db_machdep.c:1.34 src/sys/arch/arm/arm32/db_machdep.c:1.35
--- src/sys/arch/arm/arm32/db_machdep.c:1.34	Fri Jul  3 10:19:18 2020
+++ src/sys/arch/arm/arm32/db_machdep.c	Fri Aug 14 16:18:36 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: db_machdep.c,v 1.34 2020/07/03 10:19:18 jmcneill Exp $	*/
+/*	$NetBSD: db_machdep.c,v 1.35 2020/08/14 16:18:36 skrll Exp $	*/
 
 /*
  * Copyright (c) 1996 Mark Brinicombe
@@ -34,7 +34,7 @@
 #endif
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: db_machdep.c,v 1.34 2020/07/03 10:19:18 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: db_machdep.c,v 1.35 2020/08/14 16:18:36 skrll Exp $");
 
 #include <sys/param.h>
 
@@ -521,8 +521,6 @@ show_cpuinfo(struct cpu_info *kci)
 	    &ci->ci_cpl, cpuid, ci->ci_cpl);
 	db_printf("%p cpu[%lu].ci_softints     = 0x%08x\n",
 	    &ci->ci_softints, cpuid, ci->ci_softints);
-	db_printf("%p cpu[%lu].ci_astpending   = 0x%08x\n",
-	    &ci->ci_astpending, cpuid, ci->ci_astpending);
 	db_printf("%p cpu[%lu].ci_intr_depth   = %u\n",
 	    &ci->ci_intr_depth, cpuid, ci->ci_intr_depth);
 

Index: src/sys/arch/arm/arm32/genassym.cf
diff -u src/sys/arch/arm/arm32/genassym.cf:1.93 src/sys/arch/arm/arm32/genassym.cf:1.94
--- src/sys/arch/arm/arm32/genassym.cf:1.93	Wed Jul  8 10:18:00 2020
+++ src/sys/arch/arm/arm32/genassym.cf	Fri Aug 14 16:18:36 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.93 2020/07/08 10:18:00 skrll Exp $
+#	$NetBSD: genassym.cf,v 1.94 2020/08/14 16:18:36 skrll Exp $
 
 # Copyright (c) 1982, 1990 The Regents of the University of California.
 # All rights reserved.
@@ -166,6 +166,7 @@ define	L_CPU			offsetof(struct lwp, l_cp
 define	L_PROC			offsetof(struct lwp, l_proc)
 define	L_PRIVATE		offsetof(struct lwp, l_private)
 define	L_FLAG			offsetof(struct lwp, l_flag)
+define	L_MD_ASTPENDING		offsetof(struct lwp, l_md.md_astpending)
 define	L_MD_FLAGS		offsetof(struct lwp, l_md.md_flags)
 define	L_MD_TF			offsetof(struct lwp, l_md.md_tf)
 define	MDLWP_NOALIGNFLT	MDLWP_NOALIGNFLT
@@ -225,7 +226,6 @@ define	CF_CONTROL		offsetof(struct cpu_f
 
 define	CPU_INFO_SIZE		sizeof(struct cpu_info)
 define	CI_ARM_CPUID		offsetof(struct cpu_info, ci_arm_cpuid)
-define	CI_ASTPENDING		offsetof(struct cpu_info, ci_astpending)
 define	CI_CPL			offsetof(struct cpu_info, ci_cpl)
 define	CI_CURLWP		offsetof(struct cpu_info, ci_curlwp)
 define	CI_INTR_DEPTH		offsetof(struct cpu_info, ci_intr_depth)

Index: src/sys/arch/arm/include/cpu.h
diff -u src/sys/arch/arm/include/cpu.h:1.111 src/sys/arch/arm/include/cpu.h:1.112
--- src/sys/arch/arm/include/cpu.h:1.111	Mon Jun 29 23:54:06 2020
+++ src/sys/arch/arm/include/cpu.h	Fri Aug 14 16:18:36 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.111 2020/06/29 23:54:06 riastradh Exp $	*/
+/*	$NetBSD: cpu.h,v 1.112 2020/08/14 16:18:36 skrll Exp $	*/
 
 /*
  * Copyright (c) 1994-1996 Mark Brinicombe.
@@ -154,6 +154,7 @@ static inline void cpu_dosoftints(void);
 #include <sys/cpu_data.h>
 #include <sys/device_if.h>
 #include <sys/evcnt.h>
+#include <sys/param.h>
 
 struct cpu_info {
 	struct cpu_data	ci_data;	/* MI per-cpu data */
@@ -163,22 +164,32 @@ struct cpu_info {
 	uint32_t	ci_arm_cputype;	/* CPU type */
 	uint32_t	ci_arm_cpurev;	/* CPU revision */
 	uint32_t	ci_ctrl;	/* The CPU control register */
-	int		ci_cpl;		/* current processor level (spl) */
-	volatile int	ci_astpending;	/* */
-	int		ci_want_resched;/* resched() was called */
-	int		ci_intr_depth;	/* */
 
-	int ci_kfpu_spl;
+	/*
+	 * the following are in their own cache line, as they are stored to
+	 * regularly by remote CPUs; when they were mixed with other fields
+	 * we observed frequent cache misses.
+	 */
+	int		ci_want_resched __aligned(COHERENCY_UNIT);
+					/* resched() was called */
+	lwp_t *		ci_curlwp __aligned(COHERENCY_UNIT);
+					/* current lwp */
+	lwp_t *		ci_onproc;	/* current user LWP / kthread */
+
+	/*
+	 * largely CPU-private.
+	 */
+	lwp_t *		ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);
 
 	struct cpu_softc *
 			ci_softc;	/* platform softc */
 
-	lwp_t *		ci_softlwps[SOFTINT_COUNT];
-	volatile uint32_t
-			ci_softints;
+	int		ci_cpl;		/* current processor level (spl) */
+	int		ci_kfpu_spl;
+
+	volatile u_int	ci_intr_depth;	/* */
+	volatile u_int	ci_softints;
 
-	lwp_t *		ci_curlwp;	/* current lwp */
-	lwp_t *		ci_onproc;	/* current user LWP / kthread */
 	lwp_t *		ci_lastlwp;	/* last lwp */
 
 	struct evcnt	ci_arm700bugcount;
@@ -315,19 +326,8 @@ cpu_dosoftints(void)
 /*
  * Scheduling glue
  */
-
-#ifdef __HAVE_PREEMPTION
-#define setsoftast(ci)		atomic_or_uint(&(ci)->ci_astpending, __BIT(0))
-#else
-#define setsoftast(ci)		((ci)->ci_astpending = __BIT(0))
-#endif
-
-/*
- * Notify the current process (p) that it has a signal pending,
- * process as soon as possible.
- */
-
-#define cpu_signotify(l)		setsoftast((l)->l_cpu)
+void cpu_signotify(struct lwp *);
+#define	setsoftast(ci)		(cpu_signotify((ci)->ci_onproc))
 
 /*
  * Give a profiling tick to the current process when the user profiling
@@ -335,7 +335,7 @@ cpu_dosoftints(void)
  * through trap(), marking the proc as needing a profiling tick.
  */
 #define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, \
-				 setsoftast((l)->l_cpu))
+				 setsoftast(lwp_getcpu(l)))
 
 /*
  * We've already preallocated the stack for the idlelwps for additional CPUs.

Index: src/sys/arch/arm/include/locore.h
diff -u src/sys/arch/arm/include/locore.h:1.32 src/sys/arch/arm/include/locore.h:1.33
--- src/sys/arch/arm/include/locore.h:1.32	Sat Feb 15 08:16:11 2020
+++ src/sys/arch/arm/include/locore.h	Fri Aug 14 16:18:36 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.h,v 1.32 2020/02/15 08:16:11 skrll Exp $	*/
+/*	$NetBSD: locore.h,v 1.33 2020/08/14 16:18:36 skrll Exp $	*/
 
 /*
  * Copyright (c) 1994-1996 Mark Brinicombe.
@@ -87,18 +87,23 @@
 #if defined (TPIDRPRW_IS_CURCPU)
 #define GET_CURCPU(rX)		mrc	p15, 0, rX, c13, c0, 4
 #define GET_CURLWP(rX)		GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
+#define GET_CURX(rCPU, rLWP)	GET_CURCPU(rCPU); ldr rLWP, [rCPU, #CI_CURLWP]
 #elif defined (TPIDRPRW_IS_CURLWP)
 #define GET_CURLWP(rX)		mrc	p15, 0, rX, c13, c0, 4
 #if defined (MULTIPROCESSOR)
 #define GET_CURCPU(rX)		GET_CURLWP(rX); ldr rX, [rX, #L_CPU]
+#define GET_CURX(rCPU, rLWP)	GET_CURLWP(rLWP); ldr rCPU, [rLWP, #L_CPU]
 #elif defined(_ARM_ARCH_7)
 #define GET_CURCPU(rX)		movw rX, #:lower16:cpu_info_store; movt rX, #:upper16:cpu_info_store
+#define GET_CURX(rCPU, rLWP)	GET_CURLWP(rLWP); GET_CURCPU(rCPU)
 #else
 #define GET_CURCPU(rX)		ldr rX, =_C_LABEL(cpu_info_store)
+#define GET_CURX(rCPU, rLWP)	GET_CURLWP(rLWP); ldr rCPU, [rLWP, #L_CPU]
 #endif
 #elif !defined(MULTIPROCESSOR)
 #define GET_CURCPU(rX)		ldr rX, =_C_LABEL(cpu_info_store)
 #define GET_CURLWP(rX)		GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
+#define GET_CURX(rCPU, rLWP)	GET_CURCPU(rCPU); ldr rLWP, [rCPU, #CI_CURLWP]
 #endif
 #define GET_CURPCB(rX)		GET_CURLWP(rX); ldr rX, [rX, #L_PCB]
 

Index: src/sys/arch/arm/include/proc.h
diff -u src/sys/arch/arm/include/proc.h:1.18 src/sys/arch/arm/include/proc.h:1.19
--- src/sys/arch/arm/include/proc.h:1.18	Tue Oct 31 12:37:23 2017
+++ src/sys/arch/arm/include/proc.h	Fri Aug 14 16:18:36 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: proc.h,v 1.18 2017/10/31 12:37:23 martin Exp $	*/
+/*	$NetBSD: proc.h,v 1.19 2020/08/14 16:18:36 skrll Exp $	*/
 
 /*
  * Copyright (c) 1994 Mark Brinicombe.
@@ -45,6 +45,7 @@ struct lwp;
 struct mdlwp {
 	struct trapframe *md_tf;
 	int	md_flags;
+	volatile uint32_t md_astpending;
 };
 
 /* Flags setttings for md_flags */

Index: src/sys/arch/arm/include/arm32/frame.h
diff -u src/sys/arch/arm/include/arm32/frame.h:1.47 src/sys/arch/arm/include/arm32/frame.h:1.48
--- src/sys/arch/arm/include/arm32/frame.h:1.47	Sun Oct 28 14:46:59 2018
+++ src/sys/arch/arm/include/arm32/frame.h	Fri Aug 14 16:18:36 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: frame.h,v 1.47 2018/10/28 14:46:59 skrll Exp $	*/
+/*	$NetBSD: frame.h,v 1.48 2020/08/14 16:18:36 skrll Exp $	*/
 
 /*
  * Copyright (c) 1994-1997 Mark Brinicombe.
@@ -151,26 +151,16 @@ void validate_trapframe(trapframe_t *, i
 	msr	cpsr_c, ra		/* Restore interrupts */
 #endif
 
-#ifdef __HAVE_PREEMPTION
-#define DO_CLEAR_ASTPENDING						\
-	mvn	r1, #1			/* complement of 1 */		;\
-	add	r0, r4, #CI_ASTPENDING	/* address of astpending */	;\
-	bl	_C_LABEL(atomic_and_uint) /* clear AST */
-#else
-#define DO_CLEAR_ASTPENDING						\
-	mov	r0, #0							;\
-	str	r0, [r4, #CI_ASTPENDING] /* clear AST */
-#endif
-
 #define DO_PENDING_AST(lbl)						;\
-1:	ldr	r1, [r4, #CI_ASTPENDING] /* Pending AST? */		;\
-	tst	r1, #0x00000001						;\
+1:	ldr	r1, [r5, #L_MD_ASTPENDING] /* Pending AST? */		;\
+	tst	r1, #1							;\
 	beq	lbl			/* Nope. Just bail */		;\
-	DO_CLEAR_ASTPENDING						;\
-	CPSIE_I(r5, r5)			/* Restore interrupts */	;\
+	bic	r0, r1, #1		 /* clear AST */		;\
+	str	r0, [r5, #L_MD_ASTPENDING]				;\
+	CPSIE_I(r6, r6)			/* Restore interrupts */	;\
 	mov	r0, sp							;\
 	bl	_C_LABEL(ast)		/* ast(frame) */		;\
-	CPSID_I(r0, r5)			/* Disable interrupts */	;\
+	CPSID_I(r0, r6)			/* Disable interrupts */	;\
 	b	1b			/* test again */
 
 /*
@@ -179,8 +169,8 @@ void validate_trapframe(trapframe_t *, i
  * alignment faults when executing old a.out ARM binaries.
  *
  * Note that when ENABLE_ALIGNMENTS_FAULTS finishes r4 will contain
- * pointer to the cpu's cpu_info.  DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
- * relies on r4 being preserved.
+ * curcpu() and r5 containing curlwp.  DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ * relies on r4 and r5 being preserved.
  */
 #ifdef EXEC_AOUT
 #define	AST_ALIGNMENT_FAULT_LOCALS					\
@@ -198,10 +188,9 @@ void validate_trapframe(trapframe_t *, i
 #define	ENABLE_ALIGNMENT_FAULTS						\
 	and	r7, r0, #(PSR_MODE)	/* Test for USR32 mode */	;\
 	cmp	r7, #(PSR_USR32_MODE)					;\
-	GET_CURCPU(r4)			/* r4 = cpuinfo */		;\
+	GET_CURX(r4, r5)		/* r4 = curcpu, r5 = curlwp */	;\
 	bne	1f			/* Not USR mode skip AFLT */	;\
-	ldr	r1, [r4, #CI_CURLWP]	/* get curlwp from cpu_info */	;\
-	ldr	r1, [r1, #L_MD_FLAGS]	/* Fetch l_md.md_flags */	;\
+	ldr	r1, [r5, #L_MD_FLAGS]	/* Fetch l_md.md_flags */	;\
 	tst	r1, #MDLWP_NOALIGNFLT					;\
 	beq	1f			/* AFLTs already enabled */	;\
 	ldr	r2, .Laflt_cpufuncs					;\
@@ -213,13 +202,13 @@ void validate_trapframe(trapframe_t *, i
 /*
  * This macro must be invoked just before PULLFRAMEFROMSVCANDEXIT or
  * PULLFRAME at the end of interrupt/exception handlers.  We know that
- * r4 points to cpu_info since that is what ENABLE_ALIGNMENT_FAULTS did
- * for use.
+ * r4 points to curcpu() and r5 points to curlwp since that is what
+ * ENABLE_ALIGNMENT_FAULTS did for us.
  */
 #define	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS				\
 	DO_PENDING_SOFTINTS						;\
-	GET_CPSR(r5)			/* save CPSR */			;\
-	CPSID_I(r1, r5)			/* Disable interrupts */	;\
+	GET_CPSR(r6)			/* save CPSR */			;\
+	CPSID_I(r1, r6)			/* Disable interrupts */	;\
 	cmp	r7, #(PSR_USR32_MODE)	/* Returning to USR mode? */	;\
 	bne	3f			/* Nope, get out now */		;\
 	DO_PENDING_AST(2f)		/* Pending AST? */		;\
@@ -240,13 +229,13 @@ void validate_trapframe(trapframe_t *, i
 
 #define	ENABLE_ALIGNMENT_FAULTS						\
 	and	r7, r0, #(PSR_MODE)	/* Test for USR32 mode */	;\
-	GET_CURCPU(r4)			/* r4 = cpuinfo */
+	GET_CURX(r4, r5)		/* r4 = curcpu, r5 = curlwp */
 
 
 #define	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS				\
 	DO_PENDING_SOFTINTS						;\
-	GET_CPSR(r5)			/* save CPSR */			;\
-	CPSID_I(r1, r5)			/* Disable interrupts */	;\
+	GET_CPSR(r6)			/* save CPSR */			;\
+	CPSID_I(r1, r6)			/* Disable interrupts */	;\
 	cmp	r7, #(PSR_USR32_MODE)					;\
 	bne	2f			/* Nope, get out now */		;\
 	DO_PENDING_AST(2f)		/* Pending AST? */		;\

Index: src/sys/arch/evbarm/conf/std.generic
diff -u src/sys/arch/evbarm/conf/std.generic:1.7 src/sys/arch/evbarm/conf/std.generic:1.8
--- src/sys/arch/evbarm/conf/std.generic:1.7	Sat Jan 25 18:10:11 2020
+++ src/sys/arch/evbarm/conf/std.generic	Fri Aug 14 16:18:37 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: std.generic,v 1.7 2020/01/25 18:10:11 skrll Exp $
+#	$NetBSD: std.generic,v 1.8 2020/08/14 16:18:37 skrll Exp $
 #
 # 	generic NetBSD/evbarm with FDT support
 
@@ -22,7 +22,7 @@ options 	FPU_VFP
 options 	MODULAR
 options 	MODULAR_DEFAULT_AUTOLOAD
 options 	PCI_NETBSD_CONFIGURE
-options 	TPIDRPRW_IS_CURCPU
+options 	TPIDRPRW_IS_CURLWP
 options 	__BUS_SPACE_HAS_STREAM_METHODS
 options 	__HAVE_CPU_COUNTER
 options 	__HAVE_CPU_UAREA_ALLOC_IDLELWP

Reply via email to