Module Name:    src
Committed By:   martin
Date:           Fri Aug  4 13:06:59 UTC 2023

Modified Files:
        src/lib/libpthread [netbsd-8]: pthread_int.h pthread_spin.c
        src/lib/libpthread/arch/aarch64 [netbsd-8]: pthread_md.h
        src/lib/libpthread/arch/arm [netbsd-8]: pthread_md.h
        src/lib/libpthread/arch/i386 [netbsd-8]: pthread_md.h
        src/lib/libpthread/arch/x86_64 [netbsd-8]: pthread_md.h

Log Message:
Pull up following revision(s) (requested by riastradh in ticket #1878):

        lib/libpthread/arch/x86_64/pthread_md.h: revision 1.13
        lib/libpthread/pthread_int.h: revision 1.110
        lib/libpthread/pthread_int.h: revision 1.111
        lib/libpthread/arch/i386/pthread_md.h: revision 1.21
        lib/libpthread/arch/arm/pthread_md.h: revision 1.12
        lib/libpthread/arch/arm/pthread_md.h: revision 1.13
        lib/libpthread/pthread_spin.c: revision 1.11
        lib/libpthread/arch/aarch64/pthread_md.h: revision 1.2

libpthread: Use __nothing, not /* nothing */, for empty macros.

No functional change intended -- just safer to do it this way in case
the macros are used in if branches or comma expressions.
PR port-arm/57437 (pthread__smt_pause/wake issue)

libpthread: New pthread__smt_wait to put CPU in low power for spin.

This is now distinct from pthread__smt_pause, which is for spin lock
backoff with no paired wakeup.

On Arm, there is a single-bit event register per CPU, and there are two
instructions to manage it:
- wfe, wait for event -- if event register is clear, enter low power
  mode and wait until event register is set; then exit low power mode
  and clear event register
- sev, signal event -- sets event register on all CPUs (other
  circumstances like interrupts also set the event register and cause
  wfe to wake)

These can be used to reduce the power consumption of spinning for a
lock, but only if they are actually paired -- if there's no sev, wfe
might hang indefinitely.  Currently only pthread_spin(3) actually
pairs them; the other lock primitives (internal lock, mutex, rwlock)
do not -- they have spin lock backoff loops, but no corresponding
wakeup to cancel a wfe.

It may be worthwhile to teach the other lock primitives to pair
wfe/sev, but that requires some performance measurement to verify
it's actually worthwhile.  So for now, we just make sure not to use
wfe when there's no sev, and keep everything else the same -- this
should fix severe performance degradation in libpthread on Arm
without hurting anything else.

No change in the generated code on amd64 and i386.  No change in the
generated code for pthread_spin.c on arm and aarch64 -- changes only
the generated code for pthread_lock.c, pthread_mutex.c, and
pthread_rwlock.c, as intended.
PR port-arm/57437


To generate a diff of this commit:
cvs rdiff -u -r1.93.4.1 -r1.93.4.2 src/lib/libpthread/pthread_int.h
cvs rdiff -u -r1.6 -r1.6.24.1 src/lib/libpthread/pthread_spin.c
cvs rdiff -u -r1.1 -r1.1.18.1 src/lib/libpthread/arch/aarch64/pthread_md.h
cvs rdiff -u -r1.9 -r1.9.18.1 src/lib/libpthread/arch/arm/pthread_md.h
cvs rdiff -u -r1.20 -r1.20.24.1 src/lib/libpthread/arch/i386/pthread_md.h
cvs rdiff -u -r1.12 -r1.12.36.1 src/lib/libpthread/arch/x86_64/pthread_md.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/lib/libpthread/pthread_int.h
diff -u src/lib/libpthread/pthread_int.h:1.93.4.1 src/lib/libpthread/pthread_int.h:1.93.4.2
--- src/lib/libpthread/pthread_int.h:1.93.4.1	Thu Aug 31 08:32:39 2017
+++ src/lib/libpthread/pthread_int.h	Fri Aug  4 13:06:59 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_int.h,v 1.93.4.1 2017/08/31 08:32:39 bouyer Exp $	*/
+/*	$NetBSD: pthread_int.h,v 1.93.4.2 2023/08/04 13:06:59 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -301,10 +301,13 @@ int	pthread__checkpri(int) PTHREAD_HIDE;
 int	pthread__add_specific(pthread_t, pthread_key_t, const void *) PTHREAD_HIDE;
 
 #ifndef pthread__smt_pause
-#define	pthread__smt_pause()	/* nothing */
+#define	pthread__smt_pause()	__nothing
+#endif
+#ifndef pthread__smt_wait
+#define	pthread__smt_wait()	__nothing
 #endif
 #ifndef pthread__smt_wake
-#define	pthread__smt_wake()	/* nothing */
+#define	pthread__smt_wake()	__nothing
 #endif
 
 /*

Index: src/lib/libpthread/pthread_spin.c
diff -u src/lib/libpthread/pthread_spin.c:1.6 src/lib/libpthread/pthread_spin.c:1.6.24.1
--- src/lib/libpthread/pthread_spin.c:1.6	Thu Aug 16 04:49:47 2012
+++ src/lib/libpthread/pthread_spin.c	Fri Aug  4 13:06:59 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_spin.c,v 1.6 2012/08/16 04:49:47 matt Exp $	*/
+/*	$NetBSD: pthread_spin.c,v 1.6.24.1 2023/08/04 13:06:59 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_spin.c,v 1.6 2012/08/16 04:49:47 matt Exp $");
+__RCSID("$NetBSD: pthread_spin.c,v 1.6.24.1 2023/08/04 13:06:59 martin Exp $");
 
 #include <sys/types.h>
 #include <sys/ras.h>
@@ -99,7 +99,7 @@ pthread_spin_lock(pthread_spinlock_t *lo
 
 	self = pthread__self();
 	while (pthread__spintrylock(self, &lock->pts_spin) == 0) {
-		pthread__smt_pause();
+		pthread__smt_wait();
 	}
 
 	return 0;

Index: src/lib/libpthread/arch/aarch64/pthread_md.h
diff -u src/lib/libpthread/arch/aarch64/pthread_md.h:1.1 src/lib/libpthread/arch/aarch64/pthread_md.h:1.1.18.1
--- src/lib/libpthread/arch/aarch64/pthread_md.h:1.1	Sun Aug 10 05:47:37 2014
+++ src/lib/libpthread/arch/aarch64/pthread_md.h	Fri Aug  4 13:06:59 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_md.h,v 1.1 2014/08/10 05:47:37 matt Exp $ */
+/* $NetBSD: pthread_md.h,v 1.1.18.1 2023/08/04 13:06:59 martin Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -42,7 +42,7 @@ pthread__sp(void)
 	return ret;
 }
 
-#define pthread__smt_pause()	__asm __volatile("wfe") /* wfe */
+#define pthread__smt_wait()	__asm __volatile("wfe") /* wfe */
 #define pthread__smt_wake()	__asm __volatile("sev") /* sev */
 
 #define	pthread__uc_sp(ucp)	((ucp)->uc_mcontext.__gregs[_REG_SP])

Index: src/lib/libpthread/arch/arm/pthread_md.h
diff -u src/lib/libpthread/arch/arm/pthread_md.h:1.9 src/lib/libpthread/arch/arm/pthread_md.h:1.9.18.1
--- src/lib/libpthread/arch/arm/pthread_md.h:1.9	Thu Aug 15 22:37:29 2013
+++ src/lib/libpthread/arch/arm/pthread_md.h	Fri Aug  4 13:06:59 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_md.h,v 1.9 2013/08/15 22:37:29 matt Exp $	*/
+/*	$NetBSD: pthread_md.h,v 1.9.18.1 2023/08/04 13:06:59 martin Exp $	*/
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -50,14 +50,14 @@ pthread__sp(void)
 }
 
 #if defined(__thumb__) && defined(_ARM_ARCH_6)
-#define pthread__smt_pause()	__asm __volatile(".inst.n 0xbf20") /* wfe */
+#define pthread__smt_wait()	__asm __volatile(".inst.n 0xbf20") /* wfe */
 #define pthread__smt_wake()	__asm __volatile(".inst.n 0xbf40") /* sev */
 #elif !defined(__thumb__)
-#define pthread__smt_pause()	__asm __volatile(".inst 0xe320f002") /* wfe */
+#define pthread__smt_wait()	__asm __volatile(".inst 0xe320f002") /* wfe */
 #define pthread__smt_wake()	__asm __volatile(".inst 0xe320f004") /* sev */
 #else
-#define pthread__smt_pause()
-#define pthread__smt_wake()
+#define pthread__smt_wait()	__nothing
+#define pthread__smt_wake()	__nothing
 #endif
 
 #define	pthread__uc_sp(ucp)	((ucp)->uc_mcontext.__gregs[_REG_SP])

Index: src/lib/libpthread/arch/i386/pthread_md.h
diff -u src/lib/libpthread/arch/i386/pthread_md.h:1.20 src/lib/libpthread/arch/i386/pthread_md.h:1.20.24.1
--- src/lib/libpthread/arch/i386/pthread_md.h:1.20	Fri Mar  2 23:19:47 2012
+++ src/lib/libpthread/arch/i386/pthread_md.h	Fri Aug  4 13:06:59 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_md.h,v 1.20 2012/03/02 23:19:47 joerg Exp $	*/
+/*	$NetBSD: pthread_md.h,v 1.20.24.1 2023/08/04 13:06:59 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2007, 2008 The NetBSD Foundation, Inc.
@@ -61,6 +61,7 @@ _initcontext_u_md(ucontext_t *ucp)
 #define	_INITCONTEXT_U_MD(ucp)	_initcontext_u_md(ucp);
 
 #define	pthread__smt_pause()	__asm __volatile("rep; nop" ::: "memory")
+#define	pthread__smt_wait()	__asm __volatile("rep; nop" ::: "memory")
 
 /* Don't need additional memory barriers. */
 #define	PTHREAD__ATOMIC_IS_MEMBAR

Index: src/lib/libpthread/arch/x86_64/pthread_md.h
diff -u src/lib/libpthread/arch/x86_64/pthread_md.h:1.12 src/lib/libpthread/arch/x86_64/pthread_md.h:1.12.36.1
--- src/lib/libpthread/arch/x86_64/pthread_md.h:1.12	Tue Jan 25 19:12:06 2011
+++ src/lib/libpthread/arch/x86_64/pthread_md.h	Fri Aug  4 13:06:58 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_md.h,v 1.12 2011/01/25 19:12:06 christos Exp $	*/
+/*	$NetBSD: pthread_md.h,v 1.12.36.1 2023/08/04 13:06:58 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2007, 2008 The NetBSD Foundation, Inc.
@@ -64,6 +64,7 @@ pthread__sp(void)
 	(ucp)->uc_mcontext.__gregs[_REG_RFL] = 0x202;
 
 #define	pthread__smt_pause()	__asm __volatile("rep; nop" ::: "memory")
+#define	pthread__smt_wait()	__asm __volatile("rep; nop" ::: "memory")
 
 /* Don't need additional memory barriers. */
 #define	PTHREAD__ATOMIC_IS_MEMBAR

Reply via email to