Module Name:    src
Committed By:   riastradh
Date:           Sun Feb 13 14:06:51 UTC 2022

Modified Files:
        src/sys/arch/hppa/include: lock.h

Log Message:
hppa: __cpu_simple_lock membar audit.

ok skrll


To generate a diff of this commit:
cvs rdiff -u -r1.23 -r1.24 src/sys/arch/hppa/include/lock.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/hppa/include/lock.h
diff -u src/sys/arch/hppa/include/lock.h:1.23 src/sys/arch/hppa/include/lock.h:1.24
--- src/sys/arch/hppa/include/lock.h:1.23	Sat Feb 12 17:17:53 2022
+++ src/sys/arch/hppa/include/lock.h	Sun Feb 13 14:06:51 2022
@@ -1,4 +1,4 @@
-/* 	$NetBSD: lock.h,v 1.23 2022/02/12 17:17:53 riastradh Exp $	*/
+/* 	$NetBSD: lock.h,v 1.24 2022/02/13 14:06:51 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -91,6 +91,25 @@ __cpu_simple_lock_init(__cpu_simple_lock
 	    __SIMPLELOCK_RAW_UNLOCKED;
 }
 
+static __inline int
+__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
+{
+	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);
+
+	if (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
+		return 0;
+
+	/*
+	 * __cpu_simple_lock_try must be a load-acquire operation, but
+	 * HPPA's LDCW does not appear to guarantee load-acquire
+	 * semantics, so we have to do LDCW and then an explicit SYNC
+	 * to make a load-acquire operation that pairs with a preceding
+	 * store-release in __cpu_simple_unlock.
+	 */
+	__sync();
+	return 1;
+}
+
 static __inline void
 __cpu_simple_lock(__cpu_simple_lock_t *alp)
 {
@@ -103,24 +122,20 @@ __cpu_simple_lock(__cpu_simple_lock_t *a
 	 * some work.
 	 */
 
-	while (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
+	while (!__cpu_simple_lock_try(alp))
 		while (*__aptr == __SIMPLELOCK_RAW_LOCKED)
 			;
 }
 
-static __inline int
-__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
-{
-	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);
-
-	return (__ldcw(__aptr) != __SIMPLELOCK_RAW_LOCKED);
-}
-
 static __inline void
 __cpu_simple_unlock(__cpu_simple_lock_t *alp)
 {
 	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);
 
+	/*
+	 * SYNC and then store makes a store-release that pairs with
+	 * the load-acquire in a subsequent __cpu_simple_lock_try.
+	 */
 	__sync();
 	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
 }

Reply via email to the mailing list.