Module Name: src
Committed By: riastradh
Date: Mon Mar 31 14:07:10 UTC 2025
Modified Files:
src/lib/libpthread: pthread.c pthread_cancelstub.c pthread_cond.c
pthread_int.h
src/tests/lib/libpthread: t_cancellation.c
Log Message:
pthread_cancel(3): Rework.
Make pthread_setcancelstate(3) async-signal-safe. (As a side effect,
this also makes pthread_setcanceltype(3) async-signal-safe, although
that is not required.)
PR lib/59134: POSIX.1-2024: pthread_setcancelstate must be
async-signal-safe
To generate a diff of this commit:
cvs rdiff -u -r1.185 -r1.186 src/lib/libpthread/pthread.c
cvs rdiff -u -r1.45 -r1.46 src/lib/libpthread/pthread_cancelstub.c
cvs rdiff -u -r1.77 -r1.78 src/lib/libpthread/pthread_cond.c
cvs rdiff -u -r1.112 -r1.113 src/lib/libpthread/pthread_int.h
cvs rdiff -u -r1.1 -r1.2 src/tests/lib/libpthread/t_cancellation.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/lib/libpthread/pthread.c
diff -u src/lib/libpthread/pthread.c:1.185 src/lib/libpthread/pthread.c:1.186
--- src/lib/libpthread/pthread.c:1.185 Sat Jun 8 08:01:49 2024
+++ src/lib/libpthread/pthread.c Mon Mar 31 14:07:10 2025
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread.c,v 1.185 2024/06/08 08:01:49 hannken Exp $ */
+/* $NetBSD: pthread.c,v 1.186 2025/03/31 14:07:10 riastradh Exp $ */
/*-
* Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008, 2020
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread.c,v 1.185 2024/06/08 08:01:49 hannken Exp $");
+__RCSID("$NetBSD: pthread.c,v 1.186 2025/03/31 14:07:10 riastradh Exp $");
#define __EXPOSE_STACK 1
@@ -54,6 +54,7 @@ __RCSID("$NetBSD: pthread.c,v 1.185 2024
#include <errno.h>
#include <lwp.h>
#include <signal.h>
+#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
@@ -69,6 +70,15 @@ __RCSID("$NetBSD: pthread.c,v 1.185 2024
#include "pthread_makelwp.h"
#include "reentrant.h"
+#define atomic_load_relaxed(p) \
+ atomic_load_explicit(p, memory_order_relaxed)
+
+#define atomic_store_relaxed(p, v) \
+ atomic_store_explicit(p, v, memory_order_relaxed)
+
+#define atomic_store_release(p, v) \
+ atomic_store_explicit(p, v, memory_order_release)
+
__BEGIN_DECLS
void _malloc_thread_cleanup(void) __weak;
__END_DECLS
@@ -647,10 +657,7 @@ pthread_exit(void *retval)
self = pthread__self();
/* Disable cancellability. */
- pthread_mutex_lock(&self->pt_lock);
- self->pt_flags |= PT_FLAG_CS_DISABLED;
- self->pt_cancel = 0;
- pthread_mutex_unlock(&self->pt_lock);
+ atomic_store_relaxed(&self->pt_cancel, PT_CANCEL_DISABLED);
/* Call any cancellation cleanup handlers */
if (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
@@ -868,20 +875,34 @@ pthread_self(void)
int
pthread_cancel(pthread_t thread)
{
+ unsigned old, new;
+ bool wake;
pthread__error(EINVAL, "Invalid thread",
thread->pt_magic == PT_MAGIC);
if (pthread__find(thread) != 0)
return ESRCH;
- pthread_mutex_lock(&thread->pt_lock);
- thread->pt_flags |= PT_FLAG_CS_PENDING;
- if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
- thread->pt_cancel = 1;
- pthread_mutex_unlock(&thread->pt_lock);
+
+ /*
+ * membar_release matches membar_acquire in
+ * pthread_setcancelstate and pthread__testcancel.
+ */
+ membar_release();
+
+ do {
+ old = atomic_load_relaxed(&thread->pt_cancel);
+ new = old | PT_CANCEL_PENDING;
+ wake = false;
+ if ((old & PT_CANCEL_DISABLED) == 0) {
+ new |= PT_CANCEL_CANCELLED;
+ wake = true;
+ }
+ } while (__predict_false(atomic_cas_uint(&thread->pt_cancel, old, new)
+ != old));
+
+ if (wake)
_lwp_wakeup(thread->pt_lid);
- } else
- pthread_mutex_unlock(&thread->pt_lock);
return 0;
}
@@ -891,50 +912,76 @@ int
pthread_setcancelstate(int state, int *oldstate)
{
pthread_t self;
- int retval;
+ unsigned flags, old, new;
+ bool cancelled;
if (__predict_false(__uselibcstub))
return __libc_thr_setcancelstate_stub(state, oldstate);
self = pthread__self();
- retval = 0;
- pthread_mutex_lock(&self->pt_lock);
-
- if (oldstate != NULL) {
- if (self->pt_flags & PT_FLAG_CS_DISABLED)
- *oldstate = PTHREAD_CANCEL_DISABLE;
- else
- *oldstate = PTHREAD_CANCEL_ENABLE;
+ switch (state) {
+ case PTHREAD_CANCEL_ENABLE:
+ flags = 0;
+ break;
+ case PTHREAD_CANCEL_DISABLE:
+ flags = PT_CANCEL_DISABLED;
+ break;
+ default:
+ return EINVAL;
}
- if (state == PTHREAD_CANCEL_DISABLE) {
- self->pt_flags |= PT_FLAG_CS_DISABLED;
- if (self->pt_cancel) {
- self->pt_flags |= PT_FLAG_CS_PENDING;
- self->pt_cancel = 0;
- }
- } else if (state == PTHREAD_CANCEL_ENABLE) {
- self->pt_flags &= ~PT_FLAG_CS_DISABLED;
+ do {
+ old = atomic_load_relaxed(&self->pt_cancel);
+ new = (old & ~PT_CANCEL_DISABLED) | flags;
/*
+ * If we disable while cancelled, switch back to
+ * pending so future cancellation tests will not fire
+ * until enabled again.
+ *
* If a cancellation was requested while cancellation
* was disabled, note that fact for future
* cancellation tests.
*/
- if (self->pt_flags & PT_FLAG_CS_PENDING) {
- self->pt_cancel = 1;
+ cancelled = false;
+ if (__predict_false((flags | (old & PT_CANCEL_CANCELLED)) ==
+ (PT_CANCEL_DISABLED|PT_CANCEL_CANCELLED))) {
+ new &= ~PT_CANCEL_CANCELLED;
+ new |= PT_CANCEL_PENDING;
+ } else if (__predict_false((flags |
+ (old & PT_CANCEL_PENDING)) ==
+ PT_CANCEL_PENDING)) {
+ new |= PT_CANCEL_CANCELLED;
/* This is not a deferred cancellation point. */
- if (self->pt_flags & PT_FLAG_CS_ASYNC) {
- pthread_mutex_unlock(&self->pt_lock);
- pthread__cancelled();
- }
+ if (__predict_false(old & PT_CANCEL_ASYNC))
+ cancelled = true;
}
- } else
- retval = EINVAL;
+ } while (__predict_false(atomic_cas_uint(&self->pt_cancel, old, new)
+ != old));
- pthread_mutex_unlock(&self->pt_lock);
+ /*
+ * If we transitioned from PTHREAD_CANCEL_DISABLED to
+ * PTHREAD_CANCEL_ENABLED, there was a pending cancel, and we
+ * are configured with asynchronous cancellation, we are now
+ * cancelled -- make it happen.
+ */
+ if (__predict_false(cancelled)) {
+ /*
+ * membar_acquire matches membar_release in
+ * pthread_cancel.
+ */
+ membar_acquire();
+ pthread__cancelled();
+ }
+
+ if (oldstate) {
+ if (old & PT_CANCEL_DISABLED)
+ *oldstate = PTHREAD_CANCEL_DISABLE;
+ else
+ *oldstate = PTHREAD_CANCEL_ENABLE;
+ }
- return retval;
+ return 0;
}
@@ -942,34 +989,45 @@ int
pthread_setcanceltype(int type, int *oldtype)
{
pthread_t self;
- int retval;
+ unsigned flags, old, new;
+ bool cancelled;
self = pthread__self();
- retval = 0;
- pthread_mutex_lock(&self->pt_lock);
+ switch (type) {
+ case PTHREAD_CANCEL_DEFERRED:
+ flags = 0;
+ break;
+ case PTHREAD_CANCEL_ASYNCHRONOUS:
+ flags = PT_CANCEL_ASYNC;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ do {
+ old = atomic_load_relaxed(&self->pt_cancel);
+ new = (old & ~PT_CANCEL_ASYNC) | flags;
+ cancelled = false;
+ if (__predict_false((flags | (old & PT_CANCEL_CANCELLED)) ==
+ (PT_CANCEL_ASYNC|PT_CANCEL_CANCELLED)))
+ cancelled = true;
+ } while (__predict_false(atomic_cas_uint(&self->pt_cancel, old, new)
+ != old));
+
+ if (__predict_false(cancelled)) {
+ membar_acquire();
+ pthread__cancelled();
+ }
if (oldtype != NULL) {
- if (self->pt_flags & PT_FLAG_CS_ASYNC)
+ if (old & PT_CANCEL_ASYNC)
*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
else
*oldtype = PTHREAD_CANCEL_DEFERRED;
}
- if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
- self->pt_flags |= PT_FLAG_CS_ASYNC;
- if (self->pt_cancel) {
- pthread_mutex_unlock(&self->pt_lock);
- pthread__cancelled();
- }
- } else if (type == PTHREAD_CANCEL_DEFERRED)
- self->pt_flags &= ~PT_FLAG_CS_ASYNC;
- else
- retval = EINVAL;
-
- pthread_mutex_unlock(&self->pt_lock);
-
- return retval;
+ return 0;
}
@@ -979,8 +1037,8 @@ pthread_testcancel(void)
pthread_t self;
self = pthread__self();
- if (self->pt_cancel)
- pthread__cancelled();
+
+ pthread__testcancel(self);
}
@@ -1008,8 +1066,19 @@ void
pthread__testcancel(pthread_t self)
{
- if (self->pt_cancel)
+ /*
+ * We use atomic_load_relaxed and then a conditional
+ * membar_acquire, rather than atomic_load_acquire, in order to
+ * avoid incurring the cost of an acquire barrier in the common
+ * case of not having been cancelled.
+ *
+ * membar_acquire matches membar_release in pthread_cancel.
+ */
+ if (__predict_false(atomic_load_relaxed(&self->pt_cancel) &
+ PT_CANCEL_CANCELLED)) {
+ membar_acquire();
pthread__cancelled();
+ }
}
@@ -1191,7 +1260,9 @@ pthread__park(pthread_t self, pthread_mu
}
}
/* Check for cancellation. */
- if (cancelpt && self->pt_cancel)
+ if (cancelpt &&
+ (atomic_load_relaxed(&self->pt_cancel) &
+ PT_CANCEL_CANCELLED))
rv = EINTR;
} while (self->pt_sleepobj != NULL && rv == 0);
return rv;
Index: src/lib/libpthread/pthread_cancelstub.c
diff -u src/lib/libpthread/pthread_cancelstub.c:1.45 src/lib/libpthread/pthread_cancelstub.c:1.46
--- src/lib/libpthread/pthread_cancelstub.c:1.45 Fri Jan 19 19:55:03 2024
+++ src/lib/libpthread/pthread_cancelstub.c Mon Mar 31 14:07:10 2025
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_cancelstub.c,v 1.45 2024/01/19 19:55:03 christos Exp $ */
+/* $NetBSD: pthread_cancelstub.c,v 1.46 2025/03/31 14:07:10 riastradh Exp $ */
/*-
* Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
@@ -33,7 +33,7 @@
#undef _FORTIFY_SOURCE
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_cancelstub.c,v 1.45 2024/01/19 19:55:03 christos Exp $");
+__RCSID("$NetBSD: pthread_cancelstub.c,v 1.46 2025/03/31 14:07:10 riastradh Exp $");
/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"
@@ -65,6 +65,7 @@ __RCSID("$NetBSD: pthread_cancelstub.c,v
#include <fcntl.h>
#include <mqueue.h>
#include <poll.h>
+#include <stdatomic.h>
#include <stdarg.h>
#include <unistd.h>
@@ -88,6 +89,9 @@ __RCSID("$NetBSD: pthread_cancelstub.c,v
#include "pthread_int.h"
#include "reentrant.h"
+#define atomic_load_relaxed(p) \
+ atomic_load_explicit(p, memory_order_relaxed)
+
int pthread__cancel_stub_binder;
int _sys_accept(int, struct sockaddr *, socklen_t *);
@@ -147,8 +151,11 @@ int __sigsuspend14(const sigset_t *);
#define TESTCANCEL(id) do { \
if (__predict_true(!__uselibcstub) && \
- __predict_false((id)->pt_cancel)) \
+ __predict_false(atomic_load_relaxed(&(id)->pt_cancel) & \
+ PT_CANCEL_CANCELLED)) { \
+ membar_acquire(); \
pthread__cancelled(); \
+ } \
} while (0)
Index: src/lib/libpthread/pthread_cond.c
diff -u src/lib/libpthread/pthread_cond.c:1.77 src/lib/libpthread/pthread_cond.c:1.78
--- src/lib/libpthread/pthread_cond.c:1.77 Sat Feb 12 14:59:32 2022
+++ src/lib/libpthread/pthread_cond.c Mon Mar 31 14:07:10 2025
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_cond.c,v 1.77 2022/02/12 14:59:32 riastradh Exp $ */
+/* $NetBSD: pthread_cond.c,v 1.78 2025/03/31 14:07:10 riastradh Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
@@ -30,11 +30,12 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_cond.c,v 1.77 2022/02/12 14:59:32 riastradh Exp $");
+__RCSID("$NetBSD: pthread_cond.c,v 1.78 2025/03/31 14:07:10 riastradh Exp $");
/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"
+#include <stdatomic.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/time.h>
@@ -44,6 +45,9 @@ __RCSID("$NetBSD: pthread_cond.c,v 1.77
#include "pthread_int.h"
#include "reentrant.h"
+#define atomic_load_relaxed(p) \
+ atomic_load_explicit(p, memory_order_relaxed)
+
int _sys___nanosleep50(const struct timespec *, struct timespec *);
int _pthread_cond_has_waiters_np(pthread_cond_t *);
@@ -139,7 +143,9 @@ pthread_cond_timedwait(pthread_cond_t *c
self = pthread__self();
pthread__assert(self->pt_lid != 0);
- if (__predict_false(self->pt_cancel)) {
+ if (__predict_false(atomic_load_relaxed(&self->pt_cancel) &
+ PT_CANCEL_CANCELLED)) {
+ membar_acquire();
pthread__cancelled();
}
@@ -166,7 +172,9 @@ pthread_cond_timedwait(pthread_cond_t *c
/* Drop the interlock and wait. */
error = 0;
pthread_mutex_unlock(mutex);
- while (waiter.lid && !(cancel = self->pt_cancel)) {
+ while (waiter.lid &&
+ !(cancel = atomic_load_relaxed(&self->pt_cancel) &
+ PT_CANCEL_CANCELLED)) {
int rv = _lwp_park(clkid, TIMER_ABSTIME, __UNCONST(abstime),
0, NULL, NULL);
if (rv == 0) {
@@ -209,6 +217,7 @@ pthread_cond_timedwait(pthread_cond_t *c
* held if this happens.
*/
if (cancel) {
+ membar_acquire();
pthread__cancelled();
}
Index: src/lib/libpthread/pthread_int.h
diff -u src/lib/libpthread/pthread_int.h:1.112 src/lib/libpthread/pthread_int.h:1.113
--- src/lib/libpthread/pthread_int.h:1.112 Sat Nov 30 01:04:04 2024
+++ src/lib/libpthread/pthread_int.h Mon Mar 31 14:07:10 2025
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_int.h,v 1.112 2024/11/30 01:04:04 christos Exp $ */
+/* $NetBSD: pthread_int.h,v 1.113 2025/03/31 14:07:10 riastradh Exp $ */
/*-
* Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008, 2020
@@ -96,7 +96,7 @@ struct __pthread_st {
unsigned int pt_magic; /* Magic number */
int pt_state; /* running, blocked, etc. */
int pt_flags; /* see PT_FLAG_* below */
- int pt_cancel; /* Deferred cancellation */
+ volatile unsigned int pt_cancel; /* Cancellation */
int pt_errno; /* Thread-specific errno. */
stack_t pt_stack; /* Our stack */
bool pt_stack_allocated;
@@ -152,13 +152,17 @@ struct __pthread_st {
/* Flag values */
#define PT_FLAG_DETACHED 0x0001
-#define PT_FLAG_CS_DISABLED 0x0004 /* Cancellation disabled */
-#define PT_FLAG_CS_ASYNC 0x0008 /* Cancellation is async */
-#define PT_FLAG_CS_PENDING 0x0010
#define PT_FLAG_SCOPE_SYSTEM 0x0040
#define PT_FLAG_EXPLICIT_SCHED 0x0080
#define PT_FLAG_SUSPENDED 0x0100 /* In the suspended queue */
+/* pt_cancel word */
+
+#define PT_CANCEL_DISABLED __BIT(0)
+#define PT_CANCEL_ASYNC __BIT(1)
+#define PT_CANCEL_PENDING __BIT(2)
+#define PT_CANCEL_CANCELLED __BIT(3)
+
#define PT_MAGIC 0x11110001
#define PT_DEAD 0xDEAD0001
Index: src/tests/lib/libpthread/t_cancellation.c
diff -u src/tests/lib/libpthread/t_cancellation.c:1.1 src/tests/lib/libpthread/t_cancellation.c:1.2
--- src/tests/lib/libpthread/t_cancellation.c:1.1 Mon Mar 31 13:57:06 2025
+++ src/tests/lib/libpthread/t_cancellation.c Mon Mar 31 14:07:10 2025
@@ -1,4 +1,4 @@
-/* $NetBSD: t_cancellation.c,v 1.1 2025/03/31 13:57:06 riastradh Exp $ */
+/* $NetBSD: t_cancellation.c,v 1.2 2025/03/31 14:07:10 riastradh Exp $ */
/*
* Copyright (c) 2025 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: t_cancellation.c,v 1.1 2025/03/31 13:57:06 riastradh Exp $");
+__RCSID("$NetBSD: t_cancellation.c,v 1.2 2025/03/31 14:07:10 riastradh Exp $");
#include <sys/mman.h>
#include <sys/msg.h>
@@ -1497,8 +1497,6 @@ ATF_TC_BODY(sigsafecancelstate, tc)
memory_order_relaxed);
}
- atf_tc_expect_signal(SIGALRM, "PR lib/59134: POSIX-1.2024:"
- " pthread_setcancelstate must be async-signal-safe");
alarm(1);
RZ(pthread_join(t, &result));
ATF_CHECK_MSG(result == NULL, "result=%p", result);
@@ -1619,4 +1617,3 @@ ATF_TP_ADD_TCS(tp)
return atf_no_error();
}
-