https://github.com/python/cpython/commit/7b9ca26812fabcd1202238c989f0f0a9e5b02e87
commit: 7b9ca26812fabcd1202238c989f0f0a9e5b02e87
branch: 3.13
author: Miss Islington (bot) <[email protected]>
committer: colesbury <[email protected]>
date: 2024-05-08T21:31:37Z
summary:

[3.13] gh-117657: Fix data races when writing / reading `ob_gc_bits` (GH-118292) (#118796)

Use relaxed atomics when reading / writing to the field. There are still a
few places in the GC where we do not use atomics. Those should be safe as
the world is stopped.
(cherry picked from commit cb6f75a32ca2649c6cc1cabb0301eb783efbd55b)

Co-authored-by: mpage <[email protected]>

files:
M Include/internal/pycore_gc.h
M Include/internal/pycore_object.h
M Objects/object.c
M Tools/tsan/suppressions_free_threading.txt

diff --git a/Include/internal/pycore_gc.h b/Include/internal/pycore_gc.h
index 281094df786735..60582521db5bd7 100644
--- a/Include/internal/pycore_gc.h
+++ b/Include/internal/pycore_gc.h
@@ -37,7 +37,15 @@ static inline PyObject* _Py_FROM_GC(PyGC_Head *gc) {
 }
 
 
-/* Bit flags for ob_gc_bits (in Py_GIL_DISABLED builds) */
+/* Bit flags for ob_gc_bits (in Py_GIL_DISABLED builds)
+ *
+ * Setting the bits requires a relaxed store. The per-object lock must also be
+ * held, except when the object is only visible to a single thread (e.g. during
+ * object initialization or destruction).
+ *
+ * Reading the bits requires using a relaxed load, but does not require holding
+ * the per-object lock.
+ */
 #ifdef Py_GIL_DISABLED
 #  define _PyGC_BITS_TRACKED        (1)     // Tracked by the GC
 #  define _PyGC_BITS_FINALIZED      (2)     // tp_finalize was called
@@ -48,10 +56,34 @@ static inline PyObject* _Py_FROM_GC(PyGC_Head *gc) {
 #  define _PyGC_BITS_DEFERRED       (64)    // Use deferred reference counting
 #endif
 
+#ifdef Py_GIL_DISABLED
+
+static inline void
+_PyObject_SET_GC_BITS(PyObject *op, uint8_t new_bits)
+{
+    uint8_t bits = _Py_atomic_load_uint8_relaxed(&op->ob_gc_bits);
+    _Py_atomic_store_uint8_relaxed(&op->ob_gc_bits, bits | new_bits);
+}
+
+static inline int
+_PyObject_HAS_GC_BITS(PyObject *op, uint8_t bits)
+{
+    return (_Py_atomic_load_uint8_relaxed(&op->ob_gc_bits) & bits) != 0;
+}
+
+static inline void
+_PyObject_CLEAR_GC_BITS(PyObject *op, uint8_t bits_to_clear)
+{
+    uint8_t bits = _Py_atomic_load_uint8_relaxed(&op->ob_gc_bits);
+    _Py_atomic_store_uint8_relaxed(&op->ob_gc_bits, bits & ~bits_to_clear);
+}
+
+#endif
+
 /* True if the object is currently tracked by the GC. */
 static inline int _PyObject_GC_IS_TRACKED(PyObject *op) {
 #ifdef Py_GIL_DISABLED
-    return (op->ob_gc_bits & _PyGC_BITS_TRACKED) != 0;
+    return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_TRACKED);
 #else
     PyGC_Head *gc = _Py_AS_GC(op);
     return (gc->_gc_next != 0);
@@ -80,12 +112,12 @@ static inline int _PyObject_GC_MAY_BE_TRACKED(PyObject *obj) {
  * for calling _PyMem_FreeDelayed on the referenced
  * memory. */
 static inline int _PyObject_GC_IS_SHARED(PyObject *op) {
-    return (op->ob_gc_bits & _PyGC_BITS_SHARED) != 0;
+    return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_SHARED);
 }
 #define _PyObject_GC_IS_SHARED(op) _PyObject_GC_IS_SHARED(_Py_CAST(PyObject*, op))
 
 static inline void _PyObject_GC_SET_SHARED(PyObject *op) {
-    op->ob_gc_bits |= _PyGC_BITS_SHARED;
+    _PyObject_SET_GC_BITS(op, _PyGC_BITS_SHARED);
 }
 #define _PyObject_GC_SET_SHARED(op) _PyObject_GC_SET_SHARED(_Py_CAST(PyObject*, op))
 
@@ -95,13 +127,13 @@ static inline void _PyObject_GC_SET_SHARED(PyObject *op) {
  * Objects with this bit that are GC objects will automatically
  * delay-freed by PyObject_GC_Del. */
 static inline int _PyObject_GC_IS_SHARED_INLINE(PyObject *op) {
-    return (op->ob_gc_bits & _PyGC_BITS_SHARED_INLINE) != 0;
+    return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_SHARED_INLINE);
 }
 #define _PyObject_GC_IS_SHARED_INLINE(op) \
     _PyObject_GC_IS_SHARED_INLINE(_Py_CAST(PyObject*, op))
 
 static inline void _PyObject_GC_SET_SHARED_INLINE(PyObject *op) {
-    op->ob_gc_bits |= _PyGC_BITS_SHARED_INLINE;
+    _PyObject_SET_GC_BITS(op, _PyGC_BITS_SHARED_INLINE);
 }
 #define _PyObject_GC_SET_SHARED_INLINE(op) \
     _PyObject_GC_SET_SHARED_INLINE(_Py_CAST(PyObject*, op))
@@ -178,7 +210,7 @@ static inline void _PyGCHead_SET_PREV(PyGC_Head *gc, PyGC_Head *prev) {
 
 static inline int _PyGC_FINALIZED(PyObject *op) {
 #ifdef Py_GIL_DISABLED
-    return (op->ob_gc_bits & _PyGC_BITS_FINALIZED) != 0;
+    return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_FINALIZED);
 #else
     PyGC_Head *gc = _Py_AS_GC(op);
     return ((gc->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0);
@@ -186,7 +218,7 @@ static inline int _PyGC_FINALIZED(PyObject *op) {
 }
 static inline void _PyGC_SET_FINALIZED(PyObject *op) {
 #ifdef Py_GIL_DISABLED
-    op->ob_gc_bits |= _PyGC_BITS_FINALIZED;
+    _PyObject_SET_GC_BITS(op, _PyGC_BITS_FINALIZED);
 #else
     PyGC_Head *gc = _Py_AS_GC(op);
     gc->_gc_prev |= _PyGC_PREV_MASK_FINALIZED;
@@ -194,7 +226,7 @@ static inline void _PyGC_SET_FINALIZED(PyObject *op) {
 }
 static inline void _PyGC_CLEAR_FINALIZED(PyObject *op) {
 #ifdef Py_GIL_DISABLED
-    op->ob_gc_bits &= ~_PyGC_BITS_FINALIZED;
+    _PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_FINALIZED);
 #else
     PyGC_Head *gc = _Py_AS_GC(op);
     gc->_gc_prev &= ~_PyGC_PREV_MASK_FINALIZED;
diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h
index f15c332a7b0811..7602248f956405 100644
--- a/Include/internal/pycore_object.h
+++ b/Include/internal/pycore_object.h
@@ -168,7 +168,7 @@ static inline int
 _PyObject_HasDeferredRefcount(PyObject *op)
 {
 #ifdef Py_GIL_DISABLED
-    return (op->ob_gc_bits & _PyGC_BITS_DEFERRED) != 0;
+    return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_DEFERRED);
 #else
     return 0;
 #endif
@@ -320,7 +320,7 @@ static inline void _PyObject_GC_TRACK(
                           "object already tracked by the garbage collector",
                           filename, lineno, __func__);
 #ifdef Py_GIL_DISABLED
-    op->ob_gc_bits |= _PyGC_BITS_TRACKED;
+    _PyObject_SET_GC_BITS(op, _PyGC_BITS_TRACKED);
 #else
     PyGC_Head *gc = _Py_AS_GC(op);
     _PyObject_ASSERT_FROM(op,
@@ -361,7 +361,7 @@ static inline void _PyObject_GC_UNTRACK(
                           filename, lineno, __func__);
 
 #ifdef Py_GIL_DISABLED
-    op->ob_gc_bits &= ~_PyGC_BITS_TRACKED;
+    _PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_TRACKED);
 #else
     PyGC_Head *gc = _Py_AS_GC(op);
     PyGC_Head *prev = _PyGCHead_PREV(gc);
diff --git a/Objects/object.c b/Objects/object.c
index 8ad0389cbc7626..a4c69996c2cc5e 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -2431,6 +2431,7 @@ _PyObject_SetDeferredRefcount(PyObject *op)
     assert(PyType_IS_GC(Py_TYPE(op)));
     assert(_Py_IsOwnedByCurrentThread(op));
     assert(op->ob_ref_shared == 0);
+    _PyObject_SET_GC_BITS(op, _PyGC_BITS_DEFERRED);
     PyInterpreterState *interp = _PyInterpreterState_GET();
     if (interp->gc.immortalize.enabled) {
         // gh-117696: immortalize objects instead of using deferred reference
@@ -2438,7 +2439,6 @@ _PyObject_SetDeferredRefcount(PyObject *op)
         _Py_SetImmortal(op);
         return;
     }
-    op->ob_gc_bits |= _PyGC_BITS_DEFERRED;
     op->ob_ref_local += 1;
     op->ob_ref_shared = _Py_REF_QUEUED;
 #endif
diff --git a/Tools/tsan/suppressions_free_threading.txt b/Tools/tsan/suppressions_free_threading.txt
index 74dbf4bb1cb688..a669bc4d1d5c30 100644
--- a/Tools/tsan/suppressions_free_threading.txt
+++ b/Tools/tsan/suppressions_free_threading.txt
@@ -19,9 +19,6 @@ race:_PyImport_AcquireLock
 race:_PyImport_ReleaseLock
 race:_PyInterpreterState_SetNotRunningMain
 race:_PyInterpreterState_IsRunningMain
-race:_PyObject_GC_IS_SHARED
-race:_PyObject_GC_SET_SHARED
-race:_PyObject_GC_TRACK
 # https://gist.github.com/mpage/0a24eb2dd458441ededb498e9b0e5de8
 race:_PyParkingLot_Park
 race:_PyType_HasFeature

_______________________________________________
Python-checkins mailing list -- [email protected]
To unsubscribe send an email to [email protected]
https://mail.python.org/mailman3/lists/python-checkins.python.org/
Member address: [email protected]

Reply via email to