On 11/07/19 20:45 +0100, Jonathan Wakely wrote:
This adds the new atomic types from C++2a, as proposed by P0019 and
P0020. To reduce duplication, the calls to the compiler's atomic
built-ins are wrapped in new functions in the __atomic_impl namespace.
These functions are currently used only by std::atomic<floating-point>
and std::atomic_ref, but could also be used by all other specializations
of std::atomic.

Here's a patch that reuses the new __atomic_impl functions in the
existing atomic<integral> and atomic<pointer> specializations (and
does some general tidying up).

I don't plan to commit this yet, but I might do so at some point.

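To illustrate the idea (a simplified, hypothetical sketch with made-up
names, not the header code below): every specialization forwards to one
set of free functions that call the GCC built-ins, and the floating-point
operations with no corresponding built-in fall back to a
compare_exchange loop.

// Simplified, hypothetical sketch (made-up names, not the actual header
// code) of the pattern: one set of free functions calls the GCC atomic
// built-ins and every atomic-like class forwards to them.
#include <atomic>   // std::memory_order

namespace impl
{
  // Integral/pointer operations map directly onto a built-in.
  template<typename T>
    T fetch_add(T* ptr, T i, std::memory_order m) noexcept
    { return __atomic_fetch_add(ptr, i, int(m)); }

  // Floating-point types have no fetch_add built-in, so use a CAS loop.
  template<typename T>
    T fetch_add_flt(T* ptr, T i, std::memory_order m) noexcept
    {
      T oldval;
      __atomic_load(ptr, &oldval, __ATOMIC_RELAXED);
      T newval = oldval + i;
      while (!__atomic_compare_exchange(ptr, &oldval, &newval, true,
                                        int(m), __ATOMIC_RELAXED))
        newval = oldval + i;      // oldval was refreshed by the failed CAS
      return oldval;
    }
}

// A toy atomic integer that forwards to the shared wrapper.
template<typename T>
struct toy_atomic
{
  T _M_i;

  T fetch_add(T i, std::memory_order m = std::memory_order_seq_cst) noexcept
  { return impl::fetch_add(&_M_i, i, m); }
};

With that shape, adding or reworking a specialization only means
forwarding to the shared functions instead of repeating the built-in
calls and the memory-order assertions, which is what the patch below
does for atomic<integral> and atomic<pointer>.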

diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 146e70a9f2e..718eca7424a 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -230,6 +230,207 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
   };
 
+  // Implementation details of std::atomic and std::atomic_ref
+  namespace __atomic_impl
+  {
+    // Remove volatile and create a non-deduced context for value arguments.
+    template<typename _Tp>
+      using _Val = typename remove_volatile<_Tp>::type;
+
+    // As above, but for difference_type arguments.
+    template<typename _Tp>
+      using _Diff = typename
+	conditional<is_pointer<_Tp>::value, ptrdiff_t, _Val<_Tp>>::type;
+
+    template<size_t _Size, size_t _Align>
+      _GLIBCXX_ALWAYS_INLINE bool
+      is_lock_free() noexcept
+      {
+	// Produce a fake, minimally aligned pointer.
+	return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE void
+      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
+      {
+	const memory_order __b __attribute__((__unused__))
+	  = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_acquire);
+	__glibcxx_assert(__b != memory_order_acq_rel);
+	__glibcxx_assert(__b != memory_order_consume);
+
+	__atomic_store(__ptr, std::__addressof(__t), int(__m));
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
+      load(_Tp* __ptr, memory_order __m) noexcept
+      {
+	const memory_order __b __attribute__((__unused__))
+	  = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_release);
+	__glibcxx_assert(__b != memory_order_acq_rel);
+
+	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+	auto __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
+	__atomic_load(__ptr, __dest, int(__m));
+	return *__dest;
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
+      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
+      {
+	__glibcxx_assert((__m & __memory_order_mask) != memory_order_consume);
+
+        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+	auto __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
+	__atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
+	return *__dest;
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE bool
+      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
+			    _Val<_Tp> __desired, memory_order __success,
+			    memory_order __failure) noexcept
+      {
+	const memory_order __bs __attribute__((__unused__))
+	  = __success & __memory_order_mask;
+	const memory_order __bf __attribute__((__unused__))
+	  = __failure & __memory_order_mask;
+	__glibcxx_assert(__bf != memory_order_release);
+	__glibcxx_assert(__bf != memory_order_acq_rel);
+	__glibcxx_assert(__bf <= __bs);
+
+	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
+					 std::__addressof(__desired), true,
+					 int(__success), int(__failure));
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE bool
+      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
+			      _Val<_Tp> __desired, memory_order __success,
+			      memory_order __failure) noexcept
+      {
+	const memory_order __succ __attribute__((__unused__))
+	  = __success & __memory_order_mask;
+	const memory_order __fail __attribute__((__unused__))
+	  = __failure & __memory_order_mask;
+	__glibcxx_assert(__fail != memory_order_release);
+	__glibcxx_assert(__fail != memory_order_acq_rel);
+	__glibcxx_assert(__fail <= __succ);
+
+	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
+					 std::__addressof(__desired), false,
+					 int(__success), int(__failure));
+      }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_add(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_and(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_or(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
+      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
+      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+    template<typename _Tp>
+      _GLIBCXX_ALWAYS_INLINE _Tp
+      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+#if __cplusplus > 201703L
+    template<typename _Tp>
+      _Tp
+      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      {
+	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+	_Val<_Tp> __newval = __oldval + __i;
+	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+				      memory_order_relaxed))
+	  __newval = __oldval + __i;
+	return __oldval;
+      }
+
+    template<typename _Tp>
+      _Tp
+      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+      {
+	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+	_Val<_Tp> __newval = __oldval - __i;
+	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+				      memory_order_relaxed))
+	  __newval = __oldval - __i;
+	return __oldval;
+      }
+
+    template<typename _Tp>
+      _Tp
+      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      {
+	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+	_Val<_Tp> __newval = __oldval + __i;
+	while (!compare_exchange_weak(__ptr, __oldval, __newval,
+				      memory_order_seq_cst,
+				      memory_order_relaxed))
+	  __newval = __oldval + __i;
+	return __newval;
+      }
+
+    template<typename _Tp>
+      _Tp
+      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
+      {
+	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+	_Val<_Tp> __newval = __oldval - __i;
+	while (!compare_exchange_weak(__ptr, __oldval, __newval,
+				      memory_order_seq_cst,
+				      memory_order_relaxed))
+	  __newval = __oldval - __i;
+	return __newval;
+      }
+#endif // C++2a
+  } // namespace __atomic_impl
+
 
   /// Base class for atomic integrals.
   //
@@ -262,12 +463,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       using difference_type = value_type;
 
     private:
-      typedef _ITp 	__int_type;
-
       static constexpr int _S_alignment =
 	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
 
-      alignas(_S_alignment) __int_type _M_i;
+      alignas(_S_alignment) value_type _M_i;
 
     public:
       __atomic_base() noexcept = default;
@@ -276,206 +475,154 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __atomic_base& operator=(const __atomic_base&) = delete;
       __atomic_base& operator=(const __atomic_base&) volatile = delete;
 
-      // Requires __int_type convertible to _M_i.
-      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
+      constexpr __atomic_base(value_type __i) noexcept : _M_i (__i) { }
 
-      operator __int_type() const noexcept
+      operator value_type() const noexcept
       { return load(); }
 
-      operator __int_type() const volatile noexcept
+      operator value_type() const volatile noexcept
       { return load(); }
 
-      __int_type
-      operator=(__int_type __i) noexcept
+      value_type
+      operator=(value_type __i) noexcept
       {
 	store(__i);
 	return __i;
       }
 
-      __int_type
-      operator=(__int_type __i) volatile noexcept
+      value_type
+      operator=(value_type __i) volatile noexcept
       {
 	store(__i);
 	return __i;
       }
 
-      __int_type
+      value_type
       operator++(int) noexcept
       { return fetch_add(1); }
 
-      __int_type
+      value_type
       operator++(int) volatile noexcept
       { return fetch_add(1); }
 
-      __int_type
+      value_type
       operator--(int) noexcept
       { return fetch_sub(1); }
 
-      __int_type
+      value_type
       operator--(int) volatile noexcept
       { return fetch_sub(1); }
 
-      __int_type
+      value_type
       operator++() noexcept
-      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
+      { return __atomic_impl::__add_fetch(&_M_i, 1); }
 
-      __int_type
+      value_type
       operator++() volatile noexcept
-      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
+      { return __atomic_impl::__add_fetch(&_M_i, 1); }
 
-      __int_type
+      value_type
       operator--() noexcept
-      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
+      { return __atomic_impl::__sub_fetch(&_M_i, 1); }
 
-      __int_type
+      value_type
       operator--() volatile noexcept
-      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
+      { return __atomic_impl::__sub_fetch(&_M_i, 1); }
 
-      __int_type
-      operator+=(__int_type __i) noexcept
-      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator+=(value_type __i) noexcept
+      { return __atomic_impl::__add_fetch(&_M_i, __i); }
 
-      __int_type
-      operator+=(__int_type __i) volatile noexcept
-      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator+=(value_type __i) volatile noexcept
+      { return __atomic_impl::__add_fetch(&_M_i, __i); }
 
-      __int_type
-      operator-=(__int_type __i) noexcept
-      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator-=(value_type __i) noexcept
+      { return __atomic_impl::__sub_fetch(&_M_i, __i); }
 
-      __int_type
-      operator-=(__int_type __i) volatile noexcept
-      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator-=(value_type __i) volatile noexcept
+      { return __atomic_impl::__sub_fetch(&_M_i, __i); }
 
-      __int_type
-      operator&=(__int_type __i) noexcept
-      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator&=(value_type __i) noexcept
+      { return __atomic_impl::__and_fetch(&_M_i, __i); }
 
-      __int_type
-      operator&=(__int_type __i) volatile noexcept
-      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator&=(value_type __i) volatile noexcept
+      { return __atomic_impl::__and_fetch(&_M_i, __i); }
 
-      __int_type
-      operator|=(__int_type __i) noexcept
-      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator|=(value_type __i) noexcept
+      { return __atomic_impl::__or_fetch(&_M_i, __i); }
 
-      __int_type
-      operator|=(__int_type __i) volatile noexcept
-      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator|=(value_type __i) volatile noexcept
+      { return __atomic_impl::__or_fetch(&_M_i, __i); }
 
-      __int_type
-      operator^=(__int_type __i) noexcept
-      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator^=(value_type __i) noexcept
+      { return __atomic_impl::__xor_fetch(&_M_i, __i); }
 
-      __int_type
-      operator^=(__int_type __i) volatile noexcept
-      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+      value_type
+      operator^=(value_type __i) volatile noexcept
+      { return __atomic_impl::__xor_fetch(&_M_i, __i); }
 
       bool
       is_lock_free() const noexcept
-      {
-	// Use a fake, minimally aligned pointer.
-	return __atomic_is_lock_free(sizeof(_M_i),
-	    reinterpret_cast<void *>(-_S_alignment));
-      }
+      { return __atomic_impl::is_lock_free<sizeof(_M_i), _S_alignment>(); }
 
       bool
       is_lock_free() const volatile noexcept
-      {
-	// Use a fake, minimally aligned pointer.
-	return __atomic_is_lock_free(sizeof(_M_i),
-	    reinterpret_cast<void *>(-_S_alignment));
-      }
+      { return __atomic_impl::is_lock_free<sizeof(_M_i), _S_alignment>(); }
 
       _GLIBCXX_ALWAYS_INLINE void
-      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
-      {
-	memory_order __b = __m & __memory_order_mask;
-	__glibcxx_assert(__b != memory_order_acquire);
-	__glibcxx_assert(__b != memory_order_acq_rel);
-	__glibcxx_assert(__b != memory_order_consume);
-
-	__atomic_store_n(&_M_i, __i, int(__m));
-      }
+      store(value_type __i, memory_order __m = memory_order_seq_cst) noexcept
+      { __atomic_impl::store(&_M_i, __i, __m); }
 
       _GLIBCXX_ALWAYS_INLINE void
-      store(__int_type __i,
+      store(value_type __i,
 	    memory_order __m = memory_order_seq_cst) volatile noexcept
-      {
-	memory_order __b = __m & __memory_order_mask;
-	__glibcxx_assert(__b != memory_order_acquire);
-	__glibcxx_assert(__b != memory_order_acq_rel);
-	__glibcxx_assert(__b != memory_order_consume);
+      { __atomic_impl::store(&_M_i, __i, __m); }
 
-	__atomic_store_n(&_M_i, __i, int(__m));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE __int_type
+      _GLIBCXX_ALWAYS_INLINE value_type
       load(memory_order __m = memory_order_seq_cst) const noexcept
-      {
-	memory_order __b = __m & __memory_order_mask;
-	__glibcxx_assert(__b != memory_order_release);
-	__glibcxx_assert(__b != memory_order_acq_rel);
-
-	return __atomic_load_n(&_M_i, int(__m));
-      }
+      { return __atomic_impl::load(&_M_i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
+      _GLIBCXX_ALWAYS_INLINE value_type
       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
-      {
-	memory_order __b = __m & __memory_order_mask;
-	__glibcxx_assert(__b != memory_order_release);
-	__glibcxx_assert(__b != memory_order_acq_rel);
+      { return __atomic_impl::load(&_M_i, __m); }
 
-	return __atomic_load_n(&_M_i, int(__m));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      exchange(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      exchange(value_type __i,
 	       memory_order __m = memory_order_seq_cst) noexcept
-      {
-	return __atomic_exchange_n(&_M_i, __i, int(__m));
-      }
-
+      { return __atomic_impl::exchange(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      exchange(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      exchange(value_type __i,
 	       memory_order __m = memory_order_seq_cst) volatile noexcept
-      {
-	return __atomic_exchange_n(&_M_i, __i, int(__m));
-      }
+      { return __atomic_impl::exchange(&_M_i, __i, __m); }
 
       _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_weak(__int_type& __i1, __int_type __i2,
+      compare_exchange_weak(value_type& __i1, value_type __i2,
 			    memory_order __m1, memory_order __m2) noexcept
       {
-	memory_order __b2 = __m2 & __memory_order_mask;
-	memory_order __b1 = __m1 & __memory_order_mask;
-	__glibcxx_assert(__b2 != memory_order_release);
-	__glibcxx_assert(__b2 != memory_order_acq_rel);
-	__glibcxx_assert(__b2 <= __b1);
-
-	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
-					   int(__m1), int(__m2));
+	return __atomic_impl::compare_exchange_weak(&_M_i, __i1, __i2,
+						    __m1, __m2);
       }
 
       _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_weak(__int_type& __i1, __int_type __i2,
+      compare_exchange_weak(value_type& __i1, value_type __i2,
 			    memory_order __m1,
 			    memory_order __m2) volatile noexcept
       {
-	memory_order __b2 = __m2 & __memory_order_mask;
-	memory_order __b1 = __m1 & __memory_order_mask;
-	__glibcxx_assert(__b2 != memory_order_release);
-	__glibcxx_assert(__b2 != memory_order_acq_rel);
-	__glibcxx_assert(__b2 <= __b1);
-
-	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
-					   int(__m1), int(__m2));
+	return __atomic_impl::compare_exchange_weak(&_M_i, __i1, __i2,
+						    __m1, __m2);
       }
 
       _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_weak(__int_type& __i1, __int_type __i2,
+      compare_exchange_weak(value_type& __i1, value_type __i2,
 			    memory_order __m = memory_order_seq_cst) noexcept
       {
 	return compare_exchange_weak(__i1, __i2, __m,
@@ -483,7 +630,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       }
 
       _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_weak(__int_type& __i1, __int_type __i2,
+      compare_exchange_weak(value_type& __i1, value_type __i2,
 		   memory_order __m = memory_order_seq_cst) volatile noexcept
       {
 	return compare_exchange_weak(__i1, __i2, __m,
@@ -491,37 +638,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       }
 
       _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_strong(__int_type& __i1, __int_type __i2,
+      compare_exchange_strong(value_type& __i1, value_type __i2,
 			      memory_order __m1, memory_order __m2) noexcept
       {
-	memory_order __b2 = __m2 & __memory_order_mask;
-	memory_order __b1 = __m1 & __memory_order_mask;
-	__glibcxx_assert(__b2 != memory_order_release);
-	__glibcxx_assert(__b2 != memory_order_acq_rel);
-	__glibcxx_assert(__b2 <= __b1);
-
-	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
-					   int(__m1), int(__m2));
+	return __atomic_impl::compare_exchange_strong(&_M_i, __i1, __i2,
+						      __m1, __m2);
       }
 
       _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_strong(__int_type& __i1, __int_type __i2,
+      compare_exchange_strong(value_type& __i1, value_type __i2,
 			      memory_order __m1,
 			      memory_order __m2) volatile noexcept
       {
-	memory_order __b2 = __m2 & __memory_order_mask;
-	memory_order __b1 = __m1 & __memory_order_mask;
-
-	__glibcxx_assert(__b2 != memory_order_release);
-	__glibcxx_assert(__b2 != memory_order_acq_rel);
-	__glibcxx_assert(__b2 <= __b1);
-
-	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
-					   int(__m1), int(__m2));
+	return __atomic_impl::compare_exchange_strong(&_M_i, __i1, __i2,
+						      __m1, __m2);
       }
 
       _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_strong(__int_type& __i1, __int_type __i2,
+      compare_exchange_strong(value_type& __i1, value_type __i2,
 			      memory_order __m = memory_order_seq_cst) noexcept
       {
 	return compare_exchange_strong(__i1, __i2, __m,
@@ -529,464 +663,67 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       }
 
       _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_strong(__int_type& __i1, __int_type __i2,
+      compare_exchange_strong(value_type& __i1, value_type __i2,
 		 memory_order __m = memory_order_seq_cst) volatile noexcept
       {
 	return compare_exchange_strong(__i1, __i2, __m,
 				       __cmpexch_failure_order(__m));
       }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_add(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_add(value_type __i,
 		memory_order __m = memory_order_seq_cst) noexcept
-      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_add(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_add(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_add(value_type __i,
 		memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_add(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_sub(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_sub(value_type __i,
 		memory_order __m = memory_order_seq_cst) noexcept
-      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_sub(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_sub(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_sub(value_type __i,
 		memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_sub(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_and(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_and(value_type __i,
 		memory_order __m = memory_order_seq_cst) noexcept
-      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_and(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_and(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_and(value_type __i,
 		memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_and(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_or(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_or(value_type __i,
 	       memory_order __m = memory_order_seq_cst) noexcept
-      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_or(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_or(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_or(value_type __i,
 	       memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_or(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_xor(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_xor(value_type __i,
 		memory_order __m = memory_order_seq_cst) noexcept
-      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
+      { return __atomic_impl::fetch_xor(&_M_i, __i, __m); }
 
-      _GLIBCXX_ALWAYS_INLINE __int_type
-      fetch_xor(__int_type __i,
+      _GLIBCXX_ALWAYS_INLINE value_type
+      fetch_xor(value_type __i,
 		memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
-    };
-
-
-  /// Partial specialization for pointer types.
-  template<typename _PTp>
-    struct __atomic_base<_PTp*>
-    {
-    private:
-      typedef _PTp* 	__pointer_type;
-
-      __pointer_type 	_M_p;
-
-      // Factored out to facilitate explicit specialization.
-      constexpr ptrdiff_t
-      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }
-
-      constexpr ptrdiff_t
-      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
-
-    public:
-      __atomic_base() noexcept = default;
-      ~__atomic_base() noexcept = default;
-      __atomic_base(const __atomic_base&) = delete;
-      __atomic_base& operator=(const __atomic_base&) = delete;
-      __atomic_base& operator=(const __atomic_base&) volatile = delete;
-
-      // Requires __pointer_type convertible to _M_p.
-      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
-
-      operator __pointer_type() const noexcept
-      { return load(); }
-
-      operator __pointer_type() const volatile noexcept
-      { return load(); }
-
-      __pointer_type
-      operator=(__pointer_type __p) noexcept
-      {
-	store(__p);
-	return __p;
-      }
-
-      __pointer_type
-      operator=(__pointer_type __p) volatile noexcept
-      {
-	store(__p);
-	return __p;
-      }
-
-      __pointer_type
-      operator++(int) noexcept
-      { return fetch_add(1); }
-
-      __pointer_type
-      operator++(int) volatile noexcept
-      { return fetch_add(1); }
-
-      __pointer_type
-      operator--(int) noexcept
-      { return fetch_sub(1); }
-
-      __pointer_type
-      operator--(int) volatile noexcept
-      { return fetch_sub(1); }
-
-      __pointer_type
-      operator++() noexcept
-      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
-				  int(memory_order_seq_cst)); }
-
-      __pointer_type
-      operator++() volatile noexcept
-      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
-				  int(memory_order_seq_cst)); }
-
-      __pointer_type
-      operator--() noexcept
-      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
-				  int(memory_order_seq_cst)); }
-
-      __pointer_type
-      operator--() volatile noexcept
-      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
-				  int(memory_order_seq_cst)); }
-
-      __pointer_type
-      operator+=(ptrdiff_t __d) noexcept
-      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
-				  int(memory_order_seq_cst)); }
-
-      __pointer_type
-      operator+=(ptrdiff_t __d) volatile noexcept
-      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
-				  int(memory_order_seq_cst)); }
-
-      __pointer_type
-      operator-=(ptrdiff_t __d) noexcept
-      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
-				  int(memory_order_seq_cst)); }
-
-      __pointer_type
-      operator-=(ptrdiff_t __d) volatile noexcept
-      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
-				  int(memory_order_seq_cst)); }
-
-      bool
-      is_lock_free() const noexcept
-      {
-	// Produce a fake, minimally aligned pointer.
-	return __atomic_is_lock_free(sizeof(_M_p),
-	    reinterpret_cast<void *>(-__alignof(_M_p)));
-      }
-
-      bool
-      is_lock_free() const volatile noexcept
-      {
-	// Produce a fake, minimally aligned pointer.
-	return __atomic_is_lock_free(sizeof(_M_p),
-	    reinterpret_cast<void *>(-__alignof(_M_p)));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE void
-      store(__pointer_type __p,
-	    memory_order __m = memory_order_seq_cst) noexcept
-      {
-        memory_order __b = __m & __memory_order_mask;
-
-	__glibcxx_assert(__b != memory_order_acquire);
-	__glibcxx_assert(__b != memory_order_acq_rel);
-	__glibcxx_assert(__b != memory_order_consume);
-
-	__atomic_store_n(&_M_p, __p, int(__m));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE void
-      store(__pointer_type __p,
-	    memory_order __m = memory_order_seq_cst) volatile noexcept
-      {
-	memory_order __b = __m & __memory_order_mask;
-	__glibcxx_assert(__b != memory_order_acquire);
-	__glibcxx_assert(__b != memory_order_acq_rel);
-	__glibcxx_assert(__b != memory_order_consume);
-
-	__atomic_store_n(&_M_p, __p, int(__m));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE __pointer_type
-      load(memory_order __m = memory_order_seq_cst) const noexcept
-      {
-	memory_order __b = __m & __memory_order_mask;
-	__glibcxx_assert(__b != memory_order_release);
-	__glibcxx_assert(__b != memory_order_acq_rel);
-
-	return __atomic_load_n(&_M_p, int(__m));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE __pointer_type
-      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
-      {
-	memory_order __b = __m & __memory_order_mask;
-	__glibcxx_assert(__b != memory_order_release);
-	__glibcxx_assert(__b != memory_order_acq_rel);
-
-	return __atomic_load_n(&_M_p, int(__m));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE __pointer_type
-      exchange(__pointer_type __p,
-	       memory_order __m = memory_order_seq_cst) noexcept
-      {
-	return __atomic_exchange_n(&_M_p, __p, int(__m));
-      }
-
-
-      _GLIBCXX_ALWAYS_INLINE __pointer_type
-      exchange(__pointer_type __p,
-	       memory_order __m = memory_order_seq_cst) volatile noexcept
-      {
-	return __atomic_exchange_n(&_M_p, __p, int(__m));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
-			      memory_order __m1,
-			      memory_order __m2) noexcept
-      {
-	memory_order __b2 = __m2 & __memory_order_mask;
-	memory_order __b1 = __m1 & __memory_order_mask;
-	__glibcxx_assert(__b2 != memory_order_release);
-	__glibcxx_assert(__b2 != memory_order_acq_rel);
-	__glibcxx_assert(__b2 <= __b1);
-
-	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
-					   int(__m1), int(__m2));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
-			      memory_order __m1,
-			      memory_order __m2) volatile noexcept
-      {
-	memory_order __b2 = __m2 & __memory_order_mask;
-	memory_order __b1 = __m1 & __memory_order_mask;
-
-	__glibcxx_assert(__b2 != memory_order_release);
-	__glibcxx_assert(__b2 != memory_order_acq_rel);
-	__glibcxx_assert(__b2 <= __b1);
-
-	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
-					   int(__m1), int(__m2));
-      }
-
-      _GLIBCXX_ALWAYS_INLINE __pointer_type
-      fetch_add(ptrdiff_t __d,
-		memory_order __m = memory_order_seq_cst) noexcept
-      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
-
-      _GLIBCXX_ALWAYS_INLINE __pointer_type
-      fetch_add(ptrdiff_t __d,
-		memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
-
-      _GLIBCXX_ALWAYS_INLINE __pointer_type
-      fetch_sub(ptrdiff_t __d,
-		memory_order __m = memory_order_seq_cst) noexcept
-      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
-
-      _GLIBCXX_ALWAYS_INLINE __pointer_type
-      fetch_sub(ptrdiff_t __d,
-		memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
+      { return __atomic_impl::fetch_xor(&_M_i, __i, __m); }
     };
 
 #if __cplusplus > 201703L
-  // Implementation details of atomic_ref and atomic<floating-point>.
-  namespace __atomic_impl
-  {
-    // Remove volatile and create a non-deduced context for value arguments.
-    template<typename _Tp>
-      using _Val = remove_volatile_t<_Tp>;
-
-    // As above, but for difference_type arguments.
-    template<typename _Tp>
-      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
-
-    template<size_t _Size, size_t _Align>
-      _GLIBCXX_ALWAYS_INLINE bool
-      is_lock_free() noexcept
-      {
-	// Produce a fake, minimally aligned pointer.
-	return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
-      }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE void
-      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
-      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      load(_Tp* __ptr, memory_order __m) noexcept
-      {
-	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
-	_Tp* __dest = reinterpret_cast<_Tp*>(__buf);
-	__atomic_load(__ptr, __dest, int(__m));
-	return *__dest;
-      }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
-      {
-        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
-	_Tp* __dest = reinterpret_cast<_Tp*>(__buf);
-	__atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
-	return *__dest;
-      }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
-			    _Val<_Tp> __desired, memory_order __success,
-			    memory_order __failure) noexcept
-      {
-	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
-					 std::__addressof(__desired), true,
-					 int(__success), int(__failure));
-      }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE bool
-      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
-			      _Val<_Tp> __desired, memory_order __success,
-			      memory_order __failure) noexcept
-      {
-	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
-					 std::__addressof(__desired), false,
-					 int(__success), int(__failure));
-      }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
-      { return __atomic_fetch_add(__ptr, __i, int(__m)); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
-      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
-      { return __atomic_fetch_and(__ptr, __i, int(__m)); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
-      { return __atomic_fetch_or(__ptr, __i, int(__m)); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
-      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
-      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
-      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
-      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
-      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
-
-    template<typename _Tp>
-      _GLIBCXX_ALWAYS_INLINE _Tp
-      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
-      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
-
-    template<typename _Tp>
-      _Tp
-      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
-      {
-	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
-	_Val<_Tp> __newval = __oldval + __i;
-	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
-				      memory_order_relaxed))
-	  __newval = __oldval + __i;
-	return __oldval;
-      }
-
-    template<typename _Tp>
-      _Tp
-      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
-      {
-	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
-	_Val<_Tp> __newval = __oldval - __i;
-	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
-				      memory_order_relaxed))
-	  __newval = __oldval - __i;
-	return __oldval;
-      }
-
-    template<typename _Tp>
-      _Tp
-      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
-      {
-	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
-	_Val<_Tp> __newval = __oldval + __i;
-	while (!compare_exchange_weak(__ptr, __oldval, __newval,
-				      memory_order_seq_cst,
-				      memory_order_relaxed))
-	  __newval = __oldval + __i;
-	return __newval;
-      }
-
-    template<typename _Tp>
-      _Tp
-      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
-      {
-	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
-	_Val<_Tp> __newval = __oldval - __i;
-	while (!compare_exchange_weak(__ptr, __oldval, __newval,
-				      memory_order_seq_cst,
-				      memory_order_relaxed))
-	  __newval = __oldval - __i;
-	return __newval;
-      }
-  } // namespace __atomic_impl
-
-  // base class for atomic<floating-point-type>
+  // Base class for atomic<floating-point-type>.
+  // Implementation for atomic<float>, atomic<double>, atomic<long double>.
   template<typename _Fp>
     struct __atomic_float
     {
diff --git a/libstdc++-v3/include/std/atomic b/libstdc++-v3/include/std/atomic
index 26d8d3946da..686ecc9114e 100644
--- a/libstdc++-v3/include/std/atomic
+++ b/libstdc++-v3/include/std/atomic
@@ -218,19 +218,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       bool
       is_lock_free() const noexcept
-      {
-	// Produce a fake, minimally aligned pointer.
-	return __atomic_is_lock_free(sizeof(_M_i),
-	    reinterpret_cast<void *>(-_S_alignment));
-      }
+      { return __atomic_impl::is_lock_free<sizeof(_M_i), _S_alignment>(); }
 
       bool
       is_lock_free() const volatile noexcept
-      {
-	// Produce a fake, minimally aligned pointer.
-	return __atomic_is_lock_free(sizeof(_M_i),
-	    reinterpret_cast<void *>(-_S_alignment));
-      }
+      { return __atomic_impl::is_lock_free<sizeof(_M_i), _S_alignment>(); }
 
 #if __cplusplus >= 201703L
       static constexpr bool is_always_lock_free
@@ -239,69 +231,43 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       void
       store(_Tp __i, memory_order __m = memory_order_seq_cst) noexcept
-      { __atomic_store(std::__addressof(_M_i), std::__addressof(__i), int(__m)); }
+      { __atomic_impl::store(std::__addressof(_M_i), __i, __m); }
 
       void
       store(_Tp __i, memory_order __m = memory_order_seq_cst) volatile noexcept
-      { __atomic_store(std::__addressof(_M_i), std::__addressof(__i), int(__m)); }
+      { __atomic_impl::store(std::__addressof(_M_i), __i, __m); }
 
       _Tp
       load(memory_order __m = memory_order_seq_cst) const noexcept
-      {
-	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
-	_Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
-	__atomic_load(std::__addressof(_M_i), __ptr, int(__m));
-	return *__ptr;
-      }
+      { return __atomic_impl::load(std::__addressof(_M_i), __m); }
 
       _Tp
       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
-      {
-        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
-	_Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
-	__atomic_load(std::__addressof(_M_i), __ptr, int(__m));
-	return *__ptr;
-      }
+      { return __atomic_impl::load(std::__addressof(_M_i), __m); }
 
       _Tp
       exchange(_Tp __i, memory_order __m = memory_order_seq_cst) noexcept
-      {
-        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
-	_Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
-	__atomic_exchange(std::__addressof(_M_i), std::__addressof(__i),
-			  __ptr, int(__m));
-	return *__ptr;
-      }
+      { return __atomic_impl::exchange(std::__addressof(_M_i), __i, __m); }
 
       _Tp
       exchange(_Tp __i,
 	       memory_order __m = memory_order_seq_cst) volatile noexcept
-      {
-        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
-	_Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
-	__atomic_exchange(std::__addressof(_M_i), std::__addressof(__i),
-			  __ptr, int(__m));
-	return *__ptr;
-      }
+      { return __atomic_impl::exchange(std::__addressof(_M_i), __i, __m); }
 
       bool
       compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
 			    memory_order __f) noexcept
       {
-	return __atomic_compare_exchange(std::__addressof(_M_i),
-					 std::__addressof(__e),
-					 std::__addressof(__i),
-					 true, int(__s), int(__f));
+	return __atomic_impl::compare_exchange_weak(std::__addressof(_M_i),
+						    __e, __i, __s, __f);
       }
 
       bool
       compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
 			    memory_order __f) volatile noexcept
       {
-	return __atomic_compare_exchange(std::__addressof(_M_i),
-					 std::__addressof(__e),
-					 std::__addressof(__i),
-					 true, int(__s), int(__f));
+	return __atomic_impl::compare_exchange_weak(std::__addressof(_M_i),
+						    __e, __i, __s, __f);
       }
 
       bool
@@ -320,20 +286,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
 			      memory_order __f) noexcept
       {
-	return __atomic_compare_exchange(std::__addressof(_M_i),
-					 std::__addressof(__e),
-					 std::__addressof(__i),
-					 false, int(__s), int(__f));
+	return __atomic_impl::compare_exchange_strong(std::__addressof(_M_i),
+						      __e, __i, __s, __f);
       }
 
       bool
       compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
 			      memory_order __f) volatile noexcept
       {
-	return __atomic_compare_exchange(std::__addressof(_M_i),
-					 std::__addressof(__e),
-					 std::__addressof(__i),
-					 false, int(__s), int(__f));
+	return __atomic_impl::compare_exchange_strong(std::__addressof(_M_i),
+						      __e, __i, __s, __f);
       }
 
       bool
@@ -357,9 +319,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       using value_type = _Tp*;
       using difference_type = ptrdiff_t;
 
-      typedef _Tp* 			__pointer_type;
-      typedef __atomic_base<_Tp*>	__base_type;
-      __base_type			_M_b;
+      value_type			_M_p;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -367,183 +327,207 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__pointer_type __p) noexcept : _M_b(__p) { }
+      constexpr atomic(value_type __p) noexcept : _M_p(__p) { }
 
-      operator __pointer_type() const noexcept
-      { return __pointer_type(_M_b); }
+      operator value_type() const noexcept
+      { return load(); }
 
-      operator __pointer_type() const volatile noexcept
-      { return __pointer_type(_M_b); }
+      operator value_type() const volatile noexcept
+      { return load(); }
 
-      __pointer_type
-      operator=(__pointer_type __p) noexcept
-      { return _M_b.operator=(__p); }
+      value_type
+      operator=(value_type __p) noexcept
+      {
+	store(__p);
+	return __p;
+      }
 
-      __pointer_type
-      operator=(__pointer_type __p) volatile noexcept
-      { return _M_b.operator=(__p); }
+      value_type
+      operator=(value_type __p) volatile noexcept
+      {
+	store(__p);
+	return __p;
+      }
 
-      __pointer_type
+      _GLIBCXX_ALWAYS_INLINE value_type
       operator++(int) noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return _M_b++;
+	return fetch_add(1);
       }
 
-      __pointer_type
+      _GLIBCXX_ALWAYS_INLINE value_type
       operator++(int) volatile noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return _M_b++;
+	return fetch_add(1);
       }
 
-      __pointer_type
+      _GLIBCXX_ALWAYS_INLINE value_type
       operator--(int) noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return _M_b--;
+	return fetch_sub(1);
       }
 
-      __pointer_type
+      _GLIBCXX_ALWAYS_INLINE value_type
       operator--(int) volatile noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return _M_b--;
+	return fetch_sub(1);
       }
 
-      __pointer_type
+      value_type
       operator++() noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return ++_M_b;
+	return __atomic_impl::__add_fetch(std::__addressof(_M_p),
+					  _S_type_size(1));
       }
 
-      __pointer_type
+      value_type
       operator++() volatile noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return ++_M_b;
+	return __atomic_impl::__add_fetch(std::__addressof(_M_p),
+					  _S_type_size(1));
       }
 
-      __pointer_type
+      value_type
       operator--() noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return --_M_b;
+	return __atomic_impl::__sub_fetch(std::__addressof(_M_p),
+					  _S_type_size(1));
       }
 
-      __pointer_type
+      value_type
       operator--() volatile noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return --_M_b;
+	return __atomic_impl::__sub_fetch(std::__addressof(_M_p),
+					  _S_type_size(1));
       }
 
-      __pointer_type
+      value_type
       operator+=(ptrdiff_t __d) noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return _M_b.operator+=(__d);
+	return __atomic_impl::__add_fetch(std::__addressof(_M_p),
+					  _S_type_size(__d));
       }
 
-      __pointer_type
+      value_type
       operator+=(ptrdiff_t __d) volatile noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return _M_b.operator+=(__d);
+	return __atomic_impl::__add_fetch(std::__addressof(_M_p),
+					  _S_type_size(__d));
       }
 
-      __pointer_type
+      value_type
       operator-=(ptrdiff_t __d) noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return _M_b.operator-=(__d);
+	return __atomic_impl::__sub_fetch(std::__addressof(_M_p),
+					  _S_type_size(__d));
       }
 
-      __pointer_type
+      value_type
       operator-=(ptrdiff_t __d) volatile noexcept
       {
 #if __cplusplus >= 201703L
 	static_assert( is_object<_Tp>::value, "pointer to object type" );
 #endif
-	return _M_b.operator-=(__d);
+	return __atomic_impl::__sub_fetch(std::__addressof(_M_p),
+					  _S_type_size(__d));
       }
 
       bool
       is_lock_free() const noexcept
-      { return _M_b.is_lock_free(); }
+      {
+	return __atomic_impl::is_lock_free<sizeof(_M_p), __alignof__(_M_p)>();
+      }
 
       bool
       is_lock_free() const volatile noexcept
-      { return _M_b.is_lock_free(); }
+      {
+	return __atomic_impl::is_lock_free<sizeof(_M_p), __alignof__(_M_p)>();
+      }
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
 #endif
 
       void
-      store(__pointer_type __p,
+      store(value_type __p,
 	    memory_order __m = memory_order_seq_cst) noexcept
-      { return _M_b.store(__p, __m); }
+      { __atomic_impl::store(std::__addressof(_M_p), __p, __m); }
 
       void
-      store(__pointer_type __p,
+      store(value_type __p,
 	    memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return _M_b.store(__p, __m); }
+      { __atomic_impl::store(std::__addressof(_M_p), __p, __m); }
 
-      __pointer_type
+      value_type
       load(memory_order __m = memory_order_seq_cst) const noexcept
-      { return _M_b.load(__m); }
+      { return __atomic_impl::load(std::__addressof(_M_p), __m); }
 
-      __pointer_type
+      value_type
       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
-      { return _M_b.load(__m); }
+      { return __atomic_impl::load(std::__addressof(_M_p), __m); }
 
-      __pointer_type
-      exchange(__pointer_type __p,
+      value_type
+      exchange(value_type __p,
 	       memory_order __m = memory_order_seq_cst) noexcept
-      { return _M_b.exchange(__p, __m); }
+      { return __atomic_impl::exchange(std::__addressof(_M_p), __p, __m); }
 
-      __pointer_type
-      exchange(__pointer_type __p,
+      value_type
+      exchange(value_type __p,
 	       memory_order __m = memory_order_seq_cst) volatile noexcept
-      { return _M_b.exchange(__p, __m); }
+      { return __atomic_impl::exchange(std::__addressof(_M_p), __p, __m); }
 
       bool
-      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
+      compare_exchange_weak(value_type& __p1, value_type __p2,
 			    memory_order __m1, memory_order __m2) noexcept
-      { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
+      {
+	return __atomic_impl::compare_exchange_weak(std::__addressof(_M_p),
+						    __p1, __p2, __m1, __m2);
+      }
 
       bool
-      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
+      compare_exchange_weak(value_type& __p1, value_type __p2,
 			    memory_order __m1,
 			    memory_order __m2) volatile noexcept
-      { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
+      {
+	return __atomic_impl::compare_exchange_weak(std::__addressof(_M_p),
+						    __p1, __p2, __m1, __m2);
+      }
 
       bool
-      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
+      compare_exchange_weak(value_type& __p1, value_type __p2,
 			    memory_order __m = memory_order_seq_cst) noexcept
       {
 	return compare_exchange_weak(__p1, __p2, __m,
@@ -551,7 +535,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       }
 
       bool
-      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
+      compare_exchange_weak(value_type& __p1, value_type __p2,
 		    memory_order __m = memory_order_seq_cst) volatile noexcept
       {
 	return compare_exchange_weak(__p1, __p2, __m,
@@ -559,70 +543,78 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       }
 
       bool
-      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+      compare_exchange_strong(value_type& __p1, value_type __p2,
 			      memory_order __m1, memory_order __m2) noexcept
-      { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
+      {
+	return __atomic_impl::compare_exchange_strong(std::__addressof(_M_p),
+						      __p1, __p2, __m1, __m2);
+      }
 
       bool
-      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+      compare_exchange_strong(value_type& __p1, value_type __p2,
 			      memory_order __m1,
 			      memory_order __m2) volatile noexcept
-      { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
+      {
+	return __atomic_impl::compare_exchange_strong(std::__addressof(_M_p),
+						      __p1, __p2, __m1, __m2);
+      }
 
       bool
-      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+      compare_exchange_strong(value_type& __p1, value_type __p2,
 			      memory_order __m = memory_order_seq_cst) noexcept
       {
-	return _M_b.compare_exchange_strong(__p1, __p2, __m,
-					    __cmpexch_failure_order(__m));
+	return compare_exchange_strong(__p1, __p2, __m,
+				       __cmpexch_failure_order(__m));
       }
 
       bool
-      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+      compare_exchange_strong(value_type& __p1, value_type __p2,
 		    memory_order __m = memory_order_seq_cst) volatile noexcept
       {
-	return _M_b.compare_exchange_strong(__p1, __p2, __m,
-					    __cmpexch_failure_order(__m));
+	return compare_exchange_strong(__p1, __p2, __m,
+				       __cmpexch_failure_order(__m));
       }
 
-      __pointer_type
+      value_type
       fetch_add(ptrdiff_t __d,
 		memory_order __m = memory_order_seq_cst) noexcept
       {
-#if __cplusplus >= 201703L
-	static_assert( is_object<_Tp>::value, "pointer to object type" );
-#endif
-	return _M_b.fetch_add(__d, __m);
+	return __atomic_impl::fetch_add(std::__addressof(_M_p),
+					_S_type_size(__d), __m);
       }
 
-      __pointer_type
+      value_type
       fetch_add(ptrdiff_t __d,
 		memory_order __m = memory_order_seq_cst) volatile noexcept
       {
-#if __cplusplus >= 201703L
-	static_assert( is_object<_Tp>::value, "pointer to object type" );
-#endif
-	return _M_b.fetch_add(__d, __m);
+	return __atomic_impl::fetch_add(std::__addressof(_M_p),
+					_S_type_size(__d), __m);
       }
 
-      __pointer_type
+      value_type
       fetch_sub(ptrdiff_t __d,
 		memory_order __m = memory_order_seq_cst) noexcept
       {
-#if __cplusplus >= 201703L
-	static_assert( is_object<_Tp>::value, "pointer to object type" );
-#endif
-	return _M_b.fetch_sub(__d, __m);
+	return __atomic_impl::fetch_sub(std::__addressof(_M_p),
+					_S_type_size(__d), __m);
       }
 
-      __pointer_type
+      value_type
       fetch_sub(ptrdiff_t __d,
 		memory_order __m = memory_order_seq_cst) volatile noexcept
       {
+	return __atomic_impl::fetch_sub(std::__addressof(_M_p),
+					_S_type_size(__d), __m);
+      }
+
+    private:
+      static constexpr ptrdiff_t
+      _S_type_size(ptrdiff_t __d) noexcept
+      {
 #if __cplusplus >= 201703L
-	static_assert( is_object<_Tp>::value, "pointer to object type" );
+	static_assert(is_object_v<_Tp>);
 #endif
-	return _M_b.fetch_sub(__d, __m);
+	return __d * sizeof(_Tp);
       }
     };
 
@@ -631,8 +623,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<char> : __atomic_base<char>
     {
-      typedef char 			__integral_type;
-      typedef __atomic_base<char> 	__base_type;
+      using value_type = char;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -640,10 +631,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
@@ -654,8 +646,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<signed char> : __atomic_base<signed char>
     {
-      typedef signed char 		__integral_type;
-      typedef __atomic_base<signed char> 	__base_type;
+      using value_type = signed char;
 
       atomic() noexcept= default;
       ~atomic() noexcept = default;
@@ -663,10 +654,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
@@ -677,8 +669,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<unsigned char> : __atomic_base<unsigned char>
     {
-      typedef unsigned char 		__integral_type;
-      typedef __atomic_base<unsigned char> 	__base_type;
+      using value_type = unsigned char;
 
       atomic() noexcept= default;
       ~atomic() noexcept = default;
@@ -686,10 +677,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
@@ -700,8 +692,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<short> : __atomic_base<short>
     {
-      typedef short 			__integral_type;
-      typedef __atomic_base<short> 		__base_type;
+      using value_type = short;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -709,10 +700,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_SHORT_LOCK_FREE == 2;
@@ -723,8 +715,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<unsigned short> : __atomic_base<unsigned short>
     {
-      typedef unsigned short 	      	__integral_type;
-      typedef __atomic_base<unsigned short> 		__base_type;
+      using value_type = unsigned short;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -732,10 +723,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_SHORT_LOCK_FREE == 2;
@@ -746,8 +738,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<int> : __atomic_base<int>
     {
-      typedef int 			__integral_type;
-      typedef __atomic_base<int> 		__base_type;
+      using value_type = int;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -755,10 +746,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_INT_LOCK_FREE == 2;
@@ -769,8 +761,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<unsigned int> : __atomic_base<unsigned int>
     {
-      typedef unsigned int		__integral_type;
-      typedef __atomic_base<unsigned int> 	__base_type;
+      using value_type = unsigned int;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -778,10 +769,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_INT_LOCK_FREE == 2;
@@ -792,8 +784,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<long> : __atomic_base<long>
     {
-      typedef long 			__integral_type;
-      typedef __atomic_base<long> 	__base_type;
+      using value_type = long;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -801,10 +792,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_LONG_LOCK_FREE == 2;
@@ -815,8 +807,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<unsigned long> : __atomic_base<unsigned long>
     {
-      typedef unsigned long 		__integral_type;
-      typedef __atomic_base<unsigned long> 	__base_type;
+      using value_type = unsigned long;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -824,10 +815,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_LONG_LOCK_FREE == 2;
@@ -838,8 +830,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<long long> : __atomic_base<long long>
     {
-      typedef long long 		__integral_type;
-      typedef __atomic_base<long long> 		__base_type;
+      using value_type = long long;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -847,10 +838,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_LLONG_LOCK_FREE == 2;
@@ -861,8 +853,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<unsigned long long> : __atomic_base<unsigned long long>
     {
-      typedef unsigned long long       	__integral_type;
-      typedef __atomic_base<unsigned long long> 	__base_type;
+      using value_type = unsigned long long;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -870,10 +861,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_LLONG_LOCK_FREE == 2;
@@ -884,8 +876,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<wchar_t> : __atomic_base<wchar_t>
     {
-      typedef wchar_t 			__integral_type;
-      typedef __atomic_base<wchar_t> 	__base_type;
+      using value_type = wchar_t;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -893,10 +884,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_WCHAR_T_LOCK_FREE == 2;
@@ -908,8 +900,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<char8_t> : __atomic_base<char8_t>
     {
-      typedef char8_t 			__integral_type;
-      typedef __atomic_base<char8_t> 	__base_type;
+      using value_type = char8_t;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -917,23 +908,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus > 201402L
     static constexpr bool is_always_lock_free = ATOMIC_CHAR8_T_LOCK_FREE == 2;
 #endif
     };
-#endif
+#endif // char8_t
 
   /// Explicit specialization for char16_t.
   template<>
     struct atomic<char16_t> : __atomic_base<char16_t>
     {
-      typedef char16_t 			__integral_type;
-      typedef __atomic_base<char16_t> 	__base_type;
+      using value_type = char16_t;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -941,10 +932,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_CHAR16_T_LOCK_FREE == 2;
@@ -955,8 +947,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<>
     struct atomic<char32_t> : __atomic_base<char32_t>
     {
-      typedef char32_t 			__integral_type;
-      typedef __atomic_base<char32_t> 	__base_type;
+      using value_type = char32_t;
 
       atomic() noexcept = default;
       ~atomic() noexcept = default;
@@ -964,10 +955,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       atomic& operator=(const atomic&) = delete;
       atomic& operator=(const atomic&) volatile = delete;
 
-      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+      constexpr atomic(value_type __i) noexcept
+      : __atomic_base<value_type>(__i) { }
 
-      using __base_type::operator __integral_type;
-      using __base_type::operator=;
+      using __atomic_base<value_type>::operator value_type;
+      using __atomic_base<value_type>::operator=;
 
 #if __cplusplus >= 201703L
     static constexpr bool is_always_lock_free = ATOMIC_CHAR32_T_LOCK_FREE == 2;
@@ -1337,9 +1329,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 						     memory_order_seq_cst);
     }
 
-  // Function templates for atomic_integral and atomic_pointer operations only.
-  // Some operations (and, or, xor) are only available for atomic integrals,
-  // which is implemented by taking a parameter of type __atomic_base<_ITp>*.
+  // Function templates for atomic<integral> and atomic<T*> operations only.
+  // These functions are ill-formed if called for a specialization that
+  // does not define the corresponding member function.
 
   template<typename _ITp>
     inline _ITp
@@ -1371,42 +1363,42 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_and_explicit(__atomic_base<_ITp>* __a,
+    atomic_fetch_and_explicit(atomic<_ITp>* __a,
 			      __atomic_val_t<_ITp> __i,
 			      memory_order __m) noexcept
     { return __a->fetch_and(__i, __m); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a,
+    atomic_fetch_and_explicit(volatile atomic<_ITp>* __a,
 			      __atomic_val_t<_ITp> __i,
 			      memory_order __m) noexcept
     { return __a->fetch_and(__i, __m); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_or_explicit(__atomic_base<_ITp>* __a,
+    atomic_fetch_or_explicit(atomic<_ITp>* __a,
 			     __atomic_val_t<_ITp> __i,
 			     memory_order __m) noexcept
     { return __a->fetch_or(__i, __m); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a,
+    atomic_fetch_or_explicit(volatile atomic<_ITp>* __a,
 			     __atomic_val_t<_ITp> __i,
 			     memory_order __m) noexcept
     { return __a->fetch_or(__i, __m); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a,
+    atomic_fetch_xor_explicit(atomic<_ITp>* __a,
 			      __atomic_val_t<_ITp> __i,
 			      memory_order __m) noexcept
     { return __a->fetch_xor(__i, __m); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a,
+    atomic_fetch_xor_explicit(volatile atomic<_ITp>* __a,
 			      __atomic_val_t<_ITp> __i,
 			      memory_order __m) noexcept
     { return __a->fetch_xor(__i, __m); }
@@ -1437,37 +1429,37 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_and(__atomic_base<_ITp>* __a,
+    atomic_fetch_and(atomic<_ITp>* __a,
 		     __atomic_val_t<_ITp> __i) noexcept
     { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_and(volatile __atomic_base<_ITp>* __a,
+    atomic_fetch_and(volatile atomic<_ITp>* __a,
 		     __atomic_val_t<_ITp> __i) noexcept
     { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_or(__atomic_base<_ITp>* __a,
+    atomic_fetch_or(atomic<_ITp>* __a,
 		    __atomic_val_t<_ITp> __i) noexcept
     { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_or(volatile __atomic_base<_ITp>* __a,
+    atomic_fetch_or(volatile atomic<_ITp>* __a,
 		    __atomic_val_t<_ITp> __i) noexcept
     { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_xor(__atomic_base<_ITp>* __a,
+    atomic_fetch_xor(atomic<_ITp>* __a,
 		     __atomic_val_t<_ITp> __i) noexcept
     { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
     inline _ITp
-    atomic_fetch_xor(volatile __atomic_base<_ITp>* __a,
+    atomic_fetch_xor(volatile atomic<_ITp>* __a,
 		     __atomic_val_t<_ITp> __i) noexcept
     { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
 
