Revision: 20598
Author:   hpa...@chromium.org
Date:     Wed Apr  9 08:20:10 2014 UTC
Log:      Introduced Atomic8 and added no-barrier Atomic8 accessors.

BUG=
R=ja...@chromium.org

Review URL: https://codereview.chromium.org/228613005
http://code.google.com/p/v8/source/detail?r=20598

Modified:
 /branches/bleeding_edge/src/atomicops.h
 /branches/bleeding_edge/src/atomicops_internals_arm64_gcc.h
 /branches/bleeding_edge/src/atomicops_internals_arm_gcc.h
 /branches/bleeding_edge/src/atomicops_internals_mac.h
 /branches/bleeding_edge/src/atomicops_internals_mips_gcc.h
 /branches/bleeding_edge/src/atomicops_internals_tsan.h
 /branches/bleeding_edge/src/atomicops_internals_x86_gcc.h
 /branches/bleeding_edge/src/atomicops_internals_x86_msvc.h
 /branches/bleeding_edge/test/cctest/test-atomicops.cc
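
For readers skimming the diff, here is a minimal usage sketch of the new no-barrier byte accessors declared in atomicops.h below. Only NoBarrier_Store/NoBarrier_Load on Atomic8 come from this revision; the flag variable and helper functions are hypothetical, and the accessors provide atomicity of the single byte but no ordering guarantees.

// Minimal sketch (not part of this change), assuming atomicops.h is on the
// include path as elsewhere in V8 at this revision.
#include "atomicops.h"

namespace {

// Hypothetical shared byte flag; the name is illustrative only.
volatile v8::internal::Atomic8 g_flag = 0;

void SetFlag() {
  // Relaxed (no-barrier) byte store: atomic, but imposes no ordering.
  v8::internal::NoBarrier_Store(&g_flag, 1);
}

bool FlagIsSet() {
  // Relaxed (no-barrier) byte load of the same location.
  return v8::internal::NoBarrier_Load(&g_flag) != 0;
}

}  // namespace

Note that Acquire_/Release_ variants are not yet provided for Atomic8, which is why the new TestStoreAtomic8/TestLoadAtomic8 tests below are kept separate from the templated TestStore/TestLoad.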

=======================================
--- /branches/bleeding_edge/src/atomicops.h     Fri Mar 21 09:28:26 2014 UTC
+++ /branches/bleeding_edge/src/atomicops.h     Wed Apr  9 08:20:10 2014 UTC
@@ -63,6 +63,7 @@
 namespace v8 {
 namespace internal {

+typedef char Atomic8;
 typedef int32_t Atomic32;
 #ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
@@ -120,10 +121,12 @@
                                 Atomic32 new_value);

 void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
 void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
 void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
 void Release_Store(volatile Atomic32* ptr, Atomic32 value);

+Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
 Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
 Atomic32 Acquire_Load(volatile const Atomic32* ptr);
 Atomic32 Release_Load(volatile const Atomic32* ptr);
=======================================
--- /branches/bleeding_edge/src/atomicops_internals_arm64_gcc.h Thu Apr 3 14:28:40 2014 UTC
+++ /branches/bleeding_edge/src/atomicops_internals_arm64_gcc.h Wed Apr 9 08:20:10 2014 UTC
@@ -143,6 +143,10 @@

   return prev;
 }
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}

 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
@@ -161,6 +165,10 @@
     : "memory"
   );  // NOLINT
 }
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}

 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return *ptr;
=======================================
--- /branches/bleeding_edge/src/atomicops_internals_arm_gcc.h Mon Mar 10 06:43:21 2014 UTC
+++ /branches/bleeding_edge/src/atomicops_internals_arm_gcc.h Wed Apr 9 08:20:10 2014 UTC
@@ -310,6 +310,14 @@
   MemoryBarrier();
   return *ptr;
 }
+
+// Byte accessors.
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }

 } }  // namespace v8::internal

=======================================
--- /branches/bleeding_edge/src/atomicops_internals_mac.h Mon Mar 10 06:43:21 2014 UTC
+++ /branches/bleeding_edge/src/atomicops_internals_mac.h Wed Apr 9 08:20:10 2014 UTC
@@ -92,6 +92,10 @@
                                        Atomic32 new_value) {
   return Acquire_CompareAndSwap(ptr, old_value, new_value);
 }
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}

 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
@@ -106,6 +110,10 @@
   MemoryBarrier();
   *ptr = value;
 }
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}

 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return *ptr;
=======================================
--- /branches/bleeding_edge/src/atomicops_internals_mips_gcc.h Tue Jun 11 14:30:43 2013 UTC
+++ /branches/bleeding_edge/src/atomicops_internals_mips_gcc.h Wed Apr 9 08:20:10 2014 UTC
@@ -135,6 +135,10 @@
   MemoryBarrier();
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}

 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
@@ -153,6 +157,10 @@
   MemoryBarrier();
   *ptr = value;
 }
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}

 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return *ptr;
=======================================
--- /branches/bleeding_edge/src/atomicops_internals_tsan.h Mon Mar 10 06:43:21 2014 UTC
+++ /branches/bleeding_edge/src/atomicops_internals_tsan.h Wed Apr 9 08:20:10 2014 UTC
@@ -275,6 +275,10 @@
       __tsan_memory_order_release, __tsan_memory_order_relaxed);
   return cmp;
 }
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
+}

 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
@@ -288,6 +292,10 @@
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
 }
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
+}

 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
=======================================
--- /branches/bleeding_edge/src/atomicops_internals_x86_gcc.h Tue Apr 30 11:34:43 2013 UTC
+++ /branches/bleeding_edge/src/atomicops_internals_x86_gcc.h Wed Apr 9 08:20:10 2014 UTC
@@ -106,6 +106,10 @@
                                        Atomic32 new_value) {
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}

 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
@@ -151,6 +155,10 @@
   *ptr = value;  // An x86 store acts as a release barrier.
   // See comments in Atomic64 version of Release_Store(), below.
 }
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}

 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return *ptr;
=======================================
--- /branches/bleeding_edge/src/atomicops_internals_x86_msvc.h Mon Mar 10 06:43:21 2014 UTC
+++ /branches/bleeding_edge/src/atomicops_internals_x86_msvc.h Wed Apr 9 08:20:10 2014 UTC
@@ -99,6 +99,10 @@
                                        Atomic32 new_value) {
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}

 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
@@ -113,6 +117,10 @@
  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
   // See comments in Atomic64 version of Release_Store() below.
 }
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}

 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return *ptr;
=======================================
--- /branches/bleeding_edge/test/cctest/test-atomicops.cc Mon Mar 10 06:43:21 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-atomicops.cc Wed Apr 9 08:20:10 2014 UTC
@@ -212,6 +212,21 @@
   Release_Store(&value, kVal2);
   CHECK_EQU(kVal2, value);
 }
+
+
+// Merge this test with TestStore as soon as we have Atomic8 acquire
+// and release stores.
+static void TestStoreAtomic8() {
+  const Atomic8 kVal1 = TestFillValue<Atomic8>();
+  const Atomic8 kVal2 = static_cast<Atomic8>(-1);
+
+  Atomic8 value;
+
+  NoBarrier_Store(&value, kVal1);
+  CHECK_EQU(kVal1, value);
+  NoBarrier_Store(&value, kVal2);
+  CHECK_EQU(kVal2, value);
+}


 // This is a simple sanity check to ensure that values are correct.
@@ -238,6 +253,21 @@
   value = kVal2;
   CHECK_EQU(kVal2, Release_Load(&value));
 }
+
+
+// Merge this test with TestLoad as soon as we have Atomic8 acquire
+// and release loads.
+static void TestLoadAtomic8() {
+  const Atomic8 kVal1 = TestFillValue<Atomic8>();
+  const Atomic8 kVal2 = static_cast<Atomic8>(-1);
+
+  Atomic8 value;
+
+  value = kVal1;
+  CHECK_EQU(kVal1, NoBarrier_Load(&value));
+  value = kVal2;
+  CHECK_EQU(kVal2, NoBarrier_Load(&value));
+}


 TEST(AtomicIncrement) {
@@ -265,12 +295,14 @@


 TEST(Store) {
+  TestStoreAtomic8();
   TestStore<Atomic32>();
   TestStore<AtomicWord>();
 }


 TEST(Load) {
+  TestLoadAtomic8();
   TestLoad<Atomic32>();
   TestLoad<AtomicWord>();
 }
