Support for emitting libcalls for __atomic_fetch_nand and
__atomic_{add,sub,and,or,xor,nand}_fetch was missing; add it, along
with test cases.

http://reviews.llvm.org/D10847
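
For context, the GCC-compatible builtins come in two flavours:
__atomic_fetch_OP returns the value the object held before the
operation, while __atomic_OP_fetch returns the value after it. A
minimal illustration in plain C (not part of this patch):

    int x = 6;
    int before = __atomic_fetch_and(&x, 3, __ATOMIC_SEQ_CST); // before == 6, x == 2
    int after  = __atomic_and_fetch(&x, 4, __ATOMIC_SEQ_CST); // x == 0, after == 0

nand is the one non-obvious case: it computes ~(*ptr & val), not
~*ptr & val.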

Files:
  docs/LanguageExtensions.rst
  lib/CodeGen/CGAtomic.cpp
  test/CodeGen/atomic-ops-libcall.c

Index: docs/LanguageExtensions.rst
===================================================================
--- docs/LanguageExtensions.rst
+++ docs/LanguageExtensions.rst
@@ -1715,6 +1715,9 @@
 provided, with values corresponding to the enumerators of C11's
 ``memory_order`` enumeration.
 
+(Note that Clang additionally provides the GCC-compatible ``__atomic_*``
+builtins.)
+
 Low-level ARM exclusive memory builtins
 ---------------------------------------
 
Index: lib/CodeGen/CGAtomic.cpp
===================================================================
--- lib/CodeGen/CGAtomic.cpp
+++ lib/CodeGen/CGAtomic.cpp
@@ -784,20 +784,43 @@
   if (UseLibcall) {
     bool UseOptimizedLibcall = false;
     switch (E->getOp()) {
+    case AtomicExpr::AO__c11_atomic_init:
+      llvm_unreachable("Already handled!");
+
     case AtomicExpr::AO__c11_atomic_fetch_add:
     case AtomicExpr::AO__atomic_fetch_add:
     case AtomicExpr::AO__c11_atomic_fetch_and:
     case AtomicExpr::AO__atomic_fetch_and:
     case AtomicExpr::AO__c11_atomic_fetch_or:
     case AtomicExpr::AO__atomic_fetch_or:
+    case AtomicExpr::AO__atomic_fetch_nand:
     case AtomicExpr::AO__c11_atomic_fetch_sub:
     case AtomicExpr::AO__atomic_fetch_sub:
     case AtomicExpr::AO__c11_atomic_fetch_xor:
     case AtomicExpr::AO__atomic_fetch_xor:
+    case AtomicExpr::AO__atomic_add_fetch:
+    case AtomicExpr::AO__atomic_and_fetch:
+    case AtomicExpr::AO__atomic_nand_fetch:
+    case AtomicExpr::AO__atomic_or_fetch:
+    case AtomicExpr::AO__atomic_sub_fetch:
+    case AtomicExpr::AO__atomic_xor_fetch:
       // For these, only library calls for certain sizes exist.
       UseOptimizedLibcall = true;
       break;
-    default:
+
+    case AtomicExpr::AO__c11_atomic_load:
+    case AtomicExpr::AO__c11_atomic_store:
+    case AtomicExpr::AO__c11_atomic_exchange:
+    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+    case AtomicExpr::AO__atomic_load_n:
+    case AtomicExpr::AO__atomic_load:
+    case AtomicExpr::AO__atomic_store_n:
+    case AtomicExpr::AO__atomic_store:
+    case AtomicExpr::AO__atomic_exchange_n:
+    case AtomicExpr::AO__atomic_exchange:
+    case AtomicExpr::AO__atomic_compare_exchange_n:
+    case AtomicExpr::AO__atomic_compare_exchange:
       // Only use optimized library calls for sizes for which they exist.
       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
         UseOptimizedLibcall = true;
@@ -819,6 +842,9 @@
     QualType RetTy;
     bool HaveRetTy = false;
     switch (E->getOp()) {
+    case AtomicExpr::AO__c11_atomic_init:
+      llvm_unreachable("Already handled!");
+
     // There is only one libcall for compare and exchange, because there is no
     // optimisation benefit possible from a libcall version of a weak compare
     // and exchange.
@@ -902,7 +928,49 @@
       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                         E->getExprLoc(), sizeChars);
       break;
-    default: return EmitUnsupportedRValue(E, "atomic library call");
+    // T __atomic_fetch_nand_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_fetch_nand:
+      LibCallName = "__atomic_fetch_nand";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+
+    // T __atomic_add_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_add_fetch:
+      LibCallName = "__atomic_add_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_and_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_and_fetch:
+      LibCallName = "__atomic_and_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_or_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_or_fetch:
+      LibCallName = "__atomic_or_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_sub_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_sub_fetch:
+      LibCallName = "__atomic_sub_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_xor_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_xor_fetch:
+      LibCallName = "__atomic_xor_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_nand_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_nand_fetch:
+      LibCallName = "__atomic_nand_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
     }
 
     // Optimized functions have the size in their name.
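
The new cases emit calls that a libatomic-style runtime is expected
to provide. As a sketch only (the real runtime may differ; the CAS
loop below is just one way to satisfy the contract), the _4 variant
of nand_fetch could be implemented as:

    #include <stdint.h>

    // Illustrative only: returns the value *after* the operation, per the
    // __atomic_nand_fetch contract, i.e. ~(old & val).
    uint32_t __atomic_nand_fetch_4(uint32_t *mem, uint32_t val, int order) {
      uint32_t old = __atomic_load_n(mem, __ATOMIC_RELAXED);
      uint32_t desired;
      do {
        desired = ~(old & val);
        // On CAS failure, old is reloaded with the current value of *mem.
      } while (!__atomic_compare_exchange_n(mem, &old, desired, 1,
                                            order, __ATOMIC_RELAXED));
      return desired;
    }
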
Index: test/CodeGen/atomic-ops-libcall.c
===================================================================
--- test/CodeGen/atomic-ops-libcall.c
+++ test/CodeGen/atomic-ops-libcall.c
@@ -35,3 +35,75 @@
   // Note, the GNU builtins do not multiply by sizeof(T)!
   return __atomic_fetch_sub(p, 4, memory_order_relaxed);
 }
+
+int test_atomic_fetch_add(int *p) {
+  // CHECK: test_atomic_fetch_add
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_add(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_sub(int *p) {
+  // CHECK: test_atomic_fetch_sub
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_and(int *p) {
+  // CHECK: test_atomic_fetch_and
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_and(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_or(int *p) {
+  // CHECK: test_atomic_fetch_or
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_or(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_xor(int *p) {
+  // CHECK: test_atomic_fetch_xor
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_nand(int *p) {
+  // CHECK: test_atomic_fetch_nand
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_add_fetch(int *p) {
+  // CHECK: test_atomic_add_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_add_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_add_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_sub_fetch(int *p) {
+  // CHECK: test_atomic_sub_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_sub_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_and_fetch(int *p) {
+  // CHECK: test_atomic_and_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_and_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_and_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_or_fetch(int *p) {
+  // CHECK: test_atomic_or_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_or_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_or_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_xor_fetch(int *p) {
+  // CHECK: test_atomic_xor_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_xor_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_nand_fetch(int *p) {
+  // CHECK: test_atomic_nand_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_nand_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
+}
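
A note on reading the CHECK lines: the trailing i32 5 is the integer
value Clang uses for memory_order_seq_cst (relaxed=0, consume=1,
acquire=2, release=3, acq_rel=4, seq_cst=5), and the i8* first
argument is the pointer cast to the generic type the libcalls take.
To run just this test (the path is illustrative, from an LLVM build
directory):

    ./bin/llvm-lit -v tools/clang/test/CodeGen/atomic-ops-libcall.c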