Author: Joseph Huber
Date: 2026-03-03T09:57:26-06:00
New Revision: d61b45cd409d6def96eae8977bab2b0393c96b7e

URL: 
https://github.com/llvm/llvm-project/commit/d61b45cd409d6def96eae8977bab2b0393c96b7e
DIFF: 
https://github.com/llvm/llvm-project/commit/d61b45cd409d6def96eae8977bab2b0393c96b7e.diff

LOG: [Clang] Generate ptr and float atomics without integer casts (#183853)

Summary:
LLVM IR should support these for all cases except for compare-exchange.
Currently the code goes through an integer indirection for these cases.
This PR changes the behavior to use atomics directly to the target
memory type.

Added: 
    

Modified: 
    clang/lib/CodeGen/CGAtomic.cpp
    clang/test/CodeGen/atomic-arm64.c
    clang/test/CodeGen/atomic-ops.c
    clang/test/CodeGen/big-atomic-ops.c
    clang/test/CodeGenOpenCL/atomic-ops.cl

Removed: 
    


################################################################################
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index fb3a5663834ed..859ab20bb6740 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -830,6 +830,17 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
   return DeclPtr;
 }
 
+/// Return true if \param ValTy is a type that should be casted to integer
+/// around the atomic memory operation. If \param CmpXchg is true, then the
+/// cast of a floating point type is made as that instruction can not have
+/// floating point operands.  TODO: Allow compare-and-exchange and FP - see
+/// comment in AtomicExpandPass.cpp.
+static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
+  if (ValTy->isFloatingPointTy())
+    return ValTy->isX86_FP80Ty() || CmpXchg;
+  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
+}
+
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
                          Address OriginalVal1, llvm::Value *IsWeak,
@@ -941,7 +952,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   llvm::Value *Order = EmitScalarExpr(E->getOrder());
   llvm::Value *Scope =
       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
-  bool ShouldCastToIntPtrTy = true;
 
   switch (E->getOp()) {
   case AtomicExpr::AO__c11_atomic_init:
@@ -1041,7 +1051,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__scoped_atomic_max_fetch:
   case AtomicExpr::AO__scoped_atomic_min_fetch:
   case AtomicExpr::AO__scoped_atomic_sub_fetch:
-    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
     [[fallthrough]];
 
   case AtomicExpr::AO__atomic_fetch_and:
@@ -1089,6 +1098,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   }
 
   QualType RValTy = E->getType().getUnqualifiedType();
+  bool ShouldCastToIntPtrTy =
+      shouldCastToInt(ConvertTypeForMem(MemTy), E->isCmpXChg());
 
   // The inlined atomics only function on iN types, where N is a power of 2. We
   // need to make sure (via temporaries if necessary) that all incoming values
@@ -1507,17 +1518,6 @@ RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
       LVal.getBaseInfo(), TBAAAccessInfo()));
 }
 
-/// Return true if \param ValTy is a type that should be casted to integer
-/// around the atomic memory operation. If \param CmpXchg is true, then the
-/// cast of a floating point type is made as that instruction can not have
-/// floating point operands.  TODO: Allow compare-and-exchange and FP - see
-/// comment in AtomicExpandPass.cpp.
-static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
-  if (ValTy->isFloatingPointTy())
-    return ValTy->isX86_FP80Ty() || CmpXchg;
-  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
-}
-
 RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
                                           AggValueSlot ResultSlot,
                                           SourceLocation Loc, bool AsValue,

diff --git a/clang/test/CodeGen/atomic-arm64.c b/clang/test/CodeGen/atomic-arm64.c
index 672fe3fbf180f..83943fa3cfa88 100644
--- a/clang/test/CodeGen/atomic-arm64.c
+++ b/clang/test/CodeGen/atomic-arm64.c
@@ -31,8 +31,8 @@ void test0(void) {
 // CHECK-LABEL:define{{.*}} void @test1()
 // CHECK:      [[TEMP:%.*]] = alloca float, align 4
 // CHECK-NEXT: store float 3.000000e+00, ptr [[TEMP]]
-// CHECK-NEXT: [[T1:%.*]] = load i32, ptr [[TEMP]], align 4
-// CHECK-NEXT: store atomic i32 [[T1]], ptr @a_float seq_cst, align 4
+// CHECK-NEXT: [[T1:%.*]] = load float, ptr [[TEMP]], align 4
+// CHECK-NEXT: store atomic float [[T1]], ptr @a_float seq_cst, align 4
 void test1(void) {
   __c11_atomic_store(&a_float, 3, memory_order_seq_cst);
 }
@@ -40,8 +40,8 @@ void test1(void) {
 // CHECK-LABEL:define{{.*}} void @test2()
 // CHECK:      [[TEMP:%.*]] = alloca ptr, align 8
 // CHECK-NEXT: store ptr @a_bool, ptr [[TEMP]]
-// CHECK-NEXT: [[T1:%.*]] = load i64, ptr [[TEMP]], align 8
-// CHECK-NEXT: store atomic i64 [[T1]], ptr @a_pointer seq_cst, align 8
+// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr [[TEMP]], align 8
+// CHECK-NEXT: store atomic ptr [[T1]], ptr @a_pointer seq_cst, align 8
 void test2(void) {
   __c11_atomic_store(&a_pointer, &a_bool, memory_order_seq_cst);
 }

diff --git a/clang/test/CodeGen/atomic-ops.c b/clang/test/CodeGen/atomic-ops.c
index 97d3d3ba10065..a95ae942b44b2 100644
--- a/clang/test/CodeGen/atomic-ops.c
+++ b/clang/test/CodeGen/atomic-ops.c
@@ -165,13 +165,13 @@ _Bool fi4d(_Atomic(int) *i, int _AS1 *ptr2) {
 
 float ff1(_Atomic(float) *d) {
   // CHECK-LABEL: @ff1
-  // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
+  // CHECK: load atomic float, ptr {{.*}} monotonic, align 4
   return __c11_atomic_load(d, memory_order_relaxed);
 }
 
 void ff2(_Atomic(float) *d) {
   // CHECK-LABEL: @ff2
-  // CHECK: store atomic i32 {{.*}} release, align 4
+  // CHECK: store atomic float {{.*}} release, align 4
   __c11_atomic_store(d, 1, memory_order_release);
 }
 
@@ -269,7 +269,7 @@ int ud1(int* p) {
 
 int* fp1(_Atomic(int*) *p) {
   // CHECK-LABEL: @fp1
-  // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
+  // CHECK: load atomic ptr, ptr {{.*}} seq_cst, align 4
   return __c11_atomic_load(p, memory_order_seq_cst);
 }
 

diff --git a/clang/test/CodeGen/big-atomic-ops.c b/clang/test/CodeGen/big-atomic-ops.c
index 7ef772027ef8b..8a76ba82557bc 100644
--- a/clang/test/CodeGen/big-atomic-ops.c
+++ b/clang/test/CodeGen/big-atomic-ops.c
@@ -113,13 +113,13 @@ _Bool fi4b(int *i) {
 
 float ff1(_Atomic(float) *d) {
   // CHECK: @ff1
-  // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
+  // CHECK: load atomic float, ptr {{.*}} monotonic, align 4
   return __c11_atomic_load(d, memory_order_relaxed);
 }
 
 void ff2(_Atomic(float) *d) {
   // CHECK: @ff2
-  // CHECK: store atomic i32 {{.*}} release, align 4
+  // CHECK: store atomic float {{.*}} release, align 4
   __c11_atomic_store(d, 1, memory_order_release);
 }
 
@@ -129,7 +129,7 @@ float ff3(_Atomic(float) *d) {
 
 int* fp1(_Atomic(int*) *p) {
   // CHECK: @fp1
-  // CHECK: load atomic i64, ptr {{.*}} seq_cst, align 8
+  // CHECK: load atomic ptr, ptr {{.*}} seq_cst, align 8
   return __c11_atomic_load(p, memory_order_seq_cst);
 }
 

diff --git a/clang/test/CodeGenOpenCL/atomic-ops.cl b/clang/test/CodeGenOpenCL/atomic-ops.cl
index f54880f88eb9a..db2cb571b0d8f 100644
--- a/clang/test/CodeGenOpenCL/atomic-ops.cl
+++ b/clang/test/CodeGenOpenCL/atomic-ops.cl
@@ -174,13 +174,13 @@ void fi6(atomic_int *i, int order, int scope) {
 
 float ff1(global atomic_float *d) {
   // CHECK-LABEL: @ff1
-  // CHECK: load atomic i32, ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic, align 4{{$}}
+  // CHECK: load atomic float, ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic, align 4{{$}}
   return __opencl_atomic_load(d, memory_order_relaxed, memory_scope_work_group);
 }
 
 void ff2(atomic_float *d) {
   // CHECK-LABEL: @ff2
-  // CHECK: store atomic i32 {{.*}} syncscope("workgroup-one-as") release, align 4
+  // CHECK: store atomic float {{.*}} syncscope("workgroup-one-as") release, align 4
   __opencl_atomic_store(d, 1, memory_order_release, memory_scope_work_group);
 }
 


        
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to