Hi rjmccall, hfinkel, fraggamuffin, ejstotzer,
Adds codegen for 'atomic capture' constructs with the following forms of
expressions/statements:
```
v = x binop= expr;
v = x++;
v = ++x;
v = x--;
v = --x;
v = x = x binop expr;
v = x = expr binop x;
{v = x; x binop= expr;}
{v = x; x++;}
{v = x; ++x;}
{v = x; x--;}
{v = x; --x;}
{x = x binop expr; v = x;}
{x binop= expr; v = x;}
{x++; v = x;}
{++x; v = x;}
{x--; v = x;}
{--x; v = x;}
{x = expr binop x; v = x;}
```
If 'x' and 'expr' have integer type, 'binop' is associative or 'x' is the
leftmost operand in the RHS of the assignment expression, and atomic operations
are allowed for the type of 'x' on the target platform, an 'atomicrmw'
instruction is emitted.
Otherwise, a compare-and-swap sequence is emitted.
The update of 'v' is not required to be atomic with respect to the read or
write of 'x'.
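For example, a minimal sketch (illustrative only; the variables and the
function name below are placeholders, not part of the patch): an integer
compound assignment with an associative operator is expected to lower to a
single 'atomicrmw' plus the store that captures the value into 'v', while a
floating-point update has no 'atomicrmw' form and takes the compare-and-swap
path instead:
```
// Illustrative sketch only -- not part of the patch. Assumes -fopenmp.
int iv, ix, ie;
double dv, dx, de;

void capture_examples(void) {
  // Integer 'x += expr' with associative '+': expected to lower to
  // 'atomicrmw add' followed by the store that captures the value into 'v'.
#pragma omp atomic capture
  iv = ix += ie;

  // Floating-point update: no atomicrmw form for the subtraction, so the
  // compare-and-swap sequence is emitted instead.
#pragma omp atomic capture
  dv = dx = dx - de;
}
```
The compare-and-swap fallback has the following general shape: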
```
bb:
...
atomic load <x>
cont:
<expected> = phi [ <x>, %bb ], [ <new_failed>, %cont ]
<desired> = <expected> binop <expr>
<res> = cmpxchg atomic &<x>, <expected>, <desired>
<new_failed> = <res>.field1
br <res>.field2, label %exit, label %cont
exit:
atomic store <old/new x>, <v>
...
```
If 'x' was updated with an 'atomicrmw' instruction and 'v' must receive the new
value of 'x', an additional atomic load of 'x' is used to read the value to be
stored to 'v'. Otherwise, either the previous value of 'x' returned by the
atomic instruction is used (if 'v' must receive the old value of 'x'), or the
newly computed value of 'x' is used (if 'v' must receive the new value of 'x').
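A hedged C illustration of these three capture cases (variable names are
placeholders, not taken from the patch; the lowering noted in the comments
matches the checks in the test below):
```
// Illustrative only -- not part of the patch. Assumes -fopenmp.
int v, x, e;

void capture_value_cases(void) {
  // 'v' receives the OLD value of 'x': the value returned by the atomicrmw
  // is stored to 'v' directly; no extra load of 'x' is needed.
#pragma omp atomic capture
  v = x++;

  // 'v' receives the NEW value of 'x': after the atomicrmw an additional
  // atomic load of 'x' reads the value to be stored to 'v'.
#pragma omp atomic capture
  v = ++x;

  // No atomicrmw form exists for '*', so a cmpxchg loop is emitted and the
  // newly computed value of 'x' is stored to 'v' directly.
#pragma omp atomic capture
  v = x *= e;
}
```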
http://reviews.llvm.org/D9049
Files:
lib/CodeGen/CGStmtOpenMP.cpp
lib/CodeGen/CodeGenFunction.h
lib/Sema/SemaOpenMP.cpp
test/OpenMP/atomic_capture_codegen.cpp
test/OpenMP/atomic_codegen.cpp
test/OpenMP/atomic_messages.cpp
Index: test/OpenMP/atomic_capture_codegen.cpp
===================================================================
--- test/OpenMP/atomic_capture_codegen.cpp
+++ test/OpenMP/atomic_capture_codegen.cpp
@@ -0,0 +1,1168 @@
+// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp=libiomp5 -x c -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenmp=libiomp5 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp=libiomp5 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
+// expected-no-diagnostics
+
+#ifndef HEADER
+#define HEADER
+
+_Bool bv, bx;
+char cv, cx;
+unsigned char ucv, ucx;
+short sv, sx;
+unsigned short usv, usx;
+int iv, ix;
+unsigned int uiv, uix;
+long lv, lx;
+unsigned long ulv, ulx;
+long long llv, llx;
+unsigned long long ullv, ullx;
+float fv, fx;
+double dv, dx;
+long double ldv, ldx;
+_Complex int civ, cix;
+_Complex float cfv, cfx;
+_Complex double cdv, cdx;
+
+typedef int int4 __attribute__((__vector_size__(16)));
+int4 int4x;
+
+struct BitFields {
+ int : 32;
+ int a : 31;
+} bfx;
+
+struct BitFields_packed {
+ int : 32;
+ int a : 31;
+} __attribute__ ((__packed__)) bfx_packed;
+
+struct BitFields2 {
+ int : 31;
+ int a : 1;
+} bfx2;
+
+struct BitFields2_packed {
+ int : 31;
+ int a : 1;
+} __attribute__ ((__packed__)) bfx2_packed;
+
+struct BitFields3 {
+ int : 11;
+ int a : 14;
+} bfx3;
+
+struct BitFields3_packed {
+ int : 11;
+ int a : 14;
+} __attribute__ ((__packed__)) bfx3_packed;
+
+struct BitFields4 {
+ short : 16;
+ int a: 1;
+ long b : 7;
+} bfx4;
+
+struct BitFields4_packed {
+ short : 16;
+ int a: 1;
+ long b : 7;
+} __attribute__ ((__packed__)) bfx4_packed;
+
+typedef float float2 __attribute__((ext_vector_type(2)));
+float2 float2x;
+
+register int rix __asm__("0");
+
+int main() {
+// CHECK: [[PREV:%.+]] = atomicrmw add i8* @{{.+}}, i8 1 monotonic
+// CHECK: store atomic i8 [[PREV]], i8* @{{.+}} monotonic
+#pragma omp atomic capture
+ bv = bx++;
+// CHECK: atomicrmw add i8* @{{.+}}, i8 1 monotonic
+// CHECK: load atomic i8, i8* @{{.+}} monotonic,
+// CHECK: store atomic i8 %{{.+}}, i8* @{{.+}} monotonic
+#pragma omp atomic capture
+ cv = ++cx;
+// CHECK: [[PREV:%.+]] = atomicrmw sub i8* @{{.+}}, i8 1 monotonic
+// CHECK: store atomic i8 [[PREV]], i8* @{{.+}} monotonic
+#pragma omp atomic capture
+ ucv = ucx--;
+// CHECK: atomicrmw sub i16* @{{.+}}, i16 1 monotonic
+// CHECK: load atomic i16, i16* @{{.+}} monotonic,
+// CHECK: store atomic i16 %{{.+}}, i16* @{{.+}} monotonic
+#pragma omp atomic capture
+ sv = --sx;
+// CHECK: [[USV:%.+]] = load i16, i16* @{{.+}},
+// CHECK: [[EXPR:%.+]] = zext i16 [[USV]] to i32
+// CHECK: [[X:%.+]] = load atomic i16, i16* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[CONV:%.+]] = zext i16 [[EXPECTED]] to i32
+// CHECK: [[ADD:%.+]] = add nsw i32 [[CONV]], [[EXPR]]
+// CHECK: [[DESIRED:%.+]] = trunc i32 [[ADD]] to i16
+// CHECK: [[RES:%.+]] = cmpxchg i16* [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i16 [[DESIRED]], i16* @{{.+}} monotonic
+#pragma omp atomic capture
+ sv = usx += usv;
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = mul nsw i32 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[DESIRED]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ uiv = ix *= iv;
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}},
+// CHECK: [[PREV:%.+]] = atomicrmw sub i32* @{{.+}}, i32 [[EXPR]] monotonic
+// CHECK: store atomic i32 [[PREV]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {iv = uix; uix -= uiv;}
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = shl i32 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[DESIRED]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {ix <<= iv; uiv = ix;}
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i32, i32* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = lshr i32 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[DESIRED]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ iv = uix >>= uiv;
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i64, i64* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = sdiv i64 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i64* [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i64 [[EXPECTED]], i64* @{{.+}} monotonic
+#pragma omp atomic capture
+ {ulv = lx; lx /= lv;}
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[OLD:%.+]] = atomicrmw and i64* @{{.+}}, i64 [[EXPR]] monotonic
+// CHECK: [[DESIRED:%.+]] = load atomic i64, i64* @{{.+}} monotonic
+// CHECK: store atomic i64 [[DESIRED]], i64* @{{.+}} monotonic
+#pragma omp atomic capture
+ {ulx &= ulv; lv = ulx;}
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[OLD:%.+]] = atomicrmw xor i64* @{{.+}}, i64 [[EXPR]] monotonic
+// CHECK: [[DESIRED:%.+]] = load atomic i64, i64* @{{.+}} monotonic
+// CHECK: store atomic i64 [[DESIRED]], i64* @{{.+}} monotonic,
+#pragma omp atomic capture
+ ullv = llx ^= llv;
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[OLD:%.+]] = atomicrmw or i64* @{{.+}}, i64 [[EXPR]] monotonic
+// CHECK: [[DESIRED:%.+]] = load atomic i64, i64* @{{.+}} monotonic
+// CHECK: store atomic i64 [[DESIRED]], i64* @{{.+}} monotonic,
+#pragma omp atomic capture
+ llv = ullx |= ullv;
+// CHECK: [[EXPR:%.+]] = load float, float* @{{.+}},
+// CHECK: [[OLD:%.+]] = load atomic i32, i32* bitcast (float* [[X_ADDR:@.+]] to i32*) monotonic
+// CHECK: [[X:%.+]] = bitcast i32 [[OLD]] to float
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi float [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[ADD:%.+]] = fadd float [[OLD]], [[EXPR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast float [[OLD]] to i32
+// CHECK: [[DESIRED:%.+]] = bitcast float [[ADD]] to i32
+// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (float* [[X_ADDR]] to i32*), i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = bitcast i32 [[PREV]] to float
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[CAST:%.+]] = fpext float [[ADD]] to double
+// CHECK: [[CAST_I64:%.+]] = bitcast double [[CAST]] to i64
+// CHECK: store atomic i64 [[CAST_I64]], i64* bitcast (double* @{{.+}} to i64*) monotonic,
+#pragma omp atomic capture
+ dv = fx = fx + fv;
+// CHECK: [[EXPR:%.+]] = load double, double* @{{.+}},
+// CHECK: [[OLD:%.+]] = load atomic i64, i64* bitcast (double* [[X_ADDR:@.+]] to i64*) monotonic
+// CHECK: [[X:%.+]] = bitcast i64 [[OLD]] to double
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi double [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[SUB:%.+]] = fsub double [[EXPR]], [[OLD]]
+// CHECK: [[EXPECTED:%.+]] = bitcast double [[OLD]] to i64
+// CHECK: [[DESIRED:%.+]] = bitcast double [[SUB]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (double* [[X_ADDR]] to i64*), i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = bitcast i64 [[PREV]] to double
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[CAST:%.+]] = fptrunc double [[OLD]] to float
+// CHECK: [[CAST_I32:%.+]] = bitcast float [[CAST]] to i32
+// CHECK: store atomic i32 [[CAST_I32]], i32* bitcast (float* @{{.+}} to i32*) monotonic,
+#pragma omp atomic capture
+ {fv = dx; dx = dv - dx;}
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}},
+// CHECK: [[OLD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* [[X_ADDR:@.+]] to i128*) monotonic
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[OLD]], i128* [[BITCAST]]
+// CHECK: [[X:%.+]] = load x86_fp80, x86_fp80* [[TEMP]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi x86_fp80 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[OLD]], [[EXPR]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[OLD]], x86_fp80* [[TEMP]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
+// CHECK: [[EXPECTED:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[MUL]], x86_fp80* [[TEMP]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
+// CHECK: [[DESIRED:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (x86_fp80* [[X_ADDR]] to i128*), i128 [[EXPECTED]], i128 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i128, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i128, i1 } [[RES]], 1
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[PREV]], i128* [[BITCAST]]
+// CHECK: [[OLD_X]] = load x86_fp80, x86_fp80* [[TEMP]],
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[CAST:%.+]] = fptrunc x86_fp80 [[MUL]] to double
+// CHECK: [[CAST_I64:%.+]] = bitcast double [[CAST]] to i64
+// CHECK: store atomic i64 [[CAST_I64]], i64* bitcast (double* @{{.+}} to i64*) monotonic
+#pragma omp atomic capture
+ {ldx = ldx * ldv; dv = ldx;}
+// CHECK: [[EXPR_RE:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load i32, i32* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load i32, i32* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[LD_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[LD_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[X:%.+]] = load i64, i64* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i64*
+// CHECK: store i64 [[OLD]], i64* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[X_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store i32 [[NEW_RE:%.+]], i32* [[X_RE_ADDR]]
+// CHECK: store i32 [[NEW_IM:%.+]], i32* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[OLD_X]] = load i64, i64* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[RE_CAST:%.+]] = sitofp i32 [[NEW_RE]] to float
+// CHECK: [[IM_CAST:%.+]] = sitofp i32 [[NEW_IM]] to float
+// CHECK: getelementptr inbounds { float, float }, { float, float }* %{{.+}}, i32 0, i32 0
+// CHECK: getelementptr inbounds { float, float }, { float, float }* %{{.+}}, i32 0, i32 1
+// CHECK: store float [[RE_CAST]], float* %{{.+}},
+// CHECK: store float [[IM_CAST]], float* %{{.+}},
+// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ float, float }* @{{.+}} to i8*), i8* %{{.+}}, i32 0)
+#pragma omp atomic capture
+ cfv = cix = civ / cix;
+// CHECK: [[EXPR_RE:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ float, float }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load float, float* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load float, float* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: store float [[LD_RE]], float* [[LD_RE_ADDR]]
+// CHECK: store float [[LD_IM]], float* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i64*
+// CHECK: [[X:%.+]] = load i64, i64* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP:%.+]] to i64*
+// CHECK: store i64 [[OLD]], i64* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE_OLD:%.+]] = load float, float* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM_OLD:%.+]] = load float, float* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store float [[X_RE_OLD]], float* [[X_RE_ADDR]]
+// CHECK: store float [[X_IM_OLD]], float* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store float [[NEW_RE:%.+]], float* [[X_RE_ADDR]]
+// CHECK: store float [[NEW_IM:%.+]], float* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { float, float }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { float, float }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ float, float }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load float, float* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load float, float* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: store float [[X_RE]], float* [[LD_RE_ADDR]]
+// CHECK: store float [[X_IM]], float* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i64*
+// CHECK: [[OLD_X]] = load i64, i64* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[RE_CAST:%.+]] = fptosi float [[X_RE_OLD]] to i32
+// CHECK: [[IM_CAST:%.+]] = fptosi float [[X_IM_OLD]] to i32
+// CHECK: getelementptr inbounds { i32, i32 }, { i32, i32 }* %{{.+}}, i32 0, i32 0
+// CHECK: getelementptr inbounds { i32, i32 }, { i32, i32 }* %{{.+}}, i32 0, i32 1
+// CHECK: store i32 [[RE_CAST]], i32* %{{.+}},
+// CHECK: store i32 [[IM_CAST]], i32* %{{.+}},
+// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* %{{.+}}, i32 0)
+#pragma omp atomic capture
+ {civ = cfx; cfx = cfv + cfx;}
+// CHECK: [[EXPR_RE:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 16, i8* bitcast ({ double, double }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 5)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load double, double* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load double, double* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: store double [[LD_RE]], double* [[LD_RE_ADDR]]
+// CHECK: store double [[LD_IM]], double* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i128*
+// CHECK: [[X:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i128 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[OLD]], i128* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load double, double* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load double, double* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store double [[X_RE]], double* [[X_RE_ADDR]]
+// CHECK: store double [[X_IM]], double* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store double [[NEW_RE:%.+]], double* [[X_RE_ADDR]]
+// CHECK: store double [[NEW_IM:%.+]], double* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { double, double }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { double, double }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* bitcast ({ double, double }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 5, i32 5)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load double, double* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load double, double* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
+// CHECK: store double [[X_RE]], double* [[LD_RE_ADDR]]
+// CHECK: store double [[X_IM]], double* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i128*
+// CHECK: [[OLD_X]] = load i128, i128* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: fptrunc double [[NEW_RE]] to float
+// CHECK: fptrunc double [[NEW_IM]] to float
+// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ float, float }* @{{.+}} to i8*), i8* %{{.+}}, i32 5)
+// CHECK: call{{.*}} @__kmpc_flush(
+#pragma omp atomic capture seq_cst
+ {cdx = cdx - cdv; cfv = cdx;}
+// CHECK: [[BV:%.+]] = load i8, i8* @{{.+}}
+// CHECK: [[BOOL:%.+]] = trunc i8 [[BV]] to i1
+// CHECK: [[EXPR:%.+]] = zext i1 [[BOOL]] to i64
+// CHECK: atomicrmw and i64* @{{.+}}, i64 [[EXPR]] monotonic
+// CHECK: [[DESIRED:%.+]] = load atomic i64, i64* @{{.+}} monotonic
+// CHECK: store atomic i64 [[DESIRED]], i64* @{{.+}} monotonic
+#pragma omp atomic capture
+ ulv = ulx = ulx & bv;
+// CHECK: [[CV:%.+]] = load i8, i8* @{{.+}}, align 1
+// CHECK: [[EXPR:%.+]] = sext i8 [[CV]] to i32
+// CHECK: [[BX:%.+]] = load atomic i8, i8* [[BX_ADDR:@.+]] monotonic
+// CHECK: [[X:%.+]] = trunc i8 [[BX]] to i1
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BOOL:%.+]] = phi i1 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = zext i1 [[OLD_BOOL]] to i32
+// CHECK: [[AND:%.+]] = and i32 [[EXPR]], [[X_RVAL]]
+// CHECK: [[CAST:%.+]] = icmp ne i32 [[AND]], 0
+// CHECK: [[EXPECTED:%.+]] = zext i1 [[OLD_BOOL]] to i8
+// CHECK: [[DESIRED:%.+]] = zext i1 [[CAST]] to i8
+// CHECK: [[RES:%.+]] = cmpxchg i8* [[BX_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD:%.+]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = trunc i8 [[OLD]] to i1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[OLD_I8:%.+]] = zext i1 [[OLD_BOOL]] to i8
+// CHECK: store atomic i8 [[OLD_I8]], i8* @{{.+}} monotonic
+#pragma omp atomic capture
+ {bv = bx; bx = cv & bx;}
+// CHECK: [[UCV:%.+]] = load i8, i8* @{{.+}},
+// CHECK: [[EXPR:%.+]] = zext i8 [[UCV]] to i32
+// CHECK: [[X:%.+]] = load atomic i8, i8* [[CX_ADDR:@.+]] seq_cst
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i8 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = sext i8 [[EXPECTED]] to i32
+// CHECK: [[ASHR:%.+]] = ashr i32 [[X_RVAL]], [[EXPR]]
+// CHECK: [[DESIRED:%.+]] = trunc i32 [[ASHR]] to i8
+// CHECK: [[RES:%.+]] = cmpxchg i8* [[CX_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] seq_cst seq_cst
+// CHECK: [[OLD_X:%.+]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i8 [[DESIRED]], i8* @{{.+}} seq_cst
+// CHECK: call{{.*}} @__kmpc_flush(
+#pragma omp atomic capture, seq_cst
+ {cx = cx >> ucv; cv = cx;}
+// CHECK: [[SV:%.+]] = load i16, i16* @{{.+}},
+// CHECK: [[EXPR:%.+]] = sext i16 [[SV]] to i32
+// CHECK: [[X:%.+]] = load atomic i64, i64* [[ULX_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = trunc i64 [[EXPECTED]] to i32
+// CHECK: [[SHL:%.+]] = shl i32 [[EXPR]], [[X_RVAL]]
+// CHECK: [[DESIRED:%.+]] = sext i32 [[SHL]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* [[ULX_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i64 [[DESIRED]], i64* @{{.+}} monotonic
+#pragma omp atomic capture
+ ulv = ulx = sv << ulx;
+// CHECK: [[USV:%.+]] = load i16, i16* @{{.+}},
+// CHECK: [[EXPR:%.+]] = zext i16 [[USV]] to i64
+// CHECK: [[X:%.+]] = load atomic i64, i64* [[LX_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DESIRED:%.+]] = srem i64 [[EXPECTED]], [[EXPR]]
+// CHECK: [[RES:%.+]] = cmpxchg i64* [[LX_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i64 [[EXPECTED]], i64* @{{.+}} monotonic
+#pragma omp atomic capture
+ {lv = lx; lx = lx % usv;}
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}}
+// CHECK: atomicrmw or i32* @{{.+}}, i32 [[EXPR]] seq_cst
+// CHECK: [[DESIRED:%.+]] = load atomic i32, i32* @{{.+}} seq_cst
+// CHECK: store atomic i32 [[DESIRED]], i32* @{{.+}} seq_cst
+// CHECK: call{{.*}} @__kmpc_flush(
+#pragma omp atomic seq_cst, capture
+ {uix = iv | uix; uiv = uix;}
+// CHECK: [[EXPR:%.+]] = load i32, i32* @{{.+}}
+// CHECK: atomicrmw and i32* @{{.+}}, i32 [[EXPR]] monotonic
+// CHECK: [[DESIRED:%.+]] = load atomic i32, i32* @{{.+}} monotonic
+// CHECK: store atomic i32 [[DESIRED]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ iv = ix = ix & uiv;
+// CHECK: [[EXPR:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load i32, i32* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load i32, i32* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[LD_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[LD_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[X:%.+]] = load i64, i64* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i64*
+// CHECK: store i64 [[OLD]], i64* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[OLD_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[OLD_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store i32 [[OLD_RE]], i32* [[X_RE_ADDR]]
+// CHECK: store i32 [[OLD_IM]], i32* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store i32 %{{.+}}, i32* [[X_RE_ADDR]]
+// CHECK: store i32 %{{.+}}, i32* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[OLD_X]] = load i64, i64* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store i32 [[OLD_RE]], i32* %{{.+}},
+// CHECK: store i32 [[OLD_IM]], i32* %{{.+}},
+// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* %{{.+}}, i32 0)
+#pragma omp atomic capture
+ {civ = cix; cix = lv + cix;}
+// CHECK: [[ULV:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[EXPR:%.+]] = uitofp i64 [[ULV]] to float
+// CHECK: [[OLD:%.+]] = load atomic i32, i32* bitcast (float* [[X_ADDR:@.+]] to i32*) monotonic
+// CHECK: [[X:%.+]] = bitcast i32 [[OLD]] to float
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi float [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[MUL:%.+]] = fmul float [[OLD]], [[EXPR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast float [[OLD]] to i32
+// CHECK: [[DESIRED:%.+]] = bitcast float [[MUL]] to i32
+// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (float* [[X_ADDR]] to i32*), i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = bitcast i32 [[PREV]] to float
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[CAST:%.+]] = bitcast float [[MUL]] to i32
+// CHECK: store atomic i32 [[CAST]], i32* bitcast (float* @{{.+}} to i32*) monotonic
+#pragma omp atomic capture
+ {fx = fx * ulv; fv = fx;}
+// CHECK: [[LLV:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[EXPR:%.+]] = sitofp i64 [[LLV]] to double
+// CHECK: [[OLD:%.+]] = load atomic i64, i64* bitcast (double* [[X_ADDR:@.+]] to i64*) monotonic
+// CHECK: [[X:%.+]] = bitcast i64 [[OLD]] to double
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi double [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[DIV:%.+]] = fdiv double [[OLD]], [[EXPR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast double [[OLD]] to i64
+// CHECK: [[DESIRED:%.+]] = bitcast double [[DIV]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (double* [[X_ADDR]] to i64*), i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = bitcast i64 [[PREV]] to double
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[CAST:%.+]] = bitcast double [[DIV]] to i64
+// CHECK: store atomic i64 [[CAST]], i64* bitcast (double* @{{.+}} to i64*) monotonic
+#pragma omp atomic capture
+ dv = dx /= llv;
+// CHECK: [[ULLV:%.+]] = load i64, i64* @{{.+}},
+// CHECK: [[EXPR:%.+]] = uitofp i64 [[ULLV]] to x86_fp80
+// CHECK: [[OLD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* [[X_ADDR:@.+]] to i128*) monotonic
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[OLD]], i128* [[BITCAST]]
+// CHECK: [[X:%.+]] = load x86_fp80, x86_fp80* [[TEMP]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi x86_fp80 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[OLD]], [[EXPR]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[OLD]], x86_fp80* [[TEMP]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
+// CHECK: [[EXPECTED:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[SUB]], x86_fp80* [[TEMP]]
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
+// CHECK: [[DESIRED:%.+]] = load i128, i128* [[BITCAST]]
+// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (x86_fp80* [[X_ADDR]] to i128*), i128 [[EXPECTED]], i128 [[DESIRED]] monotonic monotonic
+// CHECK: [[PREV:%.+]] = extractvalue { i128, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i128, i1 } [[RES]], 1
+// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i128*
+// CHECK: store i128 [[PREV]], i128* [[BITCAST]]
+// CHECK: [[OLD_X]] = load x86_fp80, x86_fp80* [[TEMP]],
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[TEMP:%.+]] = bitcast x86_fp80* %{{.+}} to i8*
+// CHECK: call void @llvm.memset.p0i8.i64(i8* [[TEMP]], i8 0, i64 16, i32 16, i1 false)
+// CHECK: store x86_fp80 [[OLD]], x86_fp80* %{{.+}},
+// CHECK: [[TEMP_I128:%.+]] = bitcast x86_fp80* %{{.+}} to i128*
+// CHECK: [[EXPECTED:%.+]] = load i128, i128* [[TEMP_I128]],
+// CHECK: store atomic i128 [[EXPECTED]], i128* bitcast (x86_fp80* @{{.+}} to i128*) monotonic
+#pragma omp atomic capture
+ {ldv = ldx; ldx -= ullv;}
+// CHECK: [[EXPR:%.+]] = load float, float* @{{.+}},
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR:@.+]] to i8*), i8* [[BITCAST]], i32 0)
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[LD_RE:%.+]] = load i32, i32* [[LD_RE_ADDR]]
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[LD_IM:%.+]] = load i32, i32* [[LD_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[LD_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[LD_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[X:%.+]] = load i64, i64* [[BITCAST]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP:%.+]] to i64*
+// CHECK: store i64 [[OLD]], i64* [[BITCAST]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[EXPECTED_ADDR]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[X_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[X_IM_ADDR]]
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR:%.+]], i32 0, i32 0
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DESIRED_ADDR]], i32 0, i32 1
+// CHECK: store i32 [[NEW_RE:%.+]], i32* [[X_RE_ADDR]]
+// CHECK: store i32 [[NEW_IM:%.+]], i32* [[X_IM_ADDR]]
+// CHECK: [[EXPECTED:%.+]] = bitcast { i32, i32 }* [[EXPECTED_ADDR]] to i8*
+// CHECK: [[DESIRED:%.+]] = bitcast { i32, i32 }* [[DESIRED_ADDR]] to i8*
+// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* bitcast ({ i32, i32 }* [[X_ADDR]] to i8*), i8* [[EXPECTED]], i8* [[DESIRED]], i32 0, i32 0)
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load i32, i32* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load i32, i32* [[X_IM_ADDR]]
+// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
+// CHECK: store i32 [[X_RE]], i32* [[LD_RE_ADDR]]
+// CHECK: store i32 [[X_IM]], i32* [[LD_IM_ADDR]]
+// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+// CHECK: [[OLD_X]] = load i64, i64* [[BITCAST]]
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store i32 [[NEW_RE]], i32* %{{.+}},
+// CHECK: store i32 [[NEW_IM]], i32* %{{.+}},
+// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* %{{.+}}, i32 0)
+#pragma omp atomic capture
+ {cix = fv / cix; civ = cix;}
+// CHECK: [[EXPR:%.+]] = load double, double* @{{.+}},
+// CHECK: [[X:%.+]] = load atomic i16, i16* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[CONV:%.+]] = sext i16 [[EXPECTED]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to double
+// CHECK: [[ADD:%.+]] = fadd double [[X_RVAL]], [[EXPR]]
+// CHECK: [[DESIRED:%.+]] = fptosi double [[ADD]] to i16
+// CHECK: [[RES:%.+]] = cmpxchg i16* [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i16 [[DESIRED]], i16* @{{.+}} monotonic
+#pragma omp atomic capture
+ sv = sx = sx + dv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}},
+// CHECK: [[XI8:%.+]] = load atomic i8, i8* [[X_ADDR:@.+]] monotonic
+// CHECK: [[X:%.+]] = trunc i8 [[XI8]] to i1
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[BOOL_EXPECTED:%.+]] = phi i1 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[CONV:%.+]] = zext i1 [[BOOL_EXPECTED]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to x86_fp80
+// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[EXPR]], [[X_RVAL]]
+// CHECK: [[BOOL_DESIRED:%.+]] = fcmp une x86_fp80 [[MUL]], 0xK00000000000000000000
+// CHECK: [[EXPECTED:%.+]] = zext i1 [[BOOL_EXPECTED]] to i8
+// CHECK: [[DESIRED:%.+]] = zext i1 [[BOOL_DESIRED]] to i8
+// CHECK: [[RES:%.+]] = cmpxchg i8* [[X_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_XI8:%.+]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = trunc i8 [[OLD_XI8]] to i1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[EXPECTED_I8:%.+]] = zext i1 [[BOOL_EXPECTED]] to i8
+// CHECK: store atomic i8 [[EXPECTED_I8]], i8* @{{.+}} monotonic
+#pragma omp atomic capture
+ {bv = bx; bx = ldv * bx;}
+// CHECK: [[EXPR_RE:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* [[CIV_ADDR:@.+]], i32 0, i32 0),
+// CHECK: [[EXPR_IM:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* [[CIV_ADDR]], i32 0, i32 1),
+// CHECK: [[XI8:%.+]] = load atomic i8, i8* [[X_ADDR:@.+]] monotonic
+// CHECK: [[X:%.+]] = trunc i8 [[XI8]] to i1
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[BOOL_EXPECTED:%.+]] = phi i1 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = zext i1 [[BOOL_EXPECTED]] to i32
+// CHECK: [[SUB_RE:%.+]] = sub i32 [[EXPR_RE:%.+]], [[X_RVAL]]
+// CHECK: [[SUB_IM:%.+]] = sub i32 [[EXPR_IM:%.+]], 0
+// CHECK: icmp ne i32 [[SUB_RE]], 0
+// CHECK: icmp ne i32 [[SUB_IM]], 0
+// CHECK: [[BOOL_DESIRED:%.+]] = or i1
+// CHECK: [[EXPECTED:%.+]] = zext i1 [[BOOL_EXPECTED]] to i8
+// CHECK: [[DESIRED:%.+]] = zext i1 [[BOOL_DESIRED]] to i8
+// CHECK: [[RES:%.+]] = cmpxchg i8* [[X_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_XI8:%.+]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: [[OLD_X]] = trunc i8 [[OLD_XI8]] to i1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[DESIRED_I8:%.+]] = zext i1 [[BOOL_DESIRED]] to i8
+// CHECK: store atomic i8 [[DESIRED_I8]], i8* @{{.+}} monotonic
+#pragma omp atomic capture
+ {bx = civ - bx; bv = bx;}
+// CHECK: [[EXPR_RE:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[X:%.+]] = load atomic i16, i16* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[CONV:%.+]] = zext i16 [[EXPECTED]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to float
+// <Skip checks for complex calculations>
+// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
+// CHECK: [[X_RE:%.+]] = load float, float* [[X_RE_ADDR]]
+// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
+// CHECK: [[X_IM:%.+]] = load float, float* [[X_IM_ADDR]]
+// CHECK: [[DESIRED:%.+]] = fptoui float [[X_RE]] to i16
+// CHECK: [[RES:%.+]] = cmpxchg i16* [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i16 [[DESIRED]], i16* @{{.+}} monotonic
+#pragma omp atomic capture
+ usv = usx /= cfv;
+// CHECK: [[EXPR_RE:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
+// CHECK: [[EXPR_IM:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 1)
+// CHECK: [[X:%.+]] = load atomic i64, i64* [[X_ADDR:@.+]] monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
+// CHECK: [[X_RVAL:%.+]] = sitofp i64 [[EXPECTED]] to double
+// CHECK: [[ADD_RE:%.+]] = fadd double [[X_RVAL]], [[EXPR_RE]]
+// CHECK: [[ADD_IM:%.+]] = fadd double 0.000000e+00, [[EXPR_IM]]
+// CHECK: [[DESIRED:%.+]] = fptosi double [[ADD_RE]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
+// CHECK: [[OLD_X]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i64 [[EXPECTED]], i64* @{{.+}} monotonic
+#pragma omp atomic capture
+ {llv = llx; llx += cdv;}
+// CHECK: [[IDX:%.+]] = load i16, i16* @{{.+}}
+// CHECK: load i8, i8*
+// CHECK: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
+// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic
+// CHECK: [[LD:%.+]] = bitcast i128 [[I128VAL]] to <4 x i32>
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_VEC_VAL:%.+]] = phi <4 x i32> [ [[LD]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store <4 x i32> [[OLD_VEC_VAL]], <4 x i32>* [[LDTEMP:%.+]],
+// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
+// CHECK: [[ITEM:%.+]] = extractelement <4 x i32> [[VEC_VAL]], i16 [[IDX]]
+// CHECK: [[OR:%.+]] = or i32 [[ITEM]], [[VEC_ITEM_VAL]]
+// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
+// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[OR]], i16 [[IDX]]
+// CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]]
+// CHECK: [[NEW_VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
+// CHECK: [[OLD_I128:%.+]] = bitcast <4 x i32> [[OLD_VEC_VAL]] to i128
+// CHECK: [[NEW_I128:%.+]] = bitcast <4 x i32> [[NEW_VEC_VAL]] to i128
+// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic
+// CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
+// CHECK: [[FAILED_OLD_VAL]] = bitcast i128 [[FAILED_I128_OLD_VAL]] to <4 x i32>
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[OR]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {int4x[sv] |= bv; iv = int4x[sv];}
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 1
+// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 1
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
+// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32
+// CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[BF_VALUE:%.+]] = and i32 [[CONV]], 2147483647
+// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], -2147483648
+// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[CONV]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ iv = bfx.a = bfx.a - ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
+// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST]], i32 0)
+// CHECK: [[PREV_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 1
+// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 1
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
+// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[MUL]] to i32
+// CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[BF_VALUE:%.+]] = and i32 [[CONV]], 2147483647
+// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], -2147483648
+// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP_OLD_BF_ADDR:%.+]],
+// CHECK: store i32 [[NEW_BF_VALUE]], i32* [[TEMP_NEW_BF_ADDR:%.+]],
+// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[TEMP_OLD_BF_ADDR]] to i8*
+// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[TEMP_NEW_BF_ADDR]] to i8*
+// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
+// CHECK: [[FAILED_OLD_VAL]] = load i32, i32* [[TEMP_OLD_BF_ADDR]]
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[A_ASHR]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {iv = bfx_packed.a; bfx_packed.a *= ldv;}
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_LD]], 31
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
+// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32
+// CHECK: [[NEW_VAL:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[BF_AND:%.+]] = and i32 [[CONV]], 1
+// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
+// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], 2147483647
+// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[CONV]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {bfx2.a -= ldv; iv = bfx2.a;}
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast i32* %{{.+}} to i8*
+// CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]],
+// CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_LD]], 7
+// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST]] to x86_fp80
+// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[EXPR]], [[X_RVAL]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[DIV]] to i32
+// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
+// CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
+// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
+// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
+// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[NEW_VAL]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ iv = bfx2_packed.a = ldv / bfx2_packed.a;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 7
+// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 18
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
+// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[DIV]] to i32
+// CHECK: [[BF_LD:%.+]] = load i32, i32* [[TEMP]],
+// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
+// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
+// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
+// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[A_ASHR]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {iv = bfx3.a; bfx3.a /= ldv;}
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24*
+// CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8*
+// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST]], i32 0)
+// CHECK: [[PREV_VALUE:%.+]] = load i24, i24* [[LDTEMP]]
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i24 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast i32* %{{.+}} to i24*
+// CHECK: store i24 [[OLD_BF_VALUE]], i24* [[BITCAST]],
+// CHECK: [[A_LD:%.+]] = load i24, i24* [[BITCAST]],
+// CHECK: [[A_SHL:%.+]] = shl i24 [[A_LD]], 7
+// CHECK: [[A_ASHR:%.+]] = ashr i24 [[A_SHL]], 10
+// CHECK: [[CAST:%.+]] = sext i24 [[A_ASHR]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST]] to x86_fp80
+// CHECK: [[ADD:%.+]] = fadd x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[ADD]] to i32
+// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
+// CHECK: [[BF_LD:%.+]] = load i24, i24* [[BITCAST]],
+// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
+// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
+// CHECK: [[BF_CLEAR:%.+]] = and i24 [[BF_LD]], -131065
+// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i24 %{{.+}}, i24* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i24, i24* [[LDTEMP]]
+// CHECK: [[TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* %{{.+}} to i24*
+// CHECK: store i24 [[OLD_BF_VALUE]], i24* [[TEMP_OLD_BF_ADDR]]
+// CHECK: [[TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* %{{.+}} to i24*
+// CHECK: store i24 [[NEW_BF_VALUE]], i24* [[TEMP_NEW_BF_ADDR]]
+// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[TEMP_OLD_BF_ADDR]] to i8*
+// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP_NEW_BF_ADDR]] to i8*
+// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
+// CHECK: [[FAILED_OLD_VAL]] = load i24, i24* [[TEMP_OLD_BF_ADDR]]
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[NEW_VAL]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {bfx3_packed.a += ldv; iv = bfx3_packed.a;}
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i64 [[OLD_BF_VALUE]], i64* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i64, i64* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i64 [[A_LD]], 47
+// CHECK: [[A_ASHR:%.+]] = ashr i64 [[A_SHL:%.+]], 63
+// CHECK: [[A_CAST:%.+]] = trunc i64 [[A_ASHR:%.+]] to i32
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST:%.+]] to x86_fp80
+// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[X_RVAL]], [[EXPR]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[MUL]] to i32
+// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
+// CHECK: [[BF_LD:%.+]] = load i64, i64* [[TEMP]],
+// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
+// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
+// CHECK: [[BF_CLEAR:%.+]] = and i64 [[BF_LD]], -65537
+// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[NEW_VAL]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ iv = bfx4.a = bfx4.a * ldv;
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast i32* %{{.+}} to i8*
+// CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]],
+// CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[A_SHL:%.+]] = shl i8 [[A_LD]], 7
+// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_SHL:%.+]], 7
+// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR:%.+]] to i32
+// CHECK: [[CONV:%.+]] = sitofp i32 [[CAST]] to x86_fp80
+// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[CONV]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB:%.+]] to i32
+// CHECK: [[NEW_VAL:%.+]] = trunc i32 [[CONV]] to i8
+// CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[BF_VALUE:%.+]] = and i8 [[NEW_VAL]], 1
+// CHECK: [[BF_CLEAR:%.+]] = and i8 [[BF_LD]], -2
+// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: store atomic i32 [[CAST]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {iv = bfx4_packed.a; bfx4_packed.a -= ldv;}
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store i64 [[OLD_BF_VALUE]], i64* [[TEMP:%.+]],
+// CHECK: [[A_LD:%.+]] = load i64, i64* [[TEMP]],
+// CHECK: [[A_SHL:%.+]] = shl i64 [[A_LD]], 40
+// CHECK: [[A_ASHR:%.+]] = ashr i64 [[A_SHL:%.+]], 57
+// CHECK: [[CONV:%.+]] = sitofp i64 [[A_ASHR]] to x86_fp80
+// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[CONV]], [[EXPR]]
+// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[DIV]] to i64
+// CHECK: [[BF_LD:%.+]] = load i64, i64* [[TEMP]],
+// CHECK: [[BF_AND:%.+]] = and i64 [[CONV]], 127
+// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND:%.+]], 17
+// CHECK: [[BF_CLEAR:%.+]] = and i64 [[BF_LD]], -16646145
+// CHECK: [[VAL:%.+]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i64 [[VAL]], i64* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[NEW_VAL:%.+]] = trunc i64 [[CONV]] to i32
+// CHECK: store atomic i32 [[NEW_VAL]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ {bfx4.b /= ldv; iv = bfx4.b;}
+// CHECK: [[EXPR:%.+]] = load x86_fp80, x86_fp80* @{{.+}}
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: [[BITCAST:%.+]] = bitcast i64* %{{.+}} to i8*
+// CHECK: store i8 [[OLD_BF_VALUE]], i8* [[BITCAST]],
+// CHECK: [[A_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_LD]], 1
+// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR]] to i64
+// CHECK: [[CONV:%.+]] = sitofp i64 [[CAST]] to x86_fp80
+// CHECK: [[ADD:%.+]] = fadd x86_fp80 [[CONV]], [[EXPR]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[ADD]] to i64
+// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
+// CHECK: [[BF_LD:%.+]] = load i8, i8* [[BITCAST]],
+// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
+// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
+// CHECK: [[BF_CLEAR:%.+]] = and i8 [[BF_LD]], 1
+// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
+// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
+// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
+// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[NEW_VAL_I32:%.+]] = trunc i64 [[NEW_VAL]] to i32
+// CHECK: store atomic i32 [[NEW_VAL_I32]], i32* @{{.+}} monotonic
+#pragma omp atomic capture
+ iv = bfx4_packed.b += ldv;
+// CHECK: load i64, i64*
+// CHECK: [[EXPR:%.+]] = uitofp i64 %{{.+}} to float
+// CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic
+// CHECK: [[LD:%.+]] = bitcast i64 [[I64VAL]] to <2 x float>
+// CHECK: br label %[[CONT:.+]]
+// CHECK: [[CONT]]
+// CHECK: [[OLD_VEC_VAL:%.+]] = phi <2 x float> [ [[LD]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
+// CHECK: store <2 x float> [[OLD_VEC_VAL]], <2 x float>* [[LDTEMP:%.+]],
+// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
+// CHECK: [[X:%.+]] = extractelement <2 x float> [[VEC_VAL]], i64 0
+// CHECK: [[VEC_ITEM_VAL:%.+]] = fsub float [[EXPR]], [[X]]
+// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]],
+// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
+// CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]]
+// CHECK: [[NEW_VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
+// CHECK: [[OLD_I64:%.+]] = bitcast <2 x float> [[OLD_VEC_VAL]] to i64
+// CHECK: [[NEW_I64:%.+]] = bitcast <2 x float> [[NEW_VEC_VAL]] to i64
+// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic
+// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
+// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
+// CHECK: [[FAILED_OLD_VAL]] = bitcast i64 [[FAILED_I64_OLD_VAL]] to <2 x float>
+// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
+// CHECK: [[EXIT]]
+// CHECK: [[CAST:%.+]] = bitcast float [[X]] to i32
+// CHECK: store atomic i32 [[CAST]], i32* bitcast (float* @{{.+}} to i32*) monotonic
+#pragma omp atomic capture
+ {fv = float2x.x; float2x.x = ulv - float2x.x;}
+// CHECK: [[EXPR:%.+]] = load double, double* @{{.+}},
+// CHECK: [[OLD_VAL:%.+]] = call i32 @llvm.read_register.i32([[REG:metadata ![0-9]+]])
+// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[OLD_VAL]] to double
+// CHECK: [[DIV:%.+]] = fdiv double [[EXPR]], [[X_RVAL]]
+// CHECK: [[NEW_VAL:%.+]] = fptosi double [[DIV]] to i32
+// CHECK: call void @llvm.write_register.i32([[REG]], i32 [[NEW_VAL]])
+// CHECK: store atomic i32 [[NEW_VAL]], i32* @{{.+}} seq_cst
+// CHECK: call{{.*}} @__kmpc_flush(
+#pragma omp atomic capture seq_cst
+ {rix = dv / rix; iv = rix;}
+// CHECK: [[OLD_VAL:%.+]] = load atomic i32, i32* @{{.+}} monotonic
+// CHECK: store atomic i32 5, i32* @{{.+}} monotonic
+// CHECK: call void @llvm.write_register.i32([[REG]], i32 [[OLD_VAL]])
+#pragma omp atomic capture
+ {rix = ix; ix = 5;}
+ return 0;
+}
+#endif
Index: test/OpenMP/atomic_messages.cpp
===================================================================
--- test/OpenMP/atomic_messages.cpp
+++ test/OpenMP/atomic_messages.cpp
@@ -672,7 +672,6 @@
#pragma omp atomic capture capture
b = a /= b;
- // expected-note@+1 {{in instantiation of function template specialization 'capture<int>' requested here}}
return capture<int>();
}
Index: test/OpenMP/atomic_codegen.cpp
===================================================================
--- test/OpenMP/atomic_codegen.cpp
+++ test/OpenMP/atomic_codegen.cpp
@@ -45,6 +45,23 @@
// CHECK: invoke void @_ZN2StD1Ev(%struct.St* [[TEMP_ST_ADDR]])
#pragma omp atomic
St().get() %= b;
+ // CHECK: invoke void @_ZN2StC1Ev(%struct.St* [[TEMP_ST_ADDR:%.+]])
+ // CHECK: [[SCALAR_ADDR:%.+]] = invoke dereferenceable(4) i32* @_ZN2St3getEv(%struct.St* [[TEMP_ST_ADDR]])
+ // CHECK: [[B_VAL:%.+]] = load i32, i32* @b
+ // CHECK: [[OLD_VAL:%.+]] = load atomic i32, i32* [[SCALAR_ADDR]] monotonic,
+ // CHECK: br label %[[OMP_UPDATE:.+]]
+ // CHECK: [[OMP_UPDATE]]
+ // CHECK: [[OLD_PHI_VAL:%.+]] = phi i32 [ [[OLD_VAL]], %{{.+}} ], [ [[NEW_OLD_VAL:%.+]], %[[OMP_UPDATE]] ]
+ // CHECK: [[NEW_VAL:%.+]] = srem i32 [[OLD_PHI_VAL]], [[B_VAL]]
+ // CHECK: [[RES:%.+]] = cmpxchg i32* [[SCALAR_ADDR]], i32 [[OLD_PHI_VAL]], i32 [[NEW_VAL]] monotonic monotonic
+ // CHECK: [[NEW_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
+ // CHECK: [[COND:%.+]] = extractvalue { i32, i1 } [[RES]], 1
+ // CHECK: br i1 [[COND]], label %[[OMP_DONE:.+]], label %[[OMP_UPDATE]]
+ // CHECK: [[OMP_DONE]]
+ // CHECK: store atomic i32 [[NEW_VAL]], i32* @a monotonic,
+ // CHECK: invoke void @_ZN2StD1Ev(%struct.St* [[TEMP_ST_ADDR]])
+#pragma omp atomic capture
+ a = St().get() %= b;
}
}
@@ -74,11 +91,20 @@
// TERM_DEBUG-NOT: __kmpc_global_thread_num
// TERM_DEBUG: atomicrmw add i32* @{{.+}}, i32 %{{.+}} monotonic, {{.*}}!dbg [[UPDATE_LOC:![0-9]+]]
a += foo();
+#pragma omp atomic capture
+ // TERM_DEBUG-NOT: __kmpc_global_thread_num
+ // TERM_DEBUG: invoke {{.*}}foo{{.*}}()
+ // TERM_DEBUG: unwind label %[[TERM_LPAD:.+]],
+ // TERM_DEBUG-NOT: __kmpc_global_thread_num
+ // TERM_DEBUG: [[OLD_VAL:%.+]] = atomicrmw add i32* @{{.+}}, i32 %{{.+}} monotonic, {{.*}}!dbg [[CAPTURE_LOC:![0-9]+]]
+ // TERM_DEBUG: store atomic i32 [[OLD_VAL]], i32* @b monotonic,
+ {b = a; a += foo(); }
}
// TERM_DEBUG: [[TERM_LPAD]]
// TERM_DEBUG: call void @__clang_call_terminate
// TERM_DEBUG: unreachable
}
-// TERM_DEBUG-DAG: [[READ_LOC]] = !MDLocation(line: [[@LINE-25]],
-// TERM_DEBUG-DAG: [[WRITE_LOC]] = !MDLocation(line: [[@LINE-20]],
-// TERM_DEBUG-DAG: [[UPDATE_LOC]] = !MDLocation(line: [[@LINE-14]],
+// TERM_DEBUG-DAG: [[READ_LOC]] = !MDLocation(line: [[@LINE-33]],
+// TERM_DEBUG-DAG: [[WRITE_LOC]] = !MDLocation(line: [[@LINE-28]],
+// TERM_DEBUG-DAG: [[UPDATE_LOC]] = !MDLocation(line: [[@LINE-22]],
+// TERM_DEBUG-DAG: [[CAPTURE_LOC]] = !MDLocation(line: [[@LINE-16]],
Index: lib/Sema/SemaOpenMP.cpp
===================================================================
--- lib/Sema/SemaOpenMP.cpp
+++ lib/Sema/SemaOpenMP.cpp
@@ -3426,7 +3426,7 @@
return true;
} else if (SemaRef.CurContext->isDependentContext())
E = X = UpdateExpr = nullptr;
- return false;
+ return ErrorFound != NoError;
}
bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
@@ -3498,7 +3498,7 @@
return true;
} else if (SemaRef.CurContext->isDependentContext())
E = X = UpdateExpr = nullptr;
- if (E && X) {
+ if (ErrorFound == NoError && E && X) {
// Build an update expression of form 'OpaqueValueExpr(x) binop
// OpaqueValueExpr(expr)' or 'OpaqueValueExpr(expr) binop
// OpaqueValueExpr(x)' and then cast it to the type of the 'x' expression.
@@ -3517,7 +3517,7 @@
return true;
UpdateExpr = Update.get();
}
- return false;
+ return ErrorFound != NoError;
}
StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
@@ -3831,7 +3831,7 @@
E = Checker.getExpr();
UE = Checker.getUpdateExpr();
IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
- IsPostfixUpdate = Checker.isPostfixUpdate();
+ IsPostfixUpdate = true;
}
}
if (!IsUpdateExprFound) {
@@ -3861,7 +3861,7 @@
E = Checker.getExpr();
UE = Checker.getUpdateExpr();
IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
- IsPostfixUpdate = Checker.isPostfixUpdate();
+ IsPostfixUpdate = false;
}
}
}
Index: lib/CodeGen/CodeGenFunction.h
===================================================================
--- lib/CodeGen/CodeGenFunction.h
+++ lib/CodeGen/CodeGenFunction.h
@@ -2060,7 +2060,9 @@
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param CommonGen Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
- void EmitOMPAtomicSimpleUpdateExpr(
+ /// \returns <true, OldAtomicValue> if a simple 'atomicrmw' instruction was
+ /// generated, <false, RValue::get(nullptr)> otherwise.
+ std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
llvm::AtomicOrdering AO, SourceLocation Loc,
const llvm::function_ref<RValue(RValue)> &CommonGen);
Index: lib/CodeGen/CGStmtOpenMP.cpp
===================================================================
--- lib/CodeGen/CGStmtOpenMP.cpp
+++ lib/CodeGen/CGStmtOpenMP.cpp
@@ -1447,31 +1447,35 @@
}
}
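+// Stores RVal into LVal: a plain store for global register lvalues, otherwise
+// an atomic store with seq_cst or monotonic ordering.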
+static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
+ LValue LVal, RValue RVal) {
+ if (LVal.isGlobalReg()) {
+ CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
+ } else {
+ CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
+ : llvm::Monotonic,
+ LVal.isVolatile(), /*IsInit=*/false);
+ }
+}
+
static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
const Expr *X, const Expr *E,
SourceLocation Loc) {
// x = expr;
assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
- LValue XLValue = CGF.EmitLValue(X);
- RValue ExprRValue = CGF.EmitAnyExpr(E);
- if (XLValue.isGlobalReg())
- CGF.EmitStoreThroughGlobalRegLValue(ExprRValue, XLValue);
- else
- CGF.EmitAtomicStore(ExprRValue, XLValue,
- IsSeqCst ? llvm::SequentiallyConsistent
- : llvm::Monotonic,
- XLValue.isVolatile(), /*IsInit=*/false);
+ emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
// OpenMP, 2.12.6, atomic Construct
// Any atomic construct with a seq_cst clause forces the atomically
// performed operation to include an implicit flush operation without a
// list.
if (IsSeqCst)
CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
-bool emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, RValue Update,
- BinaryOperatorKind BO, llvm::AtomicOrdering AO,
- bool IsXLHSInRHSPart) {
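+// Tries to emit an 'atomicrmw' for the update. Returns <true, old 'x' value> on
+// success and <false, RValue::get(nullptr)> if the caller must fall back to a
+// compare-and-swap sequence.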
+std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
+ RValue Update, BinaryOperatorKind BO,
+ llvm::AtomicOrdering AO,
+ bool IsXLHSInRHSPart) {
auto &Context = CGF.CGM.getContext();
// Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
// expression is simple and atomic is allowed for the given type for the
@@ -1483,16 +1487,16 @@
X.getAddress()->getType()->getPointerElementType())) ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
- return false;
+ return std::make_pair(false, RValue::get(nullptr));
llvm::AtomicRMWInst::BinOp RMWOp;
switch (BO) {
case BO_Add:
RMWOp = llvm::AtomicRMWInst::Add;
break;
case BO_Sub:
if (!IsXLHSInRHSPart)
- return false;
+ return std::make_pair(false, RValue::get(nullptr));
RMWOp = llvm::AtomicRMWInst::Sub;
break;
case BO_And:
@@ -1525,7 +1529,7 @@
case BO_Shr:
case BO_LAnd:
case BO_LOr:
- return false;
+ return std::make_pair(false, RValue::get(nullptr));
case BO_PtrMemD:
case BO_PtrMemI:
case BO_LE:
@@ -1552,11 +1556,11 @@
IC, X.getAddress()->getType()->getPointerElementType(),
X.getType()->hasSignedIntegerRepresentation());
}
- CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
- return true;
+ auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
+ return std::make_pair(true, RValue::get(Res));
}
-void CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
+std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
llvm::AtomicOrdering AO, SourceLocation Loc,
const llvm::function_ref<RValue(RValue)> &CommonGen) {
@@ -1566,7 +1570,8 @@
// x--, --x -> xrval - 1;
// x = x binop expr; -> xrval binop expr
// x = expr Op x; - > expr binop xrval;
- if (!emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart)) {
+ auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
+ if (!Res.first) {
if (X.isGlobalReg()) {
// Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
// 'xrval'.
@@ -1576,6 +1581,7 @@
EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
}
}
+ return Res;
}
static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
@@ -1605,8 +1611,93 @@
CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
return CGF.EmitAnyExpr(UE);
};
- CGF.EmitOMPAtomicSimpleUpdateExpr(XLValue, ExprRValue, BOUE->getOpcode(),
- IsXLHSInRHSPart, AO, Loc, Gen);
+ (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
+ XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
+ // OpenMP, 2.12.6, atomic Construct
+ // Any atomic construct with a seq_cst clause forces the atomically
+ // performed operation to include an implicit flush operation without a
+ // list.
+ if (IsSeqCst)
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+}
+
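+// Converts Value from SourceType to ResType; only scalar and complex
+// evaluation kinds are expected here.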
+static RValue convertToType(CodeGenFunction &CGF, RValue Value,
+ QualType SourceType, QualType ResType) {
+ switch (CGF.getEvaluationKind(ResType)) {
+ case TEK_Scalar:
+ return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
+ case TEK_Complex: {
+ auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
+ return RValue::getComplex(Res.first, Res.second);
+ }
+ case TEK_Aggregate:
+ break;
+ }
+ llvm_unreachable("Must be a scalar or complex.");
+}
+
+static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
+ bool IsPostfixUpdate, const Expr *V,
+ const Expr *X, const Expr *E,
+ const Expr *UE, bool IsXLHSInRHSPart,
+ SourceLocation Loc) {
+ assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
+ assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
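+ // Value to be stored into 'v': the old or the new value of 'x', depending on
+ // whether this is a postfix or a prefix capture form.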
+ RValue NewVVal;
+ LValue VLValue = CGF.EmitLValue(V);
+ LValue XLValue = CGF.EmitLValue(X);
+ RValue ExprRValue = CGF.EmitAnyExpr(E);
+ auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
+ QualType NewVValType;
+ if (UE) {
+ // 'x' is updated with some additional value.
+ assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
+ "Update expr in 'atomic capture' must be a binary operator.");
+ auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
+ // Update expressions are allowed to have the following forms:
+ // x binop= expr; -> xrval binop expr;
+ // x++, ++x -> xrval + 1;
+ // x--, --x -> xrval - 1;
+ // x = x binop expr; -> xrval binop expr
+ // x = expr Op x; -> expr binop xrval;
+ auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
+ auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
+ auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
+ NewVValType = XRValExpr->getType();
+ auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
+ auto Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, IsSeqCst,
+ IsPostfixUpdate](RValue XRValue) -> RValue {
+ CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
+ CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
+ RValue Res = CGF.EmitAnyExpr(UE);
+ NewVVal = IsPostfixUpdate ? XRValue : Res;
+ return Res;
+ };
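+ // Try the 'atomicrmw' fast path; otherwise a compare-and-swap loop is emitted
+ // and Gen computes both the updated 'x' and NewVVal.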
+ auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
+ XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
+ if (Res.first) {
+ // An 'atomicrmw' instruction was generated.
+ if (IsPostfixUpdate) {
+ // Use old value from 'atomicrmw'.
+ NewVVal = Res.second;
+ } else {
+ // 'atomicrmw' does not provide the new value, so read it from 'x'.
+ NewVVal = CGF.EmitAtomicLoad(XLValue, Loc, AO, XLValue.isVolatile());
+ }
+ }
+ } else {
+ // 'x' is simply rewritten with some 'expr'.
+ // Load previous value of 'x'.
+ NewVVal = CGF.EmitAtomicLoad(XLValue, Loc, AO, XLValue.isVolatile());
+ emitSimpleAtomicStore(
+ CGF, IsSeqCst, XLValue,
+ convertToType(CGF, ExprRValue, E->getType(), XLValue.getType()));
+ NewVValType = XLValue.getType().getNonReferenceType();
+ }
+ // Emit the post-update store to 'v' of the old/new value of 'x'.
+ emitSimpleAtomicStore(
+ CGF, IsSeqCst, VLValue,
+ convertToType(CGF, NewVVal, NewVValType, VLValue.getType()));
// OpenMP, 2.12.6, atomic Construct
// Any atomic construct with a seq_cst clause forces the atomically
// performed operation to include an implicit flush operation without a
@@ -1616,9 +1707,10 @@
}
static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
- bool IsSeqCst, const Expr *X, const Expr *V,
- const Expr *E, const Expr *UE,
- bool IsXLHSInRHSPart, SourceLocation Loc) {
+ bool IsSeqCst, bool IsPostfixUpdate,
+ const Expr *X, const Expr *V, const Expr *E,
+ const Expr *UE, bool IsXLHSInRHSPart,
+ SourceLocation Loc) {
switch (Kind) {
case OMPC_read:
EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
@@ -1631,7 +1723,9 @@
EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
break;
case OMPC_capture:
- llvm_unreachable("CodeGen for 'omp atomic clause' is not supported yet.");
+ EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
+ IsXLHSInRHSPart, Loc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -1673,13 +1767,23 @@
const auto *CS =
S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS))
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
enterFullExpression(EWC);
+ }
+ // Processing for statements under 'atomic capture'.
+ if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
+ for (const auto *C : Compound->body()) {
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
+ enterFullExpression(EWC);
+ }
+ }
+ }
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
- EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.getX(), S.getV(), S.getExpr(),
- S.getUpdateExpr(), S.isXLHSInRHSPart(), S.getLocStart());
+ EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
+ S.getV(), S.getExpr(), S.getUpdateExpr(),
+ S.isXLHSInRHSPart(), S.getLocStart());
};
CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}