https://github.com/Lancern updated https://github.com/llvm/llvm-project/pull/173781
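For context, a minimal C sketch of the user-facing builtin this patch lowers (illustrative only, not part of the patch; the wrapper function name is made up, while the builtin and the scope macro match the new test in clang/test/CIR/CodeGen/atomic-scoped.c):

// Atomically swaps *ptr with value under relaxed ordering, restricted to the
// single-thread synchronization scope. With this patch the call is emitted as
// cir.atomic.xchg relaxed syncscope(single_thread) in CIR and then lowered to
// an LLVM atomicrmw xchg carrying the "singlethread" sync scope.
int exchange_single_thread(int *ptr, int value) {
  return __scoped_atomic_exchange_n(ptr, value, __ATOMIC_RELAXED,
                                    __MEMORY_SCOPE_SINGLE);
}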
>From eb27751ec1a4f418249d20132a1beffd43ad9b45 Mon Sep 17 00:00:00 2001
From: Sirui Mu <[email protected]>
Date: Sun, 28 Dec 2025 23:21:34 +0800
Subject: [PATCH] [CIR] Scoped atomic exchange

This patch adds support for scoped atomic exchange operations in CIR.
---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  |  5 +-
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp        |  7 ++-
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 +++--
 clang/test/CIR/CodeGen/atomic-scoped.c        | 32 +++++++++++++
 clang/test/CIR/CodeGen/atomic.c               | 46 +++++++++----------
 clang/test/CIR/IR/atomic.cir                  | 24 +++++-----
 6 files changed, 83 insertions(+), 42 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 20b85dd877d00..906bd247f60ef 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -6433,11 +6433,14 @@ def CIR_AtomicXchgOp : CIR_Op<"atomic.xchg", [
     Arg<CIR_PointerType, "", [MemRead, MemWrite]>:$ptr,
     CIR_AnyType:$val,
     Arg<CIR_MemOrder, "memory order">:$mem_order,
+    CIR_SyncScopeKind:$sync_scope,
     UnitAttr:$is_volatile
   );
 
   let assemblyFormat = [{
-    $mem_order (`volatile` $is_volatile^)?
+    $mem_order
+    `syncscope` `(` $sync_scope `)`
+    (`volatile` $is_volatile^)?
     $ptr `,` $val
     `:` functional-type(operands, results) attr-dict
   }];
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 963f99a3276a9..a78b15511dd82 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -470,6 +470,8 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__atomic_exchange_n:
   case AtomicExpr::AO__atomic_exchange:
+  case AtomicExpr::AO__scoped_atomic_exchange_n:
+  case AtomicExpr::AO__scoped_atomic_exchange:
     opName = cir::AtomicXchgOp::getOperationName();
     break;
 
@@ -589,8 +591,6 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__hip_atomic_exchange:
   case AtomicExpr::AO__opencl_atomic_exchange:
-  case AtomicExpr::AO__scoped_atomic_exchange_n:
-  case AtomicExpr::AO__scoped_atomic_exchange:
 
   case AtomicExpr::AO__scoped_atomic_add_fetch:
 
@@ -657,6 +657,7 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   if (fetchAttr)
     rmwOp->setAttr("binop", fetchAttr);
   rmwOp->setAttr("mem_order", orderAttr);
+  rmwOp->setAttr("sync_scope", scopeAttr);
   if (expr->isVolatile())
     rmwOp->setAttr("is_volatile", builder.getUnitAttr());
   if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
@@ -885,6 +886,7 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     break;
 
   case AtomicExpr::AO__atomic_exchange:
+  case AtomicExpr::AO__scoped_atomic_exchange:
     val1 = emitPointerWithAlignment(e->getVal1());
     dest = emitPointerWithAlignment(e->getVal2());
     break;
@@ -945,6 +947,7 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__c11_atomic_store:
   case AtomicExpr::AO__scoped_atomic_store_n:
+  case AtomicExpr::AO__scoped_atomic_exchange_n:
     val1 = emitValToTemp(*this, e->getVal1());
     break;
   }
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index d0db3c9b94c2d..6ac32cade4576 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -812,12 +812,14 @@ getLLVMMemOrder(std::optional<cir::MemOrder> memorder) {
   llvm_unreachable("unknown memory order");
 }
 
+static llvm::StringRef getLLVMSyncScope(cir::SyncScopeKind syncScope) {
+  return syncScope == cir::SyncScopeKind::SingleThread ? "singlethread" : "";
+}
+
 static std::optional<llvm::StringRef>
 getLLVMSyncScope(std::optional<cir::SyncScopeKind> syncScope) {
   if (syncScope.has_value())
-    return syncScope.value() == cir::SyncScopeKind::SingleThread
-               ? "singlethread"
-               : "";
+    return getLLVMSyncScope(*syncScope);
   return std::nullopt;
 }
 
@@ -851,9 +853,10 @@ mlir::LogicalResult CIRToLLVMAtomicXchgOpLowering::matchAndRewrite(
     mlir::ConversionPatternRewriter &rewriter) const {
   assert(!cir::MissingFeatures::atomicSyncScopeID());
   mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(adaptor.getMemOrder());
+  llvm::StringRef llvmSyncScope = getLLVMSyncScope(adaptor.getSyncScope());
   rewriter.replaceOpWithNewOp<mlir::LLVM::AtomicRMWOp>(
       op, mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(), adaptor.getVal(),
-      llvmOrder);
+      llvmOrder, llvmSyncScope);
   return mlir::success();
 }
 
diff --git a/clang/test/CIR/CodeGen/atomic-scoped.c b/clang/test/CIR/CodeGen/atomic-scoped.c
index 4a81288c5bbd6..d34b95b9a305a 100644
--- a/clang/test/CIR/CodeGen/atomic-scoped.c
+++ b/clang/test/CIR/CodeGen/atomic-scoped.c
@@ -80,3 +80,35 @@ void scoped_atomic_store_n(int *ptr, int value) {
   // LLVM: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
   // OGCG: store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
 }
+
+void scoped_atomic_exchange(int *ptr, int *value, int *old) {
+  // CIR-LABEL: @scoped_atomic_exchange
+  // LLVM-LABEL: @scoped_atomic_exchange
+  // OGCG-LABEL: @scoped_atomic_exchange
+
+  __scoped_atomic_exchange(ptr, value, old, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed syncscope(single_thread) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+
+  __scoped_atomic_exchange(ptr, value, old, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+}
+
+void scoped_atomic_exchange_n(int *ptr, int value) {
+  // CIR-LABEL: @scoped_atomic_exchange_n
+  // LLVM-LABEL: @scoped_atomic_exchange_n
+  // OGCG-LABEL: @scoped_atomic_exchange_n
+
+  __scoped_atomic_exchange_n(ptr, value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed syncscope(single_thread) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+
+  __scoped_atomic_exchange_n(ptr, value, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+}
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 6477c8b6e1b85..631ab6174c937 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -480,12 +480,12 @@ void c11_atomic_exchange(_Atomic(int) *ptr, int value) {
   __c11_atomic_exchange(ptr, value, __ATOMIC_RELEASE);
   __c11_atomic_exchange(ptr, value, __ATOMIC_ACQ_REL);
   __c11_atomic_exchange(ptr, value, __ATOMIC_SEQ_CST);
-  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
 
   // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
   // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
@@ -513,12 +513,12 @@ void atomic_exchange(int *ptr, int *value, int *old) {
   __atomic_exchange(ptr, value, old, __ATOMIC_RELEASE);
   __atomic_exchange(ptr, value, old, __ATOMIC_ACQ_REL);
   __atomic_exchange(ptr, value, old, __ATOMIC_SEQ_CST);
-  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
 
   // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
   // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
@@ -546,12 +546,12 @@ void atomic_exchange_n(int *ptr, int value) {
   __atomic_exchange_n(ptr, value, __ATOMIC_RELEASE);
   __atomic_exchange_n(ptr, value, __ATOMIC_ACQ_REL);
   __atomic_exchange_n(ptr, value, __ATOMIC_SEQ_CST);
-  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
 
   // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
   // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
@@ -2065,31 +2065,31 @@ int atomic_load_and_store_dynamic_order(int *ptr, int order) {
   // CIR: cir.switch(%[[ORDER]] : !s32i) {
   // CIR-NEXT: cir.case(default, []) {
   // CIR-NEXT: %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg relaxed %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg relaxed syncscope(system) %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR-NEXT: cir.store align(4) %[[RES]], %[[RES_SLOT:.+]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT: cir.break
   // CIR-NEXT: }
   // CIR-NEXT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
   // CIR-NEXT: %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg acquire %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg acquire syncscope(system) %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR-NEXT: cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT: cir.break
   // CIR-NEXT: }
   // CIR-NEXT: cir.case(anyof, [#cir.int<3> : !s32i]) {
   // CIR-NEXT: %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg release %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg release syncscope(system) %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR-NEXT: cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT: cir.break
   // CIR-NEXT: }
   // CIR-NEXT: cir.case(anyof, [#cir.int<4> : !s32i]) {
   // CIR-NEXT: %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg acq_rel %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg acq_rel syncscope(system) %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR-NEXT: cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT: cir.break
   // CIR-NEXT: }
   // CIR-NEXT: cir.case(anyof, [#cir.int<5> : !s32i]) {
   // CIR-NEXT: %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg seq_cst %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT: %[[RES:.+]] = cir.atomic.xchg seq_cst syncscope(system) %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR-NEXT: cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT: cir.break
   // CIR-NEXT: }
diff --git a/clang/test/CIR/IR/atomic.cir b/clang/test/CIR/IR/atomic.cir
index 790297ff99f47..c58cf472bb5f0 100644
--- a/clang/test/CIR/IR/atomic.cir
+++ b/clang/test/CIR/IR/atomic.cir
@@ -5,18 +5,18 @@
 
 cir.func @atomic_xchg(%ptr: !cir.ptr<!s32i>, %val: !s32i) {
   // CHECK-LABEL: @atomic_xchg
-  %0 = cir.atomic.xchg relaxed %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CHECK: cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  %1 = cir.atomic.xchg consume %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CHECK: cir.atomic.xchg consume %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  %2 = cir.atomic.xchg acquire %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CHECK: cir.atomic.xchg acquire %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  %3 = cir.atomic.xchg release %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CHECK: cir.atomic.xchg release %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  %4 = cir.atomic.xchg acq_rel %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CHECK: cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  %5 = cir.atomic.xchg seq_cst %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CHECK: cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  %0 = cir.atomic.xchg relaxed syncscope(system) %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CHECK: cir.atomic.xchg relaxed syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  %1 = cir.atomic.xchg consume syncscope(system) %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CHECK: cir.atomic.xchg consume syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  %2 = cir.atomic.xchg acquire syncscope(system) %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CHECK: cir.atomic.xchg acquire syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  %3 = cir.atomic.xchg release syncscope(system) %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CHECK: cir.atomic.xchg release syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  %4 = cir.atomic.xchg acq_rel syncscope(system) %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CHECK: cir.atomic.xchg acq_rel syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  %5 = cir.atomic.xchg seq_cst syncscope(system) %ptr, %val : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CHECK: cir.atomic.xchg seq_cst syncscope(system) %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
   cir.return
 }

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
