yaxunl updated this revision to Diff 110904. yaxunl marked 2 inline comments as done. yaxunl added a comment.
Refactor to introduce classes AtomicScopeABI and AtomicScopeOpenCLABI for easy extension to other languages. https://reviews.llvm.org/D36580 Files: include/clang/AST/ASTContext.h include/clang/Basic/DiagnosticSemaKinds.td include/clang/Basic/SyncScope.h lib/AST/ASTContext.cpp lib/CodeGen/CGAtomic.cpp lib/Frontend/InitPreprocessor.cpp lib/Sema/SemaChecking.cpp test/CodeGenOpenCL/atomic-ops-libcall.cl test/CodeGenOpenCL/atomic-ops.cl test/SemaOpenCL/atomic-ops.cl
Index: test/SemaOpenCL/atomic-ops.cl =================================================================== --- test/SemaOpenCL/atomic-ops.cl +++ test/SemaOpenCL/atomic-ops.cl @@ -14,7 +14,6 @@ int8 i64; atomic_int gn; - void f(atomic_int *i, const atomic_int *ci, atomic_intptr_t *p, atomic_float *d, int *I, const int *CI, @@ -81,6 +80,13 @@ } void memory_checks(atomic_int *Ap, int *p, int val) { + // non-integer memory order argument is casted to integer type. + (void)__opencl_atomic_load(Ap, 1.0f, memory_scope_work_group); + float forder; + (void)__opencl_atomic_load(Ap, forder, memory_scope_work_group); + struct S s; + (void)__opencl_atomic_load(Ap, s, memory_scope_work_group); // expected-error {{passing 'struct S' to parameter of incompatible type 'int'}} + (void)__opencl_atomic_load(Ap, memory_order_relaxed, memory_scope_work_group); (void)__opencl_atomic_load(Ap, memory_order_acquire, memory_scope_work_group); (void)__opencl_atomic_load(Ap, memory_order_consume, memory_scope_work_group); // expected-error {{use of undeclared identifier 'memory_order_consume'}} @@ -151,8 +157,15 @@ (void)__opencl_atomic_load(Ap, memory_order_relaxed, memory_scope_device); (void)__opencl_atomic_load(Ap, memory_order_relaxed, memory_scope_all_svm_devices); (void)__opencl_atomic_load(Ap, memory_order_relaxed, memory_scope_sub_group); - (void)__opencl_atomic_load(Ap, memory_order_relaxed, scope); // expected-error{{non-constant synchronization scope argument to atomic operation is not supported}} + (void)__opencl_atomic_load(Ap, memory_order_relaxed, scope); (void)__opencl_atomic_load(Ap, memory_order_relaxed, 10); //expected-error{{synchronization scope argument to atomic operation is invalid}} + + // non-integer memory scope is casted to integer type. 
+ float fscope; + (void)__opencl_atomic_load(Ap, memory_order_relaxed, 1.0f); + (void)__opencl_atomic_load(Ap, memory_order_relaxed, fscope); + struct S s; + (void)__opencl_atomic_load(Ap, memory_order_relaxed, s); //expected-error{{passing 'struct S' to parameter of incompatible type 'int'}} } void nullPointerWarning(atomic_int *Ap, int *p, int val) { Index: test/CodeGenOpenCL/atomic-ops.cl =================================================================== --- test/CodeGenOpenCL/atomic-ops.cl +++ test/CodeGenOpenCL/atomic-ops.cl @@ -52,6 +52,81 @@ return __opencl_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire, memory_scope_work_group); } +void fi5(atomic_int *i, int scope) { + // CHECK-LABEL: @fi5 + // CHECK: switch i32 %{{.*}}, label %opencl_allsvmdevices [ + // CHECK-NEXT: i32 1, label %opencl_workgroup + // CHECK-NEXT: i32 2, label %opencl_device + // CHECK-NEXT: i32 4, label %opencl_subgroup + // CHECK-NEXT: ] + // CHECK: opencl_workgroup: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("workgroup") seq_cst + // CHECK: br label %atomic.scope.continue + // CHECK: opencl_device: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("agent") seq_cst + // CHECK: br label %atomic.scope.continue + // CHECK: opencl_allsvmdevices: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} seq_cst, align 4 + // CHECK: br label %atomic.scope.continue + // CHECK: opencl_subgroup: + // CHECK: %5 = load atomic i32, i32 addrspace(4)* %0 syncscope("subgroup") seq_cst, align 4 + // CHECK: br label %atomic.scope.continue + // CHECK: atomic.scope.continue: + int x = __opencl_atomic_load(i, memory_order_seq_cst, scope); +} + +void fi6(atomic_int *i, int order, int scope) { + // CHECK-LABEL: @fi6 + // CHECK: switch i32 %{{.*}}, label %monotonic [ + // CHECK-NEXT: i32 1, label %acquire + // CHECK-NEXT: i32 2, label %acquire + // CHECK-NEXT: i32 5, label %seqcst + // CHECK-NEXT: ] + // CHECK: monotonic: + // CHECK: 
switch i32 %{{.*}}, label %[[MON_ALL:.*]] [ + // CHECK-NEXT: i32 1, label %[[MON_WG:.*]] + // CHECK-NEXT: i32 2, label %[[MON_DEV:.*]] + // CHECK-NEXT: i32 4, label %[[MON_SUB:.*]] + // CHECK-NEXT: ] + // CHECK: acquire: + // CHECK: switch i32 %{{.*}}, label %[[ACQ_ALL:.*]] [ + // CHECK-NEXT: i32 1, label %[[ACQ_WG:.*]] + // CHECK-NEXT: i32 2, label %[[ACQ_DEV:.*]] + // CHECK-NEXT: i32 4, label %[[ACQ_SUB:.*]] + // CHECK-NEXT: ] + // CHECK: seqcst: + // CHECK: switch i32 %2, label %[[SEQ_ALL:.*]] [ + // CHECK-NEXT: i32 1, label %[[SEQ_WG:.*]] + // CHECK-NEXT: i32 2, label %[[SEQ_DEV:.*]] + // CHECK-NEXT: i32 4, label %[[SEQ_SUB:.*]] + // CHECK-NEXT: ] + // CHECK: [[MON_WG]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("workgroup") monotonic + // CHECK: [[MON_DEV]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("agent") monotonic + // CHECK: [[MON_ALL]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} monotonic + // CHECK: [[MON_SUB]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("subgroup") monotonic + // CHECK: [[ACQ_WG]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("workgroup") acquire + // CHECK: [[ACQ_DEV]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("agent") acquire + // CHECK: [[ACQ_ALL]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} acquire + // CHECK: [[ACQ_SUB]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("subgroup") acquire + // CHECK: [[SEQ_WG]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("workgroup") seq_cst + // CHECK: [[SEQ_DEV]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("agent") seq_cst + // CHECK: [[SEQ_ALL]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} seq_cst + // CHECK: [[SEQ_SUB]]: + // CHECK: load atomic i32, i32 addrspace(4)* %{{.*}} syncscope("subgroup") seq_cst + int x = __opencl_atomic_load(i, order, scope); +} + float ff1(global atomic_float 
*d) { // CHECK-LABEL: @ff1 // CHECK: load atomic i32, i32 addrspace(1)* {{.*}} syncscope("workgroup") monotonic Index: test/CodeGenOpenCL/atomic-ops-libcall.cl =================================================================== --- test/CodeGenOpenCL/atomic-ops-libcall.cl +++ test/CodeGenOpenCL/atomic-ops-libcall.cl @@ -1,7 +1,7 @@ // RUN: %clang_cc1 < %s -cl-std=CL2.0 -finclude-default-header -triple spir64 -emit-llvm | FileCheck -check-prefix=SPIR %s // RUN: %clang_cc1 < %s -cl-std=CL2.0 -finclude-default-header -triple armv5e-none-linux-gnueabi -emit-llvm | FileCheck -check-prefix=ARM %s -void f(atomic_int *i, atomic_uint *ui, int cmp) { +void f(atomic_int *i, atomic_uint *ui, int cmp, int order, int scope) { int x; // SPIR: {{%[^ ]*}} = call i32 @__opencl_atomic_load_4(i8 addrspace(4)* {{%[0-9]+}}, i32 5, i32 1) // ARM: {{%[^ ]*}} = call i32 @__opencl_atomic_load_4(i8* {{%[0-9]+}}, i32 5, i32 1) @@ -34,4 +34,7 @@ // SPIR: {{%[^ ]*}} = call zeroext i1 @__opencl_atomic_compare_exchange_4(i8 addrspace(4)* {{%[0-9]+}}, i8 addrspace(4)* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 5, i32 5, i32 4) x = __opencl_atomic_compare_exchange_weak(i, &cmp, 1, memory_order_seq_cst, memory_order_seq_cst, memory_scope_sub_group); #endif + // SPIR: {{%[^ ]*}} = call zeroext i1 @__opencl_atomic_compare_exchange_4(i8 addrspace(4)* {{%[0-9]+}}, i8 addrspace(4)* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) + // ARM: {{%[^ ]*}} = call zeroext i1 @__opencl_atomic_compare_exchange_4(i8* {{%[0-9]+}}, i8* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) + x = __opencl_atomic_compare_exchange_weak(i, &cmp, 1, order, order, scope); } Index: lib/Sema/SemaChecking.cpp =================================================================== --- lib/Sema/SemaChecking.cpp +++ lib/Sema/SemaChecking.cpp @@ -3150,11 +3150,8 @@ if (IsOpenCL) { Scope = TheCall->getArg(TheCall->getNumArgs() - 1); llvm::APSInt Result(32); - if (!Scope->isIntegerConstantExpr(Result, 
Context)) - Diag(Scope->getLocStart(), - diag::err_atomic_op_has_non_constant_synch_scope) - << Scope->getSourceRange(); - else if (!isValidSyncScopeValue(Result.getZExtValue())) + if (Scope->isIntegerConstantExpr(Result, Context) && + !Context.getAtomicScopeABI().isValid(Result.getZExtValue())) Diag(Scope->getLocStart(), diag::err_atomic_op_has_invalid_synch_scope) << Scope->getSourceRange(); } else { Index: lib/Frontend/InitPreprocessor.cpp =================================================================== --- lib/Frontend/InitPreprocessor.cpp +++ lib/Frontend/InitPreprocessor.cpp @@ -579,10 +579,12 @@ // Define macros for the OpenCL memory scope. // The values should match clang SyncScope enum. - assert(static_cast<unsigned>(SyncScope::OpenCLWorkGroup) == 1 && - static_cast<unsigned>(SyncScope::OpenCLDevice) == 2 && - static_cast<unsigned>(SyncScope::OpenCLAllSVMDevices) == 3 && - static_cast<unsigned>(SyncScope::OpenCLSubGroup) == 4); + static_assert( + static_cast<unsigned>(AtomicScopeOpenCLABI::WorkGroup) == 1 && + static_cast<unsigned>(AtomicScopeOpenCLABI::Device) == 2 && + static_cast<unsigned>(AtomicScopeOpenCLABI::AllSVMDevices) == 3 && + static_cast<unsigned>(AtomicScopeOpenCLABI::SubGroup) == 4, + "Invalid OpenCL memory scope enum definition"); Builder.defineMacro("__OPENCL_MEMORY_SCOPE_WORK_ITEM", "0"); Builder.defineMacro("__OPENCL_MEMORY_SCOPE_WORK_GROUP", "1"); Builder.defineMacro("__OPENCL_MEMORY_SCOPE_DEVICE", "2"); Index: lib/CodeGen/CGAtomic.cpp =================================================================== --- lib/CodeGen/CGAtomic.cpp +++ lib/CodeGen/CGAtomic.cpp @@ -18,6 +18,7 @@ #include "TargetInfo.h" #include "clang/AST/ASTContext.h" #include "clang/CodeGen/CGFunctionInfo.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Operator.h" @@ -659,6 +660,51 @@ return DeclPtr; } +static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest, + Address Ptr, 
Address Val1, Address Val2, + llvm::Value *IsWeak, llvm::Value *FailureOrder, + uint64_t Size, llvm::AtomicOrdering Order, + llvm::Value *Scope) { + auto &ScopeABI = CGF.getContext().getAtomicScopeABI(); + if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) { + auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID( + ScopeABI.map(SC->getZExtValue()), CGF.CGM.getLLVMContext()); + EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, + Order, SCID); + return; + } + + // Handle non-constant scope. + auto &Builder = CGF.Builder; + auto Scopes = ScopeABI.getRuntimeValues(); + llvm::DenseMap<unsigned, llvm::BasicBlock *> BB; + for (auto S : Scopes) + BB[S] = CGF.createBasicBlock(getAsString(ScopeABI.map(S)), CGF.CurFn); + + llvm::BasicBlock *ContBB = + CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn); + + auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false); + // If unsupported synch scope is encountered at run time, assume a fallback + // synch scope value. 
+ auto FallBack = ScopeABI.getFallBackValue(); + llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]); + for (auto S : Scopes) { + auto *B = BB[S]; + if (S != FallBack) + SI->addCase(Builder.getInt32(S), B); + + Builder.SetInsertPoint(B); + EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, + Order, + CGF.getTargetHooks().getLLVMSyncScopeID(ScopeABI.map(S), + CGF.getLLVMContext())); + Builder.CreateBr(ContBB); + } + + Builder.SetInsertPoint(ContBB); +} + static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args, bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy, @@ -1132,44 +1178,38 @@ E->getOp() == AtomicExpr::AO__atomic_load || E->getOp() == AtomicExpr::AO__atomic_load_n; - assert(isa<llvm::ConstantInt>(Scope) && - "Non-constant synchronization scope not supported"); - auto SCID = getTargetHooks().getLLVMSyncScopeID( - static_cast<SyncScope>(cast<llvm::ConstantInt>(Scope)->getZExtValue()), - getLLVMContext()); - if (isa<llvm::ConstantInt>(Order)) { auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); // We should not ever get to a case where the ordering isn't a valid C ABI // value, but it's hard to enforce that in general. 
if (llvm::isValidAtomicOrderingCABI(ord)) switch ((llvm::AtomicOrderingCABI)ord) { case llvm::AtomicOrderingCABI::relaxed: EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Monotonic, SCID); + llvm::AtomicOrdering::Monotonic, Scope); break; case llvm::AtomicOrderingCABI::consume: case llvm::AtomicOrderingCABI::acquire: if (IsStore) break; // Avoid crashing on code with undefined behavior EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Acquire, SCID); + llvm::AtomicOrdering::Acquire, Scope); break; case llvm::AtomicOrderingCABI::release: if (IsLoad) break; // Avoid crashing on code with undefined behavior EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Release, SCID); + llvm::AtomicOrdering::Release, Scope); break; case llvm::AtomicOrderingCABI::acq_rel: if (IsLoad || IsStore) break; // Avoid crashing on code with undefined behavior EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::AcquireRelease, SCID); + llvm::AtomicOrdering::AcquireRelease, Scope); break; case llvm::AtomicOrderingCABI::seq_cst: EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::SequentiallyConsistent, SCID); + llvm::AtomicOrdering::SequentiallyConsistent, Scope); break; } if (RValTy->isVoidType()) @@ -1206,12 +1246,12 @@ // Emit all the different atomics Builder.SetInsertPoint(MonotonicBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Monotonic, SCID); + llvm::AtomicOrdering::Monotonic, Scope); Builder.CreateBr(ContBB); if (!IsStore) { Builder.SetInsertPoint(AcquireBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Acquire, SCID); + llvm::AtomicOrdering::Acquire, Scope); Builder.CreateBr(ContBB); SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume), 
AcquireBB); @@ -1221,22 +1261,22 @@ if (!IsLoad) { Builder.SetInsertPoint(ReleaseBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Release, SCID); + llvm::AtomicOrdering::Release, Scope); Builder.CreateBr(ContBB); SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release), ReleaseBB); } if (!IsLoad && !IsStore) { Builder.SetInsertPoint(AcqRelBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::AcquireRelease, SCID); + llvm::AtomicOrdering::AcquireRelease, Scope); Builder.CreateBr(ContBB); SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel), AcqRelBB); } Builder.SetInsertPoint(SeqCstBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::SequentiallyConsistent, SCID); + llvm::AtomicOrdering::SequentiallyConsistent, Scope); Builder.CreateBr(ContBB); SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst), SeqCstBB); Index: lib/AST/ASTContext.cpp =================================================================== --- lib/AST/ASTContext.cpp +++ lib/AST/ASTContext.cpp @@ -757,6 +757,7 @@ Listener(nullptr), Comments(SM), CommentsLoaded(false), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), LastSDM(nullptr, 0) { TUDecl = TranslationUnitDecl::Create(*this); + AtomicScope = AtomicScopeABI::create(LangOpts); } ASTContext::~ASTContext() { Index: include/clang/Basic/SyncScope.h =================================================================== --- include/clang/Basic/SyncScope.h +++ include/clang/Basic/SyncScope.h @@ -15,24 +15,131 @@ #ifndef LLVM_CLANG_BASIC_SYNCSCOPE_H #define LLVM_CLANG_BASIC_SYNCSCOPE_H +#include "clang/Basic/LangOptions.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/StringRef.h" +#include <memory> + namespace clang { -/// \brief Defines the synch scope values used by the atomic builtins and -/// expressions. 
+/// \brief Defines synch scope values used internally by clang. +/// +/// The enum values start from 0 and are contiguous. They are mainly used for +/// enumerating all supported synch scope values and mapping them to LLVM +/// synch scopes. Their numerical values may be different from the corresponding +/// synch scope enums used in source languages. +/// +/// In atomic builtin and expressions, language-specific synch scope enums are +/// used. Currently only OpenCL memory scope enums are supported and assumed +/// to be used by all languages. However, in the future, other languages may +/// define their own set of synch scope enums. The language-specific synch scope +/// values can be mapped to internal synch scope values by mapLanguageSyncScope +/// in a language-dependent way. +/// +/// To add a new enum value: +/// Add the enum value to enum class SyncScope. +/// Update enum value Last if necessary. +/// Update getAsString. /// -/// The enum values should match the pre-defined macros -/// __OPENCL_MEMORY_SCOPE_*, which are used to define memory_scope_* -/// enums in opencl-c.h. enum class SyncScope { - OpenCLWorkGroup = 1, - OpenCLDevice = 2, - OpenCLAllSVMDevices = 3, - OpenCLSubGroup = 4, + OpenCLWorkGroup, + OpenCLDevice, + OpenCLAllSVMDevices, + OpenCLSubGroup, + Last = OpenCLSubGroup +}; + +inline llvm::StringRef getAsString(SyncScope S) { + switch (S) { + case SyncScope::OpenCLWorkGroup: + return "opencl_workgroup"; + case SyncScope::OpenCLDevice: + return "opencl_device"; + case SyncScope::OpenCLAllSVMDevices: + return "opencl_allsvmdevices"; + case SyncScope::OpenCLSubGroup: + return "opencl_subgroup"; + } + llvm_unreachable("Invalid synch scope"); +} + +/// \brief Defines the interface for synch scope ABI. +class AtomicScopeABI { +public: + virtual ~AtomicScopeABI() {} + /// \brief Maps language specific synch scope values to internal + /// SyncScope enum. 
+ virtual SyncScope map(unsigned S) const = 0; + + /// \brief Check if the compile-time constant synch scope value + /// is valid. + virtual bool isValid(unsigned S) const = 0; + + /// \brief Get all possible synch scope values that might be + /// encountered at runtime for the current language. + virtual ArrayRef<unsigned> getRuntimeValues() const = 0; + + /// \brief If atomic builtin function is called with invalid + /// synch scope value at runtime, it will fall back to a valid + /// synch scope value returned by this function. + virtual unsigned getFallBackValue() const = 0; + + /// \brief Create a synch scope ABI by language. + static std::unique_ptr<AtomicScopeABI> create(LangOptions &Opt); +}; + +/// \brief Defines the synch scope ABI for OpenCL. +class AtomicScopeOpenCLABI : public AtomicScopeABI { +public: + /// The enum values match the pre-defined macros + /// __OPENCL_MEMORY_SCOPE_*, which are used to define memory_scope_* + /// enums in opencl-c.h. + enum ID { + WorkGroup = 1, + Device = 2, + AllSVMDevices = 3, + SubGroup = 4, + Last = SubGroup + }; + + AtomicScopeOpenCLABI() {} + + SyncScope map(unsigned S) const override { + switch (static_cast<ID>(S)) { + case WorkGroup: + return SyncScope::OpenCLWorkGroup; + case Device: + return SyncScope::OpenCLDevice; + case AllSVMDevices: + return SyncScope::OpenCLAllSVMDevices; + case SubGroup: + return SyncScope::OpenCLSubGroup; + } + llvm_unreachable("Invalid language synch scope value"); + } + + bool isValid(unsigned S) const override { + return S >= static_cast<unsigned>(WorkGroup) && + S <= static_cast<unsigned>(Last); + } + + ArrayRef<unsigned> getRuntimeValues() const override { + static_assert(Last == SubGroup, "Does not include all synch scopes"); + static unsigned Scopes[] = { + static_cast<unsigned>(WorkGroup), static_cast<unsigned>(Device), + static_cast<unsigned>(AllSVMDevices), static_cast<unsigned>(SubGroup)}; + return Scopes; + } + + unsigned getFallBackValue() const override { + return 
static_cast<unsigned>(AllSVMDevices); + } }; -inline bool isValidSyncScopeValue(unsigned Scope) { - return Scope >= static_cast<unsigned>(SyncScope::OpenCLWorkGroup) && - Scope <= static_cast<unsigned>(SyncScope::OpenCLSubGroup); +inline std::unique_ptr<AtomicScopeABI> +AtomicScopeABI::create(LangOptions &Opt) { + /// Currently it is assumed that all languages use OpenCL synch scope ABI. + return llvm::make_unique<AtomicScopeOpenCLABI>(); } } Index: include/clang/Basic/DiagnosticSemaKinds.td =================================================================== --- include/clang/Basic/DiagnosticSemaKinds.td +++ include/clang/Basic/DiagnosticSemaKinds.td @@ -7026,8 +7026,6 @@ InGroup<DiagGroup<"atomic-memory-ordering">>; def err_atomic_op_has_invalid_synch_scope : Error< "synchronization scope argument to atomic operation is invalid">; -def err_atomic_op_has_non_constant_synch_scope : Error< - "non-constant synchronization scope argument to atomic operation is not supported">; def err_overflow_builtin_must_be_int : Error< "operand argument to overflow builtin must be an integer (%0 invalid)">; Index: include/clang/AST/ASTContext.h =================================================================== --- include/clang/AST/ASTContext.h +++ include/clang/AST/ASTContext.h @@ -19,8 +19,8 @@ #include "clang/AST/CanonicalType.h" #include "clang/AST/CommentCommandTraits.h" #include "clang/AST/Decl.h" -#include "clang/AST/DeclarationName.h" #include "clang/AST/DeclBase.h" +#include "clang/AST/DeclarationName.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/PrettyPrinter.h" @@ -30,32 +30,33 @@ #include "clang/AST/Type.h" #include "clang/Basic/AddressSpaces.h" #include "clang/Basic/IdentifierTable.h" +#include "clang/Basic/LLVM.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Linkage.h" -#include "clang/Basic/LLVM.h" #include "clang/Basic/Module.h" #include "clang/Basic/OperatorKinds.h" #include
"clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SanitizerBlacklist.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" +#include "clang/Basic/SyncScope.h" #include "clang/Basic/XRayLists.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" -#include "llvm/ADT/iterator_range.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/PointerUnion.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/TinyPtrVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" +#include "llvm/ADT/TinyPtrVector.h" +#include "llvm/ADT/iterator_range.h" #include "llvm/Support/AlignOf.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/Casting.h" @@ -493,6 +494,9 @@ std::unique_ptr<CXXABI> ABI; CXXABI *createCXXABI(const TargetInfo &T); + /// \brief Language-specific atomic synch scope ABI. + std::unique_ptr<AtomicScopeABI> AtomicScope; + /// \brief The logical -> physical address space map. const LangAS::Map *AddrSpaceMap; @@ -672,6 +676,8 @@ return FullSourceLoc(Loc,SourceMgr); } + AtomicScopeABI &getAtomicScopeABI() const { return *AtomicScope; } + /// \brief All comments in this translation unit. RawCommentList Comments;
_______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits