================
@@ -258,6 +258,80 @@ void setCriticalLock(omp_lock_t *Lock) { setLock(Lock); }
#endif
///}
+#if defined(__SPIRV__)
+
+MemorySemantics_t convertOrderingType(atomic::OrderingTy Ordering) {
+ switch (Ordering) {
+ default:
+ __builtin_unreachable();
+ case atomic::relaxed:
+ return MemorySemantics_t::Relaxed;
+ case atomic::acquire:
+ return MemorySemantics_t::Acquire;
+ case atomic::release:
+ return MemorySemantics_t::Release;
+ case atomic::acq_rel:
+ return MemorySemantics_t::AcquireRelease;
+ case atomic::seq_cst:
+ return MemorySemantics_t::SequentiallyConsistent;
+ }
+}
+uint32_t atomicInc(uint32_t *Address, uint32_t Val,
+                   atomic::OrderingTy Ordering, atomic::MemScopeTy MemScope) {
+ return __spirv_AtomicIAdd(Address, (int)MemScope,
+ convertOrderingType(Ordering), Val);
+}
+
+void namedBarrierInit() {} // TODO
+void namedBarrier() {} // TODO
+void fenceTeam(atomic::OrderingTy Ordering) {
+ return __spirv_MemoryBarrier(Scope_t::Workgroup,
+ 0x100 | convertOrderingType(Ordering));
+}
+void fenceKernel(atomic::OrderingTy Ordering) {
+ return __spirv_MemoryBarrier(Scope_t::Device,
+ 0x200 | convertOrderingType(Ordering));
+}
+void fenceSystem(atomic::OrderingTy Ordering) {
+ return __spirv_MemoryBarrier(Scope_t::CrossDevice,
+ 0x200 | convertOrderingType(Ordering));
+}
+
+void syncWarp(__kmpc_impl_lanemask_t) {
+ __spirv_ControlBarrier(Scope_t::Subgroup, Scope_t::Subgroup,
+ 0x80 | MemorySemantics_t::SequentiallyConsistent);
----------------
jhuber6 wrote:
I'll look into making an intrinsic / builtin for this. It's usually safe to
ignore for now; it only really matters on NVPTX targets.
https://github.com/llvm/llvm-project/pull/174675
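
For reference, a hedged sketch of what the bare masks above (0x80, 0x100,
0x200) appear to encode, assuming they follow the SPIR-V MemorySemantics bit
layout (spec section 3.25). The enum below is illustrative and not part of
this patch:

// Hypothetical named constants for the SPIR-V MemorySemantics memory-kind
// bits; the ordering bits from convertOrderingType() are OR'ed with these.
enum MemoryKindBits : uint32_t {
  SubgroupMemory = 0x80,        // used by syncWarp
  WorkgroupMemory = 0x100,      // used by fenceTeam
  CrossWorkgroupMemory = 0x200, // used by fenceKernel / fenceSystem
};

With such constants a call could read, e.g.:

  __spirv_MemoryBarrier(Scope_t::Workgroup,
                        WorkgroupMemory | convertOrderingType(Ordering));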