tra created this revision.
Herald added subscribers: mattd, bixia, yaxunl.
Herald added a project: All.
tra published this revision for review.
tra added reviewers: jlebar, nyalloc.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

For sm_80, NVCC introduced a handful of builtins whose names deviate from the
historic __nvvm_/__nv naming convention. Clang/LLVM provides equivalent
builtins, but under different names. This patch maps the NVCC-style builtins to
their clang counterparts.
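
For illustration, a minimal usage sketch (the kernel is hypothetical, and it
assumes compilation for sm_80 or newer and that the CUDA SDK headers route the
public __reduce_add_sync overloads through the *_impl functions added here):

  // Warp-wide sum; with this patch __reduce_add_sync lowers to
  // __nvvm_redux_sync_add when compiling with clang.
  __global__ void warp_sum(unsigned *out, unsigned in) {
    unsigned mask = __activemask();   // lanes participating in the reduction
    unsigned sum = __reduce_add_sync(mask, in);
    if (threadIdx.x % 32 == 0)        // one write per warp
      out[threadIdx.x / 32] = sum;
  }

The intent is that such code compiles unchanged with both nvcc and clang when
targeting sm_80.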


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D150894

Files:
  clang/lib/Headers/__clang_cuda_intrinsics.h


Index: clang/lib/Headers/__clang_cuda_intrinsics.h
===================================================================
--- clang/lib/Headers/__clang_cuda_intrinsics.h
+++ clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -512,6 +512,69 @@
 __device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {
   return __nv_cvta_generic_to_shared_impl(__ptr);
 }
+
+__device__ inline unsigned __reduce_add_sync_unsigned_impl(unsigned __mask,
+                                                           unsigned __value) {
+  return __nvvm_redux_sync_add(__mask, __value);
+}
+__device__ inline int __reduce_add_sync_signed_impl(unsigned __mask,
+                                                    int __value) {
+  return __nvvm_redux_sync_add(__mask, __value);
+}
+__device__ inline unsigned __reduce_min_sync_unsigned_impl(unsigned __mask,
+                                                           unsigned __value) {
+  return __nvvm_redux_sync_umin(__mask, __value);
+}
+__device__ inline unsigned __reduce_max_sync_unsigned_impl(unsigned __mask,
+                                                           unsigned __value) {
+  return __nvvm_redux_sync_umax(__mask, __value);
+}
+__device__ inline int __reduce_min_sync_signed_impl(unsigned __mask,
+                                                    int __value) {
+  return __nvvm_redux_sync_min(__mask, __value);
+}
+__device__ inline int __reduce_max_sync_signed_impl(unsigned __mask,
+                                                    int __value) {
+  return __nvvm_redux_sync_max(__mask, __value);
+}
+__device__ inline unsigned __reduce_or_sync_unsigned_impl(unsigned __mask,
+                                                          unsigned __value) {
+  return __nvvm_redux_sync_or(__mask, __value);
+}
+__device__ inline unsigned __reduce_and_sync_unsigned_impl(unsigned __mask,
+                                                           unsigned __value) {
+  return __nvvm_redux_sync_and(__mask, __value);
+}
+__device__ inline unsigned __reduce_xor_sync_unsigned_impl(unsigned __mask,
+                                                           unsigned __value) {
+  return __nvvm_redux_sync_xor(__mask, __value);
+}
+
+__device__ inline void
+__nv_memcpy_async_shared_global_4_impl(void *__dst, const void *__src,
+                                       unsigned __src_size) {
+  __nvvm_cp_async_ca_shared_global_4(
+      (void __attribute__((address_space(3))) *)__dst,
+      (const void __attribute__((address_space(1))) *)__src,
+      __src_size);
+}
+__device__ inline void
+__nv_memcpy_async_shared_global_8_impl(void *__dst, const void *__src,
+                                       unsigned __src_size) {
+  __nvvm_cp_async_ca_shared_global_8(
+      (void __attribute__((address_space(3))) *)__dst,
+      (const void __attribute__((address_space(1))) *)__src,
+      __src_size);
+}
+__device__ inline void
+__nv_memcpy_async_shared_global_16_impl(void *__dst, const void *__src,
+                                        unsigned __src_size) {
+  __nvvm_cp_async_ca_shared_global_16(
+      (void __attribute__((address_space(3))) *)__dst,
+      (const void __attribute__((address_space(1))) *)__src,
+      __src_size);
+}
+
 } // extern "C"
 #endif // CUDA_VERSION >= 11000
 

