Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
This revision was automatically updated to reflect the committed changes. Closed by commit rL253388: [CUDA] Added a wrapper header for inclusion of stock CUDA headers. (authored by tra). Changed prior to commit: http://reviews.llvm.org/D13171?vs=38574&id=40435#toc Repository: rL LLVM http://reviews.llvm.org/D13171 Files: cfe/trunk/lib/Headers/CMakeLists.txt cfe/trunk/lib/Headers/cuda_runtime.h Index: cfe/trunk/lib/Headers/CMakeLists.txt === --- cfe/trunk/lib/Headers/CMakeLists.txt +++ cfe/trunk/lib/Headers/CMakeLists.txt @@ -17,6 +17,7 @@ bmiintrin.h cpuid.h cuda_builtin_vars.h + cuda_runtime.h emmintrin.h f16cintrin.h float.h Index: cfe/trunk/lib/Headers/cuda_runtime.h === --- cfe/trunk/lib/Headers/cuda_runtime.h +++ cfe/trunk/lib/Headers/cuda_runtime.h @@ -0,0 +1,179 @@ +/*=== cuda_runtime.h - CUDA runtime support === + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + *===---=== + */ + +#ifndef __CLANG_CUDA_RUNTIME_H__ +#define __CLANG_CUDA_RUNTIME_H__ + +#if defined(__CUDA__) && defined(__clang__) + +// Include some standard headers to avoid CUDA headers including them +// while some required macros (like __THROW) are in a weird state. +#include + +// Preserve common macros that will be changed below by us or by CUDA +// headers. +#pragma push_macro("__THROW") +#pragma push_macro("__CUDA_ARCH__") + +// WARNING: Preprocessor hacks below are based on specifics of the +// implementation of CUDA-7.x headers and are expected to break with +// any other version of CUDA headers. +#include "cuda.h" +#if !defined(CUDA_VERSION) +#error "cuda.h did not define CUDA_VERSION" +#elif CUDA_VERSION < 7000 || CUDA_VERSION > 7050 +#error "Unsupported CUDA version!" +#endif + +// Make largest subset of device functions available during host +// compilation -- SM_35 for the time being. +#ifndef __CUDA_ARCH__ +#define __CUDA_ARCH__ 350 +#endif + +#include "cuda_builtin_vars.h" + +// No need for device_launch_parameters.h as cuda_builtin_vars.h above +// has taken care of builtin variables declared in the file. +#define __DEVICE_LAUNCH_PARAMETERS_H__ + +// {math,device}_functions.h only have declarations of the +// functions. We don't need them as we're going to pull in their +// definitions from .hpp files. +#define __DEVICE_FUNCTIONS_H__ +#define __MATH_FUNCTIONS_H__ + +#undef __CUDACC__ +#define __CUDABE__ +// Disables definitions of device-side runtime support stubs in +// cuda_device_runtime_api.h +#define __CUDADEVRT_INTERNAL__ +#include "host_config.h" +#include "host_defines.h" +#include "driver_types.h" +#include "common_functions.h" +#undef __CUDADEVRT_INTERNAL__ + +#undef __CUDABE__ +#define __CUDACC__ +#include_next "cuda_runtime.h" + +#undef __CUDACC__ +#define __CUDABE__ + +// CUDA headers use __nvvm_memcpy and __nvvm_memset which clang does +// not have at the moment. Emulate them with a builtin memcpy/memset. 
+#define __nvvm_memcpy(s,d,n,a) __builtin_memcpy(s,d,n) +#define __nvvm_memset(d,c,n,a) __builtin_memset(d,c,n) + +#include "crt/host_runtime.h" +#include "crt/device_runtime.h" +// device_runtime.h defines __cxa_* macros that will conflict with +// cxxabi.h. +// FIXME: redefine these as __device__ functions. +#undef __cxa_vec_ctor +#undef __cxa_vec_cctor +#undef __cxa_vec_dtor +#undef __cxa_vec_new2 +#undef __cxa_vec_new3 +#undef __cxa_vec_delete2 +#undef __cxa_vec_delete +#undef __cxa_vec_delete3 +#undef __cxa_pure_virtual + +// We need decls for functions in CUDA's libdevice with __device__ +// attribute only. Alas they come either as __host__ __device__ or +// with no attributes at all. To work around that, define __CUDA_RTC__ +// which produces HD variant and undef __host__ which gives us desired +// decls with __device_
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
tra updated this revision to Diff 38574. tra added a comment. Added test cases for force-including of cuda_runtime.h Tweaked inclusion of one header due to use of default arguments. http://reviews.llvm.org/D13171 Files: lib/Driver/ToolChains.cpp lib/Headers/CMakeLists.txt lib/Headers/cuda_runtime.h test/Driver/cuda-detect.cu unittests/ASTMatchers/ASTMatchersTest.h Index: unittests/ASTMatchers/ASTMatchersTest.h === --- unittests/ASTMatchers/ASTMatchersTest.h +++ unittests/ASTMatchers/ASTMatchersTest.h @@ -178,6 +178,7 @@ Args.push_back("-xcuda"); Args.push_back("-fno-ms-extensions"); Args.push_back("--cuda-host-only"); + Args.push_back("-nocudainc"); Args.push_back(CompileArg); if (!runToolOnCodeWithArgs(Factory->create(), CudaHeader + Code, Args)) { Index: test/Driver/cuda-detect.cu === --- test/Driver/cuda-detect.cu +++ test/Driver/cuda-detect.cu @@ -3,7 +3,7 @@ // // # Check that we properly detect CUDA installation. // RUN: %clang -v --target=i386-unknown-linux \ -// RUN: --sysroot=/tmp/no-cuda-there 2>&1 | FileCheck %s -check-prefix NOCUDA +// RUN: --sysroot=%S/no-cuda-there 2>&1 | FileCheck %s -check-prefix NOCUDA // RUN: %clang -v --target=i386-unknown-linux \ // RUN: --sysroot=%S/Inputs/CUDA 2>&1 | FileCheck %s // RUN: %clang -v --target=i386-unknown-linux \ @@ -32,6 +32,11 @@ // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \ // RUN: -nocudalib --cuda-path=%S/Inputs/CUDA/usr/local/cuda %s 2>&1 \ // RUN: | FileCheck %s -check-prefix NOLIBDEVICE +// Verify that we don't add include paths, link with libdevice or +// -include cuda_runtime without valid CUDA installation. 
+// RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \ +// RUN: --cuda-path=%S/no-cuda-there %s 2>&1 \ +// RUN: | FileCheck %s -check-prefix NOCUDAINC -check-prefix NOLIBDEVICE // CHECK: Found CUDA installation: {{.*}}/Inputs/CUDA/usr/local/cuda // NOCUDA-NOT: Found CUDA installation: @@ -43,7 +48,9 @@ // LIBDEVICE35-SAME: libdevice.compute_35.10.bc // LIBDEVICE-SAME: "-target-feature" "+ptx42" // CUDAINC-SAME: "-internal-isystem" "{{.*}}/Inputs/CUDA/usr/local/cuda/include" +// CUDAINC-SAME: "-include" "cuda_runtime.h" // NOCUDAINC-NOT: "-internal-isystem" "{{.*}}/Inputs/CUDA/usr/local/cuda/include" +// NOCUDAINC-NOT: "-include" "cuda_runtime.h" // LIBDEVICE-SAME: "-x" "cuda" // NOLIBDEVICE: "-triple" "nvptx-nvidia-cuda" Index: lib/Headers/cuda_runtime.h === --- /dev/null +++ lib/Headers/cuda_runtime.h @@ -0,0 +1,155 @@ +/*=== cuda_runtime.h - CUDA runtime support === + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + *===---=== + */ + +#ifndef __CLANG_CUDA_RUNTIME_H__ +#define __CLANG_CUDA_RUNTIME_H__ + +#if defined(__PTX__) + +// Include some standard headers to avoid CUDA headers including them +// while some required macros (like __THROW) are in a weird state. +#include + +// Preserve common macros that will be changed below by us or by CUDA +// headers. +#pragma push_macro("__THROW") +#pragma push_macro("__CUDA_ARCH__") + +// WARNING: Preprocessor hacks below are based on specific of +// implementation of CUDA-7.x headers and are expected to break with +// any other version of CUDA headers. +#include "cuda.h" +#if !defined(CUDA_VERSION) +#error "cuda.h did not define CUDA_VERSION" +#elif CUDA_VERSION < 7000 || CUDA_VERSION > 7050 +#error "Unsupported CUDA version!" +#endif + +// Make largest subset of device functions available during host +// compilation -- SM_35 for the time being. +#ifndef __CUDA_ARCH__ +#define __CUDA_ARCH__ 350 +#endif + +#include "cuda_builtin_vars.h" + +
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
echristo accepted this revision. echristo added a comment. In http://reviews.llvm.org/D13171#272441, @tra wrote: > In http://reviews.llvm.org/D13171#272397, @echristo wrote: > > > I'm ignoring the content of the header, but this seems to be a not terrible > > way to do things. I gather that cuda_runtime.h is something that's > > typically included by the driver by nvidia and not the client? > > > Correct. cuda_runtime.h (and all it pulls in) is -include'd under the hood by > nvcc. > > > Also, tests? > > > I'll add a test to verify that "-include cuda_runtime.h" shows up on cc1 > command line where/when it's expected. Ick. > What would be a good way to test the wrapper itself within clang tree without > real CUDA headers? Hrm. Maybe a set of inputs that stub out things? Hard really. > I've done fair amount of manual testing outside of clang source tree. > > - manual comparison of preprocessed output from cuda_runtime.h between host > and device passes. > - compiled 39 out of 46 thrust examples and verified that they produce output > identical to nvcc-compiled binaries. Cool. LGTM with those changes and give a thought at how to test this in tree better. Thanks! -eric http://reviews.llvm.org/D13171 ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
tra added a comment. In http://reviews.llvm.org/D13171#272397, @echristo wrote: > I'm ignoring the content of the header, but this seems to be a not terrible > way to do things. I gather that cuda_runtime.h is something that's typically > included by the driver by nvidia and not the client? Correct. cuda_runtime.h (and all it pulls in) is -include'd under the hood by nvcc. > Also, tests? I'll add a test to verify that "-include cuda_runtime.h" shows up on cc1 command line where/when it's expected. What would be a good way to test the wrapper itself within clang tree without real CUDA headers? I've done fair amount of manual testing outside of clang source tree. - manual comparison of preprocessed output from cuda_runtime.h between host and device passes. - compiled 39 out of 46 thrust examples and verified that they produce output identical to nvcc-compiled binaries. http://reviews.llvm.org/D13171 ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
echristo added a comment. I'm ignoring the content of the header, but this seems to be a not terrible way to do things. I gather that cuda_runtime.h is something that's typically included by the driver by nvidia and not the client? Also, tests? -eric http://reviews.llvm.org/D13171 ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
tra updated this revision to Diff 37912. tra added a comment. Herald added a subscriber: klimek. Changed header wrapping strategy. Previous version was attempting to make CUDA headers work for host/device compilations separately. In the end host and device compilations ended up with different view of CUDA-provided functions. While it mostly worked, that is not what we really want. What we want is to have identical view of device-specific functions in both cases and let function overloading handle name clashes between host and device functions. This wrapper now always includes CUDA headers exactly the same way during host and device compilation passes and produces identical preprocessed content during host and device side compilation for sm_35 GPUs. Device compilation passes for older GPUs will see a smaller subset of device functions supported by particular GPU. As a bonus this wrapper works with CUDA 7.5 now. http://reviews.llvm.org/D13171 Files: lib/Driver/ToolChains.cpp lib/Headers/CMakeLists.txt lib/Headers/cuda_runtime.h unittests/ASTMatchers/ASTMatchersTest.h Index: unittests/ASTMatchers/ASTMatchersTest.h === --- unittests/ASTMatchers/ASTMatchersTest.h +++ unittests/ASTMatchers/ASTMatchersTest.h @@ -178,6 +178,7 @@ Args.push_back("-xcuda"); Args.push_back("-fno-ms-extensions"); Args.push_back("--cuda-host-only"); + Args.push_back("-nocudainc"); Args.push_back(CompileArg); if (!runToolOnCodeWithArgs(Factory->create(), CudaHeader + Code, Args)) { Index: lib/Headers/cuda_runtime.h === --- /dev/null +++ lib/Headers/cuda_runtime.h @@ -0,0 +1,153 @@ +/*=== cuda_runtime.h - CUDA runtime support === + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit 
persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===---=== + */ + +#ifndef __CLANG_CUDA_RUNTIME_H__ +#define __CLANG_CUDA_RUNTIME_H__ + +#if defined(__PTX__) + +// Include some standard headers to avoid CUDA headers including them +// while some required macros (like __THROW) are in a weird state. +#include + +// Preserve common macros that will be changed below by us or by CUDA +// headers. +#pragma push_macro("__THROW") +#pragma push_macro("__CUDA_ARCH__") + +// WARNING: Preprocessor hacks below are based on specific of +// implementation of CUDA-7.x headers and are expected to break with +// any other version of CUDA headers. +#include "cuda.h" +#if !defined(CUDA_VERSION) +#error "cuda.h did not define CUDA_VERSION" +#elif CUDA_VERSION < 7000 || CUDA_VERSION > 7050 +#error "Unsupported CUDA version!" +#endif + +// Make largest subset of device functions available during host +// compilation -- SM_35 for the time being. +#ifndef __CUDA_ARCH__ +#define __CUDA_ARCH__ 350 +#endif + +#include "cuda_builtin_vars.h" + +// No need for device_launch_parameters.h as cuda_builtin_vars.h above +// has taken care of builtin variables declared in the file. +#define __DEVICE_LAUNCH_PARAMETERS_H__ + +// {math,device}_functions.h only have declarations of the +// functions. 
We don't need them as we're going to pull in their +// definitions from .hpp files. +#define __DEVICE_FUNCTIONS_H__ +#define __MATH_FUNCTIONS_H__ + +#undef __CUDACC__ +#define __CUDABE__ +#include "host_config.h" +#include "host_defines.h" +#include "driver_types.h" +#include "common_functions.h" + +#undef __CUDABE__ +#define __CUDACC__ +#include_next "cuda_runtime.h" + +#undef __CUDACC__ +#define __CUDABE__ +#include "crt/host_runtime.h" +#include "crt/device_runtime.h" + +// We need decls for functions in CUDA's libdevice with __device__ +// attribute only. Alas they come either as __host__ __device__ or +// with no attributes at all. To work around that, define __CUDA_RTC__ +// which produces HD variant and undef __host__ w
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
tra updated this revision to Diff 36048. tra added a comment. Renamed wrapper to cuda_runtime.h Similarly to nvcc, automatically add "-include cuda_runtime.h" to CC1 invocations unless -nocudainc is specified. http://reviews.llvm.org/D13171 Files: lib/Driver/ToolChains.cpp lib/Headers/CMakeLists.txt lib/Headers/cuda_runtime.h Index: lib/Headers/cuda_runtime.h === --- /dev/null +++ lib/Headers/cuda_runtime.h @@ -0,0 +1,119 @@ +#ifndef __CLANG_CUDA_SUPPORT_H__ +#define __CLANG_CUDA_SUPPORT_H__ + +#if defined(__PTX__) + +// WARNING: Preprocessor hacks below are based on specific of +// implementation of CUDA-7.0 headers and are expected to break with +// any other version of CUDA headers. +#include "cuda.h" +#if !defined(CUDA_VERSION) +#error "cuda.h did not define CUDA_VERSION" +#elif CUDA_VERSION != 7000 +#error "Unsupported CUDA version!" +#endif + +#define __NVCC__ 1 +#if defined(__CUDA_ARCH__) +#define __CUDABE__ 1 +#else +#define __CUDACC__ 1 +#endif + +// Fake include guards to prevent inclusion of some CUDA headers. +#define __HOST_DEFINES_H__ +#define __DEVICE_LAUNCH_PARAMETERS_H__ +#define __TEXTURE_INDIRECT_FUNCTIONS_HPP__ +#define __SURFACE_INDIRECT_FUNCTIONS_HPP__ + +// Standard CUDA attributes +#define __constant__ __attribute__((constant)) +#define __device__ __attribute__((device)) +#define __global__ __attribute__((global)) +#define __host__ __attribute__((host)) +#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__))) +#define __shared__ __attribute__((shared)) + +// Additional macros used throughout CUDA headers. +#define __align__(x) __attribute__((aligned(x))) +#define __builtin_align__(x) __align__(x) +#define __cudart_builtin__ +#define __device_builtin__ +#define __forceinline__ __inline__ __attribute__((always_inline)) + +#define CUDARTAPI +#define _CRTIMP + +// Texture and surface types are not supported yet. 
+#define __device_builtin_surface_type__ +#define __device_builtin_texture_type__ + +// Include support for built-in variables. +#include "cuda_builtin_vars.h" + +// CUDA headers were implemented with the assumption of split-mode +// compilation and present CUDA functions differently for host and +// device mode. Typically in host mode they provide declarations with +// __device__ attribute attached. In device mode we get definitions +// but *without* __device__ attribute. This does not work well in +// combined compilation mode used by clang, so we have to trick CUDA +// headers into something we can use. + +// libdevice functions in device_functions_decls.h either come with +// __host__ __device__ attributes or with none at all. Temporarily +// undefine __host__ so only __device__ is applied. +#pragma push_macro("__CUDACC_RTC__") +#pragma push_macro("__host__") +#define __CUDACC_RTC__ +#define __host__ +#include "device_functions_decls.h" +#pragma pop_macro("__host__") +#pragma pop_macro("__CUDACC_RTC__") + +#include_next "cuda_runtime.h" +#include "crt/device_runtime.h" + +#if defined(__CUDA_ARCH__) +// device_functions.hpp and math_functions*.hpp use 'static +// __forceinline__' (with no __device__) for definitions of device +// functions. Temporarily redefine __forceinline__ to include +// __device__. +#pragma push_macro("__forceinline__") +#define __forceinline__ __device__ __inline__ __attribute__((always_inline)) +#include "device_functions.h" +#include "math_functions.h" +#pragma pop_macro("__forceinline__") +#else +#include "device_functions.h" +#include "math_functions.h" +#endif + +#if defined(__CUDA_ARCH__) +// Definitions for device specific functions are provided only if +// __CUDACC__ is defined. Alas, they've already been transitively +// included by device_functions.h and are now behind include guards. +// We need to temporarily define __CUDACC__, undo include guards and +// include the files with implementation of these functions. 
+ +#pragma push_macro("__CUDACC__") +#define __CUDACC__ 1 + +#undef __DEVICE_ATOMIC_FUNCTIONS_HPP__ +#include "device_atomic_functions.hpp" + +#undef __SM_20_ATOMIC_FUNCTIONS_HPP__ +#include "sm_20_atomic_functions.hpp" +#undef __SM_32_ATOMIC_FUNCTIONS_HPP__ +#include "sm_32_atomic_functions.hpp" + +#undef __SM_20_INTRINSICS_HPP__ +#include "sm_20_intrinsics.hpp" +#undef __SM_30_INTRINSICS_HPP__ +#include "sm_30_intrinsics.hpp" +#undef __SM_32_INTRINSICS_HPP__ +#include "sm_32_intrinsics.hpp" + +#pragma pop_macro("__CUDACC__") +#endif // __CUDA_ARCH__ +#endif // __PTX__ +#endif // __CLANG_CUDA_SUPPORT_H__ Index: lib/Headers/CMakeLists.txt === --- lib/Headers/CMakeLists.txt +++ lib/Headers/CMakeLists.txt @@ -17,6 +17,7 @@ bmiintrin.h cpuid.h cuda_builtin_vars.h + cuda_runtime.h emmintrin.h f16cintrin.h float.h Index: lib/Driver/ToolChains.cpp === --- lib/Driver/ToolChains.cpp +++ lib/Driver/ToolCh
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
cuda_runtime.h may be a better choice. nvcc -includes it during both host and device compilation and this wrapper file is intended to serve a similar purpose and will probably end up being -included by cc1 in the end. --Artem On Mon, Sep 28, 2015 at 6:26 PM, Eric Christopher wrote: > echristo added a comment. > > Why not just call it cuda.h and use #include next for it and then fix it > up? > > -eric > > > http://reviews.llvm.org/D13171 > > > > -- --Artem Belevich ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
echristo added a comment. Why not just call it cuda.h and use #include next for it and then fix it up? -eric http://reviews.llvm.org/D13171 ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
The (vague) idea was to make clear that the header is *not* part of cuda distribution. That said, the file could use a better name. Do any of these sound better? * fix_cuda_headers.h * adapt_cuda_headers.h * cuda_shim.h --Artem On Sat, Sep 26, 2015 at 12:20 PM, Eric Christopher wrote: > echristo added a comment. > > Bikeshed: it's part of the clang headers, do we really need "clang" in the > header name? > > -eric > > > http://reviews.llvm.org/D13171 > > > > -- --Artem Belevich ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
echristo added a comment. Bikeshed: it's part of the clang headers, do we really need "clang" in the header name? -eric http://reviews.llvm.org/D13171 ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
eliben accepted this revision. eliben added a comment. This revision is now accepted and ready to land. lgtm http://reviews.llvm.org/D13171 ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
tra updated this revision to Diff 35760. tra added a comment. Include cuda.h and #error if we see wrong CUDA_VERSION. http://reviews.llvm.org/D13171 Files: lib/Headers/CMakeLists.txt lib/Headers/clang_cuda_support.h Index: lib/Headers/clang_cuda_support.h === --- /dev/null +++ lib/Headers/clang_cuda_support.h @@ -0,0 +1,119 @@ +#ifndef __CLANG_CUDA_SUPPORT_H__ +#define __CLANG_CUDA_SUPPORT_H__ + +#if defined(__PTX__) + +// WARNING: Preprocessor hacks below are based on specific of +// implementation of CUDA-7.0 headers and are expected to break with +// any other version of CUDA headers. +#include "cuda.h" +#if !defined(CUDA_VERSION) +#error "cuda.h did not define CUDA_VERSION" +#elif CUDA_VERSION != 7000 +#error "Unsupported CUDA version!" +#endif + +#define __NVCC__ 1 +#if defined(__CUDA_ARCH__) +#define __CUDABE__ 1 +#else +#define __CUDACC__ 1 +#endif + +// Fake include guards to prevent inclusion of some CUDA headers. +#define __HOST_DEFINES_H__ +#define __DEVICE_LAUNCH_PARAMETERS_H__ +#define __TEXTURE_INDIRECT_FUNCTIONS_HPP__ +#define __SURFACE_INDIRECT_FUNCTIONS_HPP__ + +// Standard CUDA attributes +#define __constant__ __attribute__((constant)) +#define __device__ __attribute__((device)) +#define __global__ __attribute__((global)) +#define __host__ __attribute__((host)) +#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__))) +#define __shared__ __attribute__((shared)) + +// Additional macros used throughout CUDA headers. +#define __align__(x) __attribute__((aligned(x))) +#define __builtin_align__(x) __align__(x) +#define __cudart_builtin__ +#define __device_builtin__ +#define __forceinline__ __inline__ __attribute__((always_inline)) + +#define CUDARTAPI +#define _CRTIMP + +// Texture and surface types are not supported yet. +#define __device_builtin_surface_type__ +#define __device_builtin_texture_type__ + +// Include support for built-in variables. 
+#include "cuda_builtin_vars.h" + +// CUDA headers were implemented with the assumption of split-mode +// compilation and present CUDA functions differently for host and +// device mode. Typically in host mode they provide declarations with +// __device__ attribute attached. In device mode we get definitions +// but *without* __device__ attribute. This does not work well in +// combined compilation mode used by clang, so we have to trick CUDA +// headers into something we can use. + +// libdevice functions in device_functions_decls.h either come with +// __host__ __device__ attributes or with none at all. Temporarily +// undefine __host__ so only __device__ is applied. +#pragma push_macro("__CUDACC_RTC__") +#pragma push_macro("__host__") +#define __CUDACC_RTC__ +#define __host__ +#include "device_functions_decls.h" +#pragma pop_macro("__host__") +#pragma pop_macro("__CUDACC_RTC__") + +#include "cuda_runtime.h" +#include "crt/device_runtime.h" + +#if defined(__CUDA_ARCH__) +// device_functions.hpp and math_functions*.hpp use 'static +// __forceinline__' (with no __device__) for definitions of device +// functions. Temporarily redefine __forceinline__ to include +// __device__. +#pragma push_macro("__forceinline__") +#define __forceinline__ __device__ __inline__ __attribute__((always_inline)) +#include "device_functions.h" +#include "math_functions.h" +#pragma pop_macro("__forceinline__") +#else +#include "device_functions.h" +#include "math_functions.h" +#endif + +#if defined(__CUDA_ARCH__) +// Definitions for device specific functions are provided only if +// __CUDACC__ is defined. Alas, they've already been transitively +// included by device_functions.h and are now behind include guards. +// We need to temporarily define __CUDACC__, undo include guards and +// include the files with implementation of these functions. 
+ +#pragma push_macro("__CUDACC__") +#define __CUDACC__ 1 + +#undef __DEVICE_ATOMIC_FUNCTIONS_HPP__ +#include "device_atomic_functions.hpp" + +#undef __SM_20_ATOMIC_FUNCTIONS_HPP__ +#include "sm_20_atomic_functions.hpp" +#undef __SM_32_ATOMIC_FUNCTIONS_HPP__ +#include "sm_32_atomic_functions.hpp" + +#undef __SM_20_INTRINSICS_HPP__ +#include "sm_20_intrinsics.hpp" +#undef __SM_30_INTRINSICS_HPP__ +#include "sm_30_intrinsics.hpp" +#undef __SM_32_INTRINSICS_HPP__ +#include "sm_32_intrinsics.hpp" + +#pragma pop_macro("__CUDACC__") +#endif // __CUDA_ARCH__ +#endif // __PTX__ +#endif // __CLANG_CUDA_SUPPORT_H__ Index: lib/Headers/CMakeLists.txt === --- lib/Headers/CMakeLists.txt +++ lib/Headers/CMakeLists.txt @@ -17,6 +17,7 @@ bmiintrin.h cpuid.h cuda_builtin_vars.h + clang_cuda_support.h emmintrin.h f16cintrin.h float.h ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
tra marked an inline comment as done. tra added a comment. http://reviews.llvm.org/D13171 ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
eliben added inline comments. Comment at: lib/Headers/clang_cuda_support.h:53 @@ +52,3 @@ +// WARNING: Preprocessor hacks below are based on specifics of the +// implementation of CUDA-7.0 headers and are expected to break with +// any other version of CUDA headers. If this includes CUDA headers, maybe you can #error out if the CUDA version isn't good? http://reviews.llvm.org/D13171 ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: [PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
tra updated this revision to Diff 35747. tra added a comment. Fixed typos and whitespace nits. use #pragma push_macro for __CUDACC_RTC__, too. http://reviews.llvm.org/D13171 Files: lib/Headers/CMakeLists.txt lib/Headers/clang_cuda_support.h Index: lib/Headers/clang_cuda_support.h === --- /dev/null +++ lib/Headers/clang_cuda_support.h @@ -0,0 +1,121 @@ +#ifndef __CLANG_CUDA_SUPPORT_H__ +#define __CLANG_CUDA_SUPPORT_H__ + +#if defined(__PTX__) + +#define __NVCC__ 1 +#if defined(__CUDA_ARCH__) +#define __CUDABE__ 1 +#else +#define __CUDACC__ 1 +#endif + +// Fake include guards to prevent inclusion of some CUDA headers. +#define __HOST_DEFINES_H__ +#define __DEVICE_LAUNCH_PARAMETERS_H__ +#define __TEXTURE_INDIRECT_FUNCTIONS_HPP__ +#define __SURFACE_INDIRECT_FUNCTIONS_HPP__ + +// Standard CUDA attributes +#define __constant__ __attribute__((constant)) +#define __device__ __attribute__((device)) +#define __global__ __attribute__((global)) +#define __host__ __attribute__((host)) +#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__))) +#define __shared__ __attribute__((shared)) + +// Additional macros used throughout CUDA headers. +#define __align__(x) __attribute__((aligned(x))) +#define __builtin_align__(x) __align__(x) +#define __cudart_builtin__ +#define __device_builtin__ +#define __forceinline__ __inline__ __attribute__((always_inline)) + +#define CUDARTAPI +#define _CRTIMP + +// Texture and surface types are not supported yet. +#define __device_builtin_surface_type__ +#define __device_builtin_texture_type__ + +// Include support for built-in variables. +#include "cuda_builtin_vars.h" + +// CUDA headers were implemented with the assumption of split-mode +// compilation and present CUDA functions differently for host and +// device mode. Typically in host mode they provide declarations with +// __device__ attribute attached. In device mode we get definitions +// but *without* __device__ attribute. 
This does not work well in +// combined compilation mode used by clang, so we have to trick CUDA +// headers into something we can use. + +// WARNING: Preprocessor hacks below are based on specific of +// implementation of CUDA-7.0 headers and are expected to break with +// any other version of CUDA headers. + +// libdevice functions in device_functions_decls.h either come with +// __host__ __device__ attributes or with none at all. Temporarily +// undefine __host__ so only __device__ is applied. +#pragma push_macro("__CUDACC_RTC__") +#pragma push_macro("__host__") +#define __CUDACC_RTC__ +#define __host__ +#include "device_functions_decls.h" +#pragma pop_macro("__host__") +#pragma pop_macro("__CUDACC_RTC__") + +#include "cuda_runtime.h" +#include "crt/device_runtime.h" + +#if defined(__CUDA_ARCH__) +// device_functions.hpp and math_functions*.hpp use 'static +// __forceinline__' (with no __device__) for definitions of device +// functions. Temporarily redefine __forceinline__ to include +// __device__. +#pragma push_macro("__forceinline__") +#define __forceinline__ __device__ __inline__ __attribute__((always_inline)) +#include "device_functions.h" +#include "math_functions.h" +#pragma pop_macro("__forceinline__") +#else +#include "device_functions.h" +#include "math_functions.h" +#endif + +#if defined(__CUDA_ARCH__) +// Definitions for device specific functions are provided only if +// __CUDACC__ is defined. Alas, they've already been transitively +// included by device_functions.h and are now behind include guards. +// We need to temporarily define __CUDACC__, undo include guards and +// include the files with implementation of these functions. 
+ +#pragma push_macro("__CUDACC__") +#define __CUDACC__ 1 + +#undef __DEVICE_ATOMIC_FUNCTIONS_HPP__ +#include "device_atomic_functions.hpp" + +#undef __SM_20_ATOMIC_FUNCTIONS_HPP__ +#include "sm_20_atomic_functions.hpp" +#undef __SM_32_ATOMIC_FUNCTIONS_HPP__ +#include "sm_32_atomic_functions.hpp" + +#undef __SM_20_INTRINSICS_HPP__ +#include "sm_20_intrinsics.hpp" +#undef __SM_30_INTRINSICS_HPP__ +#include "sm_30_intrinsics.hpp" +#undef __SM_32_INTRINSICS_HPP__ +#include "sm_32_intrinsics.hpp" + +#pragma pop_macro("__CUDACC__") + +// Cuda headers pull in stdlib.h on the host side of compilation, and +// a lot of existing CUDA code assumes it. Because clang sees both +// host and device side of CUDA code simultaneously, we've got to +// include stdlib.h on device side as well. + +#include + +#endif // __CUDA_ARCH__ +#endif // __PTX__ +#endif // __CLANG_CUDA_SUPPORT_H__ Index: lib/Headers/CMakeLists.txt === --- lib/Headers/CMakeLists.txt +++ lib/Headers/CMakeLists.txt @@ -17,6 +17,7 @@ bmiintrin.h cpuid.h cuda_builtin_vars.h + clang_cuda_support.h emmintrin.h f16cintrin.h float.h ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi
[PATCH] D13171: [CUDA] Added a wrapper header for inclusion of stock CUDA headers.
tra created this revision. tra added reviewers: echristo, eliben, jholewinski. tra added a subscriber: cfe-commits. Header files that come with CUDA are assuming split host/device compilation and are not usable by clang out of the box. With a bit of preprocessor magic it's possible to twist them into a form usable by clang after D13170 and D13144 land. http://reviews.llvm.org/D13171 Files: lib/Headers/CMakeLists.txt lib/Headers/clang_cuda_support.h Index: lib/Headers/clang_cuda_support.h === --- /dev/null +++ lib/Headers/clang_cuda_support.h @@ -0,0 +1,119 @@ +#ifndef __CLANG_CUDA_SUPPORT_H__ +#define __CLANG_CUDA_SUPPORT_H__ + +#if defined(__PTX__) + +#define __NVCC__ 1 +#if defined(__CUDA_ARCH__) +#define __CUDABE__ 1 +#else +#define __CUDACC__ 1 +#endif + +// Fake include guards to prevent inclusion of some CUDA headers. +#define __HOST_DEFINES_H__ +#define __DEVICE_LAUNCH_PARAMETERS_H__ +#define __TEXTURE_INDIRECT_FUNCTIONS_HPP__ +#define __SURFACE_INDIRECT_FUNCTIONS_HPP__ + +// Standard CUDA attributes +#define __constant__ __attribute__((constant)) +#define __device__ __attribute__((device)) +#define __global__ __attribute__((global)) +#define __host__ __attribute__((host)) +#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__))) +#define __shared__ __attribute__((shared)) + +// Additional macros used throughout CUDA headers. +#define __align__(x) __attribute__((aligned(x))) +#define __builtin_align__(x) __align__(x) +#define __cudart_builtin__ +#define __device_builtin__ +#define __forceinline__ __inline__ __attribute__((always_inline)) + +#define CUDARTAPI +#define _CRTIMP + +// Texture and surface types are not supported yet. +#define __device_builtin_surface_type__ +#define __device_builtin_texture_type__ + +// Inclde support for built-in variables. +#include "cuda_builtin_vars.h" + +// CUDA headers were implemented with the assumption of split-mode +// compilation and present CUDA functions differently for host and +// device mode. 
Typically in host mode they provide declarations with +// __device__ attribute attached. In device mode we get definitions +// but *without* __device__ attribute. This does not work well in +// combined compilation mode used by clang, so we have to trick CUDA +// headers into something we can use. + +// WARNING: Preprocessor hacks below are based on specific of +// implementation of CUDA-7.0 headers and are expected to break with +// any other version of CUDA headers. + +// libdevice functions in device_functions_decls.h either come with +// __host__ __device__ attributes or with none at all. Temporarily +// undefine __host__ so only __device__ is applied. +#define __CUDACC_RTC__ +#pragma push_macro("__host__") +#define __host__ +#include "device_functions_decls.h" +#pragma pop_macro("__host__") +#undef __CUDACC_RTC__ + +#include "cuda_runtime.h" +#include "crt/device_runtime.h" + +#if defined(__CUDA_ARCH__) +// device_functions.hpp and math_functions*.hpp use 'static +// __forceinline__' (with no __device__) for definitions of device +// functions. Temporarily redefine __forceinline__ to include +// __device__. +#pragma push_macro("__forceinline__") +#define __forceinline__ __device__ __inline__ __attribute__((always_inline)) +#include "device_functions.h" +#include "math_functions.h" +#pragma pop_macro("__forceinline__") +#else +#include "device_functions.h" +#include "math_functions.h" +#endif + +#if defined(__CUDA_ARCH__) +// Definitions for device specific functions are provided only if +// __CUDACC__ is defined. Alas, they've already been transiently +// included by device_functions.h and are now behind include guards. +// We need to temporarily define __CUDACC__, undo include guards and +// include the files with implmentation of these functions. 
+ +#pragma push_macro("__CUDACC__") +#define __CUDACC__ 1 +#undef __DEVICE_ATOMIC_FUNCTIONS_HPP__ +#include "device_atomic_functions.hpp" + +#undef __SM_20_ATOMIC_FUNCTIONS_HPP__ +#include "sm_20_atomic_functions.hpp" +#undef __SM_32_ATOMIC_FUNCTIONS_HPP__ +#include "sm_32_atomic_functions.hpp" + +#undef __SM_20_INTRINSICS_HPP__ +#include "sm_20_intrinsics.hpp" +#undef __SM_30_INTRINSICS_HPP__ +#include "sm_30_intrinsics.hpp" +#undef __SM_32_INTRINSICS_HPP__ +#include "sm_32_intrinsics.hpp" + +#pragma pop_macro("__CUDACC__") + +// Cuda headers pull in stdlib.h on the host side of compilation, and +// a lot of existing CUDA code assumes it. Because clang sees both +// host and device side of CUDA code simultaneously, we've got to +// include stdlib.h on device side as well. + +#include + +#endif // __CUDA_ARCH__ +#endif // __PTX__ +#endif // __CLANG_CUDA_SUPPORT_H__ Index: lib/Headers/CMakeLists.txt === --- lib/Headers/CMakeLists.txt +++ lib/Headers/CMakeLists.txt @@ -17,6 +17,7 @@ bmiintrin.h cpuid.h cuda_builtin_vars.h + clang_cuda_support.h emmintr