[PATCH] D151904: [clang-repl][CUDA] Add an unit test for interactive CUDA

2023-08-24 Thread Anubhab Ghosh via Phabricator via cfe-commits
argentite added inline comments.



Comment at: clang/unittests/Interpreter/InteractiveCudaTest.cpp:92
+  std::unique_ptr<clang::Interpreter> Interp = createInterpreter();
+  auto Err = Interp->LoadDynamicLibrary("libcudart.so");
+  if (Err) { // CUDA runtime is not installed/usable, cannot continue testing

tra wrote:
> argentite wrote:
> > tra wrote:
> > > This could be a bit of a problem.
> > > 
> > > There may be multiple CUDA SDK versions that may be installed on a system 
> > > at any given time and the libcudart.so you pick here may not be the one 
> > > you want.
> > > E.g it may be from a recent CUDA version which is not supported by NVIDIA 
> > > drivers yet. 
> > > 
> > > I think you may need a way to let the user override CUDA SDK (or 
> > > libcudart.so) location explicitly. I guess they could do that via 
> > > LD_LIBRARY_PATH, but for the CUDA compilation in general, knowing CUDA 
> > > SDK path is essential, as it does affect various compilation options set 
> > > by the driver.
> > > 
> > Yes, this probably would be an issue. It is currently possible to override 
> > the CUDA path with a command line argument in clang-repl. But I am not sure 
> > what we can do inside a test.
> To me it looks like CUDA location should be detected/set at the configuration 
> time and then propagated to the individual tests that need that info.
> CMake has cuda detection mechanisms that could be used for that purpose.
> They are a bit of a pain to use in practice (I'm still not sure what's the 
> reliable way to do it), but it's as close to the 'standard' way of doing it 
> as we have at the moment.
> I believe libc and mlir subtrees in LLVM are already using this mechanism. 
> E.g 
> https://github.com/llvm/llvm-project/blob/main/libc/utils/gpu/loader/CMakeLists.txt#L16
This version uses CMake to detect the paths, as in the example above. I guess 
this ties the test to the build system, but that should be fine for most cases. 
Unfortunately, I don't think we can use the same trick to determine the path in 
clang-repl itself, because the interpreter is likely to run on a different 
system from the one it was configured and built on.
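
For illustration, the configure-time wiring could look roughly like the sketch 
below. It is only a sketch: it assumes CMake's FindCUDAToolkit module and a test 
target named ClangReplInterpreterTests, and the actual CMakeLists.txt change in 
this revision may differ in the details.

  # Hypothetical sketch: detect the CUDA runtime at configure time and hand its
  # location to the unit test as a compile definition.
  find_package(CUDAToolkit QUIET)
  if(CUDAToolkit_FOUND)
    # CUDAToolkit_LIBRARY_DIR is set by FindCUDAToolkit and points at the
    # directory containing libcudart.so.
    target_compile_definitions(ClangReplInterpreterTests PRIVATE
      "LIBCUDART_PATH=\"${CUDAToolkit_LIBRARY_DIR}/libcudart.so\"")
  endif()

The test then only tries to load the runtime when LIBCUDART_PATH is defined, 
matching the #ifdef guard in the updated diff below.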


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D151904/new/

https://reviews.llvm.org/D151904


[PATCH] D151904: [clang-repl][CUDA] Add an unit test for interactive CUDA

2023-08-24 Thread Anubhab Ghosh via Phabricator via cfe-commits
argentite edited the summary of this revision.
argentite updated this revision to Diff 553197.

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D151904/new/

https://reviews.llvm.org/D151904

Files:
  clang/unittests/Interpreter/CMakeLists.txt
  clang/unittests/Interpreter/InteractiveCudaTest.cpp

Index: clang/unittests/Interpreter/InteractiveCudaTest.cpp
===================================================================
--- /dev/null
+++ clang/unittests/Interpreter/InteractiveCudaTest.cpp
@@ -0,0 +1,147 @@
+//===- unittests/Interpreter/CudaTest.cpp --- Interactive CUDA tests --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Unit tests for interactive CUDA in Clang interpreter
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
+
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Interpreter/Interpreter.h"
+
+#include "llvm/Support/TargetSelect.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+std::string MakeResourcesPath() {
+  using namespace llvm;
+#ifdef LLVM_BINARY_DIR
+  StringRef Dir = LLVM_BINARY_DIR;
+#else
+  // Dir is bin/ or lib/, depending on where BinaryPath is.
+  void *MainAddr = (void *)(intptr_t)MakeResourcesPath;
+  std::string BinaryPath =
+      llvm::sys::fs::getMainExecutable(/*Argv0=*/nullptr, MainAddr);
+
+  // build/tools/clang/unittests/Interpreter/Executable -> build/
+  StringRef Dir = sys::path::parent_path(BinaryPath);
+
+  Dir = sys::path::parent_path(Dir);
+  Dir = sys::path::parent_path(Dir);
+  Dir = sys::path::parent_path(Dir);
+  Dir = sys::path::parent_path(Dir);
+#endif // LLVM_BINARY_DIR
+  SmallString<128> P(Dir);
+  sys::path::append(P, CLANG_INSTALL_LIBDIR_BASENAME, "clang",
+                    CLANG_VERSION_MAJOR_STRING);
+  return P.str().str();
+}
+
+static std::unique_ptr<clang::Interpreter>
+createInterpreter(const std::vector<const char *> &ExtraArgs = {}) {
+  static bool firstrun = true;
+  if (firstrun) {
+    llvm::InitializeAllTargetInfos();
+    llvm::InitializeAllTargets();
+    llvm::InitializeAllTargetMCs();
+    llvm::InitializeAllAsmPrinters();
+
+    firstrun = false;
+  }
+
+  clang::IncrementalCompilerBuilder CB;
+
+  // Help find cuda's runtime headers.
+  std::string ResourceDir = MakeResourcesPath();
+
+  std::vector<const char *> Args = {"-resource-dir", ResourceDir.c_str(),
+                                    "-std=c++20"};
+  Args.insert(Args.end(), ExtraArgs.begin(), ExtraArgs.end());
+  CB.SetCompilerArgs(Args);
+
+  // Create the device code compiler
+  std::unique_ptr<clang::CompilerInstance> DeviceCI;
+  CB.SetOffloadArch("sm_35");
+  DeviceCI = cantFail(CB.CreateCudaDevice());
+
+  std::unique_ptr<clang::CompilerInstance> CI;
+  CI = cantFail(CB.CreateCudaHost());
+
+  auto Interp = cantFail(
+      clang::Interpreter::createWithCUDA(std::move(CI), std::move(DeviceCI)));
+
+  return Interp;
+}
+
+enum {
+  // Defined in CUDA Runtime API
+  cudaErrorNoDevice = 100,
+};
+
+TEST(InteractiveCudaTest, Sanity) {
+  std::unique_ptr<clang::Interpreter> Interp = createInterpreter();
+
+#ifdef LIBCUDART_PATH
+  auto Err = Interp->LoadDynamicLibrary(LIBCUDART_PATH);
+  if (Err) { // CUDA runtime is not usable, cannot continue testing
+    consumeError(std::move(Err));
+    return;
+  }
+#else
+  return;
+#endif
+
+  // Check if we have any GPU for test
+  int CudaError = 0;
+  auto GpuCheckCommand = std::string(R"(
+int device_id = -1;
+int *error = (int *))" + std::to_string((uintptr_t)&CudaError) +
+ R"(;
+*error = cudaGetDevice(&device_id);
+  )");
+  cantFail(Interp->ParseAndExecute(GpuCheckCommand));
+  if (CudaError == cudaErrorNoDevice) {
+    // No GPU is available on this machine, cannot continue testing
+    return;
+  }
+  ASSERT_EQ(CudaError, 0);
+
+  int HostSum = 0;
+  auto Command1 = std::string(R"(
+__host__ __device__ inline int sum(int a, int b){ return a + b; }
+__global__ void kernel(int * output){ *output = sum(40,2); }
+int *hostsum = (int *) )") +
+  std::to_string((uintptr_t)&HostSum) +
+  R"(;
+*hostsum = sum(41,1);)";
+  cantFail(Interp->ParseAndExecute(Command1));
+
+  int DeviceSum = 0;
+  auto Command2 = std::string(R"(
+int *devicesum = (int *))" +
+  std::to_string((uintptr_t)&DeviceSum) +
+  R"(;
+int *deviceVar;
+*error |= cudaMalloc((void **) &deviceVar, sizeof(int));
+kernel<<<1,1>>>(deviceVar);
+*error |= cudaGetLastError();
+*error |= cudaMemcpy(devicesum, deviceVar, sizeof(int), cudaMemcpyDeviceToHost);
+*error |= cudaGetLastError();
+)");
+  cantFail(Interp->ParseAndExecute(Command2));
+
+  ASSERT_EQ(HostSum, 42);
+  ASSERT_EQ(DeviceSum

[PATCH] D151904: [clang-repl][CUDA] Add an unit test for interactive CUDA

2023-08-24 Thread Vassil Vassilev via Phabricator via cfe-commits
v.g.vassilev added a comment.

@argentite ping.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D151904/new/

https://reviews.llvm.org/D151904


[PATCH] D151904: [clang-repl][CUDA] Add an unit test for interactive CUDA

2023-08-13 Thread Vassil Vassilev via Phabricator via cfe-commits
v.g.vassilev added a comment.

@argentite what is the fate of this patch? Should we move forward with it?


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D151904/new/

https://reviews.llvm.org/D151904


[PATCH] D151904: [clang-repl][CUDA] Add an unit test for interactive CUDA

2023-06-01 Thread Artem Belevich via Phabricator via cfe-commits
tra added inline comments.



Comment at: clang/unittests/Interpreter/InteractiveCudaTest.cpp:92
+  std::unique_ptr<clang::Interpreter> Interp = createInterpreter();
+  auto Err = Interp->LoadDynamicLibrary("libcudart.so");
+  if (Err) { // CUDA runtime is not installed/usable, cannot continue testing

argentite wrote:
> tra wrote:
> > This could be a bit of a problem.
> > 
> > There may be multiple CUDA SDK versions that may be installed on a system 
> > at any given time and the libcudart.so you pick here may not be the one you 
> > want.
> > E.g it may be from a recent CUDA version which is not supported by NVIDIA 
> > drivers yet. 
> > 
> > I think you may need a way to let the user override CUDA SDK (or 
> > libcudart.so) location explicitly. I guess they could do that via 
> > LD_LIBRARY_PATH, but for the CUDA compilation in general, knowing CUDA SDK 
> > path is essential, as it does affect various compilation options set by the 
> > driver.
> > 
> Yes, this probably would be an issue. It is currently possible to override 
> the CUDA path with a command line argument in clang-repl. But I am not sure 
> what we can do inside a test.
To me it looks like the CUDA location should be detected/set at configuration 
time and then propagated to the individual tests that need that info.
CMake has CUDA detection mechanisms that could be used for that purpose.
They are a bit of a pain to use in practice (I'm still not sure what the 
reliable way to do it is), but it's as close to the 'standard' way of doing it 
as we have at the moment.
I believe the libc and mlir subtrees in LLVM are already using this mechanism, 
e.g. 
https://github.com/llvm/llvm-project/blob/main/libc/utils/gpu/loader/CMakeLists.txt#L16


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D151904/new/

https://reviews.llvm.org/D151904


[PATCH] D151904: [clang-repl][CUDA] Add an unit test for interactive CUDA

2023-06-01 Thread Anubhab Ghosh via Phabricator via cfe-commits
argentite added inline comments.



Comment at: clang/unittests/Interpreter/InteractiveCudaTest.cpp:92
+  std::unique_ptr<clang::Interpreter> Interp = createInterpreter();
+  auto Err = Interp->LoadDynamicLibrary("libcudart.so");
+  if (Err) { // CUDA runtime is not installed/usable, cannot continue testing

tra wrote:
> This could be a bit of a problem.
> 
> There may be multiple CUDA SDK versions that may be installed on a system at 
> any given time and the libcudart.so you pick here may not be the one you want.
> E.g it may be from a recent CUDA version which is not supported by NVIDIA 
> drivers yet. 
> 
> I think you may need a way to let the user override CUDA SDK (or 
> libcudart.so) location explicitly. I guess they could do that via 
> LD_LIBRARY_PATH, but for the CUDA compilation in general, knowing CUDA SDK 
> path is essential, as it does affect various compilation options set by the 
> driver.
> 
Yes, this probably would be an issue. It is currently possible to override the 
CUDA path with a command line argument in clang-repl. But I am not sure what we 
can do inside a test.
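
(For reference, the command-line argument mentioned here is presumably clang's 
--cuda-path flag. Purely as an illustration, and not part of this patch, a test 
could forward such an override through the ExtraArgs parameter of the 
createInterpreter() helper in this file; the SDK path below is a placeholder 
that would have to come from the user or the build configuration.)

  // Hypothetical fragment: pin the CUDA SDK used by the frontend by forwarding
  // clang's --cuda-path flag through the test helper's ExtraArgs parameter.
  // "/usr/local/cuda" is a placeholder, not a detection mechanism.
  std::unique_ptr<clang::Interpreter> Interp =
      createInterpreter({"--cuda-path=/usr/local/cuda"});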


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D151904/new/

https://reviews.llvm.org/D151904


[PATCH] D151904: [clang-repl][CUDA] Add an unit test for interactive CUDA

2023-06-01 Thread Artem Belevich via Phabricator via cfe-commits
tra added inline comments.



Comment at: clang/unittests/Interpreter/InteractiveCudaTest.cpp:92
+  std::unique_ptr<clang::Interpreter> Interp = createInterpreter();
+  auto Err = Interp->LoadDynamicLibrary("libcudart.so");
+  if (Err) { // CUDA runtime is not installed/usable, cannot continue testing

This could be a bit of a problem.

Multiple CUDA SDK versions may be installed on a system at any given time, and 
the libcudart.so you pick up here may not be the one you want.
E.g., it may be from a recent CUDA version that is not yet supported by the 
NVIDIA driver.

I think you may need a way to let the user override the CUDA SDK (or 
libcudart.so) location explicitly. I guess they could do that via 
LD_LIBRARY_PATH, but for CUDA compilation in general, knowing the CUDA SDK path 
is essential, as it does affect various compilation options set by the driver.
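
To make the concern concrete, here is a hypothetical comparison using the 
interpreter API from this patch; the absolute path is only a placeholder, not a 
suggestion for a hard-coded default:

  // As written in the patch: the bare soname is resolved through the dynamic
  // loader's default search order (LD_LIBRARY_PATH, ldconfig cache, ...), so
  // whichever libcudart.so is found first gets loaded.
  auto Err = Interp->LoadDynamicLibrary("libcudart.so");

  // With an explicit override, the user (or build system) pins one runtime.
  auto ErrOverride =
      Interp->LoadDynamicLibrary("/usr/local/cuda/lib64/libcudart.so");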



Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D151904/new/

https://reviews.llvm.org/D151904


[PATCH] D151904: [clang-repl][CUDA] Add an unit test for interactive CUDA

2023-06-01 Thread Anubhab Ghosh via Phabricator via cfe-commits
argentite created this revision.
argentite added a reviewer: v.g.vassilev.
Herald added subscribers: mattd, carlosgalvezp, yaxunl.
Herald added a project: All.
argentite requested review of this revision.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

This tests interactive CUDA support in the Clang interpreter and should pass
even if the CUDA runtime is not available or no GPU is present.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D151904

Files:
  clang/unittests/Interpreter/CMakeLists.txt
  clang/unittests/Interpreter/InteractiveCudaTest.cpp

Index: clang/unittests/Interpreter/InteractiveCudaTest.cpp
===================================================================
--- /dev/null
+++ clang/unittests/Interpreter/InteractiveCudaTest.cpp
@@ -0,0 +1,142 @@
+//===- unittests/Interpreter/CudaTest.cpp --- Interactive CUDA tests --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Unit tests for interactive CUDA in Clang interpreter
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
+
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Interpreter/Interpreter.h"
+
+#include "llvm/Support/TargetSelect.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+std::string MakeResourcesPath() {
+  using namespace llvm;
+#ifdef LLVM_BINARY_DIR
+  StringRef Dir = LLVM_BINARY_DIR;
+#else
+  // Dir is bin/ or lib/, depending on where BinaryPath is.
+  void *MainAddr = (void *)(intptr_t)MakeResourcesPath;
+  std::string BinaryPath =
+      llvm::sys::fs::getMainExecutable(/*Argv0=*/nullptr, MainAddr);
+
+  // build/tools/clang/unittests/Interpreter/Executable -> build/
+  StringRef Dir = sys::path::parent_path(BinaryPath);
+
+  Dir = sys::path::parent_path(Dir);
+  Dir = sys::path::parent_path(Dir);
+  Dir = sys::path::parent_path(Dir);
+  Dir = sys::path::parent_path(Dir);
+#endif // LLVM_BINARY_DIR
+  SmallString<128> P(Dir);
+  sys::path::append(P, CLANG_INSTALL_LIBDIR_BASENAME, "clang",
+                    CLANG_VERSION_MAJOR_STRING);
+  return P.str().str();
+}
+
+static std::unique_ptr<clang::Interpreter>
+createInterpreter(const std::vector<const char *> &ExtraArgs = {}) {
+  static bool firstrun = true;
+  if (firstrun) {
+    llvm::InitializeAllTargetInfos();
+    llvm::InitializeAllTargets();
+    llvm::InitializeAllTargetMCs();
+    llvm::InitializeAllAsmPrinters();
+
+    firstrun = false;
+  }
+
+  clang::IncrementalCompilerBuilder CB;
+
+  // Help find cuda's runtime headers.
+  std::string ResourceDir = MakeResourcesPath();
+
+  std::vector<const char *> Args = {"-resource-dir", ResourceDir.c_str(),
+                                    "-std=c++20"};
+  Args.insert(Args.end(), ExtraArgs.begin(), ExtraArgs.end());
+  CB.SetCompilerArgs(Args);
+
+  // Create the device code compiler
+  std::unique_ptr<clang::CompilerInstance> DeviceCI;
+  CB.SetOffloadArch("sm_35");
+  DeviceCI = cantFail(CB.CreateCudaDevice());
+
+  std::unique_ptr<clang::CompilerInstance> CI;
+  CI = cantFail(CB.CreateCudaHost());
+
+  auto Interp = cantFail(
+      clang::Interpreter::createWithCUDA(std::move(CI), std::move(DeviceCI)));
+
+  return Interp;
+}
+
+enum {
+  // Defined in CUDA Runtime API
+  cudaErrorNoDevice = 100,
+};
+
+TEST(InteractiveCudaTest, Sanity) {
+  std::unique_ptr<clang::Interpreter> Interp = createInterpreter();
+  auto Err = Interp->LoadDynamicLibrary("libcudart.so");
+  if (Err) { // CUDA runtime is not installed/usable, cannot continue testing
+    consumeError(std::move(Err));
+    return;
+  }
+
+  // Check if we have any GPU for test
+  int CudaError = 0;
+  auto GpuCheckCommand = std::string(R"(
+int device_id = -1;
+int *error = (int *))" + std::to_string((uintptr_t)&CudaError) +
+ R"(;
+*error = cudaGetDevice(&device_id);
+  )");
+  cantFail(Interp->ParseAndExecute(GpuCheckCommand));
+  if (CudaError == cudaErrorNoDevice) {
+    // No GPU is available on this machine, cannot continue testing
+    return;
+  }
+  ASSERT_EQ(CudaError, 0);
+
+  int HostSum = 0;
+  auto Command1 = std::string(R"(
+__host__ __device__ inline int sum(int a, int b){ return a + b; }
+__global__ void kernel(int * output){ *output = sum(40,2); }
+int *hostsum = (int *) )") +
+  std::to_string((uintptr_t)&HostSum) +
+  R"(;
+*hostsum = sum(41,1);)";
+  cantFail(Interp->ParseAndExecute(Command1));
+
+  int DeviceSum = 0;
+  auto Command2 = std::string(R"(
+int *devicesum = (int *))" +
+  std::to_string((uintptr_t)&DeviceSum) +
+  R"(;
+int *deviceVar;
+*error |= cudaMalloc((void **) &deviceVar, sizeof(int));
+kernel<<<1,1>>>(deviceVar);
+*error |= cudaGetLastError();
+*error |=