[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934117229


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTVM.cc:
##
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "../../../runtime/graph_executor/graph_executor_factory.h"
+#include "../base64.h"
+#include "runtime_bridge.h"
+
+struct ThreadLocalStore {
+  tvm::runtime::Module mod;
+  static ThreadLocalStore* ThreadLocal() {
+thread_local ThreadLocalStore tls;
+return &tls;
+  }
+};
+
+namespace tvm {
+namespace contrib {
+
+std::string serialize(tvm::runtime::Module module) {
+  static const runtime::PackedFunc* f_to_str =
+  runtime::Registry::Get("script_torch.save_to_base64");
+  ICHECK(f_to_str) << "IndexError: Cannot find the packed function "
+  "`script_torch.save_to_base64` in the global registry";
+  return (*f_to_str)(module);
+}
+
+struct Deleter {  // deleter
+  explicit Deleter(std::string file_name) { this->file_name = file_name; }
+  void operator()(FILE* p) const {
+fclose(p);
+ICHECK(remove(file_name.c_str()) == 0)
+<< "remove temporary file (" << file_name << ") unsuccessfully";
+  }
+  std::string file_name;
+};
+
+tvm::runtime::Module deserialize(std::string state) {
+  auto length = tvm::support::b64strlen(state);
+
+  std::vector bytes(length);
+  tvm::support::b64decode(state, bytes.data());
+
+  const std::string name = tmpnam(NULL);
+  auto file_name = name + ".so";
+  std::unique_ptr pFile(fopen(file_name.c_str(), "wb"), 
Deleter(file_name));
+  fwrite(bytes.data(), sizeof(u_char), length, pFile.get());
+  fflush(pFile.get());
+
+  std::string load_f_name = "runtime.module.loadfile_so";
+  const PackedFunc* f = runtime::Registry::Get(load_f_name);
+  ICHECK(f != nullptr) << "Loader for `.so` files is not registered,"
+   << " resolved to (" << load_f_name << ") in the global 
registry."
+   << "Ensure that you have loaded the correct runtime 
code, and"
+   << "that you are on the correct hardware architecture.";
+
+  tvm::runtime::Module ret = (*f)(file_name, "");
+
+  return ret;
+}
+
+tvm::Device getDeviceInfo(DLManagedTensor* input_device) {
+  return {.device_type = input_device->dl_tensor.device.device_type,
+  .device_id = input_device->dl_tensor.device.device_id};
+}
+
+TVM_REGISTER_GLOBAL("tvmtorch.save_runtime_mod").set_body_typed([](tvm::runtime::Module
 mod) {
+  ThreadLocalStore::ThreadLocal()->mod = mod;
+});
+
+}  // namespace contrib
+}  // namespace tvm
+
+extern "C" {
+
+struct TVMContribTorchRuntimeModule {
+  tvm::runtime::Module mod;
+
+  explicit TVMContribTorchRuntimeModule(tvm::runtime::Module mod) : mod(mod) {}
+};
+
+TVMContribTorchRuntimeModule* 
tvm_contrib_torch_get_last_saved_runtime_module() {
+  return new 
TVMContribTorchRuntimeModule(ThreadLocalStore::ThreadLocal()->mod);
+}
+
+void tvm_contrib_torch_operator_module_forward(TVMContribTorchRuntimeModule* 
runtime_module,
+   DLPackTensorExt* inputs, size_t 
input_size) {
+  tvm::runtime::PackedFunc run = 
runtime_module->mod.GetFunction("__tvm_main__");
+
+  std::vector tvm_values(input_size);
+  std::vector tvm_type_codes(input_size);
+  tvm::runtime::TVMArgsSetter setter(tvm_values.data(), tvm_type_codes.data());
+  for (int k = 0; k < input_size; ++k) {
+setter(k, &inputs[k].dl_managed_tensor->dl_tensor);
+  }
+  run.CallPacked(tvm::runtime::TVMArgs(tvm_values.data(), 
tvm_type_codes.data(), input_size),
+ nullptr);
+}
+
+int64_t 
tvm_contrib_torch_graph_executor_module_forward(TVMContribTorchRuntimeModule* 
graph_module,
+DLPackTensorExt* 
inputs, size_t input_size,
+DLPackTensorExt** 
outputs) {
+  tvm::runtime::PackedFunc built_module = 
graph_module->mod.GetFunction("default");
+  auto device_info = tvm::contrib::getDeviceInfo(inputs[0].dl_managed_tensor);
+  tvm::runtime::

[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934107880


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTVM.cc:
##
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "../../../runtime/graph_executor/graph_executor_factory.h"
+#include "../base64.h"
+#include "runtime_bridge.h"
+
+struct ThreadLocalStore {
+  tvm::runtime::Module mod;
+  static ThreadLocalStore* ThreadLocal() {
+thread_local ThreadLocalStore tls;
+return &tls;
+  }
+};
+
+namespace tvm {
+namespace contrib {
+
+std::string serialize(tvm::runtime::Module module) {
+  static const runtime::PackedFunc* f_to_str =
+  runtime::Registry::Get("script_torch.save_to_base64");
+  ICHECK(f_to_str) << "IndexError: Cannot find the packed function "
+  "`script_torch.save_to_base64` in the global registry";
+  return (*f_to_str)(module);
+}
+
+struct Deleter {  // deleter
+  explicit Deleter(std::string file_name) { this->file_name = file_name; }
+  void operator()(FILE* p) const {
+fclose(p);
+ICHECK(remove(file_name.c_str()) == 0)
+<< "remove temporary file (" << file_name << ") unsuccessfully";
+  }
+  std::string file_name;
+};
+
+tvm::runtime::Module deserialize(std::string state) {
+  auto length = tvm::support::b64strlen(state);
+
+  std::vector bytes(length);
+  tvm::support::b64decode(state, bytes.data());
+
+  const std::string name = tmpnam(NULL);
+  auto file_name = name + ".so";
+  std::unique_ptr pFile(fopen(file_name.c_str(), "wb"), 
Deleter(file_name));
+  fwrite(bytes.data(), sizeof(u_char), length, pFile.get());
+  fflush(pFile.get());
+
+  std::string load_f_name = "runtime.module.loadfile_so";
+  const PackedFunc* f = runtime::Registry::Get(load_f_name);
+  ICHECK(f != nullptr) << "Loader for `.so` files is not registered,"
+   << " resolved to (" << load_f_name << ") in the global 
registry."
+   << "Ensure that you have loaded the correct runtime 
code, and"
+   << "that you are on the correct hardware architecture.";
+
+  tvm::runtime::Module ret = (*f)(file_name, "");
+
+  return ret;
+}
+
+tvm::Device getDeviceInfo(DLManagedTensor* input_device) {
+  return {.device_type = input_device->dl_tensor.device.device_type,
+  .device_id = input_device->dl_tensor.device.device_id};
+}
+
+TVM_REGISTER_GLOBAL("tvmtorch.save_runtime_mod").set_body_typed([](tvm::runtime::Module
 mod) {
+  ThreadLocalStore::ThreadLocal()->mod = mod;
+});
+
+}  // namespace contrib
+}  // namespace tvm
+
+extern "C" {
+
+struct TVMContribTorchRuntimeModule {
+  tvm::runtime::Module mod;
+
+  explicit TVMContribTorchRuntimeModule(tvm::runtime::Module mod) : mod(mod) {}
+};
+
+TVMContribTorchRuntimeModule* 
tvm_contrib_torch_get_last_saved_runtime_module() {
+  return new 
TVMContribTorchRuntimeModule(ThreadLocalStore::ThreadLocal()->mod);
+}
+
+void tvm_contrib_torch_operator_module_forward(TVMContribTorchRuntimeModule* 
runtime_module,
+   DLPackTensorExt* inputs, size_t 
input_size) {
+  tvm::runtime::PackedFunc run = 
runtime_module->mod.GetFunction("__tvm_main__");
+
+  std::vector tvm_values(input_size);
+  std::vector tvm_type_codes(input_size);
+  tvm::runtime::TVMArgsSetter setter(tvm_values.data(), tvm_type_codes.data());
+  for (int k = 0; k < input_size; ++k) {
+setter(k, &inputs[k].dl_managed_tensor->dl_tensor);
+  }
+  run.CallPacked(tvm::runtime::TVMArgs(tvm_values.data(), 
tvm_type_codes.data(), input_size),
+ nullptr);
+}
+
+int64_t 
tvm_contrib_torch_graph_executor_module_forward(TVMContribTorchRuntimeModule* 
graph_module,
+DLPackTensorExt* 
inputs, size_t input_size,
+DLPackTensorExt** 
outputs) {
+  tvm::runtime::PackedFunc built_module = 
graph_module->mod.GetFunction("default");
+  auto device_info = tvm::contrib::getDeviceInfo(inputs[0].dl_managed_tensor);
+  tvm::runtime::

[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934092594


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTVM.cc:
##
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "../../../runtime/graph_executor/graph_executor_factory.h"
+#include "../base64.h"
+#include "runtime_bridge.h"
+
+struct ThreadLocalStore {
+  tvm::runtime::Module mod;
+  static ThreadLocalStore* ThreadLocal() {
+thread_local ThreadLocalStore tls;
+return &tls;
+  }
+};
+
+namespace tvm {
+namespace contrib {
+
+std::string serialize(tvm::runtime::Module module) {
+  static const runtime::PackedFunc* f_to_str =
+  runtime::Registry::Get("script_torch.save_to_base64");
+  ICHECK(f_to_str) << "IndexError: Cannot find the packed function "
+  "`script_torch.save_to_base64` in the global registry";
+  return (*f_to_str)(module);
+}
+
+struct Deleter {  // deleter
+  explicit Deleter(std::string file_name) { this->file_name = file_name; }
+  void operator()(FILE* p) const {
+fclose(p);
+ICHECK(remove(file_name.c_str()) == 0)
+<< "remove temporary file (" << file_name << ") unsuccessfully";
+  }
+  std::string file_name;
+};
+
+tvm::runtime::Module deserialize(std::string state) {
+  auto length = tvm::support::b64strlen(state);
+
+  std::vector bytes(length);
+  tvm::support::b64decode(state, bytes.data());
+
+  const std::string name = tmpnam(NULL);
+  auto file_name = name + ".so";
+  std::unique_ptr pFile(fopen(file_name.c_str(), "wb"), 
Deleter(file_name));
+  fwrite(bytes.data(), sizeof(u_char), length, pFile.get());
+  fflush(pFile.get());
+
+  std::string load_f_name = "runtime.module.loadfile_so";
+  const PackedFunc* f = runtime::Registry::Get(load_f_name);
+  ICHECK(f != nullptr) << "Loader for `.so` files is not registered,"
+   << " resolved to (" << load_f_name << ") in the global 
registry."
+   << "Ensure that you have loaded the correct runtime 
code, and"
+   << "that you are on the correct hardware architecture.";
+
+  tvm::runtime::Module ret = (*f)(file_name, "");
+
+  return ret;
+}
+
+tvm::Device getDeviceInfo(DLManagedTensor* input_device) {
+  return {.device_type = input_device->dl_tensor.device.device_type,
+  .device_id = input_device->dl_tensor.device.device_id};
+}
+
+TVM_REGISTER_GLOBAL("tvmtorch.save_runtime_mod").set_body_typed([](tvm::runtime::Module
 mod) {
+  ThreadLocalStore::ThreadLocal()->mod = mod;
+});
+
+}  // namespace contrib
+}  // namespace tvm
+
+extern "C" {
+
+struct TVMContribTorchRuntimeModule {
+  tvm::runtime::Module mod;
+
+  explicit TVMContribTorchRuntimeModule(tvm::runtime::Module mod) : mod(mod) {}
+};
+
+TVMContribTorchRuntimeModule* 
tvm_contrib_torch_get_last_saved_runtime_module() {
+  return new 
TVMContribTorchRuntimeModule(ThreadLocalStore::ThreadLocal()->mod);
+}
+
+void tvm_contrib_torch_operator_module_forward(TVMContribTorchRuntimeModule* 
runtime_module,
+   DLPackTensorExt* inputs, size_t 
input_size) {
+  tvm::runtime::PackedFunc run = 
runtime_module->mod.GetFunction("__tvm_main__");
+
+  std::vector tvm_values(input_size);
+  std::vector tvm_type_codes(input_size);
+  tvm::runtime::TVMArgsSetter setter(tvm_values.data(), tvm_type_codes.data());
+  for (int k = 0; k < input_size; ++k) {
+setter(k, &inputs[k].dl_managed_tensor->dl_tensor);
+  }
+  run.CallPacked(tvm::runtime::TVMArgs(tvm_values.data(), 
tvm_type_codes.data(), input_size),
+ nullptr);
+}
+
+int64_t 
tvm_contrib_torch_graph_executor_module_forward(TVMContribTorchRuntimeModule* 
graph_module,
+DLPackTensorExt* 
inputs, size_t input_size,
+DLPackTensorExt** 
outputs) {
+  tvm::runtime::PackedFunc built_module = 
graph_module->mod.GetFunction("default");
+  auto device_info = tvm::contrib::getDeviceInfo(inputs[0].dl_managed_tensor);
+  tvm::runtime::

[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934087603


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTVM.cc:
##
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "../../../runtime/graph_executor/graph_executor_factory.h"
+#include "../base64.h"
+#include "runtime_bridge.h"
+
+struct ThreadLocalStore {
+  tvm::runtime::Module mod;
+  static ThreadLocalStore* ThreadLocal() {
+thread_local ThreadLocalStore tls;
+return &tls;
+  }
+};
+
+namespace tvm {
+namespace contrib {
+
+std::string serialize(tvm::runtime::Module module) {
+  static const runtime::PackedFunc* f_to_str =
+  runtime::Registry::Get("script_torch.save_to_base64");
+  ICHECK(f_to_str) << "IndexError: Cannot find the packed function "
+  "`script_torch.save_to_base64` in the global registry";
+  return (*f_to_str)(module);
+}
+
+struct Deleter {  // deleter
+  explicit Deleter(std::string file_name) { this->file_name = file_name; }
+  void operator()(FILE* p) const {
+fclose(p);
+ICHECK(remove(file_name.c_str()) == 0)
+<< "remove temporary file (" << file_name << ") unsuccessfully";
+  }
+  std::string file_name;
+};
+
+tvm::runtime::Module deserialize(std::string state) {
+  auto length = tvm::support::b64strlen(state);
+
+  std::vector bytes(length);
+  tvm::support::b64decode(state, bytes.data());
+
+  const std::string name = tmpnam(NULL);
+  auto file_name = name + ".so";
+  std::unique_ptr pFile(fopen(file_name.c_str(), "wb"), 
Deleter(file_name));
+  fwrite(bytes.data(), sizeof(u_char), length, pFile.get());
+  fflush(pFile.get());
+
+  std::string load_f_name = "runtime.module.loadfile_so";
+  const PackedFunc* f = runtime::Registry::Get(load_f_name);
+  ICHECK(f != nullptr) << "Loader for `.so` files is not registered,"
+   << " resolved to (" << load_f_name << ") in the global 
registry."
+   << "Ensure that you have loaded the correct runtime 
code, and"
+   << "that you are on the correct hardware architecture.";
+
+  tvm::runtime::Module ret = (*f)(file_name, "");
+
+  return ret;
+}
+
+tvm::Device getDeviceInfo(DLManagedTensor* input_device) {
+  return {.device_type = input_device->dl_tensor.device.device_type,
+  .device_id = input_device->dl_tensor.device.device_id};
+}
+
+TVM_REGISTER_GLOBAL("tvmtorch.save_runtime_mod").set_body_typed([](tvm::runtime::Module
 mod) {
+  ThreadLocalStore::ThreadLocal()->mod = mod;
+});
+
+}  // namespace contrib
+}  // namespace tvm
+
+extern "C" {
+
+struct TVMContribTorchRuntimeModule {
+  tvm::runtime::Module mod;
+
+  explicit TVMContribTorchRuntimeModule(tvm::runtime::Module mod) : mod(mod) {}
+};
+
+TVMContribTorchRuntimeModule* 
tvm_contrib_torch_get_last_saved_runtime_module() {
+  return new 
TVMContribTorchRuntimeModule(ThreadLocalStore::ThreadLocal()->mod);
+}
+
+void tvm_contrib_torch_operator_module_forward(TVMContribTorchRuntimeModule* 
runtime_module,
+   DLPackTensorExt* inputs, size_t 
input_size) {
+  tvm::runtime::PackedFunc run = 
runtime_module->mod.GetFunction("__tvm_main__");
+
+  std::vector tvm_values(input_size);
+  std::vector tvm_type_codes(input_size);
+  tvm::runtime::TVMArgsSetter setter(tvm_values.data(), tvm_type_codes.data());
+  for (int k = 0; k < input_size; ++k) {
+setter(k, &inputs[k].dl_managed_tensor->dl_tensor);
+  }
+  run.CallPacked(tvm::runtime::TVMArgs(tvm_values.data(), 
tvm_type_codes.data(), input_size),
+ nullptr);
+}
+
+int64_t 
tvm_contrib_torch_graph_executor_module_forward(TVMContribTorchRuntimeModule* 
graph_module,
+DLPackTensorExt* 
inputs, size_t input_size,
+DLPackTensorExt** 
outputs) {
+  tvm::runtime::PackedFunc built_module = 
graph_module->mod.GetFunction("default");
+  auto device_info = tvm::contrib::getDeviceInfo(inputs[0].dl_managed_tensor);
+  tvm::runtime::

[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934087537


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTorch.cc:
##
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+
+#include 
+
+#include "runtime_bridge.h"
+
+namespace tvm {
+namespace contrib {
+
+DLPackTensorExt toDLPackExt(const at::Tensor& src) {
+  if (!src.is_contiguous()) {
+return toDLPackExt(src.contiguous());
+  }
+
+  if (src.dtype().isScalarType(torch::kBool)) {
+auto temp = src.toType(torch::kUInt8);
+return {.dl_managed_tensor = at::toDLPack(temp), .is_bool = true};

Review Comment:
   I switched it to another syntax



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934084833


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTorch.cc:
##
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+
+#include 
+
+#include "runtime_bridge.h"
+
+namespace tvm {
+namespace contrib {
+
+DLPackTensorExt toDLPackExt(const at::Tensor& src) {
+  if (!src.is_contiguous()) {
+return toDLPackExt(src.contiguous());
+  }
+
+  if (src.dtype().isScalarType(torch::kBool)) {
+auto temp = src.toType(torch::kUInt8);
+return {.dl_managed_tensor = at::toDLPack(temp), .is_bool = true};
+  }
+
+  return {.dl_managed_tensor = at::toDLPack(src), .is_bool = false};
+}
+
+at::Tensor fromDLPackExt(const DLPackTensorExt& src) {
+  if (src.is_bool) {
+return at::fromDLPack(src.dl_managed_tensor).toType(torch::kBool);
+  } else {
+return at::fromDLPack(src.dl_managed_tensor);
+  }
+}
+
+/**
+ * @brief A Torch's module which wraps TVM's OperatorModule Class.
+ * The basic forward function calling TVM's runtime is provided.
+ * The TVM module can be serialized/deserialized as a Torch module.
+ */
+class OperatorModuleWrapper : public torch::jit::CustomClassHolder {
+ public:
+  OperatorModuleWrapper() { runtime_module = 
tvm_contrib_torch_get_last_saved_runtime_module(); }
+
+  void forward(const c10::List& inputs) {
+int input_length = inputs.size();
+
+std::vector tensors;
+
+for (int i = 0; i < input_length; ++i) 
tensors.push_back(toDLPackExt(inputs[i]));
+tvm_contrib_torch_operator_module_forward(
+this->runtime_module, static_cast(tensors.data()), 
tensors.size());

Review Comment:
   No



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934084691


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTVM.cc:
##
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "../../../runtime/graph_executor/graph_executor_factory.h"
+#include "../base64.h"
+#include "runtime_bridge.h"
+
+struct ThreadLocalStore {

Review Comment:
   Done



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934084309


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTorch.cc:
##
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+
+#include 
+
+#include "runtime_bridge.h"
+
+namespace tvm {
+namespace contrib {
+
+DLPackTensorExt toDLPackExt(const at::Tensor& src) {
+  if (!src.is_contiguous()) {
+return toDLPackExt(src.contiguous());
+  }
+
+  if (src.dtype().isScalarType(torch::kBool)) {
+auto temp = src.toType(torch::kUInt8);
+return {.dl_managed_tensor = at::toDLPack(temp), .is_bool = true};
+  }
+
+  return {.dl_managed_tensor = at::toDLPack(src), .is_bool = false};
+}
+
+at::Tensor fromDLPackExt(const DLPackTensorExt& src) {
+  if (src.is_bool) {
+return at::fromDLPack(src.dl_managed_tensor).toType(torch::kBool);
+  } else {
+return at::fromDLPack(src.dl_managed_tensor);
+  }
+}
+
+/**
+ * @brief A Torch's module which wraps TVM's OperatorModule Class.
+ * The basic forward function calling TVM's runtime is provided.
+ * The TVM module can be serialized/deserialized as a Torch module.
+ */
+class OperatorModuleWrapper : public torch::jit::CustomClassHolder {
+ public:
+  OperatorModuleWrapper() { runtime_module = 
tvm_contrib_torch_get_last_saved_runtime_module(); }
+
+  void forward(const c10::List& inputs) {
+int input_length = inputs.size();
+
+std::vector tensors;
+
+for (int i = 0; i < input_length; ++i) 
tensors.push_back(toDLPackExt(inputs[i]));
+tvm_contrib_torch_operator_module_forward(
+this->runtime_module, static_cast(tensors.data()), 
tensors.size());
+
+for (int k = 0; k < input_length; ++k) {
+  tensors[k].dl_managed_tensor->deleter(tensors[k].dl_managed_tensor);
+}
+  }
+
+  std::string Serialize() { return 
std::string(tvm_contrib_torch_encode(runtime_module)); }
+
+  explicit OperatorModuleWrapper(std::string state) {
+runtime_module = tvm_contrib_torch_decode(state.c_str());
+  }
+
+ private:
+  TVMContribTorchRuntimeModule* runtime_module;
+};
+
+/**
+ * @brief A Torch's module which wraps TVM's GraphExecutorFactory Class.
+ * The basic forward function calling TVM's runtime is provided.
+ * The TVM module can be serialized/deserialized as a Torch module.
+ */
+class GraphExecutorFactoryWrapper : public torch::jit::CustomClassHolder {
+ public:
+  explicit GraphExecutorFactoryWrapper(TVMContribTorchRuntimeModule* 
executor_factory)
+  : executor_factory_(executor_factory) {}
+
+  GraphExecutorFactoryWrapper()
+  : 
GraphExecutorFactoryWrapper(tvm_contrib_torch_get_last_saved_runtime_module()) 
{}
+  std::string Serialize() { return 
tvm_contrib_torch_encode(executor_factory_); }
+
+  explicit GraphExecutorFactoryWrapper(std::string state) {
+executor_factory_ = tvm_contrib_torch_decode(state.c_str());
+  }
+
+  c10::List forward(const c10::List& inputs) {
+int input_length = inputs.size();
+
+TORCH_CHECK(input_length > 0, "Receive empty list of input tensors");
+
+std::vector tensors;
+
+for (int i = 0; i < input_length; ++i) 
tensors.push_back(toDLPackExt(inputs[i]));
+
+auto outputs = new DLPackTensorExt*;
+
+auto num_outputs = tvm_contrib_torch_graph_executor_module_forward(
+executor_factory_, static_cast(tensors.data()), 
tensors.size(), outputs);

Review Comment:
   Fixed



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934081871


##
src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTorch.cc:
##
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include 
+#include 
+#include 
+
+#include 
+
+#include "runtime_bridge.h"
+
+namespace tvm {
+namespace contrib {
+
+DLPackTensorExt toDLPackExt(const at::Tensor& src) {
+  if (!src.is_contiguous()) {
+return toDLPackExt(src.contiguous());
+  }
+
+  if (src.dtype().isScalarType(torch::kBool)) {
+auto temp = src.toType(torch::kUInt8);
+return {.dl_managed_tensor = at::toDLPack(temp), .is_bool = true};
+  }
+
+  return {.dl_managed_tensor = at::toDLPack(src), .is_bool = false};
+}
+
+at::Tensor fromDLPackExt(const DLPackTensorExt& src) {
+  if (src.is_bool) {
+return at::fromDLPack(src.dl_managed_tensor).toType(torch::kBool);
+  } else {
+return at::fromDLPack(src.dl_managed_tensor);
+  }
+}
+
+/**
+ * @brief A Torch's module which wraps TVM's OperatorModule Class.
+ * The basic forward function calling TVM's runtime is provided.
+ * The TVM module can be serialized/deserialized as a Torch module.
+ */
+class OperatorModuleWrapper : public torch::jit::CustomClassHolder {
+ public:
+  OperatorModuleWrapper() { runtime_module = 
tvm_contrib_torch_get_last_saved_runtime_module(); }

Review Comment:
   Fixed



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934081517


##
src/contrib/torch/tvm_module_wrapper/runtime_bridge.h:
##
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*!
+ * \file runtime_bridge.h
+ * \brief Util functions for pytorch tvm interaction.
+ */
+#ifndef TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
+#define TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
+
+extern "C" {
+
+typedef DLManagedTensor** TensorList;
+
+struct DLPackTensorExt {
+  DLManagedTensor* dl_managed_tensor;
+  bool is_bool;
+};
+
+struct TVMContribTorchRuntimeModule;
+
+TVMContribTorchRuntimeModule* 
tvm_contrib_torch_get_last_saved_runtime_module();
+
+void tvm_contrib_torch_operator_module_forward(TVMContribTorchRuntimeModule* 
runtime_module,
+   DLPackTensorExt* inputs, size_t 
input_size);
+
+int64_t 
tvm_contrib_torch_graph_executor_module_forward(TVMContribTorchRuntimeModule* 
graph_module,
+DLPackTensorExt* 
inputs, size_t input_size,
+DLPackTensorExt** 
outputs);

Review Comment:
   Done



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934075212


##
src/contrib/torch/tvm_module_wrapper/runtime_bridge.h:
##
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*!
+ * \file runtime_bridge.h
+ * \brief Util functions for pytorch tvm interaction.
+ */
+#ifndef TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
+#define TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
+
+extern "C" {
+
+typedef DLManagedTensor** TensorList;
+
+struct DLPackTensorExt {
+  DLManagedTensor* dl_managed_tensor;
+  bool is_bool;
+};
+
+struct TVMContribTorchRuntimeModule;
+
+TVMContribTorchRuntimeModule* 
tvm_contrib_torch_get_last_saved_runtime_module();
+
+void tvm_contrib_torch_operator_module_forward(TVMContribTorchRuntimeModule* 
runtime_module,
+   DLPackTensorExt* inputs, size_t 
input_size);
+
+int64_t 
tvm_contrib_torch_graph_executor_module_forward(TVMContribTorchRuntimeModule* 
graph_module,

Review Comment:
   Fixed



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-31 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r934074843


##
src/contrib/torch/tvm_module_wrapper/runtime_bridge.h:
##
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*!
+ * \file runtime_bridge.h
+ * \brief Util functions for pytorch tvm interaction.
+ */
+#ifndef TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
+#define TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
+
+extern "C" {
+
+typedef DLManagedTensor** TensorList;

Review Comment:
   Fixed



##
src/contrib/torch/tvm_module_wrapper/runtime_bridge.h:
##
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*!
+ * \file runtime_bridge.h
+ * \brief Util functions for pytorch tvm interaction.
+ */
+#ifndef TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
+#define TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
+
+extern "C" {
+
+typedef DLManagedTensor** TensorList;
+
+struct DLPackTensorExt {
+  DLManagedTensor* dl_managed_tensor;
+  bool is_bool;
+};
+
+struct TVMContribTorchRuntimeModule;
+
+TVMContribTorchRuntimeModule* 
tvm_contrib_torch_get_last_saved_runtime_module();

Review Comment:
   Fixed



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-30 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r933779749


##
cmake/modules/contrib/PT_TVMDSOOP.cmake:
##
@@ -21,38 +21,55 @@ if(NOT USE_PT_TVMDSOOP STREQUAL "OFF")
   execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import torch; 
print(torch.__path__[0].strip())"
 OUTPUT_VARIABLE PT_PATH
 RESULT_VARIABLE PT_STATUS)
-  if (NOT ${PT_STATUS} EQUAL 0)
+
+  if(NOT ${PT_STATUS} EQUAL 0)
 message(FATAL_ERROR "Fail to get pytorch path")
   endif()
 
   string(REGEX REPLACE "\n" "" PT_PATH "${PT_PATH}")
   message(STATUS "PyTorch path: ${PT_PATH}")
 
-  set(PT_COMPILE_FLAGS_STR "-I${PT_PATH}/include -D_GLIBCXX_USE_CXX11_ABI=0")
+  execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import 
torch;print(torch.compiled_with_cxx11_abi())"
+OUTPUT_VARIABLE PT_CXX_FLAG
+RESULT_VARIABLE PT_STATUS)
+
+  string(REGEX REPLACE "\n" "" PT_CXX_FLAG "${PT_CXX_FLAG}")
+  message(STATUS "Found TORCH_BUILT_WITH_CXX_ABI=${PT_CXX_FLAG} ")
+
+  if(${PT_CXX_FLAG} STREQUAL "False")
+set(CXX_ABI_ENABLED 0)
+  else()
+set(CXX_ABI_ENABLED 1)
+  endif()
+
+  set_property(
+SOURCE
+
${CMAKE_CURRENT_SOURCE_DIR}/src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTorch.cc
+APPEND PROPERTY
+COMPILE_OPTIONS
+"-D_GLIBCXX_USE_CXX11_ABI=${CXX_ABI_ENABLED}"
+"-I${PT_PATH}/include"
+  )
   set(PT_LINK_FLAGS_STR "-L${PT_PATH}/lib -l:libtorch.so 
-l:libtorch_python.so")
 
   if(NOT USE_CUDA STREQUAL "OFF")
 add_definitions(-DPT_TVMDSOOP_ENABLE_GPU)
   endif()
 
-
   string(REGEX REPLACE "\n" " " PT_FLAGS "${PT_COMPILE_FLAGS} 
${PT_LINK_FLAGS}")
-  separate_arguments(PT_COMPILE_FLAGS UNIX_COMMAND ${PT_COMPILE_FLAGS_STR})
+  separate_arguments(PT_COMPILE_FLAGS UNIX_COMMAND)
   separate_arguments(PT_LINK_FLAGS UNIX_COMMAND ${PT_LINK_FLAGS_STR})
 
-
   set(LIBRARY_NAME pt_tvmdsoop)
-  tvm_file_glob(GLOB_RECURSE PTTVM_SRCS 
${CMAKE_CURRENT_SOURCE_DIR}/src/contrib/torch/**/*.cc)
+  tvm_file_glob(GLOB_RECURSE PTTVM_SRCS 
${CMAKE_CURRENT_SOURCE_DIR}/src/contrib/torch/tvm_module_wrapper/*.cc)

Review Comment:
   Done



##
python/tvm/contrib/torch/pytorch_tvm.py:
##
@@ -183,6 +184,11 @@ def load_tvm(self, export_dir):
 
 def build_pytorch_module(self, num_inputs, num_outputs, input_infos=None):
 """Build pytorch module containing TVM Graph Module"""
+warnings.warn(
+"We suggest users to use `optimized_torch` for tuning Torch 
modules instead",

Review Comment:
   Done



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [tvm] juda commented on a diff in pull request #12232: libstdc++ CXX11 ABI Compatibility & boolean tensor support

2022-07-29 Thread GitBox


juda commented on code in PR #12232:
URL: https://github.com/apache/tvm/pull/12232#discussion_r933036589


##
cmake/modules/contrib/PT_TVMDSOOP.cmake:
##
@@ -21,38 +21,55 @@ if(NOT USE_PT_TVMDSOOP STREQUAL "OFF")
   execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import torch; 
print(torch.__path__[0].strip())"
 OUTPUT_VARIABLE PT_PATH
 RESULT_VARIABLE PT_STATUS)
-  if (NOT ${PT_STATUS} EQUAL 0)
+
+  if(NOT ${PT_STATUS} EQUAL 0)
 message(FATAL_ERROR "Fail to get pytorch path")
   endif()
 
   string(REGEX REPLACE "\n" "" PT_PATH "${PT_PATH}")
   message(STATUS "PyTorch path: ${PT_PATH}")
 
-  set(PT_COMPILE_FLAGS_STR "-I${PT_PATH}/include -D_GLIBCXX_USE_CXX11_ABI=0")
+  execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import 
torch;print(torch.compiled_with_cxx11_abi())"
+OUTPUT_VARIABLE PT_CXX_FLAG
+RESULT_VARIABLE PT_STATUS)
+
+  string(REGEX REPLACE "\n" "" PT_CXX_FLAG "${PT_CXX_FLAG}")
+  message(STATUS "Found TORCH_BUILT_WITH_CXX_ABI=${PT_CXX_FLAG} ")
+
+  if(${PT_CXX_FLAG} STREQUAL "False")
+set(CXX_ABI_ENABLED 0)
+  else()
+set(CXX_ABI_ENABLED 1)
+  endif()
+
+  set_property(
+SOURCE
+
${CMAKE_CURRENT_SOURCE_DIR}/src/contrib/torch/tvm_module_wrapper/RuntimeModuleWrapperTorch.cc
+APPEND PROPERTY
+COMPILE_OPTIONS
+"-D_GLIBCXX_USE_CXX11_ABI=${CXX_ABI_ENABLED}"
+"-I${PT_PATH}/include"
+  )
   set(PT_LINK_FLAGS_STR "-L${PT_PATH}/lib -l:libtorch.so 
-l:libtorch_python.so")
 
   if(NOT USE_CUDA STREQUAL "OFF")
 add_definitions(-DPT_TVMDSOOP_ENABLE_GPU)
   endif()
 
-
   string(REGEX REPLACE "\n" " " PT_FLAGS "${PT_COMPILE_FLAGS} 
${PT_LINK_FLAGS}")
-  separate_arguments(PT_COMPILE_FLAGS UNIX_COMMAND ${PT_COMPILE_FLAGS_STR})
+  separate_arguments(PT_COMPILE_FLAGS UNIX_COMMAND)
   separate_arguments(PT_LINK_FLAGS UNIX_COMMAND ${PT_LINK_FLAGS_STR})
 
-
   set(LIBRARY_NAME pt_tvmdsoop)
-  tvm_file_glob(GLOB_RECURSE PTTVM_SRCS 
${CMAKE_CURRENT_SOURCE_DIR}/src/contrib/torch/**/*.cc)
+  tvm_file_glob(GLOB_RECURSE PTTVM_SRCS 
${CMAKE_CURRENT_SOURCE_DIR}/src/contrib/torch/tvm_module_wrapper/*.cc)

Review Comment:
   Note that the compilation of `tvm_class.cc` is skipped.
   There is no way to build such a file without getting an undefined symbol 
error under the official PyTorch distribution.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@tvm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org