[GitHub] [incubator-tvm] junrushao1994 opened a new pull request #6347: [Target][Codegen] Use target class in all codegens

2020-08-26 Thread GitBox


junrushao1994 opened a new pull request #6347:
URL: https://github.com/apache/incubator-tvm/pull/6347


   This PR uses the target class to replace almost all of the raw target strings in the codegen modules. It further helps with our migration towards a robust JSON-like target configuration, per [[RFC] TVM Target Specification](https://discuss.tvm.ai/t/rfc-tvm-target-specification/6844?u=junrushao1994).
   
   Only one place in the code generators still uses a raw string: the metadata section `tvm_target` in the LLVM module. I haven't got a better idea of how to deal with this yet. Don't hesitate to suggest if you have good ideas :-)
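   For context, a minimal sketch of the difference (assuming the post-RFC Python API, where a `Target` exposes `kind` and `attrs`; the flag shown is illustrative):

```python
import tvm

# Before: codegens received an opaque string and re-parsed it ad hoc.
raw_target = "cuda -max_num_threads=1024"

# After: the string is parsed once into a structured, queryable object.
target = tvm.target.Target(raw_target)
print(target.kind.name)                 # "cuda"
print(target.attrs["max_num_threads"])  # 1024
```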
   
   CC: @comaniac @jroesch @tqchen 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] merrymercy commented on a change in pull request #6310: [Ansor][AutoTVM v2.0] Phase 2: Evolutionary Search

2020-08-26 Thread GitBox


merrymercy commented on a change in pull request #6310:
URL: https://github.com/apache/incubator-tvm/pull/6310#discussion_r478134161



##
File path: src/auto_scheduler/search_policy/sketch_policy_rules.cc
##
@@ -908,7 +795,362 @@ InitPopulationRule::ResultKind InitThreadBind::Apply(SketchPolicyNode* policy, S
       state->bind(stage_id, iters1[1], IteratorAnnotation::kThreadX);
     }
   }
+  return ResultKind::kValid;
+}
+
+PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* policy,
+                                                           State* state) const {
+  int max_innermost_split_factor =
+      GetIntParam(policy->params, SketchParamKey::max_innermost_split_factor);
+
+  // Extract all SplitSteps.
+  std::vector<size_t> split_step_ids;
+  for (size_t i = 0; i < (*state)->transform_steps.size(); ++i) {
+    if (auto ps = (*state)->transform_steps[i].as<SplitStepNode>()) {
+      if (!ps->extent.defined() || !ps->extent.value()->IsInstance<IntImmNode>()) {
+        continue;
+      }
+      auto innermost_factor = ps->lengths.back().value_or(max_innermost_split_factor + 1);
+      if (GetIntImm(innermost_factor) <= max_innermost_split_factor) {
+        split_step_ids.push_back(i);
+      }
+    }
+  }
+  if (split_step_ids.empty()) {
+    // No tile size could be mutated.
+    return ResultKind::kInvalid;
+  }
+
+  // Select a SplitStep with extent larger than one to mutate.
+  int retry_ct = 0;
+  int64_t extent = 1;
+  int step_id;
+  const SplitStepNode* ps;
+
+  do {
+    step_id = split_step_ids[(policy->rand_gen)() % split_step_ids.size()];
+    ps = (*state)->transform_steps[step_id].as<SplitStepNode>();
+    CHECK(ps != nullptr);
+    extent = GetIntImm(ps->extent.value());
+    retry_ct += 1;
+  } while (retry_ct < static_cast<int>(split_step_ids.size()) << 2 && (extent == 1 || extent == 0));
+
+  if (extent <= 1) {
+    // Cannot find a step with extent larger than one.
+    return ResultKind::kInvalid;
+  }
+
+  // Fetch the current tile sizes.
+  std::vector<int> lengths(ps->lengths.size() + 1, 1);
+  for (int i = 0; i < static_cast<int>(ps->lengths.size()); ++i) {
+    lengths[i + 1] = GetIntImm(ps->lengths[i].value());
+  }
+  lengths[0] = extent / ElementProduct(lengths);
+
+  // Randomly permute the tile size order.
+  std::vector<int> random_perm;
+  RandomPermutation(lengths.size(), &random_perm, &(policy->rand_gen));
+
+  // Try to divide a factor from one tile size and multiply it to another.
+  for (size_t i = 0; i < random_perm.size(); ++i) {
+    size_t src_idx = random_perm[i];
+    int length = lengths[src_idx];
+    if (length == 1) {
+      continue;
+    }
+
+    size_t dst_idx = random_perm[(i + 1) % random_perm.size()];
+    const std::vector<int>& factors = policy->split_memo.GetFactors(length);
+    CHECK_GE(factors.size(), 1);
+
+    int divide_factor;
+    if (dst_idx == lengths.size() - 1) {
+      // Maintain the restriction of hardware_params.max_innermost_split_factor.
+      int max_factor_index = static_cast<int>(factors.size()) - 1;
+      for (; max_factor_index >= 1; max_factor_index--) {
+        if (factors[max_factor_index] * lengths[dst_idx] <= max_innermost_split_factor) {
+          break;
+        }
+      }
+      if (max_factor_index == 0) {
+        // Failed on this dst_idx, try the next one.
+        continue;
+      }
+      divide_factor = factors[1 + (policy->rand_gen)() % (max_factor_index)];
+    } else {
+      divide_factor = factors[1 + (policy->rand_gen)() % (factors.size() - 1)];
+    }
+
+    // Divide one factor from lengths[src_idx] and multiply it to lengths[dst_idx].
+    Array<Integer> new_lengths;
+    for (size_t j = 1; j < lengths.size(); ++j) {
+      if (j == src_idx) {
+        new_lengths.push_back(Integer(lengths[j] / divide_factor));
+      } else if (j == dst_idx) {
+        new_lengths.push_back(Integer(lengths[j] * divide_factor));
+      } else {
+        new_lengths.push_back(Integer(lengths[j]));
+      }
+    }
+
+    StateNode* pstate = state->CopyOnWrite();
+    pstate->transform_steps.Set(
+        step_id, SplitStep(ps->stage_id, ps->iter_id, ps->extent,
+                           Array<Optional<Integer>>(new_lengths.begin(), new_lengths.end()),
+                           ps->inner_to_outer));
+    return ResultKind::kValid;
+  }
+  return ResultKind::kInvalid;
+}
+
+PopulationGenerationRule::ResultKind MutateMaxUnrollFactor::Apply(SketchPolicyNode* policy,
+                                                                  State* state) const {
+  // Extract all auto_unroll_max_step pragma steps.
+  std::vector<int> annotate_steps;
+  for (size_t i = 0; i < (*state)->transform_steps.size(); ++i) {
+    if (auto ps = (*state)->transform_steps[i].as<PragmaStepNode>()) {
+      if (StrStartsWith(ps->pragma_type, "auto_unroll_max_step")) {
+        annotate_steps.push_back(i);
+      }
+    }
+  }
+  if (annotate_steps.empty()) {
+    return ResultKind::kInvalid;
+  }
+
+  // Randomly pick one unroll factor candidate.
+  auto cands = (IsGPUTask(policy->search_task)) ? &gpu_unroll_cand
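The mutation above picks a random split step, divides a factor out of one tile size, and multiplies it into the next tile size of a random permutation, so the product of the sizes (the loop extent) is preserved. A standalone sketch of the same idea in plain Python (illustrative only; the real code operates on TVM's transform steps and factorization memo):

```python
import random

def factors_of(n):
    """All factors of n, including 1 (stand-in for SplitFactorizationMemo)."""
    return [f for f in range(1, n + 1) if n % f == 0]

def mutate_tile_size(lengths, max_innermost=64):
    """Move one factor between two tile dimensions, keeping the product fixed."""
    order = list(range(len(lengths)))
    random.shuffle(order)
    for i, src in enumerate(order):
        if lengths[src] == 1:
            continue
        dst = order[(i + 1) % len(order)]
        factors = [f for f in factors_of(lengths[src]) if f > 1]
        if dst == len(lengths) - 1:  # innermost size must respect the cap
            factors = [f for f in factors if f * lengths[dst] <= max_innermost]
        if not factors:
            continue  # failed on this dst, try the next pair
        f = random.choice(factors)
        lengths[src] //= f
        lengths[dst] *= f
        return lengths
    return None  # no valid mutation found

print(mutate_tile_size([4, 8, 2]))  # e.g. [2, 16, 2]; product stays 64
```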

[GitHub] [incubator-tvm] merrymercy commented on pull request #6310: [Ansor][AutoTVM v2.0] Phase 2: Evolutionary Search

2020-08-26 Thread GitBox


merrymercy commented on pull request #6310:
URL: https://github.com/apache/incubator-tvm/pull/6310#issuecomment-681532459


   The test case looks good to me. The two remaining items:
   1. Resolve my last comment (https://github.com/apache/incubator-tvm/pull/6310#discussion_r478134161).
   2. Bring this patch (https://github.com/merrymercy/Ansor/pull/98) into this PR.
   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] ZihengJiang commented on pull request #6125: [VTA][OpenCL] add device_annot support in graphpack

2020-08-26 Thread GitBox


ZihengJiang commented on pull request #6125:
URL: https://github.com/apache/incubator-tvm/pull/6125#issuecomment-681502427


   I am wondering whether VTA's graph annotation can be unified into Relay's heterogeneous execution feature



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] ZihengJiang edited a comment on pull request #6125: [VTA][OpenCL] add device_annot support in graphpack

2020-08-26 Thread GitBox


ZihengJiang edited a comment on pull request #6125:
URL: https://github.com/apache/incubator-tvm/pull/6125#issuecomment-681502427


   I am wondering whether VTA's graph annotation can be unified into Relay's heterogeneous execution feature: https://github.com/apache/incubator-tvm/issues/4178
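   For reference, Relay's annotation-based heterogeneous execution looks roughly like this (a sketch against the TVM 0.7-era API; exact signatures may differ across versions):

```python
import tvm
from tvm import relay

# Pin one subexpression to the CPU while the rest of the graph follows
# the default device (Relay heterogeneous execution, issue #4178).
x = relay.var("x", shape=(10,))
y = relay.var("y", shape=(10,))
add = relay.add(x, y)
add_on_cpu = relay.annotation.on_device(add, tvm.cpu())
func = relay.Function([x, y], relay.multiply(add_on_cpu, y))
```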



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] anilmartha commented on a change in pull request #6342: [CI][Contrib] Add Vitis-AI docker installation

2020-08-26 Thread GitBox


anilmartha commented on a change in pull request #6342:
URL: https://github.com/apache/incubator-tvm/pull/6342#discussion_r478112852



##
File path: docker/Dockerfile.ci_cpu
##
@@ -83,3 +83,7 @@ RUN bash /install/ubuntu_install_caffe.sh
 # Github Arm(R) Ethos(TM)-N NPU driver
 COPY install/ubuntu_install_ethosn_driver_stack.sh /install/ubuntu_install_ethosn_driver_stack.sh
 RUN bash /install/ubuntu_install_ethosn_driver_stack.sh
+
+# Vitis-AI PyXIR CI deps
+COPY install/ubuntu_install_vai_packages.sh /install/ubuntu_install_vai_packages.sh
+RUN bash /install/ubuntu_install_vai_packages.sh

Review comment:
   @comaniac The purpose of the Docker-related files/scripts in the main integration PR is to build the Docker image for the Vitis-AI codegen infrastructure using Dockerfile.ci_vai, and to launch the Docker container with the necessary drivers if required. Should we add those files in this PR itself?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] zhanghaohit commented on a change in pull request #6126: [VTA][OpenCL] intelfocl

2020-08-26 Thread GitBox


zhanghaohit commented on a change in pull request #6126:
URL: https://github.com/apache/incubator-tvm/pull/6126#discussion_r478102076



##
File path: vta/runtime/runtime.cc
##
@@ -329,7 +442,7 @@ class BaseQueue {
   // End location of current SRAM write in FIFO mode
   uint32_t sram_end_{0};
   // The buffer in DRAM
-  std::vector dram_buffer_;
+  std::vector> dram_buffer_;

Review comment:
   done





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[incubator-tvm] branch master updated: [BYOC][ACL] Improved pooling support (#6248)

2020-08-26 Thread zhic
This is an automated email from the ASF dual-hosted git repository.

zhic pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
 new c958bc1  [BYOC][ACL] Improved pooling support (#6248)
c958bc1 is described below

commit c958bc17038728f396ef94609c4a98462d545390
Author: lhutton1 <35535092+lhutt...@users.noreply.github.com>
AuthorDate: Thu Aug 27 04:50:38 2020 +0100

[BYOC][ACL] Improved pooling support (#6248)

* [BYOC][ACL] Improved pooling support

Adds support in ACL for the following relay pooling operators and composite functions:
  * nn.avg_pool2d (fp32), cast + nn.avg_pool2d(uint8) + cast => AVG pool
  * nn.global_max_pool2d => Global MAX pool
  * nn.global_avg_pool2d, cast + nn.global_avg_pool2d(uint8) + cast => Global AVG pool
  * power(2) + nn.avg_pool2d + sqrt => L2 pooling (for fp32 only)

Tests updated to reflect these changes.

Change-Id: I1644b67b60ebb252344eb9695a521d2d958c724e

* Address comments

Change-Id: Ibe8a61b4c42da246ce54701c89ea985b423c8f83

* Fix not checking output saturation

Change-Id: Ia6f3d9db31cfb8c417d8556d29961210fea418b2

* Use defined set of trials

Change-Id: Ib180e3a0cbb84d6fa00c7e1994f58cb62662db15

* Rebase master

Change-Id: I5c932751cd38da06d6f2b397be5d8ab7fdeb169f
---
 docs/deploy/arm_compute_lib.rst|  69 +++--
 python/tvm/relay/op/contrib/arm_compute_lib.py |  86 ++-
 .../backend/contrib/arm_compute_lib/codegen.cc |  60 +
 src/runtime/contrib/arm_compute_lib/acl_runtime.cc |  54 +++-
 src/runtime/contrib/arm_compute_lib/acl_utils.cc   |  10 +-
 src/runtime/contrib/arm_compute_lib/acl_utils.h|   4 +-
 .../contrib/test_arm_compute_lib/infrastructure.py |  35 ++-
 .../contrib/test_arm_compute_lib/test_conv2d.py|  10 +-
 .../contrib/test_arm_compute_lib/test_network.py   |   4 +-
 .../contrib/test_arm_compute_lib/test_pooling.py   | 277 +
 .../contrib/test_arm_compute_lib/test_reshape.py   |   4 +-
 11 files changed, 506 insertions(+), 107 deletions(-)

diff --git a/docs/deploy/arm_compute_lib.rst b/docs/deploy/arm_compute_lib.rst
index 26b42ae..e3399c5 100644
--- a/docs/deploy/arm_compute_lib.rst
+++ b/docs/deploy/arm_compute_lib.rst
@@ -188,31 +188,50 @@ An example configuration for `test_config.json`:
 
 Operator support
 
-+--------------+-------------------------------------------------------------------------+
-| Relay Node   | Remarks                                                                 |
-+==============+=========================================================================+
-| nn.conv2d    | fp32:                                                                   |
-|              |   Simple: nn.conv2d                                                     |
-|              |   Composite: nn.pad?, nn.conv2d, nn.bias_add?, nn.relu?                 |
-|              |                                                                         |
-|              | (only groups = 1 supported)                                             |
-+--------------+-------------------------------------------------------------------------+
-| qnn.conv2d   | uint8:                                                                  |
-|              |   Composite: nn.pad?, nn.conv2d, nn.bias_add?, nn.relu?, qnn.requantize |
-|              |                                                                         |
-|              | (only groups = 1 supported)                                             |
-+--------------+-------------------------------------------------------------------------+
-| nn.dense     | fp32:                                                                   |
-|              |   Simple: nn.dense                                                      |
-|              |   Composite: nn.dense, nn.bias_add?                                     |
-+--------------+-------------------------------------------------------------------------+
-| qnn.dense    | uint8:                                                                  |
-|              |   Composite: qnn.dense, nn.bias_add?, qnn.requantize                    |
-+--------------+-------------------------------------------------------------------------+
-| nn.maxpool2d | fp32, uint8                                                             |
-+--------------+-------------------------------------------------------------------------+
-| reshape      | fp32, uint8                                                             |
-+--------------+-------------------------------------------------------------------------+
++--------------+-------------------------------------------------------------------------+
+| Relay Node   | Remarks
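As a usage sketch, offloading one of the newly supported pooling operators goes through the ACL BYOC partitioning helper (a minimal example; the shapes and pooling parameters are illustrative):

```python
import tvm
from tvm import relay
from tvm.relay.op.contrib.arm_compute_lib import partition_for_arm_compute_lib

# Route an fp32 average pool (newly supported above) to ACL.
data = relay.var("data", shape=(1, 14, 14, 32), dtype="float32")
out = relay.nn.avg_pool2d(data, pool_size=(2, 2), strides=(2, 2), layout="NHWC")
mod = tvm.IRModule.from_expr(relay.Function([data], out))

# Annotate and partition the regions ACL can run; the rest stays with TVM.
mod = partition_for_arm_compute_lib(mod)
print(mod)
```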

[GitHub] [incubator-tvm] zhiics merged pull request #6248: [BYOC][ACL] Improved pooling support

2020-08-26 Thread GitBox


zhiics merged pull request #6248:
URL: https://github.com/apache/incubator-tvm/pull/6248


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] zhiics commented on pull request #6248: [BYOC][ACL] Improved pooling support

2020-08-26 Thread GitBox


zhiics commented on pull request #6248:
URL: https://github.com/apache/incubator-tvm/pull/6248#issuecomment-681340134


   Thanks @lhutton1 @comaniac 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] zhanghaohit commented on a change in pull request #6125: [VTA][OpenCL] add device_annot support in graphpack

2020-08-26 Thread GitBox


zhanghaohit commented on a change in pull request #6125:
URL: https://github.com/apache/incubator-tvm/pull/6125#discussion_r478050263



##
File path: python/tvm/relay/op/_tensor.py
##
@@ -87,6 +87,10 @@
 register_broadcast_schedule("fast_exp")
 register_broadcast_schedule("fast_tanh")
 register_broadcast_schedule("fast_erf")
+# a fake on_device schedule.
+# this will not be used in actual computation
+# as on_device will be removed during DeviceAnnotation pass
+register_injective_schedule("on_device")

Review comment:
   It will raise
   `AssertionError: on_device doesn't have FTVMStrategy registered`
   during `build_module.cc::Optimize`, before we get to the `RunDeviceAnnotationPass`.





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] FrozenGene commented on pull request #6206: [Caffe Frontend] introduce caffe frontend for tvm

2020-08-26 Thread GitBox


FrozenGene commented on pull request #6206:
URL: https://github.com/apache/incubator-tvm/pull/6206#issuecomment-681321757


   Thanks @fernchen! It is merged now.



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] FrozenGene merged pull request #6206: [Caffe Frontend] introduce caffe frontend for tvm

2020-08-26 Thread GitBox


FrozenGene merged pull request #6206:
URL: https://github.com/apache/incubator-tvm/pull/6206


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[incubator-tvm] branch master updated: [Caffe Frontend] introduce caffe frontend for tvm (#6206)

2020-08-26 Thread zhaowu
This is an automated email from the ASF dual-hosted git repository.

zhaowu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
 new 44d97ad  [Caffe Frontend] introduce caffe frontend for tvm (#6206)
44d97ad is described below

commit 44d97ad08002a6e89c6aed6cd9ae242a3e15b222
Author: FernChen 
AuthorDate: Thu Aug 27 11:26:58 2020 +0800

[Caffe Frontend] introduce caffe frontend for tvm (#6206)

* [Caffe Frontend] introduce caffe frontend for tvm.

* [Caffe Frontend] fix bugs for generating caption in tutorial.

* [Caffe Frontend] delete statement for python2 and modify the function 
name.

* [Caffe Frontend] change the directory which will hold the tmp files
when testing the caffe frontend.

* [Caffe Frontend] delete tutorial about caffe frontend.

* [Caffe Frontend] delete some print statements

Co-authored-by: fernchen 
---
 python/tvm/relay/frontend/__init__.py   |   1 +
 python/tvm/relay/frontend/caffe.py  | 848 
 tests/python/frontend/caffe/test_forward.py | 968 
 tests/scripts/task_python_frontend_cpu.sh   |   3 +
 4 files changed, 1820 insertions(+)

diff --git a/python/tvm/relay/frontend/__init__.py b/python/tvm/relay/frontend/__init__.py
index aba9eea..7154f5a 100644
--- a/python/tvm/relay/frontend/__init__.py
+++ b/python/tvm/relay/frontend/__init__.py
@@ -33,3 +33,4 @@ from .caffe2 import from_caffe2
 from .tensorflow import from_tensorflow
 from .darknet import from_darknet
 from .pytorch import from_pytorch
+from .caffe import from_caffe
diff --git a/python/tvm/relay/frontend/caffe.py b/python/tvm/relay/frontend/caffe.py
new file mode 100644
index 000..b7bcbde
--- /dev/null
+++ b/python/tvm/relay/frontend/caffe.py
@@ -0,0 +1,848 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
+# pylint: disable=no-else-return, no-else-continue
+"""Caffe frontend."""
+import numpy as np
+import tvm
+from tvm.ir import IRModule
+from .. import analysis
+from .. import expr as _expr
+from .. import function as _function
+from .. import op as _op
+from ... import nd as _nd
+from .common import ExprTable
+from .common import infer_shape as _infer_shape
+
+__all__ = ['from_caffe']
+
+
+class OperatorConverter(object):
+    """ Operator converter for converting Caffe ops to Relay ops """
+    def __init__(self, init_layer_dict, predict_layer, exp_tab):
+        self.init_layer_dict = init_layer_dict
+        self.predict_layer = predict_layer
+        self.exp_tab = exp_tab
+        self.new_bn = {}
+        self.changed_layers = None
+
+        self.convert_map = {
+            'BatchNorm': self.convert_batch_norm,
+            'Concat': self.convert_concat,
+            'Convolution': self.convert_conv,
+            'Crop': self.convert_crop,
+            'Deconvolution': self.convert_deconv,
+            'Dropout': self.convert_dropout,
+            'Eltwise': self.convert_eltwise,
+            'Flatten': self.convert_flatten,
+            'InnerProduct': self.convert_innerproduct,
+            'Input': None,
+            'LRN': self.convert_lrn,
+            'Pooling': self.convert_pooling,
+            'PReLU': self.convert_prelu,
+            'ReLU': self.convert_relu,
+            'Reshape': self.convert_reshape,
+            'Scale': self.convert_scale,
+            'Sigmoid': self.convert_sigmoid,
+            'Slice': self.convert_slice,
+            'Softmax': self.convert_softmax,
+            'TanH': self.convert_tanh,
+        }
+
+    def convert_flatten(self, op):
+        """ Convert Flatten layer """
+        inputs = op.bottom
+        in_expr = self.exp_tab.get_expr(inputs[0])
+
+        flatten_params = op.flatten_param.axis
+        assert flatten_params == 1, "flatten axis should be 1"
+        out = _op.nn.batch_flatten(in_expr)
+
+        return out
+
+    def convert_eltwise(self, op):
+        """ Convert Eltwise layer """
+        inputs = op.bottom
+        assert len(inputs) == 2, "input tensors length
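For reference, the new frontend's entry point is used roughly as follows (a sketch; it assumes the `caffe` protobuf bindings are installed, and the file names and shapes are placeholders):

```python
from google.protobuf import text_format
from caffe.proto import caffe_pb2
from tvm import relay

# Load the network definition and the trained weights (paths are examples).
predict_net = caffe_pb2.NetParameter()
with open("deploy.prototxt", "r") as f:
    text_format.Merge(f.read(), predict_net)
init_net = caffe_pb2.NetParameter()
with open("model.caffemodel", "rb") as f:
    init_net.ParseFromString(f.read())

shape_dict = {"data": (1, 3, 224, 224)}
dtype_dict = {"data": "float32"}
mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)
```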

[GitHub] [incubator-tvm] FrozenGene commented on pull request #6303: [Relay/TOPI][TFLite] Implemented MATRIX_SET_DIAG Operator for Relay/TOPI and TFLite Frontend.

2020-08-26 Thread GitBox


FrozenGene commented on pull request #6303:
URL: https://github.com/apache/incubator-tvm/pull/6303#issuecomment-681320486


   Thanks @jainris @siju-samuel @mbaret, it is merged. For the alignment, I hope @jainris can follow up, thanks!



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[incubator-tvm] branch master updated: [Relay/TOPI][TFLite] Implemented MATRIX_SET_DIAG Operator for Relay/TOPI and TFLite Frontend. (#6303)

2020-08-26 Thread zhaowu
This is an automated email from the ASF dual-hosted git repository.

zhaowu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
 new 082f27e  [Relay/TOPI][TFLite] Implemented MATRIX_SET_DIAG Operator for Relay/TOPI and TFLite Frontend. (#6303)
082f27e is described below

commit 082f27ebf8b14f537f0d7686e8161db1684f3110
Author: Rishabh Jain <56974688+jain...@users.noreply.github.com>
AuthorDate: Thu Aug 27 08:51:45 2020 +0530

[Relay/TOPI][TFLite] Implemented MATRIX_SET_DIAG Operator for Relay/TOPI and TFLite Frontend. (#6303)

* Corrected docstring error.

* Minor changes.

* Changed MATRIX_SET_DIAG registration from broadcast to injective.
---
 include/tvm/topi/transform.h| 29 ++
 python/tvm/relay/frontend/tflite.py | 28 ++
 python/tvm/relay/op/_transform.py   |  1 +
 python/tvm/relay/op/transform.py| 41 ++
 python/tvm/topi/testing/__init__.py |  1 +
 python/tvm/topi/testing/matrix_set_diag.py  | 47 
 python/tvm/topi/transform.py| 40 ++
 src/relay/op/tensor/transform.cc| 50 +
 src/topi/transform.cc   |  4 ++
 tests/python/frontend/tflite/test_forward.py| 72 +
 tests/python/relay/test_op_level10.py   | 28 ++
 tests/python/topi/python/test_topi_transform.py | 36 +
 12 files changed, 377 insertions(+)

diff --git a/include/tvm/topi/transform.h b/include/tvm/topi/transform.h
index 19b2ef4..eb69fc5 100644
--- a/include/tvm/topi/transform.h
+++ b/include/tvm/topi/transform.h
@@ -1511,6 +1511,35 @@ inline Tensor sparse_to_dense(const Tensor& sparse_indices, const Array
       name, tag);
 }
 
+/*!
+ * \brief Returns a tensor with the diagonal of input tensor replaced with the provided diagonal.
+ * \param input input tensor.
+ * \param diagonal values to be filled in the diagonal.
+ * \param name output tensor name.
+ * \param tag output tensor tag.
+ * \return new tensor with given diagonal values.
+ */
+inline Tensor matrix_set_diag(const Tensor& input, const Tensor& diagonal,
+                              const std::string name = "T_matrix_set_diag",
+                              const std::string tag = kInjective) {
+  size_t ndim = input->shape.size() - 1;
+
+  return compute(
+      input->shape,
+      [&](const Array<Var>& iter_vars) {
+        auto get_diag = [&]() {
+          Array<PrimExpr> diagonal_indices;
+          for (size_t i = 0; i < ndim; i++) {
+            diagonal_indices.push_back(iter_vars[i]);
+          }
+          return diagonal(diagonal_indices);
+        };
+        return if_then_else((PrimExpr)iter_vars[ndim] == iter_vars[ndim - 1], get_diag(),
+                            input(iter_vars));
+      },
+      name, tag);
+}
+
 }  // namespace topi
 }  // namespace tvm
 #endif  // TVM_TOPI_TRANSFORM_H_
diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index 200352c..31ff871 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -107,6 +107,7 @@ class OperatorConverter(object):
             'LOGICAL_NOT': self.convert_logical_not,
             'LOGICAL_OR': self.convert_logical_or,
             'LOGISTIC': self.convert_logistic,
+            'MATRIX_SET_DIAG': self.convert_matrix_set_diag,
             'MAX_POOL_2D': self.convert_max_pool2d,
             'MAXIMUM': self.convert_maximum,
             'MEAN': self.convert_reduce_mean,
@@ -2989,6 +2990,33 @@ class OperatorConverter(object):
         out = _op.reverse(input_expr, axis)
         return out
 
+    def convert_matrix_set_diag(self, op):
+        """Convert TFLite MATRIX_SET_DIAG"""
+
+        input_tensors = self.get_input_tensors(op)
+        assert len(input_tensors) == 2, "input tensor's length should be 2"
+
+        assert input_tensors[0].tensor.Type() == input_tensors[1].tensor.Type(), \
+            "input and diagonal should be the same type of tensors"
+
+        if input_tensors[0].qnn_params:
+            # Check that input and output tensor have same qnn params.
+            output_tensors = self.get_output_tensors(op)
+            assert self.has_same_qnn_params(input_tensors[0], output_tensors[0]), \
+                "TFLite MATRIX_SET_DIAG requires input and output tensors' \
+                scale and zero points to be equal"
+
+            # Check that input and diagonal tensor have same qnn params.
+            assert self.has_same_qnn_params(input_tensors[0], input_tensors[1]), \
+                "TFLite MATRIX_SET_DIAG requires input and diagonal tensors' \
+                scale and zero points to be equal"
+
+        input_expr = self.get_tensor_expr(input_tensors[0])
+        diagonal_expr = self.get_tensor_exp
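At the Relay level, the new operator replaces the main diagonal of the input with the provided values; a small sketch:

```python
import numpy as np
import tvm
from tvm import relay

data = relay.var("data", shape=(3, 3), dtype="float32")
diag = relay.var("diag", shape=(3,), dtype="float32")
func = relay.Function([data, diag], relay.matrix_set_diag(data, diag))

d = np.zeros((3, 3), dtype="float32")
v = np.array([1.0, 2.0, 3.0], dtype="float32")
# Expected: 1, 2, 3 on the diagonal, zeros elsewhere.
res = relay.create_executor(mod=tvm.IRModule.from_expr(func)).evaluate()(d, v)
print(res)
```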

[GitHub] [incubator-tvm] FrozenGene merged pull request #6303: [Relay/TOPI][TFLite] Implemented MATRIX_SET_DIAG Operator for Relay/TOPI and TFLite Frontend.

2020-08-26 Thread GitBox


FrozenGene merged pull request #6303:
URL: https://github.com/apache/incubator-tvm/pull/6303


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] tqchen merged pull request #6346: [TESTS] add gpuonly tests for python unittests and integration

2020-08-26 Thread GitBox


tqchen merged pull request #6346:
URL: https://github.com/apache/incubator-tvm/pull/6346


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[incubator-tvm] branch master updated (c6dd26b -> f6d3cee)

2020-08-26 Thread tqchen
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from c6dd26b  Add `init` member to ReduceNode (#6138)
 add f6d3cee  [TESTS] add gpuonly tests for python unittests and integration (#6346)

No new revisions were added by this update.

Summary of changes:
 Jenkinsfile | 4 ++--
 .../scripts/task_python_integration_gpuonly.sh  | 6 +-
 .../scripts/task_python_unittest_gpuonly.sh | 6 +-
 3 files changed, 4 insertions(+), 12 deletions(-)
 copy docker/install/ubuntu_install_dgl.sh => tests/scripts/task_python_integration_gpuonly.sh (94%)
 mode change 100644 => 100755
 copy docker/install/ubuntu_install_dgl.sh => tests/scripts/task_python_unittest_gpuonly.sh (94%)
 mode change 100644 => 100755



[incubator-tvm] branch master updated: Add `init` member to ReduceNode (#6138)

2020-08-26 Thread tqchen
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
 new c6dd26b  Add `init` member to ReduceNode (#6138)
c6dd26b is described below

commit c6dd26b9d73c60b3122bafe800529661e14f75e1
Author: quic-sanirudh <63797228+quic-sanir...@users.noreply.github.com>
AuthorDate: Thu Aug 27 07:41:24 2020 +0530

Add `init` member to ReduceNode (#6138)

- This patch adds a new member to ReduceNode called init which allows
  initialization with a custom ProducerLoad or a Float/Int immediate.
- This allows initialization of the output Tensor of a reduction with
  another Tensor instead of the `identity_element` defined in the
  CommReducer
- One example use case for this node is to initialize the Output of a
  convolution reduction with the Bias values thereby saving the
  Bias-add computation.
---
 include/tvm/tir/expr.h |  9 ++-
 include/tvm/tir/op.h   | 18 +++--
 include/tvm/topi/reduction.h   | 19 +++--
 python/tvm/tir/expr.py |  7 +-
 python/tvm/tir/op.py   | 29 +--
 src/arith/canonical_simplify.cc| 12 ++-
 src/printer/tir_text_printer.cc|  2 +-
 src/te/autodiff/ad_simplify.cc | 21 ++---
 src/te/autodiff/ad_util.cc |  9 ++-
 src/te/autodiff/jacobian.cc|  9 ++-
 src/te/operation/compute_op.cc | 10 ++-
 src/te/operation/cross_thread_reduction.cc |  1 +
 src/te/operation/tensorize.cc  |  2 +-
 src/te/schedule/schedule_dataflow_rewrite.cc   | 28 +--
 src/tir/ir/expr.cc | 16 +++-
 src/tir/ir/expr_functor.cc |  9 ++-
 src/tir/op/op.cc   | 24 +++---
 tests/python/integration/test_reduce.py| 94 ++
 .../unittest/test_arith_canonical_simplify.py  |  3 +
 tests/python/unittest/test_te_autodiff.py  | 10 +++
 20 files changed, 268 insertions(+), 64 deletions(-)

diff --git a/include/tvm/tir/expr.h b/include/tvm/tir/expr.h
index 100d163..9e6f440 100644
--- a/include/tvm/tir/expr.h
+++ b/include/tvm/tir/expr.h
@@ -1026,6 +1026,8 @@ class ReduceNode : public PrimExprNode {
   CommReducer combiner;
   /*! \brief The source operand */
   Array<PrimExpr> source;
+  /*! \brief The init operand */
+  Array<PrimExpr> init;
   /*! \brief The reduction axis */
   Array<IterVar> axis;
   /*!
@@ -1040,6 +1042,7 @@ class ReduceNode : public PrimExprNode {
     v->Visit("dtype", &dtype);
     v->Visit("combiner", &combiner);
     v->Visit("source", &source);
+    v->Visit("init", &init);
     v->Visit("axis", &axis);
     v->Visit("condition", &condition);
     v->Visit("value_index", &value_index);
@@ -1049,7 +1052,8 @@ class ReduceNode : public PrimExprNode {
     // check axis first so IterVars can define the necessary variables.
     return equal(dtype, other->dtype) && equal(axis, other->axis) &&
            equal(combiner, other->combiner) && equal(source, other->source) &&
-           equal(condition, other->condition) && equal(value_index, other->value_index);
+           equal(init, other->init) && equal(condition, other->condition) &&
+           equal(value_index, other->value_index);
   }
 
   void SHashReduce(SHashReducer hash_reduce) const {
@@ -1057,6 +1061,7 @@ class ReduceNode : public PrimExprNode {
     hash_reduce(axis);
     hash_reduce(combiner);
     hash_reduce(source);
+    hash_reduce(init);
     hash_reduce(condition);
     hash_reduce(value_index);
   }
@@ -1072,7 +1077,7 @@ class ReduceNode : public PrimExprNode {
 class Reduce : public PrimExpr {
  public:
   TVM_DLL Reduce(CommReducer combiner, Array<PrimExpr> src, Array<IterVar> rdom, PrimExpr condition,
-                 int value_index);
+                 int value_index, Array<PrimExpr> init);
 
   TVM_DEFINE_OBJECT_REF_METHODS(Reduce, PrimExpr, ReduceNode);
 };
diff --git a/include/tvm/tir/op.h b/include/tvm/tir/op.h
index 93a54b0..9e53e97 100644
--- a/include/tvm/tir/op.h
+++ b/include/tvm/tir/op.h
@@ -464,48 +464,54 @@ TVM_DLL PrimExpr isinf(PrimExpr x);
  * \brief sum of source expression over axis
  * \param source The source expression.
  * \param axis List of iteration variables that will be used for reduction.
+ * \param init The value with which to initialize the output.
  * \return The result.
  */
-TVM_DLL PrimExpr sum(PrimExpr source, Array<IterVar> axis);
+TVM_DLL PrimExpr sum(PrimExpr source, Array<IterVar> axis, Array<PrimExpr> init = {});
 
 /*!
  * \brief logical And of source expression over axis
  * \param source The source expression.
  * \param axis List of iteration variables that will be used for reduction.
+ * \param init The value with which to initializ
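On the Python side the new member surfaces as an extra `init` argument to the reducers; a sketch of the bias-initialization use case from the commit message (assuming the matching `te` API):

```python
import tvm
from tvm import te

# Initialize a sum reduction with a bias tensor instead of the identity
# element, folding the bias-add into the reduction itself.
m, n = te.var("m"), te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m,), name="B")  # plays the role of a bias
k = te.reduce_axis((0, n), name="k")
C = te.compute((m,), lambda i: te.sum(A[i, k], axis=k, init=B[i]), name="C")

s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))
```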

[GitHub] [incubator-tvm] tqchen commented on pull request #6138: Add `init` member to ReduceNode

2020-08-26 Thread GitBox


tqchen commented on pull request #6138:
URL: https://github.com/apache/incubator-tvm/pull/6138#issuecomment-681300404


   Thanks @quic-sanirudh ! this is now merged



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] tqchen merged pull request #6138: Add `init` member to ReduceNode

2020-08-26 Thread GitBox


tqchen merged pull request #6138:
URL: https://github.com/apache/incubator-tvm/pull/6138


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[incubator-tvm] branch master updated (4910c8c -> 415c088)

2020-08-26 Thread masahi
This is an automated email from the ASF dual-hosted git repository.

masahi pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from 4910c8c  [MSVC] Make able to compile with MSVC (#6341)
 add 415c088  ROCm changed name of library and removed old one in ROCm 3.7 release. (#6345)

No new revisions were added by this update.

Summary of changes:
 cmake/util/FindROCM.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)



[GitHub] [incubator-tvm] masahi merged pull request #6345: ROCm changed name of library and removed the old one in ROCm 3.7 release.

2020-08-26 Thread GitBox


masahi merged pull request #6345:
URL: https://github.com/apache/incubator-tvm/pull/6345


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] quic-sanirudh commented on pull request #6138: Add `init` member to ReduceNode

2020-08-26 Thread GitBox


quic-sanirudh commented on pull request #6138:
URL: https://github.com/apache/incubator-tvm/pull/6138#issuecomment-681232652


   I added support for initializing with rfactor, but it doesn't work with crossthread_allreduce. I also added a few unit and compilation tests.



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] t-vi commented on pull request #6345: ROCm changed name of library and removed the old one in ROCm 3.7 release.

2020-08-26 Thread GitBox


t-vi commented on pull request #6345:
URL: https://github.com/apache/incubator-tvm/pull/6345#issuecomment-681203248


   Seems good to me. If we are giving up on pre-3.3 compat, I should also 
remove the code object v3 workaround I introduced in the spring in favour of 
3.5+. (I'll send a PR.)
   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] hypercubestart commented on a change in pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


hypercubestart commented on a change in pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#discussion_r477713045



##
File path: 3rdparty/posit/posit-wrapper.cc
##
@@ -0,0 +1,211 @@
+#include 
+
+#include 
+
+#include "universal/posit/posit.hpp"
+// must go after posit.hpp
+#include "universal/posit/math/exponent.hpp"
+#include "universal/posit/math/hyperbolic.hpp"
+#include "universal/posit/math/logarithm.hpp"
+#include "universal/posit/math/sqrt.hpp"
+
+TVM_DLL sw::unum::posit<8, 2> Uint8ToPosit8es2(uint8_t in) {
+  sw::unum::bitblock<8> bb;
+  bb = static_cast<unsigned long long>(in);
+  return sw::unum::posit<8, 2>().set(bb);
+}
+
+extern "C" {
+TVM_DLL uint8_t RawPosit8es2(uint8_t in) { return in; }
+
+TVM_DLL uint8_t Posit8es2toUint8(sw::unum::posit<8, 2> in) {
+  return static_cast<uint8_t>(in.get().to_ullong());
+}
+
+TVM_DLL float Posit8es2ToFloat(uint8_t in) { return Uint8ToPosit8es2(in).operator float(); }
+
+TVM_DLL uint8_t FloatToPosit8es2(float in) {
+  auto posit = sw::unum::posit<8, 2>(in);
+  return Posit8es2toUint8(posit);
+}
+
+// TODO(gus) how wide should the input be?
+TVM_DLL uint8_t IntToPosit8es2(int in) { return Posit8es2toUint8(sw::unum::posit<8, 2>(in)); }
+
+TVM_DLL uint8_t Posit8es2Add(uint8_t a, uint8_t b) {
+  return Posit8es2toUint8(Uint8ToPosit8es2(a) + Uint8ToPosit8es2(b));
+}
+
+TVM_DLL uint8_t Posit8es2Sub(uint8_t a, uint8_t b) {
+  return Posit8es2toUint8(Uint8ToPosit8es2(a) - Uint8ToPosit8es2(b));
+}
+
+TVM_DLL uint8_t Posit8es2Mul(uint8_t a, uint8_t b) {
+  return Posit8es2toUint8(Uint8ToPosit8es2(a) * Uint8ToPosit8es2(b));
+}
+
+TVM_DLL uint8_t Posit8es2Div(uint8_t a, uint8_t b) {
+  return Posit8es2toUint8(Uint8ToPosit8es2(a) / Uint8ToPosit8es2(b));
+}
+
+TVM_DLL uint8_t Posit8es2Max(uint8_t a, uint8_t b) {
+  auto a_p = Uint8ToPosit8es2(a);
+  auto b_p = Uint8ToPosit8es2(b);
+  return Posit8es2toUint8(a_p > b_p ? a_p : b_p);
+}
+
+TVM_DLL uint8_t Posit8es2Sqrt(uint8_t a) {
+  return Posit8es2toUint8(sw::unum::sqrt(Uint8ToPosit8es2(a)));
+}
+
+TVM_DLL uint8_t Posit8es2Exp(uint8_t a) {
+  return Posit8es2toUint8(sw::unum::exp(Uint8ToPosit8es2(a)));
+}
+
+TVM_DLL uint8_t Posit8es2Log(uint8_t a) {
+  return Posit8es2toUint8(sw::unum::log(Uint8ToPosit8es2(a)));
+}
+
+TVM_DLL uint8_t Posit8es2Sigmoid(uint8_t a) {
+  auto posit_one = sw::unum::posit<8, 2>(1);
+  return Posit8es2toUint8(posit_one / (sw::unum::exp(-Uint8ToPosit8es2(a)) + posit_one));
+}
+
+TVM_DLL uint8_t Posit8es2Tanh(uint8_t a) {
+  return Posit8es2toUint8(sw::unum::tanh(Uint8ToPosit8es2(a)));
+}
+}
+
+TVM_DLL sw::unum::posit<16, 2> Uint16ToPosit16es2(uint16_t in) {
+  sw::unum::bitblock<16> bb;
+  bb = static_cast<unsigned long long>(in);
+  return sw::unum::posit<16, 2>().set(bb);
+}
+
+extern "C" {
+TVM_DLL uint16_t RawPosit16es2(uint16_t in) { return in; }
+
+TVM_DLL uint16_t Posit16es2toUint16(sw::unum::posit<16, 2> in) {
+  return static_cast<uint16_t>(in.get().to_ullong());
+}
+
+TVM_DLL float Posit16es2ToFloat(uint16_t in) { return Uint16ToPosit16es2(in).operator float(); }
+
+TVM_DLL uint16_t FloatToPosit16es2(float in) {
+  auto posit = sw::unum::posit<16, 2>(in);
+  return Posit16es2toUint16(posit);
+}
+
+// TODO(gus) how wide should the input be?
+TVM_DLL uint16_t IntToPosit16es2(int in) { return Posit16es2toUint16(sw::unum::posit<16, 2>(in)); }
+
+TVM_DLL uint16_t Posit16es2Add(uint16_t a, uint16_t b) {
+  return Posit16es2toUint16(Uint16ToPosit16es2(a) + Uint16ToPosit16es2(b));
+}
+
+TVM_DLL uint16_t Posit16es2Sub(uint16_t a, uint16_t b) {
+  return Posit16es2toUint16(Uint16ToPosit16es2(a) - Uint16ToPosit16es2(b));
+}
+
+TVM_DLL uint16_t Posit16es2Mul(uint16_t a, uint16_t b) {
+  return Posit16es2toUint16(Uint16ToPosit16es2(a) * Uint16ToPosit16es2(b));
+}
+
+TVM_DLL uint16_t Posit16es2Div(uint16_t a, uint16_t b) {
+  return Posit16es2toUint16(Uint16ToPosit16es2(a) / Uint16ToPosit16es2(b));
+}
+
+TVM_DLL uint16_t Posit16es2Max(uint16_t a, uint16_t b) {
+  auto a_p = Uint16ToPosit16es2(a);
+  auto b_p = Uint16ToPosit16es2(b);
+  return Posit16es2toUint16(a_p > b_p ? a_p : b_p);
+}
+
+TVM_DLL uint16_t Posit16es2Sqrt(uint16_t a) {
+  return Posit16es2toUint16(sw::unum::sqrt(Uint16ToPosit16es2(a)));
+}
+
+TVM_DLL uint16_t Posit16es2Exp(uint16_t a) {
+  return Posit16es2toUint16(sw::unum::exp(Uint16ToPosit16es2(a)));
+}
+
+TVM_DLL uint16_t Posit16es2Log(uint16_t a) {
+  return Posit16es2toUint16(sw::unum::log(Uint16ToPosit16es2(a)));
+}
+
+TVM_DLL uint16_t Posit16es2Sigmoid(uint16_t a) {
+  auto posit_one = sw::unum::posit<16, 2>(1);
+  return Posit16es2toUint16(posit_one / (sw::unum::exp(-Uint16ToPosit16es2(a)) + posit_one));
+}
+
+TVM_DLL uint16_t Posit16es2Tanh(uint16_t a) {
+  return Posit16es2toUint16(sw::unum::tanh(Uint16ToPosit16es2(a)));
+}
+}
+
+TVM_DLL sw::unum::posit<32, 2> Uint32ToPosit32es2(uint32_t in) {
+  sw::unum::bitblock<32> bb;
+  bb = static_cast<unsigned long long>(in);
+  return sw::unum::posit<32, 2>().set(bb);
+}
+
+extern "C" {
+TVM_DLL uint32_t RawPosit32es2(uint32_t in) { return in; }

Review commen
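These extern "C" wrappers are wired into TVM through the Bring Your Own Datatypes registration; a sketch mirroring the `setup()` in the test file quoted further down in this digest:

```python
from tvm.target.datatype import register, register_op, create_lower_func

# Register the custom type; the code (131) is arbitrary as long as it is
# greater than 128 and not already taken.
register("posites2", 131)

# Lower float -> posit casts on LLVM to the extern wrappers above,
# keyed by (src bits, dst bits).
register_op(create_lower_func({
    (32, 32): "FloatToPosit32es2",
    (32, 16): "FloatToPosit16es2",
    (32, 8): "FloatToPosit8es2",
}), "Cast", "llvm", "float", "posites2")
```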

[GitHub] [incubator-tvm] tkonolige opened a new pull request #6346: [TESTS] add gpuonly tests for python unittests and integration

2020-08-26 Thread GitBox


tkonolige opened a new pull request #6346:
URL: https://github.com/apache/incubator-tvm/pull/6346


   In preparation for #6331 
   @tqchen 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] gussmith23 commented on a change in pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


gussmith23 commented on a change in pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#discussion_r477650626



##
File path: tests/python/unittest/test_custom_datatypes.py
##
@@ -0,0 +1,396 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Utilities for changing datatypes of models."""
+import tvm
+import tvm.topi.testing
+import numpy as np
+import pytest
+from numpy.random import MT19937, RandomState, SeedSequence
+from tvm import relay
+from tvm.relay.testing.inception_v3 import get_workload as get_inception
+from tvm.relay.testing.resnet import get_workload as get_resnet
+from tvm.relay.testing.layers import batch_norm_infer
+from tvm.relay.testing.mobilenet import get_workload as get_mobilenet
+from tvm.target.datatype import register, register_min_func, register_op, create_lower_func, lower_ite, lower_call_pure_extern
+from tvm.tir.op import call_pure_extern
+
+# we use a random seed to generate input_data
+# to guarantee stable tests
+rs = RandomState(MT19937(SeedSequence(123456789)))
+
+def convert_ndarray(dst_dtype, array):
+    """Converts NDArray(s) into the specified datatype"""
+    x = relay.var('x', shape=array.shape, dtype=str(array.dtype))
+    cast = relay.Function([x], x.astype(dst_dtype))
+    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
+        return relay.create_executor('graph').evaluate(cast)(array)
+
+def change_dtype(src, dst, module, params):
+    module = relay.frontend.ChangeDatatype(src, dst)(module)
+    module = relay.transform.InferType()(module)
+    params = {k: convert_ndarray(dst, v) for k, v in params.items()}
+    return module, params
+
+def compare(module, input, src_dtype, dst_dtype, rtol, atol, params = {}, target='llvm'):
+    module = relay.transform.SimplifyInference()(module)
+    ex = relay.create_executor("graph", mod=module)
+
+    correct = ex.evaluate()(*input, **params)
+
+    module, converted_params = change_dtype(src_dtype, dst_dtype, module, params)
+    ex = relay.create_executor("graph", mod=module, target=target)
+    # converts all inputs to dst_dtype
+    x_converted = [convert_ndarray(dst_dtype, arr) for arr in input]
+
+    # Vectorization is not implemented with custom datatypes
+    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
+        maybe_correct = ex.evaluate()(*x_converted, **converted_params)
+    # currently this only works for comparing single output
+    maybe_correct_converted = convert_ndarray(src_dtype, maybe_correct)
+    np.testing.assert_allclose(maybe_correct_converted.asnumpy(),
+                               correct.asnumpy(),
+                               rtol=rtol,
+                               atol=atol)
+
+@pytest.fixture(scope="session", autouse=True)
+def setup():
+    """Set up tests
+
+    Currently, this registers some custom datatypes using the Bring Your
+    Own Datatypes framework.
+    """
+
+    # To use datatype operations in an external library, you should first load
+    # the library containing the datatype implementation:
+    # CDLL("libposit.so", RTLD_GLOBAL)
+    # In this case, the datatype library we are using is built right into TVM,
+    # so we do not need to explicitly load any library.
+
+    # You can pick a code for your datatype arbitrarily, as long as it is
+    # greater than 128 and has not already been chosen.
+
+    register("posites2", 131)
+
+    register_op(create_lower_func(
+        {
+            (32, 32): "FloatToPosit32es2",
+            (32, 16): "FloatToPosit16es2",
+            (32, 8): 'FloatToPosit8es2',
+        }),
+        "Cast", "llvm", "float", "posites2")
+    register_op(create_lower_func(
+        {
+            (32, 32): "Posit32es2ToFloat",
+            (16, 32): 'Posit16es2ToFloat',
+            (8, 32): 'Posit8es2ToFloat',
+        }),
+        "Cast", "llvm", "posites2", "float")
+    register_op(create_lower_func({
+        32: 'Posit32es2Add',
+        16: 'Posit16es2Add',
+        8: 'Posit8es2Add'
+    }), "Add", "llvm", "posites2")
+    register_op(create_lower_func({
+        32: 'Posit32es2Sub',
+        16: 'Posit16es2Sub',
+        8: 'Posit8es2Sub'
+    }), "Sub", "llvm", "posites2")
+
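
For readers following along, a minimal sketch of what the registration above
enables (hedged; it assumes the same BYODT APIs imported in this diff, and the
tiny Relay program is illustrative, not part of the PR):

    import tvm
    from tvm import relay
    from tvm.target.datatype import register

    register("posites2", 131)  # any unused type code above 128 works

    # Cast a float32 tensor into the 16-bit posit type registered above.
    x = relay.var("x", shape=(3,), dtype="float32")
    program = relay.Function([x], x.astype("custom[posites2]16"))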

[GitHub] [incubator-tvm] gussmith23 commented on a change in pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


gussmith23 commented on a change in pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#discussion_r477648461



##
File path: python/tvm/target/datatype.py
##
@@ -135,8 +166,40 @@ def lower(op):
 dtype = "uint" + str(t.bits)
 if t.lanes > 1:
 dtype += "x" + str(t.lanes)
-if isinstance(op, (_Cast, _FloatImm)):
-return tvm.tir.call_pure_extern(dtype, extern_func_name, op.value)
-return tvm.tir.call_pure_extern(dtype, extern_func_name, op.a, op.b)
+if isinstance(op, _Cast):
+src_bits = bit_length(op.value.dtype)
+return call_pure_extern(dtype, extern_func_map[(src_bits, 
t.bits)], op.value)
+if isinstance(op, _FloatImm):
+return call_pure_extern(dtype, extern_func_map[t.bits], op.value)
+if isinstance(op, _Call):
+return call_pure_extern(dtype, extern_func_map[t.bits], *op.args)
+if isinstance(op, _BinaryOpExpr):
+return call_pure_extern(dtype, extern_func_map[t.bits], op.a, op.b)

Review comment:
   Yeah, please do! Better error messages are never a bad idea 😄 
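
A sketch of what such a message could look like, assuming the `extern_func_map`
dict from the quoted `lower()`; the helper name is hypothetical:

    def lookup_extern_func(extern_func_map, key):
        # Fail with the registered keys listed, instead of a bare KeyError.
        if key not in extern_func_map:
            raise RuntimeError(
                "no lowering function registered for {} (registered: {})".format(
                    key, sorted(extern_func_map.keys())))
        return extern_func_map[key]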





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] gussmith23 commented on a change in pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


gussmith23 commented on a change in pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#discussion_r477647304



##
File path: python/tvm/relay/frontend/change_datatype.py
##
@@ -0,0 +1,88 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=unused-argument
+"""Change Datatype Pass"""
+from ..function import Function
+from ..expr_functor import ExprMutator
+from ..transform.transform import function_pass
+from ..expr import var, bind
+
+# TODO(@gussmith23) what's the right opt level here?
+@function_pass(opt_level=0)
+class ChangeDatatype(ExprMutator):
+    """Mutator for changing the datatype of Relay programs.
+
+    Example:
+
+    .. code-block:: python
+
+        from tvm.relay.testing.inception_v3 import get_workload
+        expr, params = get_workload()
+
+        def change_dtype(src, dst, expr, params):
+            cdtype = ChangeDatatype(src, dst)
+            expr = cdtype.visit(expr)
+            expr = relay.ir_pass.infer_type(expr)
+            params = dict((p, tvm.nd.array(params[p].asnumpy().astype(dst))) for p in params)
+            return expr, params
+    """
+    def __init__(self, src, dst):
+        self.src = src
+        self.dst = dst
+        super().__init__()
+
+    def transform_function(self, func, mod, ctx):
+        return self.visit(func)
+
+    def visit_constant(self, const):
+        if const.data.dtype == self.src:
+            return const.astype(self.dst)
+        # TODO(hypercubestart): should we raise an error in this case, or return const?
+        return const

Review comment:
   As I understand it, we may visit `const`s that are of different types -- 
types other than `self.src`, which is the type we're converting. If that's the 
case, we should leave those types alone. Check my logic on that though!





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] gussmith23 commented on a change in pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


gussmith23 commented on a change in pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#discussion_r477643995



##
File path: 3rdparty/posit/posit-wrapper.cc
##
@@ -0,0 +1,211 @@
+#include <tvm/runtime/c_runtime_api.h>
+
+#include <cstdint>
+
+#include "universal/posit/posit.hpp"
+// must go after posit.hpp
+#include "universal/posit/math/exponent.hpp"
+#include "universal/posit/math/hyperbolic.hpp"
+#include "universal/posit/math/logarithm.hpp"
+#include "universal/posit/math/sqrt.hpp"
+
+TVM_DLL sw::unum::posit<8, 2> Uint8ToPosit8es2(uint8_t in) {
+  sw::unum::bitblock<8> bb;
+  bb = static_cast<uint64_t>(in);
+  return sw::unum::posit<8, 2>().set(bb);
+}
+
+extern "C" {
+TVM_DLL uint8_t RawPosit8es2(uint8_t in) { return in; }
+
+TVM_DLL uint8_t Posit8es2toUint8(sw::unum::posit<8, 2> in) {
+  return static_cast<uint8_t>(in.get().to_ullong());
+}
+
+TVM_DLL float Posit8es2ToFloat(uint8_t in) { return Uint8ToPosit8es2(in).operator float(); }
+
+TVM_DLL uint8_t FloatToPosit8es2(float in) {
+  auto posit = sw::unum::posit<8, 2>(in);
+  return Posit8es2toUint8(posit);
+}
+
+// TODO(gus) how wide should the input be?
+TVM_DLL uint8_t IntToPosit8es2(int in) { return Posit8es2toUint8(sw::unum::posit<8, 2>(in)); }
+
+TVM_DLL uint8_t Posit8es2Add(uint8_t a, uint8_t b) {
+  return Posit8es2toUint8(Uint8ToPosit8es2(a) + Uint8ToPosit8es2(b));
+}
+
+TVM_DLL uint8_t Posit8es2Sub(uint8_t a, uint8_t b) {
+  return Posit8es2toUint8(Uint8ToPosit8es2(a) - Uint8ToPosit8es2(b));
+}
+
+TVM_DLL uint8_t Posit8es2Mul(uint8_t a, uint8_t b) {
+  return Posit8es2toUint8(Uint8ToPosit8es2(a) * Uint8ToPosit8es2(b));
+}
+
+TVM_DLL uint8_t Posit8es2Div(uint8_t a, uint8_t b) {
+  return Posit8es2toUint8(Uint8ToPosit8es2(a) / Uint8ToPosit8es2(b));
+}
+
+TVM_DLL uint8_t Posit8es2Max(uint8_t a, uint8_t b) {
+  auto a_p = Uint8ToPosit8es2(a);
+  auto b_p = Uint8ToPosit8es2(b);
+  return Posit8es2toUint8(a_p > b_p ? a_p : b_p);
+}
+
+TVM_DLL uint8_t Posit8es2Sqrt(uint8_t a) {
+  return Posit8es2toUint8(sw::unum::sqrt(Uint8ToPosit8es2(a)));
+}
+
+TVM_DLL uint8_t Posit8es2Exp(uint8_t a) {
+  return Posit8es2toUint8(sw::unum::exp(Uint8ToPosit8es2(a)));
+}
+
+TVM_DLL uint8_t Posit8es2Log(uint8_t a) {
+  return Posit8es2toUint8(sw::unum::log(Uint8ToPosit8es2(a)));
+}
+
+TVM_DLL uint8_t Posit8es2Sigmoid(uint8_t a) {
+  auto posit_one = sw::unum::posit<8, 2>(1);
+  return Posit8es2toUint8(posit_one / (sw::unum::exp(-Uint8ToPosit8es2(a)) + posit_one));
+}
+
+TVM_DLL uint8_t Posit8es2Tanh(uint8_t a) {
+  return Posit8es2toUint8(sw::unum::tanh(Uint8ToPosit8es2(a)));
+}
+}
+
+TVM_DLL sw::unum::posit<16, 2> Uint16ToPosit16es2(uint16_t in) {
+  sw::unum::bitblock<16> bb;
+  bb = static_cast<uint64_t>(in);
+  return sw::unum::posit<16, 2>().set(bb);
+}
+
+extern "C" {
+TVM_DLL uint16_t RawPosit16es2(uint16_t in) { return in; }
+
+TVM_DLL uint16_t Posit16es2toUint16(sw::unum::posit<16, 2> in) {
+  return static_cast<uint16_t>(in.get().to_ullong());
+}
+
+TVM_DLL float Posit16es2ToFloat(uint16_t in) { return Uint16ToPosit16es2(in).operator float(); }
+
+TVM_DLL uint16_t FloatToPosit16es2(float in) {
+  auto posit = sw::unum::posit<16, 2>(in);
+  return Posit16es2toUint16(posit);
+}
+
+// TODO(gus) how wide should the input be?
+TVM_DLL uint16_t IntToPosit16es2(int in) { return Posit16es2toUint16(sw::unum::posit<16, 2>(in)); }
+
+TVM_DLL uint16_t Posit16es2Add(uint16_t a, uint16_t b) {
+  return Posit16es2toUint16(Uint16ToPosit16es2(a) + Uint16ToPosit16es2(b));
+}
+
+TVM_DLL uint16_t Posit16es2Sub(uint16_t a, uint16_t b) {
+  return Posit16es2toUint16(Uint16ToPosit16es2(a) - Uint16ToPosit16es2(b));
+}
+
+TVM_DLL uint16_t Posit16es2Mul(uint16_t a, uint16_t b) {
+  return Posit16es2toUint16(Uint16ToPosit16es2(a) * Uint16ToPosit16es2(b));
+}
+
+TVM_DLL uint16_t Posit16es2Div(uint16_t a, uint16_t b) {
+  return Posit16es2toUint16(Uint16ToPosit16es2(a) / Uint16ToPosit16es2(b));
+}
+
+TVM_DLL uint16_t Posit16es2Max(uint16_t a, uint16_t b) {
+  auto a_p = Uint16ToPosit16es2(a);
+  auto b_p = Uint16ToPosit16es2(b);
+  return Posit16es2toUint16(a_p > b_p ? a_p : b_p);
+}
+
+TVM_DLL uint16_t Posit16es2Sqrt(uint16_t a) {
+  return Posit16es2toUint16(sw::unum::sqrt(Uint16ToPosit16es2(a)));
+}
+
+TVM_DLL uint16_t Posit16es2Exp(uint16_t a) {
+  return Posit16es2toUint16(sw::unum::exp(Uint16ToPosit16es2(a)));
+}
+
+TVM_DLL uint16_t Posit16es2Log(uint16_t a) {
+  return Posit16es2toUint16(sw::unum::log(Uint16ToPosit16es2(a)));
+}
+
+TVM_DLL uint16_t Posit16es2Sigmoid(uint16_t a) {
+  auto posit_one = sw::unum::posit<16, 2>(1);
+  return Posit16es2toUint16(posit_one / (sw::unum::exp(-Uint16ToPosit16es2(a)) + posit_one));
+}
+
+TVM_DLL uint16_t Posit16es2Tanh(uint16_t a) {
+  return Posit16es2toUint16(sw::unum::tanh(Uint16ToPosit16es2(a)));
+}
+}
+
+TVM_DLL sw::unum::posit<32, 2> Uint32ToPosit32es2(uint32_t in) {
+  sw::unum::bitblock<32> bb;
+  bb = static_cast<uint64_t>(in);
+  return sw::unum::posit<32, 2>().set(bb);
+}
+
+extern "C" {
+TVM_DLL uint32_t RawPosit32es2(uint32_t in) { return in; }

Review comment:
 

[GitHub] [incubator-tvm] gussmith23 commented on a change in pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


gussmith23 commented on a change in pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#discussion_r477642914



##
File path: 3rdparty/posit/posit-wrapper.cc
##
@@ -0,0 +1,211 @@
+#include 
+
+#include 
+
+#include "universal/posit/posit.hpp"
+// must go after posit.hpp
+#include "universal/posit/math/exponent.hpp"
+#include "universal/posit/math/hyperbolic.hpp"
+#include "universal/posit/math/logarithm.hpp"
+#include "universal/posit/math/sqrt.hpp"

Review comment:
   sg!





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] gussmith23 commented on pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


gussmith23 commented on pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#issuecomment-681159395


   Thanks all, will do!



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] tkonolige commented on a change in pull request #6333: Add docker/lint.sh, for running dockerized lint scripts locally

2020-08-26 Thread GitBox


tkonolige commented on a change in pull request #6333:
URL: https://github.com/apache/incubator-tvm/pull/6333#discussion_r477618082



##
File path: tests/lint/clang_format.sh
##
@@ -0,0 +1,23 @@
+#!/bin/bash -e
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# check latest change, for squash merge into master
+./tests/lint/git-clang-format.sh HEAD~1

Review comment:
   Maybe we should use [git merge 
base](https://git-scm.com/docs/git-merge-base) so we can handle multiple commits
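
A sketch of that idea (assuming origin/master is the upstream branch; the
script path is the one quoted above):

    import subprocess

    # Lint everything on this branch, not just the last commit.
    base = subprocess.check_output(
        ["git", "merge-base", "HEAD", "origin/master"], text=True).strip()
    subprocess.run(["./tests/lint/git-clang-format.sh", base], check=True)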





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] tqchen commented on pull request #6331: [TESTS] Refactor tests to run on either the GPU or CPU

2020-08-26 Thread GitBox


tqchen commented on pull request #6331:
URL: https://github.com/apache/incubator-tvm/pull/6331#issuecomment-681146053


   @tkonolige because it uses the old Jenkinsfile (before it gets merged). Please 
try to send another PR to add `gpuonly_test` that redirects to the normal 
integration test first, then we can tweak the scripts once that PR gets merged 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] mvermeulen commented on pull request #6345: ROCm changed name of library and removed the old one in ROCm 3.7 release.

2020-08-26 Thread GitBox


mvermeulen commented on pull request #6345:
URL: https://github.com/apache/incubator-tvm/pull/6345#issuecomment-681141322


   The new library name was introduced in ROCm 3.5.  The old library name was 
deleted in ROCm 3.7.
   
   So this change makes ROCm 3.5 or later a requirement for mainline TVM, and 
fixes a break with ROCm 3.7 when the old name was deleted.
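
A hedged sketch of how a loader can cope with such a rename (the library names
here are placeholders, not the ones from the PR diff):

    import ctypes

    lib = None
    for name in ("libnew-name.so", "libold-name.so"):  # try the new name first
        try:
            lib = ctypes.CDLL(name)
            break
        except OSError:
            continue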



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] tkonolige commented on pull request #6331: [TESTS] Refactor tests to run on either the GPU or CPU

2020-08-26 Thread GitBox


tkonolige commented on pull request #6331:
URL: https://github.com/apache/incubator-tvm/pull/6331#issuecomment-681138755


   @tqchen I think I've addressed all issues. Do I need to do something to have 
Jenkins use the new Jenkinsfile?



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] masahi commented on pull request #6345: ROCm changed name of library and removed the old one in ROCm 3.7 release.

2020-08-26 Thread GitBox


masahi commented on pull request #6345:
URL: https://github.com/apache/incubator-tvm/pull/6345#issuecomment-681123947


   Does this change make rocm 3.7 a requirement for TVM? This is only for 
confirmation, I'm +1 for tracking the latest release.
   
   cc @t-vi 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] comaniac commented on a change in pull request #6342: [CI][Contrib] Add Vitis-AI docker installation

2020-08-26 Thread GitBox


comaniac commented on a change in pull request #6342:
URL: https://github.com/apache/incubator-tvm/pull/6342#discussion_r477585350



##
File path: docker/Dockerfile.ci_cpu
##
@@ -83,3 +83,7 @@ RUN bash /install/ubuntu_install_caffe.sh
 # Github Arm(R) Ethos(TM)-N NPU driver
 COPY install/ubuntu_install_ethosn_driver_stack.sh /install/ubuntu_install_ethosn_driver_stack.sh
 RUN bash /install/ubuntu_install_ethosn_driver_stack.sh
+
+# Vitis-AI PyXIR CI deps
+COPY install/ubuntu_install_vai_packages.sh /install/ubuntu_install_vai_packages.sh
+RUN bash /install/ubuntu_install_vai_packages.sh

Review comment:
   Hi could you explain the purpose of docker related files/scripts in the 
main integration PR? Ideally we should separate all environment related changes 
to this PR so that we could see the whole picture.





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] mvermeulen opened a new pull request #6345: ROCm changed name of library and removed the old one in ROCm 3.7 release.

2020-08-26 Thread GitBox


mvermeulen opened a new pull request #6345:
URL: https://github.com/apache/incubator-tvm/pull/6345


   Thanks for contributing to TVM!   Please refer to guideline 
https://tvm.apache.org/docs/contribute/ for useful information and tips. After 
the pull request is submitted, please request code reviews from 
[Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers)
 by @ them in the pull request thread.
   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] zhiics commented on a change in pull request #6337: [RELAY][VM] Enable heterogeneous execution for Relay VM

2020-08-26 Thread GitBox


zhiics commented on a change in pull request #6337:
URL: https://github.com/apache/incubator-tvm/pull/6337#discussion_r477576510



##
File path: include/tvm/runtime/vm/bytecode.h
##
@@ -204,6 +207,13 @@ struct Instruction {
   RegName tensor;
   RegName newshape;
 } reshape_tensor;
+struct /* DeviceCopy Operands */ {
+  RegName src;
+  /*! \brief The source device type. */
+  Index src_device_type;

Review comment:
   It should be device type (i.e. the source is kDLCPU and destination is 
kDLGPU). We haven't really effectively used device id so far.
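
For reference, a quick way to see the device-type values in question (1 is
kDLCPU, 2 is kDLGPU):

    import tvm

    src, dst = tvm.cpu(0), tvm.gpu(0)
    print(src.device_type, dst.device_type)  # prints: 1 2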





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] jtuyls commented on a change in pull request #6342: [CI][Contrib] Add Vitis-AI docker installation

2020-08-26 Thread GitBox


jtuyls commented on a change in pull request #6342:
URL: https://github.com/apache/incubator-tvm/pull/6342#discussion_r477572385



##
File path: docker/Dockerfile.ci_cpu
##
@@ -83,3 +83,7 @@ RUN bash /install/ubuntu_install_caffe.sh
 # Github Arm(R) Ethos(TM)-N NPU driver
 COPY install/ubuntu_install_ethosn_driver_stack.sh /install/ubuntu_install_ethosn_driver_stack.sh
 RUN bash /install/ubuntu_install_ethosn_driver_stack.sh
+
+# Vitis-AI PyXIR CI deps
+COPY install/ubuntu_install_vai_packages.sh /install/ubuntu_install_vai_packages.sh
+RUN bash /install/ubuntu_install_vai_packages.sh

Review comment:
   @leandron Yes, we will make it more descriptive. However, in the main 
Vitis-AI integration PR we add another script so it wouldn't be too clear if we 
call this ```ubuntu_install_vitis_ai.sh```. Also, as this script is only used 
for CI I think ```ubuntu_install_vitis_ai_packages_ci.sh``` would be better. 
What do you think?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] hogepodge commented on pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


hogepodge commented on pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#issuecomment-681074598


   At a high level, the docs organization is described in this discussion 
thread.
   
   https://discuss.tvm.ai/t/rfc-tvm-documentation-refactor/7456
   
   It sounds like the document you're producing is either an explainer or a 
reference guide. If it's a design guide, or a higher level overview of the 
design, explainer is the best classification for it. If it's actually 
documenting APIs and is meant to be used as a reference for building on top of 
it, treat it like a reference file. @gussmith23 I think the best thing to do is 
send a PR with the documentation and add me to the review. We can sort out 
where it belongs or how to organize it once we see what it looks like.



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] comaniac commented on a change in pull request #6335: [BYOC][ETHOSN] Add support for quantized convolution

2020-08-26 Thread GitBox


comaniac commented on a change in pull request #6335:
URL: https://github.com/apache/incubator-tvm/pull/6335#discussion_r477513822



##
File path: src/relay/backend/contrib/ethosn/codegen.cc
##
@@ -50,6 +50,16 @@ bool IsEthosnOp(const Call& call, const std::string& op_name) {
   }
 }
 
+bool IsEthosnFunc(const Call& call, const std::string& op_name) {

Review comment:
   It seems to me that this function is not just for checking if a call is 
for an Ethos-N composite function. Maybe we should move it to a common place, 
as should IsEthosnOp.

##
File path: tests/python/contrib/test_ethosn/infrastructure.py
##
@@ -160,6 +162,26 @@ def inference_result(checksum, outputs):
 return False
 
 
+def generate_trials(space, r_factor=3):

Review comment:
   This function is not being used.





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] slyubomirsky commented on pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


slyubomirsky commented on pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#issuecomment-681066725


   > @slyubomirsky I heard from Zach that you have been looking into how 
documentation is done in modern TVM. I need to document the datatypes 
framework, which is distributed across multiple files, and I'm trying to decide 
on a central place to keep the documentation. I'm wondering if anything you've 
learned recently would be relevant here?
   
   As Jared said, @hogepodge would be best positioned to comment. What you are 
describing sounds like an "explainer" (describing the overall design and 
rationale as opposed to a tutorial), which should go in the `docs` directory 
with similar other "explainers."



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] icemelon9 commented on a change in pull request #6337: [RELAY][VM] Enable heterogeneous execution for Relay VM

2020-08-26 Thread GitBox


icemelon9 commented on a change in pull request #6337:
URL: https://github.com/apache/incubator-tvm/pull/6337#discussion_r477516283



##
File path: include/tvm/runtime/vm/bytecode.h
##
@@ -204,6 +207,13 @@ struct Instruction {
   RegName tensor;
   RegName newshape;
 } reshape_tensor;
+struct /* DeviceCopy Operands */ {
+  RegName src;
+  /*! \brief The source device type. */
+  Index src_device_type;

Review comment:
   should it be device type or device id?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] comaniac commented on pull request #6343: [BYOC][CONTRIB] Vitis-AI codegen integration

2020-08-26 Thread GitBox


comaniac commented on pull request #6343:
URL: https://github.com/apache/incubator-tvm/pull/6343#issuecomment-681050745


   cc @tmoreau89 @zhiics @masahi @tqchen @liangfu 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] areusch commented on a change in pull request #6319: Remove comparison of unsigned expression < 0 warning

2020-08-26 Thread GitBox


areusch commented on a change in pull request #6319:
URL: https://github.com/apache/incubator-tvm/pull/6319#discussion_r477498454



##
File path: src/runtime/crt/common/ndarray.c
##
@@ -76,7 +76,7 @@ int TVMNDArray_Load(TVMNDArray* ret, const char** strm) {
   *strm += sizeof(ndim);
   dtype = ((DLDataType*)*strm)[0];  // NOLINT(*)
   *strm += sizeof(dtype);
-  if ((ndim < 0) || (ndim > TVM_CRT_MAX_NDIM)) {
+  if (ndim > TVM_CRT_MAX_NDIM) {

Review comment:
   @wrongtest I think it would make more sense here to change the type of 
`ndim` to int to match the dlpack.h header:
   https://github.com/dmlc/dlpack/blob/master/include/dlpack/dlpack.h#L136
   
   then we'd need to keep the original check. would you be up for making this 
change?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] areusch opened a new pull request #6344: Improve interactive docker/bash.sh

2020-08-26 Thread GitBox


areusch opened a new pull request #6344:
URL: https://github.com/apache/incubator-tvm/pull/6344


   Some fixes to hopefully improve docker/bash.sh for use in local development:
   
   * properly quote command-line arguments
   * mount repo at $(pwd) by default; fixes problems when using git-worktree.
   
   Don't know how the latter change will impact the CI, but let's see if it 
breaks it too badly first.
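
On the quoting point, a small illustration of the failure mode (in Python for
brevity; the actual fix is in bash):

    import shlex

    args = ["run", "--name", "my container"]  # note the embedded space
    naive = " ".join(args)                    # splits into four words when re-parsed
    quoted = " ".join(shlex.quote(a) for a in args)  # keeps 'my container' as one argument
    print(naive)
    print(quoted)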



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] areusch commented on pull request #6333: Add docker/lint.sh, for running dockerized lint scripts locally

2020-08-26 Thread GitBox


areusch commented on pull request #6333:
URL: https://github.com/apache/incubator-tvm/pull/6333#issuecomment-681015505


   I updated `pull_request.rst`, I don't know that the docker instructions 
mention linting at all



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] areusch commented on a change in pull request #6333: Add docker/lint.sh, for running dockerized lint scripts locally

2020-08-26 Thread GitBox


areusch commented on a change in pull request #6333:
URL: https://github.com/apache/incubator-tvm/pull/6333#discussion_r477463878



##
File path: tests/python/unittest/test_filter_untracked.py
##
@@ -0,0 +1,178 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+
+def setup_git_repo(worktree=False):
+    git_repo_dir = tempfile.mkdtemp()
+    to_rm = [git_repo_dir]
+    try:
+        subprocess.check_output(['git', 'init', '.'], cwd=git_repo_dir)
+
+        with open(f'{git_repo_dir}/committed', 'w') as committed_f:
+            committed_f.write('normal committed file\n')
+
+        subprocess.check_output(['git', 'add', 'committed'], cwd=git_repo_dir)
+
+        with open(f'{git_repo_dir}/committed-ignored', 'w') as gitignore_f:
+            gitignore_f.write('this file is gitignored, but committed already')
+
+        subprocess.check_output(['git', 'add', 'committed-ignored'], cwd=git_repo_dir)
+
+        with open(f'{git_repo_dir}/.gitignore', 'w') as gitignore_f:
+            gitignore_f.write('ignored\n'
+                              'committed-ignored\n')
+
+        subprocess.check_output(['git', 'add', '.gitignore'], cwd=git_repo_dir)
+
+        # NOTE: explicitly set the author so this test passes in the CI.
+        subprocess.check_output(['git',
+                                 '-c', 'user.name=Unit Test',
+                                 '-c', 'user.email=unit.t...@testing.tvm.ai',
+                                 'commit', '-m', 'initial commit'],
+                                cwd=git_repo_dir)
+
+        if worktree:
+            worktree_dir = tempfile.mkdtemp()
+            to_rm.append(worktree_dir)
+            subprocess.check_output(['git', 'worktree', 'add', worktree_dir], cwd=git_repo_dir)
+            git_repo_dir = worktree_dir
+
+        with open(f'{git_repo_dir}/ignored', 'w') as gitignore_f:
+            gitignore_f.write('this file is gitignored')
+
+        with open(f'{git_repo_dir}/added-to-index', 'w') as added_f:
+            added_f.write('only added to git index\n')
+
+        subprocess.check_output(['git', 'add', 'added-to-index'], cwd=git_repo_dir)
+
+        with open(f'{git_repo_dir}/ignored-added-to-index', 'w') as ignored_f:
+            ignored_f.write('this file is gitignored but in the index already\n')
+
+        subprocess.check_output(['git', 'add', '-f', 'ignored-added-to-index'], cwd=git_repo_dir)
+
+        with open(f'{git_repo_dir}/untracked', 'w') as untracked_f:
+            untracked_f.write('this file is untracked\n')
+
+        os.mkdir(f'{git_repo_dir}/subdir')
+        with open(f'{git_repo_dir}/subdir/untracked', 'w') as untracked_f:
+            untracked_f.write('this file is untracked\n')
+
+        with open(f'{git_repo_dir}/subdir/untracked2', 'w') as untracked_f:
+            untracked_f.write('this file is also untracked\n')
+
+        return git_repo_dir, to_rm
+
+    except Exception:
+        for rm_dir in to_rm:
+            shutil.rmtree(rm_dir)
+        raise
+
+
+def run_test(repo_path, passed_files, filtered_files):
+    test_input = '\n'.join(
+        passed_files +
+        filtered_files +
+        [f'./{f}' for f in passed_files] +
+        [f'./{f}' for f in filtered_files]) + '\n'
+
+    test_script_dir = f'{repo_path}/test-script-dir'
+    os.mkdir(test_script_dir)
+
+    filter_script_path = f'{test_script_dir}/filter_untracked.py'
+    test_script_dirname = os.path.dirname(__file__) or os.getcwd()
+    shutil.copy(os.path.realpath(f'{test_script_dirname}/../../lint/filter_untracked.py'),
+                filter_script_path)
+    filter_proc = subprocess.Popen(
+        [sys.executable, filter_script_path],
+        cwd=repo_path,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        encoding='utf-8')
+    filter_output, _ = filter_proc.communicate(test_input)
+    filter_output_lines = [l for l in filter_output.split('\n') if l]
+
+    for pass_f in passed_files:
+        assert pass_f in filter_output_lines, (
+            f'expected in filter output: {pass_f}\nfilter output: {filter_output}')
+        assert f'./{pass_f}' in filter_output_lines, (
+            f'expected in filter output: ./{pass_f}\nfilter output: {filter_output}')
+
+for fil

[GitHub] [incubator-tvm] jroesch merged pull request #6341: [MSVC] Make able to compile with MSVC

2020-08-26 Thread GitBox


jroesch merged pull request #6341:
URL: https://github.com/apache/incubator-tvm/pull/6341


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[incubator-tvm] branch master updated (942c90b -> 4910c8c)

2020-08-26 Thread jroesch
This is an automated email from the ASF dual-hosted git repository.

jroesch pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from 942c90b  [DYN][RELAY] Resize support for NCHW-convertible layouts 
(#6293)
 add 4910c8c  [MSVC] Make able to compile with MSVC (#6341)

No new revisions were added by this update.

Summary of changes:
 src/target/llvm/codegen_hexagon.cc | 2 +-
 src/te/autodiff/ad_simplify.cc | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)



[GitHub] [incubator-tvm] jroesch commented on pull request #5812: Bring Your Own Datatypes

2020-08-26 Thread GitBox


jroesch commented on pull request #5812:
URL: https://github.com/apache/incubator-tvm/pull/5812#issuecomment-680991774


   > @slyubomirsky I heard from Zach that you have been looking into how 
documentation is done in modern TVM. I need to document the datatypes 
framework, which is distributed across multiple files, and I'm trying to decide 
on a central place to keep the documentation. I'm wondering if anything you've 
learned recently would be relevant here?
   
   @hogepodge maybe you can chime in



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] zhiics edited a comment on pull request #6337: [RELAY][VM] Enable heterogeneous execution for Relay VM

2020-08-26 Thread GitBox


zhiics edited a comment on pull request #6337:
URL: https://github.com/apache/incubator-tvm/pull/6337#issuecomment-680989095


   @mbrookhart Thanks for the reminder, I just enabled all the dynamic op tests 
except for level6, because topk has a problem on GPU for which I already have a 
TODO in test_any. We need to look into it later. 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] zhiics commented on a change in pull request #6337: [RELAY][VM] Enable heterogeneous execution for Relay VM

2020-08-26 Thread GitBox


zhiics commented on a change in pull request #6337:
URL: https://github.com/apache/incubator-tvm/pull/6337#discussion_r477431597



##
File path: python/tvm/relay/transform/memory_alloc.py
##
@@ -19,26 +19,45 @@
 A pass for manifesting explicit memory allocations.
 """
 import numpy as np
+
+from tvm.ir.transform import PassContext, module_pass
+from tvm import nd, container
+from ..function import Function
 from ..expr_functor import ExprVisitor, ExprMutator
 from ..scope_builder import ScopeBuilder
-from . import transform
 from .. import op
 from ... import DataType, register_func
 from .. import ty, expr
 from ..backend import compile_engine
 from ..op.memory import flatten_tuple_type, from_tuple_type, to_tuple_type
 from ...import cpu
 from ..op.memory import alloc_storage
+from ..analysis import context_analysis as _context_analysis
+from ..._ffi.runtime_ctypes import TVMContext
 
 def alloc_tensor(storage, shape, dtype='float32', assert_shape=None):
     offset = expr.const(0, dtype="int64")
     return op.memory.alloc_tensor(storage, offset, shape, dtype, assert_shape)
 
+
 def is_primitive(call):
     return hasattr(call, 'op') and hasattr(call.op, 'attrs') and \
            hasattr(call.op.attrs, 'Primitive') and int(call.op.attrs.Primitive) == 1
 
 
+def is_device_copy(func):
+    """
+    Check if the current relay expression is shape_of call. We can simply check
+    the body of it if it is a function becase the shape_of op is opaque.
+    """
+    if isinstance(func, Function):
+        body = func.body
+        return isinstance(body, expr.Call) and body.op == op.get("device_copy")
+    if isinstance(func, expr.Call):
+        return body.op == op.get("device_copy")

Review comment:
   yeah, it should be func.body. It could be a fused primitive op or a 
device copy callnode. We have a device copy test for context analysis pass but 
not for memory alloc pass because it is usually automatically added
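
A sketch of the helper with that fix applied (same imports as the quoted pass;
the trailing `return False` is an assumption about the intended fallthrough):

    def is_device_copy(func):
        """Check whether the given function or call is a device_copy."""
        if isinstance(func, Function):
            body = func.body
            return isinstance(body, expr.Call) and body.op == op.get("device_copy")
        if isinstance(func, expr.Call):
            return func.op == op.get("device_copy")
        return False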





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] zhiics commented on pull request #6342: [CI][Contrib] Add Vitis-AI docker installation

2020-08-26 Thread GitBox


zhiics commented on pull request #6342:
URL: https://github.com/apache/incubator-tvm/pull/6342#issuecomment-680977561


   cc @tqchen @tmoreau89 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] mbrookhart commented on pull request #6316: Dynamic Strided Slice

2020-08-26 Thread GitBox


mbrookhart commented on pull request #6316:
URL: https://github.com/apache/incubator-tvm/pull/6316#issuecomment-680967384


   @yongwww @electriclilies Can you take another look?



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] mbaret commented on pull request #6335: [BYOC][ETHOSN] Add support for quantized convolution

2020-08-26 Thread GitBox


mbaret commented on pull request #6335:
URL: https://github.com/apache/incubator-tvm/pull/6335#issuecomment-680959658


   cc @masahi @comaniac @zhiics @Leo-arm 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] mbrookhart commented on pull request #6337: [RELAY][VM] Enable heterogeneous execution for Relay VM

2020-08-26 Thread GitBox


mbrookhart commented on pull request #6337:
URL: https://github.com/apache/incubator-tvm/pull/6337#issuecomment-680957809


   Yay! I'm so excited for this! I'll do a deep dive today
   
   There are a number of tests in tests/python/relay/dyn that skip running on GPU 
while waiting for this feature, i.e. 
https://github.com/apache/incubator-tvm/blob/942c90ba7a7b9bccf6d9bce43808aba2bd6c9787/tests/python/relay/dyn/test_dynamic_op_level3.py#L30-L31
   
   Do you want to enable those as part of this test? Or I can do it as a second 
PR.



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] leandron commented on a change in pull request #6342: [CI][Contrib] Add Vitis-AI docker installation

2020-08-26 Thread GitBox


leandron commented on a change in pull request #6342:
URL: https://github.com/apache/incubator-tvm/pull/6342#discussion_r477270729



##
File path: docker/install/ubuntu_install_vai_packages.sh
##
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -e
+set -u
+set -o pipefail
+
+export PYXIR_HOME=/opt/pyxir
+mkdir /opt/pyxir

Review comment:
   nit:as you have it already defined, maybe `mkdir "$PYXIR_HOME"` is 
better.

##
File path: docker/Dockerfile.ci_cpu
##
@@ -83,3 +83,7 @@ RUN bash /install/ubuntu_install_caffe.sh
 # Github Arm(R) Ethos(TM)-N NPU driver
 COPY install/ubuntu_install_ethosn_driver_stack.sh /install/ubuntu_install_ethosn_driver_stack.sh
 RUN bash /install/ubuntu_install_ethosn_driver_stack.sh
+
+# Vitis-AI PyXIR CI deps
+COPY install/ubuntu_install_vai_packages.sh /install/ubuntu_install_vai_packages.sh
+RUN bash /install/ubuntu_install_vai_packages.sh

Review comment:
   suggestion: maybe `ubuntu_install_vitis_ai.sh` would be more descriptive?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] MarisaKirisame commented on a change in pull request #6336: [Relay][Training] Make AutoDiff thread through global function.

2020-08-26 Thread GitBox


MarisaKirisame commented on a change in pull request #6336:
URL: https://github.com/apache/incubator-tvm/pull/6336#discussion_r477230512



##
File path: src/relay/transforms/gradient.cc
##
@@ -438,12 +449,17 @@ Expr BPEmpty() {
 
 struct ReverseAD : ExprMutator {
   using ADVarMap = std::unordered_map;
-
+  using ADGVarMap = std::unordered_map;
+  Optional<IRModule> mod;

Review comment:
   you mean the list from ~2 years ago? that is very out of date. I think 
we can just grep -r for TODO(@M.K.) in the source.





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] anilmartha opened a new pull request #6343: [BYOC][CONTRIB] Vitis-AI codegen integration

2020-08-26 Thread GitBox


anilmartha opened a new pull request #6343:
URL: https://github.com/apache/incubator-tvm/pull/6343


   This PR implements the Vitis-AI codegen using the BYOC flow and enables us 
to offload subgraphs to FPGA DPU accelerators (cloud/edge).
   
   Below are the features added as part of this PR
   
   - Annotate the graph for the given Vitis-AI DPU (Deep Learning Processing 
Unit) target.
   - During codegen phase, convert the relay subgraph into PyXIR and save 
XGraph.
   - Vitis-AI runtime supports SaveToBinary and LoadFromBinary. We save the 
XGraph in our own format and serialize it in the Module by keeping track of 
the path to the files.
   - Tests include a complete resnet18 model test partly offloaded to PyXIR 
for DPU acceleration. However, we don't have access to an FPGA instance in 
the CI docker environment and therefore the offloaded subgraph is just executed 
on CPU. 
   
   This PR depends on following [Vitis-AI CI contribution 
PR](https://github.com/apache/incubator-tvm/pull/6342).
   The RFC for this PR can be found from 
[here](https://discuss.tvm.ai/t/rfc-byoc-vitis-ai-integration/7544).

   This work is co-authored by Jorn Tuyls @jtuyls, Elliott Delaye @edelaye and 
Sumit Nagpal @sumitn-xilinx. 
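
For context, a hedged sketch of the generic BYOC partitioning flow this
integration builds on (the "vitis_ai" target string is from the RFC; the passes
only take effect once op annotations are registered for that target):

    import tvm
    from tvm import relay

    x = relay.var("x", shape=(1, 3, 224, 224))
    mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))

    mod = relay.transform.AnnotateTarget("vitis_ai")(mod)  # mark supported ops
    mod = relay.transform.MergeCompilerRegions()(mod)      # group them into regions
    mod = relay.transform.PartitionGraph()(mod)            # split out external subgraphs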
   
   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] anilmartha opened a new pull request #6342: [CI][Contrib] Add Vitis-AI docker installation

2020-08-26 Thread GitBox


anilmartha opened a new pull request #6342:
URL: https://github.com/apache/incubator-tvm/pull/6342


   This PR adds the Vitis-AI PyXIR dependency for testing Vitis-AI codegen to 
the ci_cpu dockerfile.
   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] leandron commented on a change in pull request #6337: [RELAY][VM] Enable heterogeneous execution for Relay VM

2020-08-26 Thread GitBox


leandron commented on a change in pull request #6337:
URL: https://github.com/apache/incubator-tvm/pull/6337#discussion_r477165445



##
File path: python/tvm/relay/transform/memory_alloc.py
##
@@ -19,26 +19,45 @@
 A pass for manifesting explicit memory allocations.
 """
 import numpy as np
+
+from tvm.ir.transform import PassContext, module_pass
+from tvm import nd, container
+from ..function import Function
 from ..expr_functor import ExprVisitor, ExprMutator
 from ..scope_builder import ScopeBuilder
-from . import transform
 from .. import op
 from ... import DataType, register_func
 from .. import ty, expr
 from ..backend import compile_engine
 from ..op.memory import flatten_tuple_type, from_tuple_type, to_tuple_type
 from ...import cpu
 from ..op.memory import alloc_storage
+from ..analysis import context_analysis as _context_analysis
+from ..._ffi.runtime_ctypes import TVMContext
 
 def alloc_tensor(storage, shape, dtype='float32', assert_shape=None):
     offset = expr.const(0, dtype="int64")
     return op.memory.alloc_tensor(storage, offset, shape, dtype, assert_shape)
 
+
 def is_primitive(call):
     return hasattr(call, 'op') and hasattr(call.op, 'attrs') and \
            hasattr(call.op.attrs, 'Primitive') and int(call.op.attrs.Primitive) == 1
 
 
+def is_device_copy(func):
+    """
+    Check if the current relay expression is shape_of call. We can simply check
+    the body of it if it is a function becase the shape_of op is opaque.
+    """
+    if isinstance(func, Function):
+        body = func.body
+        return isinstance(body, expr.Call) and body.op == op.get("device_copy")
+    if isinstance(func, expr.Call):
+        return body.op == op.get("device_copy")

Review comment:
   Is `body` here expected to have some value, or should it maybe be replaced 
with `func.body`? Or even removed, in case it is fully covered by the previous 
`if` statement? 
   
   Also, I think it would be good to have a test case to cover this statement, 
if there is not one already.

##
File path: python/tvm/relay/analysis/analysis.py
##
@@ -28,6 +28,21 @@
 from .feature import Feature
 
 
+def context_analysis(mod, default_context):
+    """Analyze the device context information of each IR node in a Relay
+    program.
+
+    Parameters
+    ----------
+    expr : tvm.IRModule
+        The input module.

Review comment:
   I guess here `expr` is actually `mod`?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] jainris commented on a change in pull request #6303: [Relay/TOPI][TFLite] Implemented MATRIX_SET_DIAG Operator for Relay/TOPI and TFLite Frontend.

2020-08-26 Thread GitBox


jainris commented on a change in pull request #6303:
URL: https://github.com/apache/incubator-tvm/pull/6303#discussion_r477160005



##
File path: include/tvm/topi/transform.h
##
@@ -1511,6 +1511,35 @@ inline Tensor sparse_to_dense(const Tensor& sparse_indices, const Array
   name, tag);
 }
 
+/*!
+ * \brief Returns a tensor with the diagonal of input tensor replaced with the 
provided diagonal.
+ * \param input input tensor.
+ * \param diagonal values to be filled in the diagonal.
+ * \param name output tensor name.
+ * \param tag output tensor tag.
+ * \return new tensor with given diagonal values.
+ */
+inline Tensor matrix_set_diag(const Tensor& input, const Tensor& diagonal,

Review comment:
   That might take some time.
   Would it be fine to have that in a follow-up PR?
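
For reference, the op's semantics in plain numpy (a hedged illustration for the
2-D case, not the TOPI implementation):

    import numpy as np

    a = np.arange(9, dtype="float32").reshape(3, 3)
    new_diag = np.array([10.0, 11.0, 12.0], dtype="float32")
    out = a.copy()
    np.fill_diagonal(out, new_diag)  # out now carries new_diag on its main diagonal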





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] jainris commented on a change in pull request #6303: [Relay/TOPI][TFLite] Implemented MATRIX_SET_DIAG Operator for Relay/TOPI and TFLite Frontend.

2020-08-26 Thread GitBox


jainris commented on a change in pull request #6303:
URL: https://github.com/apache/incubator-tvm/pull/6303#discussion_r477157597



##
File path: tests/python/frontend/tflite/test_forward.py
##
@@ -2652,6 +2652,77 @@ def test_forward_reverse_v2():
 _test_reverse_v2((5, 6, 4, 2), np.array([2], dtype='int32'), dtype)
 
 
+###
+# MATRIX_SET_DIAG
+# ---
+
+def _test_matrix_set_diag(input_shape, input_type, quantized=False):
+    """ One iteration of MATRIX_SET_DIAG """
+    with tf.Graph().as_default():
+        diagonal_shape = list(input_shape[:-2])
+        diagonal_shape.append(min(input_shape[-2], input_shape[-1]))
+
+        if quantized:
+            # ignoring input_type as quantized requires uint8
+            input = np.random.uniform(0, 256, input_shape).astype('uint8')
+            in_input = tf.placeholder(dtype='float32', shape=input.shape, name="input")
+            inq_input = tf.quantization.fake_quant_with_min_max_args(
+                in_input,
+                min=-100,
+                max=100,
+                name="q_input")
+
+            diagonal = np.random.uniform(0, 256, diagonal_shape).astype('uint8')
+            in_diagonal = tf.placeholder(dtype='float32', shape=diagonal.shape, name="diagonal")
+            inq_diagonal = tf.quantization.fake_quant_with_min_max_args(
+                in_diagonal,
+                min=-100,
+                max=100,
+                name="q_diagonal")
+
+            input_range = {'q_input': (-100, 100), 'q_diagonal': (-100, 100)}
+
+            out = array_ops.matrix_set_diag(inq_input, inq_diagonal)
+            out = tf.quantization.fake_quant_with_min_max_args(
+                out,
+                min=-100,
+                max=100,
+                name="out")
+
+            compare_tflite_with_tvm(
+                [input, diagonal],
+                ["q_input", "q_diagonal"],
+                [inq_input, inq_diagonal],
+                [out],
+                quantized=True,
+                input_range=input_range)
+        else:
+            input = np.random.uniform(0, 100, input_shape).astype(input_type)
+            diagonal = np.random.uniform(0, 100, diagonal_shape).astype(input_type)
+
+            in_input = tf.placeholder(dtype=input.dtype, shape=input.shape, name="input")
+            in_diagonal = tf.placeholder(dtype=diagonal.dtype, shape=diagonal.shape, name="diagonal")
+
+            out = array_ops.matrix_set_diag(in_input, in_diagonal)
+
+            compare_tflite_with_tvm(
+                [input, diagonal],
+                ["input", "diagonal"],
+                [in_input, in_diagonal],
+                [out])
+
+def test_forward_matrix_set_diag():
+""" MATRIX_SET_DIAG """

Review comment:
   The API docs seem to suggest that matrix_set_diag is present even in 
version '1.0'.
   So, is there some other reason to add this check?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] kamaci commented on issue #6332: [VOTE] Apache TVM Graduation

2020-08-26 Thread GitBox


kamaci commented on issue #6332:
URL: https://github.com/apache/incubator-tvm/issues/6332#issuecomment-680756086


   Apart from being a great project, TVM has an active and growing community. It has a good understanding of the Apache Way [1], which is most important. I think that TVM is ready for graduation.
   
   I am +1 for graduation!
   
   [1] https://www.apache.org/theapacheway/



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] leandron commented on a change in pull request #6302: [tvmc] command line driver 'compile' (part 2/4)

2020-08-26 Thread GitBox


leandron commented on a change in pull request #6302:
URL: https://github.com/apache/incubator-tvm/pull/6302#discussion_r477133826



##
File path: python/tvm/driver/tvmc/compiler.py
##
@@ -0,0 +1,407 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import argparse
+import logging
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm._ffi.runtime_ctypes import TVMContext
+from tvm.contrib import cc
+from tvm.contrib import util
+from tvm.relay.op.contrib import get_pattern_table
+
+from . import common, frontends
+from .main import register_parser
+
+# A dictionary of target aliases to simplify the command lines provided by end users
+TARGET_ALIASES = {
+"aarch64": "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon"
+}
+
+#  A list of valid targets (including aliases) to be used in "--target"
+VALID_TARGETS = ["aarch64", "llvm"]
+
+DEFAULT_TARGET = "llvm"
+DUMP_FORMATS = ["relay", "ll", "asm"]
+
+
+def parse_target(targets_str):
+""" Parsing function for comma separated target syntax. """
+targets = targets_str.split(",")
+for target in targets:
+if target not in VALID_TARGETS:
+raise argparse.ArgumentTypeError(f"unrecognized target: {target}")
+return targets
+
+
+@register_parser
+def add_compile_parser(subparsers):
+""" Include parser for 'compile' subcommand """
+
+parser = subparsers.add_parser("compile", help="compile a model")
+parser.set_defaults(func=drive_compile)
+parser.add_argument(
+"--cross-compiler",
+default="",
+help="the cross compiler to use to generate target libraries",
+)
+parser.add_argument(
+"--dump-codegen", default="", choices=DUMP_FORMATS, help="dump 
generated code"
+)
+parser.add_argument(
+"--language",
+choices=frontends.get_frontends(),
+help="specify input language",
+)
+parser.add_argument(
+"--input-shape",
+type=common.parse_input_shapes,
+metavar="INPUT_SHAPE,[INPUT_SHAPE]...",
+help="for pytorch, e.g. '(1,3,224,224)'",
+)
+parser.add_argument(
+"-o",
+"--output",
+default="a.tar",
+help="output the compiled module to an archive",
+)
+parser.add_argument(
+"--sanitize-diagnostics",
+action="store_true",
+default=True,
+dest="sanitize_diagnostics",
+help="enable diagnostic sanitization",
+)
+parser.add_argument(
+"--no-sanitize-diagnostics",
+action="store_false",
+dest="sanitize_diagnostics",
+help="disable diagnostic sanitization",
+)
+parser.add_argument(
+"--target",

Review comment:
   This boils down to the [Target discussion](https://discuss.tvm.ai/t/rfc-tvm-target-specification/6844/52). With the recent changes, it makes sense to expose only one target from the user's point of view, and let the JSON target descriptions deal with the details.
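
   For illustration, the comma-separated `--target` syntax from the diff behaves as follows (a standalone sketch reusing `parse_target` and `VALID_TARGETS` exactly as quoted above):

```python
import argparse

VALID_TARGETS = ["aarch64", "llvm"]

def parse_target(targets_str):
    """ Parsing function for comma separated target syntax. """
    targets = targets_str.split(",")
    for target in targets:
        if target not in VALID_TARGETS:
            raise argparse.ArgumentTypeError(f"unrecognized target: {target}")
    return targets

print(parse_target("llvm,aarch64"))  # ['llvm', 'aarch64']
# parse_target("cuda") raises argparse.ArgumentTypeError
```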





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] leandron commented on a change in pull request #6302: [tvmc] command line driver 'compile' (part 2/4)

2020-08-26 Thread GitBox


leandron commented on a change in pull request #6302:
URL: https://github.com/apache/incubator-tvm/pull/6302#discussion_r477131644



##
File path: python/tvm/driver/tvmc/compiler.py
##
@@ -0,0 +1,407 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import argparse
+import logging
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm._ffi.runtime_ctypes import TVMContext
+from tvm.contrib import cc
+from tvm.contrib import util
+from tvm.relay.op.contrib import get_pattern_table
+
+from . import common, frontends
+from .main import register_parser
+
+# A dictionary of target aliases to simplify the command lines provided by end users
+TARGET_ALIASES = {
+"aarch64": "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon"
+}
+
+#  A list of valid targets (including aliases) to be used in "--target"
+VALID_TARGETS = ["aarch64", "llvm"]
+
+DEFAULT_TARGET = "llvm"
+DUMP_FORMATS = ["relay", "ll", "asm"]
+
+
+def parse_target(targets_str):
+""" Parsing function for comma separated target syntax. """
+targets = targets_str.split(",")
+for target in targets:
+if target not in VALID_TARGETS:
+raise argparse.ArgumentTypeError(f"unrecognized target: {target}")
+return targets
+
+
+@register_parser
+def add_compile_parser(subparsers):
+""" Include parser for 'compile' subcommand """
+
+parser = subparsers.add_parser("compile", help="compile a model")
+parser.set_defaults(func=drive_compile)
+parser.add_argument(
+"--cross-compiler",
+default="",
+help="the cross compiler to use to generate target libraries",
+)
+parser.add_argument(
+"--dump-codegen", default="", choices=DUMP_FORMATS, help="dump 
generated code"
+)
+parser.add_argument(
+"--language",
+choices=frontends.get_frontends(),
+help="specify input language",
+)
+parser.add_argument(
+"--input-shape",
+type=common.parse_input_shapes,
+metavar="INPUT_SHAPE,[INPUT_SHAPE]...",
+help="for pytorch, e.g. '(1,3,224,224)'",
+)
+parser.add_argument(
+"-o",
+"--output",
+default="a.tar",
+help="output the compiled module to an archive",
+)
+parser.add_argument(
+"--sanitize-diagnostics",
+action="store_true",
+default=True,
+dest="sanitize_diagnostics",
+help="enable diagnostic sanitization",
+)
+parser.add_argument(
+"--no-sanitize-diagnostics",
+action="store_false",
+dest="sanitize_diagnostics",
+help="disable diagnostic sanitization",

Review comment:
   I see what you mean and I agree - I'll remove that, and we can re-add it if we need it in the future. Happy also to hear @jroesch's thoughts.
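
   For context, the enable/disable pair under discussion works like this (a minimal standalone argparse sketch, not tvmc itself):

```python
import argparse

parser = argparse.ArgumentParser()
# Both flags write to the same destination; sanitization defaults to on,
# and --no-sanitize-diagnostics switches it off when given.
parser.add_argument("--sanitize-diagnostics", action="store_true",
                    default=True, dest="sanitize_diagnostics")
parser.add_argument("--no-sanitize-diagnostics", action="store_false",
                    dest="sanitize_diagnostics")

print(parser.parse_args([]).sanitize_diagnostics)                             # True
print(parser.parse_args(["--no-sanitize-diagnostics"]).sanitize_diagnostics)  # False
```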





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] leonwanghui commented on a change in pull request #6339: Improve Rust bindings: Map, Array, String, various IR nodes

2020-08-26 Thread GitBox


leonwanghui commented on a change in pull request #6339:
URL: https://github.com/apache/incubator-tvm/pull/6339#discussion_r477130406



##
File path: rust/tvm-rt/src/array.rs
##
@@ -63,7 +66,7 @@ impl Array {
 array_data.count()
 );
 
-Ok(Array {
+   Ok(Array {

Review comment:
   Need to run `cargo fmt` to fix the formatting here.





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] leandron commented on a change in pull request #6302: [tvmc] command line driver 'compile' (part 2/4)

2020-08-26 Thread GitBox


leandron commented on a change in pull request #6302:
URL: https://github.com/apache/incubator-tvm/pull/6302#discussion_r477130487



##
File path: python/tvm/driver/tvmc/compiler.py
##
@@ -0,0 +1,407 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import argparse
+import logging
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm._ffi.runtime_ctypes import TVMContext
+from tvm.contrib import cc
+from tvm.contrib import util
+from tvm.relay.op.contrib import get_pattern_table
+
+from . import common, frontends
+from .main import register_parser
+
+# A dictionary of target aliases to simplify the command lines provided by end users
+TARGET_ALIASES = {
+"aarch64": "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon"
+}
+
+#  A list of valid targets (including aliases) to be used in "--target"
+VALID_TARGETS = ["aarch64", "llvm"]
+
+DEFAULT_TARGET = "llvm"
+DUMP_FORMATS = ["relay", "ll", "asm"]
+
+
+def parse_target(targets_str):
+""" Parsing function for comma separated target syntax. """
+targets = targets_str.split(",")
+for target in targets:
+if target not in VALID_TARGETS:
+raise argparse.ArgumentTypeError(f"unrecognized target: {target}")
+return targets
+
+
+@register_parser
+def add_compile_parser(subparsers):
+""" Include parser for 'compile' subcommand """
+
+parser = subparsers.add_parser("compile", help="compile a model")
+parser.set_defaults(func=drive_compile)
+parser.add_argument(
+"--cross-compiler",
+default="",
+help="the cross compiler to use to generate target libraries",
+)
+parser.add_argument(
+"--dump-codegen", default="", choices=DUMP_FORMATS, help="dump 
generated code"
+)
+parser.add_argument(
+"--language",

Review comment:
   Yes, that is the case. We try to guess the framework from a list of known extensions per frontend. So in my next update, this will be renamed.
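
   To sketch what that extension-based guessing could look like (the mapping and helper name here are invented for illustration; the real table lives in tvmc's frontends module):

```python
from pathlib import Path

# Hypothetical extension table, for illustration only.
EXT_TO_FRONTEND = {".tflite": "tflite", ".pb": "tensorflow", ".onnx": "onnx"}

def guess_frontend(model_path):
    """Guess the input framework from the model file extension."""
    ext = Path(model_path).suffix.lower()
    if ext not in EXT_TO_FRONTEND:
        raise ValueError(f"cannot guess frontend for {model_path!r}")
    return EXT_TO_FRONTEND[ext]

print(guess_frontend("mobilenet_v1.tflite"))  # tflite
```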





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] jroesch commented on a change in pull request #6336: [Relay][Training] Make AutoDiff thread through global function.

2020-08-26 Thread GitBox


jroesch commented on a change in pull request #6336:
URL: https://github.com/apache/incubator-tvm/pull/6336#discussion_r477122547



##
File path: src/relay/transforms/gradient.cc
##
@@ -438,12 +449,17 @@ Expr BPEmpty() {
 
 struct ReverseAD : ExprMutator {
   using ADVarMap = std::unordered_map;
-
+  using ADGVarMap = std::unordered_map;
+  Optional mod;

Review comment:
   Can you just add a note about this to your ongoing doc on things we might want to refactor?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] jroesch merged pull request #6293: [DYN][RELAY] Resize support for NCHW-convertible layouts

2020-08-26 Thread GitBox


jroesch merged pull request #6293:
URL: https://github.com/apache/incubator-tvm/pull/6293


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[incubator-tvm] branch master updated (617949d -> 942c90b)

2020-08-26 Thread jroesch
This is an automated email from the ASF dual-hosted git repository.

jroesch pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from 617949d  Use auto-tuner to improve conv2d_gemm performance (#6117)
 add 942c90b  [DYN][RELAY] Resize support for NCHW-convertible layouts (#6293)

No new revisions were added by this update.

Summary of changes:
 python/tvm/relay/op/dyn/image/_image.py | 34 ++---
 1 file changed, 14 insertions(+), 20 deletions(-)



[GitHub] [incubator-tvm] jroesch commented on pull request #6339: Improve Rust bindings: Map, Array, String, various IR nodes

2020-08-26 Thread GitBox


jroesch commented on pull request #6339:
URL: https://github.com/apache/incubator-tvm/pull/6339#issuecomment-680733737


   cc @robo-corg 



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] jroesch commented on a change in pull request #6274: [Diagnostics][Relay][InferType] Refactor InferType to work on whole module, and use new diagnostics.

2020-08-26 Thread GitBox


jroesch commented on a change in pull request #6274:
URL: https://github.com/apache/incubator-tvm/pull/6274#discussion_r477119237



##
File path: src/ir/module.cc
##
@@ -174,46 +174,28 @@ tvm::Array IRModuleNode::GetGlobalTypeVars() const {
   return tvm::Array(global_type_vars);
 }
 
-template 
-tvm::Array concat(const tvm::Array& l, const tvm::Array& r) {
-  tvm::Array ret(l);
-  for (const T& t : r) {
-ret.push_back(t);
-  }
-  return ret;
-}
-
-// helper function to run type check
-relay::Function RunTypeCheck(const IRModule& mod, const GlobalVar& var, relay::Function f) {
-  auto func = Downcast(relay::DeDup(std::move(f)));
+void WarnIfMalformed(const IRModule& mod, relay::Function func) {
+  func = Downcast(relay::DeDup(func));
   // Type check the item before we add it to the module.
   auto fv = relay::FreeVars(func);
   auto ftv = relay::FreeTypeVars(func, mod);
+  // TODO(@jroesch): refactor to use diagnostic context
   CHECK_EQ(fv.size(), 0) << "There are free variables: " << fv
- << " in function: " << AsText(func, false);

Review comment:
   We should rewrite all the checkers to use diagnostic context so that we 
can report against the actual source program. 





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477112491



##
File path: src/auto_scheduler/compute_dag.cc
##
@@ -665,9 +666,349 @@ ComputeDAG::ComputeDAG(Array tensors) {
   data_ = std::move(node);
 }
 
+/*!
+ * \brief utility function for kernel_layout_transform
+ */
+inline void parse_kernel_layout(const String& layout, Array* shape,
+std::vector* axes) {
+  int32_t factor = 0;
+  std::string axis = "";
+  for (char c : std::string(layout)) {
+if (c >= 'A' && c <= 'z') {
+  axis += c;
+  if (factor != 0) {
+shape->push_back(factor);
+factor = 0;
+  }
+} else if (c >= '0' && c <= '9') {
+  factor = factor * 10 + c - '0';
+  if (!axis.empty()) {
+axes->push_back(axis);
+axis = "";
+  }
+} else {
+  LOG(FATAL) << "Invalid layout " << layout;
+}
+  }
+  if (!axis.empty()) {
+axes->push_back(axis);
+  }
+}
+
+std::string BaseName(const std::string& str) { return str.substr(0, str.rfind("_")); }
+
+class IndexRewriter : public StmtExprMutator {
+ public:
+  IndexRewriter(const te::Operation& placeholder_op, const std::string& 
new_layout)
+  : placeholder_op_(placeholder_op), new_layout_(new_layout) {}
+
+  PrimExpr Rewrite(PrimExpr expr) { return this->VisitExpr(expr); }
+
+  PrimExpr VisitExpr_(const ProducerLoadNode* op) final {
+te::Tensor t = Downcast(op->producer);
+if (t->op == placeholder_op_) {
+  Array new_shape;
+  std::vector new_names;
+  parse_kernel_layout(new_layout_, &new_shape, &new_names);
+  std::unordered_map name_to_arg;
+  for (const auto& arg : op->indices) {
+std::string axis_name;
+if (const auto* pimm = arg.as()) {
+  CHECK_EQ(pimm->value, 0);
+  axis_name = "IntImm";
+} else {
+  axis_name = BaseName(CleanName(Downcast(arg)->name_hint));
+  CHECK_EQ(name_to_arg.count(axis_name), 0);
+  name_to_arg[axis_name] = arg;
+}
+  }
+
+  std::unordered_map div_factors;
+  std::vector r_new_args;
+  for (int i = new_names.size() - 1; i >= 0; --i) {
+auto ori_iter_name = new_names[i];
+auto name_it = name_to_arg.find(ori_iter_name);
+CHECK(name_it != name_to_arg.end());
+PrimExpr ori_arg = name_it->second;
+
+PrimExpr mod_factor = new_shape[i];
+
+PrimExpr div_factor = 1;
+if (div_factors.count(ori_iter_name)) {
+  div_factor = div_factors[ori_iter_name];
+}
+div_factors[ori_iter_name] = div_factor * new_shape[i];
+
+PrimExpr new_arg = indexmod(indexdiv(ori_arg, div_factor), mod_factor);
+
+r_new_args.push_back(new_arg);
+  }
+
+  Array new_args(std::make_move_iterator(r_new_args.rbegin()),
+   std::make_move_iterator(r_new_args.rend()));
+  return ProducerLoad(op->producer, new_args);
+}
+return GetRef(op);
+  }
+
+ private:
+  const te::Operation& placeholder_op_;
+  const std::string& new_layout_;
+};
+
+std::string get_ori_layout(std::set* placeholder_axis_names, const te::Operation& op,
+   const te::Tensor& placeholder) {
+  ReadAccessExtractor extractor;
+  for (const auto& exp : op.as()->body) {
+extractor.Extract(exp);
+  }
+
+  std::ostringstream os;
+  uint i = 0;
+  const auto& placeholder_op = placeholder->op;
+  CHECK_GT(extractor.read_access.count(placeholder_op), 0);
+  for (const auto& ev : extractor.read_access[placeholder_op]) {
+for (const auto& e : ev) {
+  std::string axis_name;
+  if (const auto* pimm = e.as()) {
+CHECK_EQ(pimm->value, 0);
+axis_name = "IntImm";
+  } else {
+axis_name = BaseName(CleanName(Downcast(e)->name_hint));
+  }
+
+  placeholder_axis_names->insert(axis_name);
+  os << placeholder->shape[i++] << axis_name;
+}
+  }
+
+  CHECK_EQ(placeholder_axis_names->size(), placeholder->shape.size());
+  std::string ori_layout = os.str();
+  os.str("");
+  // TODO(minmin): uncomment this line for relay integration
+  // ::tvm::relay::KernelLayoutTransformer::global_ori_layouts_queue.push_back(ori_layout);
+  return ori_layout;
+}
+
+std::string get_new_layout(Array* new_shape, const State& state, const int stage_id,
+   const Stage& stage, const te::Operation& op,
+   const te::Tensor& placeholder,
+   const std::set& placeholder_axis_names) {
+  std::ostringstream os;
+  Array stage_iters;
+
+  auto attach_it = state->attach_map->stage_to_attach_iter.find(stage_id);
+  int attach_pos = -1;
+  size_t iters_before_attach = 0;
+  if (attach_it != state->attach_map->stage_to_attach_iter.end()) {
+auto attach = attach_it->second;
+const auto& attach_stage = state->stages[attach.first];
+attach_pos = attach.second;
+stage_iters.inse

[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477112406



##
File path: src/auto_scheduler/compute_dag.cc
##
@@ -665,9 +666,349 @@ ComputeDAG::ComputeDAG(Array tensors) {
   data_ = std::move(node);
 }
 
+/*!
+ * \brief utility function for kernel_layout_transform
+ */
+inline void parse_kernel_layout(const String& layout, Array* shape,
+std::vector* axes) {
+  int32_t factor = 0;
+  std::string axis = "";
+  for (char c : std::string(layout)) {
+if (c >= 'A' && c <= 'z') {
+  axis += c;
+  if (factor != 0) {
+shape->push_back(factor);
+factor = 0;
+  }
+} else if (c >= '0' && c <= '9') {
+  factor = factor * 10 + c - '0';
+  if (!axis.empty()) {
+axes->push_back(axis);
+axis = "";
+  }
+} else {
+  LOG(FATAL) << "Invalid layout " << layout;
+}
+  }
+  if (!axis.empty()) {
+axes->push_back(axis);
+  }
+}
+
+std::string BaseName(const std::string& str) { return str.substr(0, str.rfind("_")); }
+
+class IndexRewriter : public StmtExprMutator {
+ public:
+  IndexRewriter(const te::Operation& placeholder_op, const std::string& 
new_layout)
+  : placeholder_op_(placeholder_op), new_layout_(new_layout) {}
+
+  PrimExpr Rewrite(PrimExpr expr) { return this->VisitExpr(expr); }
+
+  PrimExpr VisitExpr_(const ProducerLoadNode* op) final {
+te::Tensor t = Downcast(op->producer);
+if (t->op == placeholder_op_) {
+  Array new_shape;
+  std::vector new_names;
+  parse_kernel_layout(new_layout_, &new_shape, &new_names);
+  std::unordered_map name_to_arg;
+  for (const auto& arg : op->indices) {
+std::string axis_name;
+if (const auto* pimm = arg.as()) {
+  CHECK_EQ(pimm->value, 0);
+  axis_name = "IntImm";
+} else {
+  axis_name = BaseName(CleanName(Downcast(arg)->name_hint));
+  CHECK_EQ(name_to_arg.count(axis_name), 0);
+  name_to_arg[axis_name] = arg;
+}
+  }
+
+  std::unordered_map div_factors;
+  std::vector r_new_args;
+  for (int i = new_names.size() - 1; i >= 0; --i) {
+auto ori_iter_name = new_names[i];
+auto name_it = name_to_arg.find(ori_iter_name);
+CHECK(name_it != name_to_arg.end());
+PrimExpr ori_arg = name_it->second;
+
+PrimExpr mod_factor = new_shape[i];
+
+PrimExpr div_factor = 1;
+if (div_factors.count(ori_iter_name)) {
+  div_factor = div_factors[ori_iter_name];
+}
+div_factors[ori_iter_name] = div_factor * new_shape[i];
+
+PrimExpr new_arg = indexmod(indexdiv(ori_arg, div_factor), mod_factor);
+
+r_new_args.push_back(new_arg);
+  }
+
+  Array new_args(std::make_move_iterator(r_new_args.rbegin()),
+   std::make_move_iterator(r_new_args.rend()));
+  return ProducerLoad(op->producer, new_args);
+}
+return GetRef(op);
+  }
+
+ private:
+  const te::Operation& placeholder_op_;
+  const std::string& new_layout_;
+};
+
+std::string get_ori_layout(std::set* placeholder_axis_names, const te::Operation& op,
+   const te::Tensor& placeholder) {
+  ReadAccessExtractor extractor;
+  for (const auto& exp : op.as()->body) {
+extractor.Extract(exp);
+  }
+
+  std::ostringstream os;
+  uint i = 0;
+  const auto& placeholder_op = placeholder->op;
+  CHECK_GT(extractor.read_access.count(placeholder_op), 0);
+  for (const auto& ev : extractor.read_access[placeholder_op]) {
+for (const auto& e : ev) {
+  std::string axis_name;
+  if (const auto* pimm = e.as()) {
+CHECK_EQ(pimm->value, 0);
+axis_name = "IntImm";
+  } else {
+axis_name = BaseName(CleanName(Downcast(e)->name_hint));
+  }
+
+  placeholder_axis_names->insert(axis_name);
+  os << placeholder->shape[i++] << axis_name;
+}
+  }
+
+  CHECK_EQ(placeholder_axis_names->size(), placeholder->shape.size());
+  std::string ori_layout = os.str();
+  os.str("");
+  // TODO(minmin): uncomment this line for relay integration
+  // ::tvm::relay::KernelLayoutTransformer::global_ori_layouts_queue.push_back(ori_layout);
+  return ori_layout;
+}
+
+std::string get_new_layout(Array* new_shape, const State& state, const int stage_id,
+   const Stage& stage, const te::Operation& op,
+   const te::Tensor& placeholder,
+   const std::set& placeholder_axis_names) {
+  std::ostringstream os;
+  Array stage_iters;
+
+  auto attach_it = state->attach_map->stage_to_attach_iter.find(stage_id);
+  int attach_pos = -1;
+  size_t iters_before_attach = 0;
+  if (attach_it != state->attach_map->stage_to_attach_iter.end()) {
+auto attach = attach_it->second;
+const auto& attach_stage = state->stages[attach.first];
+attach_pos = attach.second;
+stage_iters.inse

[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477112240



##
File path: src/auto_scheduler/compute_dag.cc
##
@@ -665,9 +666,349 @@ ComputeDAG::ComputeDAG(Array tensors) {
   data_ = std::move(node);
 }
 
+/*!
+ * \brief utility function for kernel_layout_transform
+ */
+inline void parse_kernel_layout(const String& layout, Array* shape,
+std::vector* axes) {
+  int32_t factor = 0;
+  std::string axis = "";
+  for (char c : std::string(layout)) {
+if (c >= 'A' && c <= 'z') {
+  axis += c;
+  if (factor != 0) {
+shape->push_back(factor);
+factor = 0;
+  }
+} else if (c >= '0' && c <= '9') {
+  factor = factor * 10 + c - '0';
+  if (!axis.empty()) {
+axes->push_back(axis);
+axis = "";
+  }
+} else {
+  LOG(FATAL) << "Invalid layout " << layout;
+}
+  }
+  if (!axis.empty()) {
+axes->push_back(axis);
+  }
+}
+
+std::string BaseName(const std::string& str) { return str.substr(0, str.rfind("_")); }

Review comment:
   Done.
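
   As a side note, the layout-string parsing quoted above is easy to check by transcribing it to Python (a sketch mirroring the C++ helper; `isalpha` and `isdigit` stand in for the character-range tests):

```python
def parse_kernel_layout(layout):
    """Split a packed layout string into (shape, axes), mirroring the C++ helper."""
    shape, axes = [], []
    factor, axis = 0, ""
    for c in layout:
        if c.isalpha():
            axis += c
            if factor != 0:      # a number just ended: it is a shape factor
                shape.append(factor)
                factor = 0
        elif c.isdigit():
            factor = factor * 10 + int(c)
            if axis:             # an axis name just ended
                axes.append(axis)
                axis = ""
        else:
            raise ValueError(f"Invalid layout {layout}")
    if axis:
        axes.append(axis)
    return shape, axes

print(parse_kernel_layout("N16n"))  # ([16], ['N', 'n'])
```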

##
File path: src/auto_scheduler/compute_dag.cc
##
@@ -665,9 +666,349 @@ ComputeDAG::ComputeDAG(Array tensors) {
   data_ = std::move(node);
 }
 
+/*!
+ * \brief utility function for kernel_layout_transform
+ */
+inline void parse_kernel_layout(const String& layout, Array* shape,
+std::vector* axes) {
+  int32_t factor = 0;
+  std::string axis = "";
+  for (char c : std::string(layout)) {
+if (c >= 'A' && c <= 'z') {
+  axis += c;
+  if (factor != 0) {
+shape->push_back(factor);
+factor = 0;
+  }
+} else if (c >= '0' && c <= '9') {
+  factor = factor * 10 + c - '0';
+  if (!axis.empty()) {
+axes->push_back(axis);
+axis = "";
+  }
+} else {
+  LOG(FATAL) << "Invalid layout " << layout;
+}
+  }
+  if (!axis.empty()) {
+axes->push_back(axis);
+  }
+}
+
+std::string BaseName(const std::string& str) { return str.substr(0, str.rfind("_")); }
+
+class IndexRewriter : public StmtExprMutator {
+ public:
+  IndexRewriter(const te::Operation& placeholder_op, const std::string& 
new_layout)
+  : placeholder_op_(placeholder_op), new_layout_(new_layout) {}
+
+  PrimExpr Rewrite(PrimExpr expr) { return this->VisitExpr(expr); }
+
+  PrimExpr VisitExpr_(const ProducerLoadNode* op) final {
+te::Tensor t = Downcast(op->producer);
+if (t->op == placeholder_op_) {
+  Array new_shape;
+  std::vector new_names;
+  parse_kernel_layout(new_layout_, &new_shape, &new_names);

Review comment:
   Done.
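
   To make the index rewrite in `IndexRewriter` (quoted earlier in this thread) concrete: each new index is computed as `indexmod(indexdiv(ori_arg, div_factor), mod_factor)`, with `div_factor` accumulating the factors already consumed. A small numeric sketch:

```python
def rewrite_index(ori_arg, new_shape):
    """Split the flat index ori_arg across new_shape (outer..inner factors)."""
    r_new_args = []
    div_factor = 1
    for mod_factor in reversed(new_shape):   # innermost factor first
        r_new_args.append((ori_arg // div_factor) % mod_factor)
        div_factor *= mod_factor
    return list(reversed(r_new_args))        # back to outer..inner order

print(rewrite_index(13, [8, 4]))  # [3, 1], since 13 == 3 * 4 + 1
```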





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477111962



##
File path: src/auto_scheduler/compute_dag.cc
##
@@ -40,6 +40,7 @@
 #include 
 
 #include "../arith/pattern_match.h"
+#include "search_policy/utils.h"

Review comment:
   Done.





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477107266



##
File path: python/tvm/auto_scheduler/measure.py
##
@@ -419,7 +419,7 @@ def timed_func():
 
 try:
 sch, args = task.compute_dag.apply_steps_from_state(
-inp.state)
+inp.state, layout_rewrite=True)

Review comment:
   Whether or not to do layout rewrite for an op is specified by the `layout_free_placeholders` attr in the compute definition, so it's safe to set True by default here.
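
   For reference, marking a placeholder as layout-free in a compute definition looks like this (a minimal matmul sketch following the auto_scheduler convention):

```python
import tvm
from tvm import te

N, M, K = 128, 128, 128
A = te.placeholder((N, K), name="A")
B = te.placeholder((K, M), name="B")
k = te.reduce_axis((0, K), name="k")
# B is marked layout-free, so the auto-scheduler may rewrite its layout.
matmul = te.compute(
    (N, M),
    lambda i, j: te.sum(A[i, k] * B[k, j], axis=k),
    name="matmul",
    attrs={"layout_free_placeholders": [B]},
)
```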





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477105200



##
File path: python/tvm/auto_scheduler/compute_dag.py
##
@@ -72,7 +72,7 @@ def get_init_state(self):
 """
 return State(self.init_state, self)
 
-def apply_steps_from_state(self, state):
+def apply_steps_from_state(self, state, layout_rewrite=False):

Review comment:
   done





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] fernchen commented on pull request #6206: [Caffe Frontend] introduce caffe frontend for tvm

2020-08-26 Thread GitBox


fernchen commented on pull request #6206:
URL: https://github.com/apache/incubator-tvm/pull/6206#issuecomment-680717983


   > > ```python
   > > from tvm.relay.frontend import caffe_pb2 as pb
   > > ```
   > 
   > Where does `from tvm.relay.frontend import caffe_pb2 as pb` come from? I don't see it in this PR. And another thing: we must at least provide instructions on how to build one's own `caffe.proto` and the related stuff.
   
   Based on the discussion in https://github.com/apache/incubator-tvm/pull/6023#issuecomment-660408499, we will support BVLC Caffe in TVM first. So there is no need to compile a custom caffe.proto in this PR; we use the caffe.proto as follows:
   ```python
   from caffe.proto import caffe_pb2 as pb
   ```



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] fernchen commented on a change in pull request #6206: [Caffe Frontend] introduce caffe frontend for tvm

2020-08-26 Thread GitBox


fernchen commented on a change in pull request #6206:
URL: https://github.com/apache/incubator-tvm/pull/6206#discussion_r477085792



##
File path: tests/python/frontend/caffe/test_forward.py
##
@@ -0,0 +1,1003 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=import-self, invalid-name, unused-argument
+"""
+Caffe testcases
+
+This article is a test script to test Caffe operator with Relay.
+"""
+from __future__ import print_function
+import os
+os.environ['GLOG_minloglevel'] = '2'
+import sys
+import logging
+logging.basicConfig(level=logging.ERROR)
+
+import numpy as np
+from google.protobuf import text_format
+import caffe
+from caffe import layers as L, params as P
+from caffe.proto import caffe_pb2 as pb
+
+import tvm
+from tvm import relay
+from tvm.contrib import util, graph_runtime
+from tvm.contrib.download import download_testdata
+
+CURRENT_DIR = os.path.join(os.path.expanduser('~'), '.tvm_test_data', 'caffe_test')
+
+###
+# Generic functions for TVM & Caffe
+# --
+
+
+def _create_dir(d_path):
+""" If the directory is not existed, create it"""
+if not (os.path.exists(d_path) and os.path.isdir(d_path)):
+os.makedirs(d_path)
+
+
+def _list_to_str(ll):
+""" Convert list or tuple to str, separated by underline. """
+if isinstance(ll, (tuple, list)):
+tmp = [str(i) for i in ll]
+return '_'.join(tmp)
+
+
+def _gen_filename_str(op_name, data_shape, *args, **kwargs):
+""" Combining the filename according to the op_name, shape and other args. 
"""
+file_dir = os.path.join(CURRENT_DIR, op_name)
+_create_dir(file_dir)
+res = op_name + "_"
+shape_str = _list_to_str(list(data_shape))
+res += shape_str
+for arg in args:
+if isinstance(arg, (tuple, list)):
+res += ("_" + _list_to_str(arg))
+elif isinstance(arg, (int, float, str)):
+res += ("_" + str(arg))
+for _, v in kwargs.items():
+if isinstance(v, (tuple, list)):
+res += ("_" + _list_to_str(v))
+elif isinstance(v, (int, float, str)):
+res += ("_" + str(v))
+res = res.replace(".", "_")
+res = res.replace("-", "_")
+proto_file = os.path.join(file_dir, res + ".prototxt")
+blob_file = os.path.join(file_dir, res + ".caffemodel")
+solver_file = os.path.join(file_dir, res + "_solver.prototxt")
+
+return (proto_file, blob_file, solver_file)
+
+
+def _save_prototxt(n_netspec, f_path):
+""" Generate .prototxt file according to caffe.NetSpec"""
+s = n_netspec.to_proto()
+with open(f_path, 'w') as f:
+f.write(str(s))
+
+
+def _save_solver(solver_file, proto_file, blob_file):
+""" Define a solver proto, you can change the configs."""
+blob_file_prefix = blob_file.split(".caffemodel")[0]
+s = pb.SolverParameter()
+s.train_net = proto_file
+s.base_lr = 0.01
+s.momentum = 0.9
+s.weight_decay = 0.0005
+s.lr_policy = "inv"
+s.gamma = 0.0001
+s.power = 0.75
+s.display = 1
+s.max_iter = 10
+s.snapshot = 10
+s.snapshot_prefix = blob_file_prefix
+
+with open(solver_file, 'w') as f:
+f.write(str(s))
+
+
+def _save_caffemodel(solver_file, blob_file):
+""" Generate .caffemodel file."""
+solver = caffe.SGDSolver(solver_file)
+solver.net.save(blob_file)
+
+
+def _gen_model_files(n_netspec, proto_file, blob_file, solver_file):
+_save_prototxt(n_netspec, proto_file)
+_save_solver(solver_file, proto_file, blob_file)
+_save_caffemodel(solver_file, blob_file)
+
+
+def _siso_op(data, func, *args, **kwargs):
+""" Create single input and single output Caffe op """
+n = caffe.NetSpec()
+n.data = L.Input(input_param={'shape': {'dim': list(data.shape)}})
+n.output = func(n.data, *args, **kwargs)
+return n
+
+
+def _miso_op(data_list, func, *args, **kwargs):
+""" Create multi input and single output Caffe op """
+n = caffe.NetSpec()
+if not isinstance(data_list, (tuple, list)):
+raise TypeError("Need tuple or list but get {}".format(
+type(data_list))

[GitHub] [incubator-tvm] yukatayu commented on a change in pull request #6341: [Easy fix] Make able to compile with msvc, clang

2020-08-26 Thread GitBox


yukatayu commented on a change in pull request #6341:
URL: https://github.com/apache/incubator-tvm/pull/6341#discussion_r477079101



##
File path: src/target/llvm/codegen_hexagon.cc
##
@@ -636,7 +636,10 @@ bool UsesExportABI(const PrimFunc& f) {
   return false;
 }
 
-__attribute__((unused)) std::ostream& operator<<(std::ostream& os, const 
llvm::Module& m) {
+#ifdef __GNUC__
+__attribute__((unused))
+#endif
+std::ostream& operator<<(std::ostream& os, const llvm::Module& m) {

Review comment:
   I agree





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] fernchen commented on a change in pull request #6206: [Caffe Frontend] introduce caffe frontend for tvm

2020-08-26 Thread GitBox


fernchen commented on a change in pull request #6206:
URL: https://github.com/apache/incubator-tvm/pull/6206#discussion_r477078398



##
File path: tests/python/frontend/caffe/test_forward.py
##
@@ -0,0 +1,1003 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=import-self, invalid-name, unused-argument
+"""
+Caffe testcases
+
+This article is a test script to test Caffe operator with Relay.
+"""
+from __future__ import print_function
+import os
+os.environ['GLOG_minloglevel'] = '2'
+import sys
+import logging
+logging.basicConfig(level=logging.ERROR)
+
+import numpy as np
+from google.protobuf import text_format
+import caffe
+from caffe import layers as L, params as P
+from caffe.proto import caffe_pb2 as pb
+
+import tvm
+from tvm import relay
+from tvm.contrib import util, graph_runtime
+from tvm.contrib.download import download_testdata
+
+CURRENT_DIR = os.path.join(os.path.expanduser('~'), '.tvm_test_data', 'caffe_test')
+
+###
+# Generic functions for TVM & Caffe
+# --
+
+
+def _create_dir(d_path):
+""" If the directory is not existed, create it"""
+if not (os.path.exists(d_path) and os.path.isdir(d_path)):
+os.makedirs(d_path)
+
+
+def _list_to_str(ll):
+""" Convert list or tuple to str, separated by underline. """
+if isinstance(ll, (tuple, list)):
+tmp = [str(i) for i in ll]
+return '_'.join(tmp)
+
+
+def _gen_filename_str(op_name, data_shape, *args, **kwargs):
+""" Combining the filename according to the op_name, shape and other args. 
"""
+file_dir = os.path.join(CURRENT_DIR, op_name)
+_create_dir(file_dir)
+res = op_name + "_"
+shape_str = _list_to_str(list(data_shape))
+res += shape_str
+for arg in args:
+if isinstance(arg, (tuple, list)):
+res += ("_" + _list_to_str(arg))
+elif isinstance(arg, (int, float, str)):
+res += ("_" + str(arg))
+for _, v in kwargs.items():
+if isinstance(v, (tuple, list)):
+res += ("_" + _list_to_str(v))
+elif isinstance(v, (int, float, str)):
+res += ("_" + str(v))
+res = res.replace(".", "_")
+res = res.replace("-", "_")
+proto_file = os.path.join(file_dir, res + ".prototxt")
+blob_file = os.path.join(file_dir, res + ".caffemodel")
+solver_file = os.path.join(file_dir, res + "_solver.prototxt")
+
+return (proto_file, blob_file, solver_file)
+
+
+def _save_prototxt(n_netspec, f_path):
+""" Generate .prototxt file according to caffe.NetSpec"""
+s = n_netspec.to_proto()
+with open(f_path, 'w') as f:
+f.write(str(s))
+
+
+def _save_solver(solver_file, proto_file, blob_file):
+""" Define a solver proto, you can change the configs."""
+blob_file_prefix = blob_file.split(".caffemodel")[0]
+s = pb.SolverParameter()
+s.train_net = proto_file
+s.base_lr = 0.01
+s.momentum = 0.9
+s.weight_decay = 0.0005
+s.lr_policy = "inv"
+s.gamma = 0.0001
+s.power = 0.75
+s.display = 1
+s.max_iter = 10
+s.snapshot = 10
+s.snapshot_prefix = blob_file_prefix
+
+with open(solver_file, 'w') as f:
+f.write(str(s))
+
+
+def _save_caffemodel(solver_file, blob_file):
+""" Generate .caffemodel file."""
+solver = caffe.SGDSolver(solver_file)
+solver.net.save(blob_file)
+
+
+def _gen_model_files(n_netspec, proto_file, blob_file, solver_file):
+_save_prototxt(n_netspec, proto_file)
+_save_solver(solver_file, proto_file, blob_file)
+_save_caffemodel(solver_file, blob_file)
+
+
+def _siso_op(data, func, *args, **kwargs):
+""" Create single input and single output Caffe op """
+n = caffe.NetSpec()
+n.data = L.Input(input_param={'shape': {'dim': list(data.shape)}})
+n.output = func(n.data, *args, **kwargs)
+return n
+
+
+def _miso_op(data_list, func, *args, **kwargs):
+""" Create multi input and single output Caffe op """
+n = caffe.NetSpec()
+if not isinstance(data_list, (tuple, list)):
+raise TypeError("Need tuple or list but get {}".format(
+type(data_list))