[GitHub] [incubator-tvm] zhiics opened a new pull request #5275: [BUGFIX][IR] Fix String SEqual

2020-04-07 Thread GitBox
zhiics opened a new pull request #5275: [BUGFIX][IR] Fix String SEqual
URL: https://github.com/apache/incubator-tvm/pull/5275
 
 
   cc @tqchen 
   




[GitHub] [incubator-tvm] windclarion opened a new pull request #5274: [RELAY][BYOC] for composite function, FTVMAnnotateTarget maybe do not exist

2020-04-07 Thread GitBox
windclarion opened a new pull request #5274: [RELAY][BYOC] for composite function, FTVMAnnotateTarget maybe do not exist
URL: https://github.com/apache/incubator-tvm/pull/5274
 
 
   A composite function needn't query the OpNode's FTVMAnnotateTarget attribute.




[GitHub] [incubator-tvm] sergei-grechanik commented on a change in pull request #5171: [Arith] linear system and equation solver

2020-04-07 Thread GitBox
sergei-grechanik commented on a change in pull request #5171: [Arith] linear system and equation solver
URL: https://github.com/apache/incubator-tvm/pull/5171#discussion_r405262306
 
 

 ##
 File path: src/arith/solve_linear_equation.cc
 ##
 @@ -0,0 +1,480 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file tvm/arith/solve_linear_equation.cc
+ * \brief Solve linear equations.
+ */
+#include <tvm/arith/analyzer.h>
+#include <tvm/arith/int_solver.h>
+#include <tvm/arith/pattern.h>
+#include <tvm/runtime/data_type.h>
+#include <tvm/runtime/registry.h>
+#include <tvm/tir/expr.h>
+#include <tvm/tir/op.h>
+#include <tvm/tir/stmt_functor.h>
+#include <vector>
+
+namespace tvm {
+namespace arith {
+
+using namespace tvm::runtime;
+
+void SmithNormalFormDiag(std::vector<std::vector<int64_t>>* S,
+                         std::vector<std::vector<int64_t>>* V,
+                         std::vector<PrimExpr>* x,
+                         std::vector<PrimExpr>* y) {
+  if (S->empty() || V->empty()) return;
+  size_t m = S->size();
+  size_t n = (*S)[0].size();  // n is # of variables
+  CHECK_EQ(V->size(), n);
+  CHECK_EQ((*V)[0].size(), n);
+
+  for (size_t index = 0; index < std::min(m, n); ++index) {
+    // Here A is partially diagonalized, that is A[i, j] is zero for all i, j
+    // such that (i < index) or (j < index), unless (i == j).
+    // That is, now we are diagonalizing the submatrix with i >= index and j >= index
+
+    // Find a row with a nonzero element in the index-th column
+    // (We also prefer rows where this element has minimal abs value)
+    size_t best_i = index;
+    for (size_t i = best_i; i < m; ++i) {
+      int64_t s_old = (*S)[best_i][index];
+      int64_t s_new = (*S)[i][index];
+      if (s_new != 0) {
+        if (s_old == 0 || std::abs(s_new) < std::abs(s_old)) {
+          best_i = i;
+        }
+      }
+    }
+    // Move the row we found to the index-th position
+    std::swap((*S)[index], (*S)[best_i]);
+    std::swap((*y)[index], (*y)[best_i]);
+
+    // If the index-th diagonal element is still zero, try to find a column
+    // with nonzero index-th element and move it to the index-th position
+    if ((*S)[index][index] == 0) {
+      for (size_t j = index + 1; j < n; ++j) {
+        if ((*S)[index][j] != 0) {
+          for (size_t i = index; i < m; ++i) {
+            std::swap((*S)[i][index], (*S)[i][j]);
+          }
+          // swapping columns corresponds to swapping the corresponding x
+          std::swap((*x)[index], (*x)[j]);
+          for (size_t i = 0; i < n; ++i) {
+            std::swap((*V)[i][index], (*V)[i][j]);
+          }
+          break;
+        }
+      }
+    }
+
+    // If the index-th diagonal element is still zero, then both the index-th row
+    // and the index-th column are completely zero, and we don't need to do anything;
+    // just go to the next index
+    if ((*S)[index][index] == 0) {
+      continue;
+    }
+
+    // Now the index-th diagonal element is non-zero and we can zero all the
+    // index-th column below it by subtracting rows from each other
+    for (auto i = index + 1; i < m; ++i) {
+      if ((*S)[i][index] != 0) {
+        int64_t g, a, b;
+        // g = a*matrix[index][index] + b*matrix[i][index]
+        if ((*S)[i][index] % (*S)[index][index] != 0) {
+          std::tie(g, a, b) = xgcd((*S)[index][index], (*S)[i][index]);
+        } else {
+          // Explicitly avoid changing the index-th row. This is important
+          // to avoid infinite loop.
+          g = (*S)[index][index];
+          a = 1;
+          b = 0;
+        }
+
+        // Let m = S[index][index], n = S[i][index], then the following is true:
+        //
+        // [ a   n/g ][ m/g  n/g ]   [ 1  0 ]
+        // [ b  -m/g ][ b    -a  ] = [ 0  1 ]
+        //
+        // Note that the two matrices are integer (since g = gcd(m, n)).
+        // We will essentially multiply our matrix on the left by a dilated and
+        // transposed version of the first of these two matrices. The second matrix
+        // is not needed here, however we will use it while zeroing the index-th row.
+
+        int64_t m_g = (*S)[index][index] / g;
+        int64_t n_g = (*S)[i][index] / g;
+
+        // Note that j is the index of the column, not the row
+        for (size_t j = index; j < (*S)[i].size(); ++j) {
+          // Multiply index-th row by a and add the i-th row multiplied by b
+          // This
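[A quick sanity check of the matrix identity in the quoted comment, using Bezout's identity g = a*m + b*n:

\begin{pmatrix} a & n/g \\ b & -m/g \end{pmatrix}
\begin{pmatrix} m/g & n/g \\ b & -a \end{pmatrix}
=
\begin{pmatrix} (am + bn)/g & (an - an)/g \\ (bm - bm)/g & (bn + am)/g \end{pmatrix}
=
\begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}

The off-diagonal terms cancel exactly, and both diagonal entries equal g/g = 1, so both factor matrices are indeed integer inverses of each other.]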

[incubator-tvm] branch master updated (e9c90b7 -> 89da63e)

2020-04-07 Thread tqchen
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from e9c90b7  [LLVM] Include Support/Host.h for declaration of getDefaultTargetTriple (#5268)
 add 89da63e  [LINT] Remove scalalint from lint deps (#5269)

No new revisions were added by this update.

Summary of changes:
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)



[GitHub] [incubator-tvm] tqchen merged pull request #5269: [LINT] Remove scalalint from lint deps

2020-04-07 Thread GitBox
tqchen merged pull request #5269: [LINT] Remove scalalint from lint deps
URL: https://github.com/apache/incubator-tvm/pull/5269
 
 
   




[GitHub] [incubator-tvm] FrozenGene commented on issue #5230: Adding support for TFLite QnnSubtract operator.

2020-04-07 Thread GitBox
FrozenGene commented on issue #5230: Adding support for TFLite QnnSubtract operator.
URL: https://github.com/apache/incubator-tvm/pull/5230#issuecomment-610737656
 
 
   > @FrozenGene If we have C = 1, then depth wise conv becomes normal conv. There is nothing to accumulate across input channels basically. And depth_multiplier becomes equal to the number of output channels. What do you think? Is the change good with you?
   
   I think it is OK. I have run into this before: when the input channel is 1 (for example, a gray image), we get a depthwise convolution with a multiplier greater than 1 rather than a normal convolution. Would you mind doing a performance comparison? I expect we could get better performance when we use the normal convolution.
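   A minimal Relay sketch of the equivalence under discussion (shapes and names are illustrative, not from the PR): with a single input channel, groups == in_channels == 1, so the "depthwise" convolution is literally an ordinary convolution with depth_multiplier output channels.

   import tvm
   from tvm import relay

   # C = 1 input (e.g. a gray image), depth_multiplier = 4.
   data = relay.var("data", shape=(1, 1, 32, 32))      # NCHW, in_channels = 1
   weight = relay.var("weight", shape=(4, 1, 3, 3))    # OIHW, 4 filters

   # "Depthwise" form: groups == in_channels == 1 ...
   dw = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), groups=1)
   # ... is the same call as the normal convolution (groups defaults to 1).
   normal = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))

   print(tvm.ir.structural_equal(dw, normal))  # True: identical IR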




[incubator-tvm] branch master updated (989b481 -> e9c90b7)

2020-04-07 Thread tqchen
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from 989b481  [PYTORCH]celu, gelu, selu activations (#5263)
 add e9c90b7  [LLVM] Include Support/Host.h for declaration of getDefaultTargetTriple (#5268)

No new revisions were added by this update.

Summary of changes:
 src/target/llvm/llvm_common.h | 1 +
 1 file changed, 1 insertion(+)



[GitHub] [incubator-tvm] CallmeZhangChenchen commented on issue #5273: Run relay_quick_start.py Wrong

2020-04-07 Thread GitBox
CallmeZhangChenchen commented on issue #5273: Run relay_quick_start.py Wrong
URL: https://github.com/apache/incubator-tvm/issues/5273#issuecomment-610736051
 
 
   And LLVM is fine, no issues there.




[incubator-tvm] branch master updated (d2de35e -> 989b481)

2020-04-07 Thread masahi
This is an automated email from the ASF dual-hosted git repository.

masahi pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from d2de35e  [RELAY][BYOC] Add support for composite functions in BYOC (#5261)
 add 989b481  [PYTORCH]celu, gelu, selu activations (#5263)

No new revisions were added by this update.

Summary of changes:
 python/tvm/relay/frontend/pytorch.py          | 38
 tests/python/frontend/pytorch/test_forward.py | 34
 2 files changed, 67 insertions(+), 5 deletions(-)



[GitHub] [incubator-tvm] masahi commented on issue #5263: [PYTORCH]celu, gelu, selu activations

2020-04-07 Thread GitBox
masahi commented on issue #5263: [PYTORCH]celu, gelu, selu activations
URL: https://github.com/apache/incubator-tvm/pull/5263#issuecomment-610735755
 
 
   Thanks @siju-samuel 




[GitHub] [incubator-tvm] CallmeZhangChenchen commented on issue #5273: Run relay_quick_start.py Wrong

2020-04-07 Thread GitBox
CallmeZhangChenchen commented on issue #5273: Run relay_quick_start.py Wrong
URL: https://github.com/apache/incubator-tvm/issues/5273#issuecomment-610735862
 
 
   I think TVM itself is fine, no issues there.




[GitHub] [incubator-tvm] masahi merged pull request #5263: [PYTORCH]celu, gelu, selu activations

2020-04-07 Thread GitBox
masahi merged pull request #5263: [PYTORCH]celu, gelu, selu activations
URL: https://github.com/apache/incubator-tvm/pull/5263
 
 
   




[GitHub] [incubator-tvm] CallmeZhangChenchen opened a new issue #5273: Run relay_quick_start.py Wrong

2020-04-07 Thread GitBox
CallmeZhangChenchen opened a new issue #5273: Run relay_quick_start.py Wrong
URL: https://github.com/apache/incubator-tvm/issues/5273
 
 
   zzjhtest@zzjhtest:~/ZCC/testtvm$ python3 relay_quick_start.py 
   v0.0.4
   def @main(%data: Tensor[(1, 3, 224, 224), float32], %bn_data_gamma: Tensor[(3), float32], %bn_data_beta: Tensor[(3), float32], %bn_data_moving_mean: Tensor[(3), float32], %bn_data_moving_var: Tensor[(3), float32], %conv0_weight: Tensor[(64, 3, 7, 7), float32], ...
   [Relay IR dump of the ResNet-18 workload: the remaining conv/batch-norm parameter declarations for stages 1-4 are omitted here; the message is cut off mid-dump]

[GitHub] [incubator-tvm] masahi commented on a change in pull request #5272: [BYOC] Add example of Composite + Annotate for DNNL fused op

2020-04-07 Thread GitBox
masahi commented on a change in pull request #5272: [BYOC] Add example of Composite + Annotate for DNNL fused op
URL: https://github.com/apache/incubator-tvm/pull/5272#discussion_r405241238
 
 

 ##
 File path: tests/python/relay/test_pass_partition_graph.py
 ##
 @@ -856,6 +857,111 @@ def expected():
     partitioned = transform.PartitionGraph()(mod)
     assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)
 
+
+def test_partition_conv_bias_relu():
+    def make_pattern():
+        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
+        weight = relay.var("weight")
+        bias = relay.var("bias")
+        conv = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3),
+                               channels=8, padding=(1, 1))
+        add = relay.add(conv, bias)
+        return relay.nn.relu(add)
+
+    def get_blocks(prefix, data, in_channel, out_channel,
+                   include_bn=True, include_sigmoid=False):
+        weight = relay.var(prefix + "weight")
+        bn_gamma = relay.var(prefix + "bn_gamma")
+        bn_beta = relay.var(prefix + "bn_beta")
+        bn_mmean = relay.var(prefix + "bn_mean")
+        bn_mvar = relay.var(prefix + "bn_var")
+
+        layer = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3),
+                                channels=out_channel, padding=(1, 1))
+        if include_bn:
+            bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta,
+                                            bn_mmean, bn_mvar)
+            layer = bn_output[0]
+        if include_sigmoid:
+            # dummy layer to prevent pattern detection
+            layer = relay.sigmoid(layer)
+        layer = relay.nn.relu(layer)
+        return layer
+
+    def get_net(include_bn=True, include_sigmoid=False):
+        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
+        layer1 = get_blocks("layer1_", data, 3, 8, include_bn, include_sigmoid)
+        layer2 = get_blocks("layer2_", layer1, 8, 8, include_bn, include_sigmoid)
+        return relay.Function(relay.analysis.free_vars(layer2), layer2)
+
+    def get_partitoned_mod(mod, params):
+        # This is required for constant folding
+        mod["main"] = bind_params_by_name(mod["main"], params)
+        pattern_table = [
+            ("dnnl.conv_bias_relu", make_pattern())
 
 Review comment:
   Maybe it is better to move this pattern to `contrib/dnnl.py`. Let me know.
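   For readers following along, a rough sketch of how such a pattern table is consumed downstream (assuming the MergeComposite / AnnotateTarget / PartitionGraph passes from this PR series; the stand-in graph below is illustrative):

   import tvm
   from tvm import relay
   from tvm.relay import transform

   def conv_bias_relu(data, weight, bias):
       conv = relay.nn.conv2d(data, weight, kernel_size=(3, 3),
                              channels=8, padding=(1, 1))
       return relay.nn.relu(relay.add(conv, bias))

   data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
   weight = relay.var("weight")
   bias = relay.var("bias")
   pattern_table = [("dnnl.conv_bias_relu", conv_bias_relu(data, weight, bias))]

   net = conv_bias_relu(data, weight, bias)
   mod = tvm.IRModule.from_expr(relay.Function(relay.analysis.free_vars(net), net))
   mod = transform.MergeComposite(pattern_table)(mod)   # wrap matches in composite functions
   mod = transform.AnnotateTarget("dnnl")(mod)          # mark them for the dnnl codegen
   mod = transform.PartitionGraph()(mod)                # split them into external functions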




[GitHub] [incubator-tvm] masahi commented on issue #5262: [RELAY][BYOC] Register pattern tables from external codegens

2020-04-07 Thread GitBox
masahi commented on issue #5262: [RELAY][BYOC] Register pattern tables from external codegens
URL: https://github.com/apache/incubator-tvm/pull/5262#issuecomment-610734684
 
 
   @mbaret Is it better to merge this after https://github.com/apache/incubator-tvm/pull/5272?




[GitHub] [incubator-tvm] masahi edited a comment on issue #5262: [RELAY][BYOC] Register pattern tables from external codegens

2020-04-07 Thread GitBox
masahi edited a comment on issue #5262: [RELAY][BYOC] Register pattern tables from external codegens
URL: https://github.com/apache/incubator-tvm/pull/5262#issuecomment-610734684
 
 
   @mbaret Is it better to merge this after https://github.com/apache/incubator-tvm/pull/5272? The pattern for DNNL exists only in the test case.




[GitHub] [incubator-tvm] yzhliu commented on a change in pull request #5171: [Arith] linear system and equation solver

2020-04-07 Thread GitBox
yzhliu commented on a change in pull request #5171: [Arith] linear system and equation solver
URL: https://github.com/apache/incubator-tvm/pull/5171#discussion_r405240373
 
 

 ##
 File path: tests/python/unittest/test_arith_solve_linear_system.py
 ##
 @@ -14,9 +14,130 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+import random
+import numpy as np
 import tvm
-from tvm import te, arith
-from tvm.tir import ir_pass
+from tvm import te, arith, ir, tir
+
+
+def run_expr(expr, vranges):
+    def _compute_body(*us):
+        vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
+        return tir.ir_pass.Substitute(expr, vmap)
+
+    A = te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
+    args = [tvm.nd.empty(A.shape, A.dtype)]
+    sch = te.create_schedule(A.op)
+    mod = tvm.build(sch, [A])
+    mod(*args)
+    return args[0].asnumpy()
+
+
+def check_bruteforce(bool_expr, vranges, cond=None):
+    if cond is not None:
+        bool_expr = te.any(tir.Not(cond), bool_expr)
+
+    res = run_expr(bool_expr, vranges)
+    if not np.all(res):
+        indices = list(np.argwhere(res == 0)[0])
+        counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]
+        counterex = sorted(counterex, key=lambda x: x[0])
+        counterex = ", ".join([v + " = " + str(i) for v, i in counterex])
+        raise AssertionError("Expression {}\nis not true on {}\n"
+                             "Counterexample: {}"
+                             .format(tir.ir_pass.CanonicalSimplify(bool_expr), vranges, counterex))
+
+
+def check_solution(solution, vranges={}):
+    def _check_forward(formula1, formula2, varmap, backvarmap):
+        all_vranges = vranges.copy()
+        all_vranges.update({v: r for v, r in formula1.ranges.items()})
+
+        # Check that the transformation is injective
+        cond_on_vars = tir.const(1, 'bool')
+        for v in formula1.variables:
+            # variable mapping is consistent
+            v_back = tir.ir_pass.Simplify(tir.ir_pass.Substitute(varmap[v], backvarmap))
+            cond_on_vars = te.all(cond_on_vars, v == v_back)
+        # Also we have to check that the new relations are true when old relations are true
+        cond_subst = tir.ir_pass.Substitute(
+            te.all(tir.const(1, 'bool'), *formula2.relations), backvarmap)
+        # We have to include relations from vranges too
+        for v in formula2.variables:
+            if v in formula2.ranges:
+                r = formula2.ranges[v]
+                range_cond = te.all(v >= r.min, v < r.min + r.extent)
+                range_cond = tir.ir_pass.Substitute(range_cond, backvarmap)
+                cond_subst = te.all(cond_subst, range_cond)
+        cond_subst = tir.ir_pass.Simplify(cond_subst)
+        check_bruteforce(te.all(cond_subst, cond_on_vars), all_vranges,
+                         cond=te.all(tir.const(1, 'bool'), *formula1.relations))
+
+    rels = solution.dst.relations
+    if len(rels) == 1 and ir.structural_equal(rels[0], False):
+        # not solvable, skip
+        return
+    _check_forward(solution.src, solution.dst,
+                   solution.src_to_dst, solution.dst_to_src)
+    _check_forward(solution.dst, solution.src,
+                   solution.dst_to_src, solution.src_to_dst)
+
+
+def test_solution_consistency():
+    random.seed(0)
 
 Review comment:
   good idea.




[GitHub] [incubator-tvm] masahi commented on issue #5272: [BYOC] Add example of Composite + Annotate for DNNL fused op

2020-04-07 Thread GitBox
masahi commented on issue #5272: [BYOC] Add example of Composite + Annotate for DNNL fused op
URL: https://github.com/apache/incubator-tvm/pull/5272#issuecomment-610732150
 
 
   Please review @zhiics @mbaret @comaniac @trevor-m 




[GitHub] [incubator-tvm] masahi commented on issue #4741: [External codegen] Add test cases for fused ops with manual annotation

2020-04-07 Thread GitBox
masahi commented on issue #4741: [External codegen] Add test cases for fused ops with manual annotation
URL: https://github.com/apache/incubator-tvm/pull/4741#issuecomment-610731955
 
 
   #5272




[GitHub] [incubator-tvm] masahi opened a new pull request #5272: [BYOC] Add example of Composite + Annotate for DNNL fused op

2020-04-07 Thread GitBox
masahi opened a new pull request #5272: [BYOC] Add example of Composite + Annotate for DNNL fused op
URL: https://github.com/apache/incubator-tvm/pull/5272
 
 
   This is a reimplementation of #4741 based on the new annotator support for composite functions added in #5261. This is the first real use case of composite in the code base. It is purely for demonstration purposes and not intended for performance-critical scenarios.
   
   Due to the manually inlined tensors in the C codegen, full MobileNet execution is disabled in the test. I tried, but it took more than 5GB of RAM and compilation didn't finish in a reasonable time. The related issue is discussed in https://discuss.tvm.ai/t/external-codegen-constant-tensors-in-c-codegen/5890/




[GitHub] [incubator-tvm] masahi closed pull request #4741: [External codegen] Add test cases for fused ops with manual annotation

2020-04-07 Thread GitBox
masahi closed pull request #4741: [External codegen] Add test cases for fused ops with manual annotation
URL: https://github.com/apache/incubator-tvm/pull/4741
 
 
   




[incubator-tvm] branch master updated (53a4ad3 -> d2de35e)

2020-04-07 Thread masahi
This is an automated email from the ASF dual-hosted git repository.

masahi pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from 53a4ad3  [RUNTIME] Implement TVMDSOOp(TensorFlow custom op) for TVM runtime (#4459)
 add d2de35e  [RELAY][BYOC] Add support for composite functions in BYOC (#5261)

No new revisions were added by this update.

Summary of changes:
 python/tvm/relay/transform/transform.py         | 17
 src/relay/transforms/annotate_target.cc         | 40
 src/relay/transforms/merge_composite.cc         | 90
 tests/python/relay/test_annotate_target.py      | 46
 tests/python/relay/test_pass_merge_composite.py | 38
 5 files changed, 173 insertions(+), 58 deletions(-)



[GitHub] [incubator-tvm] masahi merged pull request #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi merged pull request #5261: [RELAY][BYOC] Add support for composite functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261
 
 
   




[GitHub] [incubator-tvm] masahi commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610727692
 
 
   Thanks @mbaret @zhiics 




[GitHub] [incubator-tvm] yzhliu commented on a change in pull request #5171: [Arith] linear system and equation solver

2020-04-07 Thread GitBox
yzhliu commented on a change in pull request #5171: [Arith] linear system and equation solver
URL: https://github.com/apache/incubator-tvm/pull/5171#discussion_r405213922
 
 

 ##
 File path: src/arith/solve_linear_equation.cc
 ##
 @@ -0,0 +1,480 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file tvm/arith/solve_linear_equation.cc
+ * \brief Solve linear equations.
+ */
+#include <tvm/arith/analyzer.h>
+#include <tvm/arith/int_solver.h>
+#include <tvm/arith/pattern.h>
+#include <tvm/runtime/data_type.h>
+#include <tvm/runtime/registry.h>
+#include <tvm/tir/expr.h>
+#include <tvm/tir/op.h>
+#include <tvm/tir/stmt_functor.h>
+#include <vector>
+
+namespace tvm {
+namespace arith {
+
+using namespace tvm::runtime;
+
+void SmithNormalFormDiag(std::vector<std::vector<int64_t>>* S,
+                         std::vector<std::vector<int64_t>>* V,
+                         std::vector<PrimExpr>* x,
+                         std::vector<PrimExpr>* y) {
+  if (S->empty() || V->empty()) return;
+  size_t m = S->size();
+  size_t n = (*S)[0].size();  // n is # of variables
+  CHECK_EQ(V->size(), n);
+  CHECK_EQ((*V)[0].size(), n);
+
+  for (size_t index = 0; index < std::min(m, n); ++index) {
+    // Here A is partially diagonalized, that is A[i, j] is zero for all i, j
+    // such that (i < index) or (j < index), unless (i == j).
+    // That is, now we are diagonalizing the submatrix with i >= index and j >= index
+
+    // Find a row with a nonzero element in the index-th column
+    // (We also prefer rows where this element has minimal abs value)
+    size_t best_i = index;
+    for (size_t i = best_i; i < m; ++i) {
+      int64_t s_old = (*S)[best_i][index];
+      int64_t s_new = (*S)[i][index];
+      if (s_new != 0) {
+        if (s_old == 0 || std::abs(s_new) < std::abs(s_old)) {
+          best_i = i;
+        }
+      }
+    }
+    // Move the row we found to the index-th position
+    std::swap((*S)[index], (*S)[best_i]);
+    std::swap((*y)[index], (*y)[best_i]);
+
+    // If the index-th diagonal element is still zero, try to find a column
+    // with nonzero index-th element and move it to the index-th position
+    if ((*S)[index][index] == 0) {
+      for (size_t j = index + 1; j < n; ++j) {
+        if ((*S)[index][j] != 0) {
+          for (size_t i = index; i < m; ++i) {
+            std::swap((*S)[i][index], (*S)[i][j]);
+          }
+          // swapping columns corresponds to swapping the corresponding x
+          std::swap((*x)[index], (*x)[j]);
+          for (size_t i = 0; i < n; ++i) {
+            std::swap((*V)[i][index], (*V)[i][j]);
+          }
+          break;
+        }
+      }
+    }
+
+    // If the index-th diagonal element is still zero, then both the index-th row
+    // and the index-th column are completely zero, and we don't need to do anything;
+    // just go to the next index
+    if ((*S)[index][index] == 0) {
+      continue;
+    }
+
+    // Now the index-th diagonal element is non-zero and we can zero all the
+    // index-th column below it by subtracting rows from each other
+    for (auto i = index + 1; i < m; ++i) {
+      if ((*S)[i][index] != 0) {
+        int64_t g, a, b;
+        // g = a*matrix[index][index] + b*matrix[i][index]
+        if ((*S)[i][index] % (*S)[index][index] != 0) {
+          std::tie(g, a, b) = xgcd((*S)[index][index], (*S)[i][index]);
+        } else {
+          // Explicitly avoid changing the index-th row. This is important
+          // to avoid infinite loop.
+          g = (*S)[index][index];
+          a = 1;
+          b = 0;
+        }
+
+        // Let m = S[index][index], n = S[i][index], then the following is true:
+        //
+        // [ a   n/g ][ m/g  n/g ]   [ 1  0 ]
+        // [ b  -m/g ][ b    -a  ] = [ 0  1 ]
+        //
+        // Note that the two matrices are integer (since g = gcd(m, n)).
+        // We will essentially multiply our matrix on the left by a dilated and
+        // transposed version of the first of these two matrices. The second matrix
+        // is not needed here, however we will use it while zeroing the index-th row.
+
+        int64_t m_g = (*S)[index][index] / g;
+        int64_t n_g = (*S)[i][index] / g;
+
+        // Note that j is the index of the column, not the row
+        for (size_t j = index; j < (*S)[i].size(); ++j) {
+          // Multiply index-th row by a and add the i-th row multiplied by b
+          // This will make

[GitHub] [incubator-tvm] yzhliu commented on a change in pull request #5171: [Arith] linear system and equation solver

2020-04-07 Thread GitBox
yzhliu commented on a change in pull request #5171: [Arith] linear system and 
equation solver
URL: https://github.com/apache/incubator-tvm/pull/5171#discussion_r405212419
 
 

 ##
 File path: include/tvm/arith/int_solver.h
 ##
 @@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file tvm/arith/int_solver.h
+ * \brief integer constraints data structures and solvers
+ */
+#ifndef TVM_ARITH_INT_SOLVER_H_
+#define TVM_ARITH_INT_SOLVER_H_
+
+#include <tvm/ir/expr.h>
+#include <tvm/tir/expr.h>
+#include <unordered_map>
+#include <vector>
+
+namespace tvm {
+namespace arith {
+
+using tir::Var;
+using tir::VarNode;
+using tir::IterVar;
+
+/*!
+ * \brief Represent integer constraints including (integer) variables, their ranges
+ *        and the relations between them (either equations or inequalities).
+ * \sa LinearSystem
+ */
+class IntConstraintsNode : public Object {
+ public:
+  // e.g., \alpha, \beta, must be integers
+  Array<Var> variables;
+  // e.g., 1 <= \alpha <= N, etc.
+  Map<Var, Range> ranges;
+  // linear equalities or inequalities
+  // e.g., A \alpha = \beta or A \alpha <= \beta
+  Array<PrimExpr> relations;
+
+  void VisitAttrs(tvm::AttrVisitor* v) {
+    v->Visit("variables", &variables);
+    v->Visit("ranges", &ranges);
+    v->Visit("relations", &relations);
+  }
+
+  static constexpr const char* _type_key = "arith.IntConstraints";
+  TVM_DECLARE_FINAL_OBJECT_INFO(IntConstraintsNode, Object);
+};
+
+/*!
+ * \brief Managed reference to IntConstraintsNode.
+ * \sa IntConstraintsNode
+ */
+class IntConstraints : public ObjectRef {
+ public:
+  /*!
+   * \brief Constructor by fields
+   * \param variables The variables in the constraints, must be integers.
+   * \param ranges    The ranges of the variables.
+   * \param relations The linear relations between the variables
+   *                  (either equations or inequalities)
+   */
+  TVM_DLL IntConstraints(Array<Var> variables,
+                         Map<Var, Range> ranges,
+                         Array<PrimExpr> relations);
+
+  TVM_DEFINE_OBJECT_REF_METHODS(IntConstraints, ObjectRef, IntConstraintsNode);
+};
+
+/*!
+ * \brief We can have different sets of variables to represent the same constraints.
+ *        For example, the following two systems are equivalent,
+ *        {a + b = 0 | a >= 0, b >= 0} and
+ *        {m - n = 0 | m >= 0, n <= 0}
+ *        This data structure represents the transformation
+ *        between two equivalent linear systems.
+ *        In the above example,
+ *        src        : {a + b = 0 | a >= 0, b >= 0}
+ *        dst        : {m - n = 0 | m >= 0, n <= 0}
+ *        src_to_dst : {a -> m, b -> -n}
+ *        dst_to_src : {m -> a, n -> -b}
+ * \sa IntConstraintsTransform
+ */
+class IntConstraintsTransformNode : public Object {
+ public:
+  IntConstraints src;
+  IntConstraints dst;
+  Map<Var, PrimExpr> src_to_dst;
+  Map<Var, PrimExpr> dst_to_src;
+
+  void VisitAttrs(tvm::AttrVisitor* v) {
+    v->Visit("src", &src);
+    v->Visit("dst", &dst);
+    v->Visit("src_to_dst", &src_to_dst);
+    v->Visit("dst_to_src", &dst_to_src);
+  }
+
+  static constexpr const char* _type_key = "arith.IntConstraintsTransform";
+  TVM_DECLARE_FINAL_OBJECT_INFO(IntConstraintsTransformNode, Object);
+};
+
+/*!
+ * \brief Managed reference to IntConstraintsTransformNode.
+ * \sa IntConstraintsTransformNode
+ */
+class IntConstraintsTransform : public ObjectRef {
+ public:
+  /*!
+   * \brief Constructor by fields
+   * \param src        source integer constraints, e.g., {a + b = 0 | a >= 0, b >= 0}
+   * \param dst        integer constraints equivalent to the source,
+   *                   e.g., {m - n = 0 | m >= 0, n <= 0}
+   * \param src_to_dst mapping from variables in the \p src to the variables in the \p dst,
+   *                   e.g., {a -> m, b -> -n}
+   * \param dst_to_src mapping from variables in the \p dst to the variables in the \p src,
+   *                   e.g., {m -> a, n -> -b}
+   */
+  TVM_DLL IntConstraintsTransform(IntConstraints src,
+                                  IntConstraints dst,
+                                  Map<Var, PrimExpr> src_to_dst,
+                                  Map<Var, PrimExpr> dst_to_src);
+
+  TVM_DEFINE_OBJECT_REF_METHODS(IntConstraintsTransform, ObjectRef, IntConstraintsTransformNode);
+};
+
+/*!
+ * \brief Obtain Smith Normal Form of linear equation A
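For orientation, a sketch of how these structures surface on the Python side (based on the tests in this PR; the binding name and exact signature are assumed here):

   import tvm
   from tvm import te, arith

   x, y = te.var("x"), te.var("y")
   ranges = {x: tvm.ir.Range(-100, 100), y: tvm.ir.Range(0, 10)}

   # Solve {x + y = 20, x - y = 10} over the given ranges. The result is an
   # IntConstraintsTransform: dst holds the simplified system, and
   # src_to_dst / dst_to_src relate the two sets of variables.
   solution = arith.solve_linear_equations(
       [x + y == 20, x - y == 10], [x, y], ranges)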

[GitHub] [incubator-tvm] sergei-grechanik commented on a change in pull request #5171: [Arith] linear system and equation solver

2020-04-07 Thread GitBox
sergei-grechanik commented on a change in pull request #5171: [Arith] linear 
system and equation solver
URL: https://github.com/apache/incubator-tvm/pull/5171#discussion_r405211094
 
 

 ##
 File path: include/tvm/arith/int_solver.h
 ##
 @@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file tvm/arith/int_solver.h
+ * \brief integer constraints data structures and solvers
+ */
+#ifndef TVM_ARITH_INT_SOLVER_H_
+#define TVM_ARITH_INT_SOLVER_H_
+
+#include <tvm/ir/expr.h>
+#include <tvm/tir/expr.h>
+#include <unordered_map>
+#include <vector>
+
+namespace tvm {
+namespace arith {
+
+using tir::Var;
+using tir::VarNode;
+using tir::IterVar;
+
+/*!
+ * \brief Represent integer constraints including (integer) variables, their ranges
+ *        and the relations between them (either equations or inequalities).
+ * \sa LinearSystem
+ */
+class IntConstraintsNode : public Object {
+ public:
+  // e.g., \alpha, \beta, must be integers
+  Array<Var> variables;
+  // e.g., 1 <= \alpha <= N, etc.
+  Map<Var, Range> ranges;
+  // linear equalities or inequalities
+  // e.g., A \alpha = \beta or A \alpha <= \beta
+  Array<PrimExpr> relations;
+
+  void VisitAttrs(tvm::AttrVisitor* v) {
+    v->Visit("variables", &variables);
+    v->Visit("ranges", &ranges);
+    v->Visit("relations", &relations);
+  }
+
+  static constexpr const char* _type_key = "arith.IntConstraints";
+  TVM_DECLARE_FINAL_OBJECT_INFO(IntConstraintsNode, Object);
+};
+
+/*!
+ * \brief Managed reference to IntConstraintsNode.
+ * \sa IntConstraintsNode
+ */
+class IntConstraints : public ObjectRef {
+ public:
+  /*!
+   * \brief Constructor by fields
+   * \param variables The variables in the constraints, must be integers.
+   * \param ranges    The ranges of the variables.
+   * \param relations The linear relations between the variables
+   *                  (either equations or inequalities)
+   */
+  TVM_DLL IntConstraints(Array<Var> variables,
+                         Map<Var, Range> ranges,
+                         Array<PrimExpr> relations);
+
+  TVM_DEFINE_OBJECT_REF_METHODS(IntConstraints, ObjectRef, IntConstraintsNode);
+};
+
+/*!
+ * \brief We can have different sets of variables to represent the same constraints.
+ *        For example, the following two systems are equivalent,
+ *        {a + b = 0 | a >= 0, b >= 0} and
+ *        {m - n = 0 | m >= 0, n <= 0}
+ *        This data structure represents the transformation
+ *        between two equivalent linear systems.
+ *        In the above example,
+ *        src        : {a + b = 0 | a >= 0, b >= 0}
+ *        dst        : {m - n = 0 | m >= 0, n <= 0}
+ *        src_to_dst : {a -> m, b -> -n}
+ *        dst_to_src : {m -> a, n -> -b}
+ * \sa IntConstraintsTransform
+ */
+class IntConstraintsTransformNode : public Object {
+ public:
+  IntConstraints src;
+  IntConstraints dst;
+  Map<Var, PrimExpr> src_to_dst;
+  Map<Var, PrimExpr> dst_to_src;
+
+  void VisitAttrs(tvm::AttrVisitor* v) {
+    v->Visit("src", &src);
+    v->Visit("dst", &dst);
+    v->Visit("src_to_dst", &src_to_dst);
+    v->Visit("dst_to_src", &dst_to_src);
+  }
+
+  static constexpr const char* _type_key = "arith.IntConstraintsTransform";
+  TVM_DECLARE_FINAL_OBJECT_INFO(IntConstraintsTransformNode, Object);
+};
+
+/*!
+ * \brief Managed reference to IntConstraintsTransformNode.
+ * \sa IntConstraintsTransformNode
+ */
+class IntConstraintsTransform : public ObjectRef {
+ public:
+  /*!
+   * \brief Constructor by fields
+   * \param src        source integer constraints, e.g., {a + b = 0 | a >= 0, b >= 0}
+   * \param dst        integer constraints equivalent to the source,
+   *                   e.g., {m - n = 0 | m >= 0, n <= 0}
+   * \param src_to_dst mapping from variables in the \p src to the variables in the \p dst,
+   *                   e.g., {a -> m, b -> -n}
+   * \param dst_to_src mapping from variables in the \p dst to the variables in the \p src,
+   *                   e.g., {m -> a, n -> -b}
+   */
+  TVM_DLL IntConstraintsTransform(IntConstraints src,
+                                  IntConstraints dst,
+                                  Map<Var, PrimExpr> src_to_dst,
+                                  Map<Var, PrimExpr> dst_to_src);
+
+  TVM_DEFINE_OBJECT_REF_METHODS(IntConstraintsTransform, ObjectRef, IntConstraintsTransformNode);
+};
+
+/*!
+ * \brief Obtain Smith Normal Form of linear

[GitHub] [incubator-tvm] tqchen opened a new pull request #5271: [RUNTIME] Introduce RValue reference(move) support to TypedPackedFunc

2020-04-07 Thread GitBox
tqchen opened a new pull request #5271: [RUNTIME] Introduce RValue reference(move) support to TypedPackedFunc
URL: https://github.com/apache/incubator-tvm/pull/5271
 
 
   This PR introduces RValue reference support in the PackedFunc calling convention to address the above issue. Specifically, when an argument is an r-value reference, we assign a different type code (`kObjectRValueRefArg`) and pass `Object**` (the address of the Object pointer) through the values array. The callee can choose to move out this Object pointer and set the original Object pointer on the caller side to nullptr.
   
   We also add move support on the Python side. This enhancement will enable copy-on-write optimizations throughout the TVM stack.
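   A toy illustration of the calling convention described above (plain C++, not TVM's actual implementation): passing the address of the object pointer lets the callee steal the reference and null out the caller's copy.

   #include <cassert>

   struct Object { int ref_count = 1; };

   // Callee side: an argument tagged kObjectRValueRefArg arrives as Object**,
   // so ownership can be moved out without touching the reference count.
   void CalleeMovesOut(Object** rv_ref) {
     Object* stolen = *rv_ref;  // take ownership, no refcount bump
     *rv_ref = nullptr;         // caller's pointer is now empty
     delete stolen;             // callee owns it; free it when done
   }

   int main() {
     Object* obj = new Object();
     CalleeMovesOut(&obj);      // analogous to f(std::move(obj))
     assert(obj == nullptr);    // the caller side observes the move
     return 0;
   }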
   
   




[GitHub] [incubator-tvm] sergei-grechanik commented on a change in pull request #5171: [Arith] linear system and equation solver

2020-04-07 Thread GitBox
sergei-grechanik commented on a change in pull request #5171: [Arith] linear system and equation solver
URL: https://github.com/apache/incubator-tvm/pull/5171#discussion_r405204662
 
 

 ##
 File path: tests/python/unittest/test_arith_solve_linear_system.py
 ##
 @@ -14,9 +14,130 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+import random
+import numpy as np
 import tvm
-from tvm import te, arith
-from tvm.tir import ir_pass
+from tvm import te, arith, ir, tir
+
+
+def run_expr(expr, vranges):
+    def _compute_body(*us):
+        vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
+        return tir.ir_pass.Substitute(expr, vmap)
+
+    A = te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
+    args = [tvm.nd.empty(A.shape, A.dtype)]
+    sch = te.create_schedule(A.op)
+    mod = tvm.build(sch, [A])
+    mod(*args)
+    return args[0].asnumpy()
+
+
+def check_bruteforce(bool_expr, vranges, cond=None):
+    if cond is not None:
+        bool_expr = te.any(tir.Not(cond), bool_expr)
+
+    res = run_expr(bool_expr, vranges)
+    if not np.all(res):
+        indices = list(np.argwhere(res == 0)[0])
+        counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]
+        counterex = sorted(counterex, key=lambda x: x[0])
+        counterex = ", ".join([v + " = " + str(i) for v, i in counterex])
+        raise AssertionError("Expression {}\nis not true on {}\n"
+                             "Counterexample: {}"
+                             .format(tir.ir_pass.CanonicalSimplify(bool_expr), vranges, counterex))
+
+
+def check_solution(solution, vranges={}):
+    def _check_forward(formula1, formula2, varmap, backvarmap):
+        all_vranges = vranges.copy()
+        all_vranges.update({v: r for v, r in formula1.ranges.items()})
+
+        # Check that the transformation is injective
+        cond_on_vars = tir.const(1, 'bool')
+        for v in formula1.variables:
+            # variable mapping is consistent
+            v_back = tir.ir_pass.Simplify(tir.ir_pass.Substitute(varmap[v], backvarmap))
+            cond_on_vars = te.all(cond_on_vars, v == v_back)
+        # Also we have to check that the new relations are true when old relations are true
+        cond_subst = tir.ir_pass.Substitute(
+            te.all(tir.const(1, 'bool'), *formula2.relations), backvarmap)
+        # We have to include relations from vranges too
+        for v in formula2.variables:
+            if v in formula2.ranges:
+                r = formula2.ranges[v]
+                range_cond = te.all(v >= r.min, v < r.min + r.extent)
+                range_cond = tir.ir_pass.Substitute(range_cond, backvarmap)
+                cond_subst = te.all(cond_subst, range_cond)
+        cond_subst = tir.ir_pass.Simplify(cond_subst)
+        check_bruteforce(te.all(cond_subst, cond_on_vars), all_vranges,
+                         cond=te.all(tir.const(1, 'bool'), *formula1.relations))
+
+    rels = solution.dst.relations
+    if len(rels) == 1 and ir.structural_equal(rels[0], False):
+        # not solvable, skip
+        return
+    _check_forward(solution.src, solution.dst,
+                   solution.src_to_dst, solution.dst_to_src)
+    _check_forward(solution.dst, solution.src,
+                   solution.dst_to_src, solution.src_to_dst)
+
+
+def test_solution_consistency():
+    random.seed(0)
 
 Review comment:
   Personally, I think it is a good idea. I'm just worried that people won't know what to do with such a failure and will just complain about flaky tests instead of reporting the unlucky seed. Maybe print some message like "this test is intentionally nondeterministic, if it fails please report it together with this seed"?
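   A sketch of what the two suggestions combined could look like (illustrative, pytest-style; not code from the PR):

   import random
   import time

   def test_solution_consistency():
       seed = int(time.time())
       print("seed: {} -- this test is intentionally nondeterministic; "
             "if it fails, please report it together with this seed".format(seed))
       random.seed(seed)
       # ... generate random systems and call check_solution ...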




[GitHub] [incubator-tvm] masahi commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610695078
 
 
   @zhiics Yes, I'll review and merge this today.
   @alexbooth The error you get is because now, with composite, the op inside a CallNode can be a FunctionNode rather than an OpNode. I'll send a DNNL change today so you can take a look.
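   Roughly what a codegen visitor has to account for (a sketch; the attribute name follows the composite convention from #5261):

   from tvm import relay

   def callee_name(call):
       if isinstance(call.op, relay.Function):
           # composite function: identified by its "Composite" attribute,
           # not by an operator name
           return str(call.op.attrs["Composite"])
       # ordinary operator call (an OpNode)
       return call.op.name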




[GitHub] [incubator-tvm] zhiics commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
zhiics commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610690039
 
 
   @masahi yeah, inlining the function you pointed out is a little tricky because it varies case by case. I intentionally made it a bit more conservative and left it for the external codegen to handle. In the long run, it would be more helpful if we made the passes more configurable.




[GitHub] [incubator-tvm] yzhliu commented on a change in pull request #5171: [Arith] linear system and equation solver

2020-04-07 Thread GitBox
yzhliu commented on a change in pull request #5171: [Arith] linear system and equation solver
URL: https://github.com/apache/incubator-tvm/pull/5171#discussion_r405194241
 
 

 ##
 File path: tests/python/unittest/test_arith_solve_linear_system.py
 ##
 @@ -14,9 +14,130 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+import random
+import numpy as np
 import tvm
-from tvm import te, arith
-from tvm.tir import ir_pass
+from tvm import te, arith, ir, tir
+
+
+def run_expr(expr, vranges):
+    def _compute_body(*us):
+        vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
+        return tir.ir_pass.Substitute(expr, vmap)
+
+    A = te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
+    args = [tvm.nd.empty(A.shape, A.dtype)]
+    sch = te.create_schedule(A.op)
+    mod = tvm.build(sch, [A])
+    mod(*args)
+    return args[0].asnumpy()
+
+
+def check_bruteforce(bool_expr, vranges, cond=None):
+    if cond is not None:
+        bool_expr = te.any(tir.Not(cond), bool_expr)
+
+    res = run_expr(bool_expr, vranges)
+    if not np.all(res):
+        indices = list(np.argwhere(res == 0)[0])
+        counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]
+        counterex = sorted(counterex, key=lambda x: x[0])
+        counterex = ", ".join([v + " = " + str(i) for v, i in counterex])
+        raise AssertionError("Expression {}\nis not true on {}\n"
+                             "Counterexample: {}"
+                             .format(tir.ir_pass.CanonicalSimplify(bool_expr), vranges, counterex))
+
+
+def check_solution(solution, vranges={}):
+    def _check_forward(formula1, formula2, varmap, backvarmap):
+        all_vranges = vranges.copy()
+        all_vranges.update({v: r for v, r in formula1.ranges.items()})
+
+        # Check that the transformation is injective
+        cond_on_vars = tir.const(1, 'bool')
+        for v in formula1.variables:
+            # variable mapping is consistent
+            v_back = tir.ir_pass.Simplify(tir.ir_pass.Substitute(varmap[v], backvarmap))
+            cond_on_vars = te.all(cond_on_vars, v == v_back)
+        # Also we have to check that the new relations are true when old relations are true
+        cond_subst = tir.ir_pass.Substitute(
+            te.all(tir.const(1, 'bool'), *formula2.relations), backvarmap)
+        # We have to include relations from vranges too
+        for v in formula2.variables:
+            if v in formula2.ranges:
+                r = formula2.ranges[v]
+                range_cond = te.all(v >= r.min, v < r.min + r.extent)
+                range_cond = tir.ir_pass.Substitute(range_cond, backvarmap)
+                cond_subst = te.all(cond_subst, range_cond)
+        cond_subst = tir.ir_pass.Simplify(cond_subst)
+        check_bruteforce(te.all(cond_subst, cond_on_vars), all_vranges,
+                         cond=te.all(tir.const(1, 'bool'), *formula1.relations))
+
+    rels = solution.dst.relations
+    if len(rels) == 1 and ir.structural_equal(rels[0], False):
+        # not solvable, skip
+        return
+    _check_forward(solution.src, solution.dst,
+                   solution.src_to_dst, solution.dst_to_src)
+    _check_forward(solution.dst, solution.src,
+                   solution.dst_to_src, solution.src_to_dst)
+
+
+def test_solution_consistency():
+    random.seed(0)
 
 Review comment:
   how about we always use a random seed, and print out the seed so that we will be able to reproduce once triggered?




[GitHub] [incubator-tvm] zhiics commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
zhiics commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610688475
 
 
   @alexbooth Thanks for pointing this out. The purpose of codegen_c is mainly quick prototyping. Would you be interested in sending a PR to add support for composite functions after this is merged?




[GitHub] [incubator-tvm] zhiics commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
zhiics commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610687622
 
 
   @masahi @trevor-m can you guys help review as well?




[GitHub] [incubator-tvm] anijain2305 commented on issue #5270: [CI] Change MxNet from MKL verion to regular CPU version

2020-04-07 Thread GitBox
anijain2305 commented on issue #5270: [CI] Change MxNet from MKL verion to regular CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610685005
 
 
   > @anijain2305 What do you mean by ARM CPUs? mxnet doesn't have a pip wheel for ARM.
   
   Nvm, I got confused. I was thinking that if we add ARM CPUs to CI in the future, it might cause issues (as mxnet-mkl is not supported on ARM).




[GitHub] [incubator-tvm] anijain2305 edited a comment on issue #5270: [CI] Change MxNet from MKL verion to regular CPU version

2020-04-07 Thread GitBox
anijain2305 edited a comment on issue #5270: [CI] Change MxNet from MKL verion to regular CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610685005
 
 
   > @anijain2305 What do you mean by ARM CPUs? mxnet doesn't have a pip wheel for ARM.
   
   Nvm, I got confused. I was thinking that if we add ARM CPUs to CI in the future, will it cause issues (as mxnet-mkl is not supported on ARM)?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] shoubhik commented on issue #5270: [CI] Change MxNet from MKL version to regular CPU version

2020-04-07 Thread GitBox
shoubhik commented on issue #5270: [CI] Change MxNet from MKL version to regular 
CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610684437
 
 
   If mxnet-mkl does not break any existing functionality, we should probably 
take it as a dependency. That gives us enough wiggle room and infrastructure in 
the future to experiment with how we can add tests for MXNet QNN. What do you 
guys think?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] icemelon9 commented on issue #5270: [CI] Change MxNet from MKL version to regular CPU version

2020-04-07 Thread GitBox
icemelon9 commented on issue #5270: [CI] Change MxNet from MKL version to 
regular CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610683044
 
 
   @anijain2305 What do you mean by ARM CPUs? mxnet doesn't have a pip wheel for ARM.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] masahi commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi commented on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610682499
 
 
   Ok, got the DNNL conv + bias + relu working 
(https://github.com/masahi/tvm/compare/byoc-composite...masahi:dnnl-composite). 
This is a reimplementation of https://github.com/apache/incubator-tvm/pull/4741 
based on the composite annotation support in this PR, rather than the manual 
approach. I think it is much cleaner.
   
   I can send a new PR as soon as this PR is merged.
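For reference, a minimal sketch of the kind of Relay expression pattern such a reimplementation would hand to MergeComposite for conv + bias + relu (the helper is illustrative; only the pattern name dnnl.conv_bias_relu comes from the IR dumps in this thread):

```python
from tvm import relay

def make_conv_bias_relu_pattern():
    # Relay expression template matched by MergeComposite:
    # conv2d -> add(bias) -> relu (illustrative sketch)
    data = relay.var("data")
    weight = relay.var("weight")
    bias = relay.var("bias")
    conv = relay.nn.conv2d(data, weight)
    return relay.nn.relu(relay.add(conv, bias))

# (name, pattern) pairs in the form MergeComposite consumed at the time
pattern_table = [("dnnl.conv_bias_relu", make_conv_bias_relu_pattern())]
```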


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen commented on issue #4459: [RUNTIME] Implement TVMDSOOp(TensorFlow custom op) for TVM runtime

2020-04-07 Thread GitBox
tqchen commented on issue #4459: [RUNTIME] Implement TVMDSOOp(TensorFlow custom 
op) for TVM runtime
URL: https://github.com/apache/incubator-tvm/pull/4459#issuecomment-610677853
 
 
   Thanks @tobegit3hub @FrozenGene @zhiics @soiferj @wrongtest @gmagogsfm 
@yzhliu . This PR is now merged


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated (4e00763 -> 53a4ad3)

2020-04-07 Thread tqchen
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from 4e00763  [LLVM] Do not use x86_vcvtph2ps_256 intrinsic with LLVM 11+ 
(#5267)
 add 53a4ad3  [RUNTIME] Implement TVMDSOOp(TensorFlow custom op) for TVM 
runtime (#4459)

No new revisions were added by this update.

Summary of changes:
 CMakeLists.txt |   2 +
 .../tf_tvmdsoop/CMakeLists.txt |  27 +-
 .../tf_tvmdsoop/prepare_and_test_tfop_module.sh|  21 +-
 apps/tf_tvmdsoop/tests/test_tfop_module.py | 118 
 cmake/config.cmake |   4 +
 cmake/modules/contrib/TF_TVMDSOOP.cmake|  58 
 .../device/arm => contrib/tf_op}/__init__.py   |   5 +-
 python/tvm/contrib/tf_op/module.py | 113 
 src/contrib/tf_op/tvm_dso_op_kernels.cc| 310 +
 .../contrib/tf_op/tvm_dso_ops.cc   |  40 +--
 tests/scripts/task_python_integration.sh   |   3 +
 11 files changed, 649 insertions(+), 52 deletions(-)
 copy docker/Dockerfile.ci_lint => apps/tf_tvmdsoop/CMakeLists.txt (58%)
 copy tests/scripts/task_python_nightly.sh => 
apps/tf_tvmdsoop/prepare_and_test_tfop_module.sh (60%)
 mode change 100755 => 100644
 create mode 100644 apps/tf_tvmdsoop/tests/test_tfop_module.py
 create mode 100644 cmake/modules/contrib/TF_TVMDSOOP.cmake
 copy python/tvm/{micro/device/arm => contrib/tf_op}/__init__.py (89%)
 create mode 100644 python/tvm/contrib/tf_op/module.py
 create mode 100644 src/contrib/tf_op/tvm_dso_op_kernels.cc
 copy apps/ios_rpc/tvmrpcLauncher/tvmrpcLauncher.mm => 
src/contrib/tf_op/tvm_dso_ops.cc (63%)



[GitHub] [incubator-tvm] tqchen merged pull request #4459: [RUNTIME] Implement TVMDSOOp(TensorFlow custom op) for TVM runtime

2020-04-07 Thread GitBox
tqchen merged pull request #4459: [RUNTIME] Implement TVMDSOOp(TensorFlow 
custom op) for TVM runtime
URL: https://github.com/apache/incubator-tvm/pull/4459
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen merged pull request #5267: [LLVM] Do not use x86_vcvtph2ps_256 intrinsic with LLVM 11+

2020-04-07 Thread GitBox
tqchen merged pull request #5267: [LLVM] Do not use x86_vcvtph2ps_256 intrinsic 
with LLVM 11+
URL: https://github.com/apache/incubator-tvm/pull/5267
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated (2942278 -> 4e00763)

2020-04-07 Thread tqchen
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from 2942278  [RUNTIME] Quick fix PackedFunc String passing (#5266)
 add 4e00763  [LLVM] Do not use x86_vcvtph2ps_256 intrinsic with LLVM 11+ 
(#5267)

No new revisions were added by this update.

Summary of changes:
 src/target/llvm/codegen_x86_64.cc | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)



[GitHub] [incubator-tvm] anijain2305 commented on issue #5270: [CI] Change MxNet from MKL version to regular CPU version

2020-04-07 Thread GitBox
anijain2305 commented on issue #5270: [CI] Change MxNet from MKL version to 
regular CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610675721
 
 
   How about ARM CPUs?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] icemelon9 commented on issue #5270: [CI] Change MxNet from MKL version to regular CPU version

2020-04-07 Thread GitBox
icemelon9 commented on issue #5270: [CI] Change MxNet from MKL version to 
regular CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610673485
 
 
   According to the link @leezu shared, mxnet-mkl is able to run on AMD CPUs. So 
should we just keep mxnet-mkl so that @shoubhik can use the quantization 
feature for test cases?
   
   @tqchen 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] icemelon9 edited a comment on issue #5270: [CI] Change MxNet from MKL version to regular CPU version

2020-04-07 Thread GitBox
icemelon9 edited a comment on issue #5270: [CI] Change MxNet from MKL version to 
regular CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610673485
 
 
   Thanks @leezu for the clarification. According to the link @leezu shared, 
mxnet-mkl is able to run on AMD CPUs. So should we just keep mxnet-mkl so that 
@shoubhik can use the quantization feature for test cases?
   
   @tqchen 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated (df8a6f3 -> 2942278)

2020-04-07 Thread zhic
This is an automated email from the ASF dual-hosted git repository.

zhic pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from df8a6f3  [LLVM] Use llvm::ElementCount with LLVM 11+ when creating 
vectors (#5265)
 add 2942278  [RUNTIME] Quick fix PackedFunc String passing (#5266)

No new revisions were added by this update.

Summary of changes:
 include/tvm/runtime/packed_func.h | 14 ++
 tests/cpp/packed_func_test.cc |  6 ++
 2 files changed, 16 insertions(+), 4 deletions(-)



[GitHub] [incubator-tvm] zhiics merged pull request #5266: [RUNTIME] Quick fix PackedFunc String passing

2020-04-07 Thread GitBox
zhiics merged pull request #5266: [RUNTIME] Quick fix PackedFunc String passing
URL: https://github.com/apache/incubator-tvm/pull/5266
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] masahi commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi commented on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610666034
 
 
   @trevor-m Yeah, agreed. I'm looking to support either function calls or "manual 
inlining" in the codegen.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] leezu commented on issue #5270: [CI] Change MxNet from MKL version to regular CPU version

2020-04-07 Thread GitBox
leezu commented on issue #5270: [CI] Change MxNet from MKL version to regular 
CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610664896
 
 
   Any release after mxnet 1.6 will be built with mkldnn by default. If you don't 
want to use mkldnn, you will need to use the mxnet-native package in the future. See 
https://lists.apache.org/thread.html/1a22dbd79098adab6d02d16e8d607bae2acc908c0bb1b085d28a51ba@%3Cdev.mxnet.apache.org%3E


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] leezu commented on issue #5270: [CI] Change MxNet from MKL version to regular CPU version

2020-04-07 Thread GitBox
leezu commented on issue #5270: [CI] Change MxNet from MKL version to regular 
CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270#issuecomment-610664463
 
 
   mxnet-mkl is not built with MKL but only with MKLDNN. The name is confusing.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] trevor-m commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
trevor-m commented on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610663673
 
 
   > Using MergeComposite, AnnotateTarget and PartitionGraph, I get the 
following graph for the conv + bias + relu pattern:
   > 
   > ```
   > def @dnnl_0(%dnnl_0_i0: Tensor[(1, 3, 224, 224), float32], Inline=1, 
Compiler="dnnl", global_symbol=runtime.String(0x55a8d9cddbd0), Primitive=1) -> 
Tensor[(1, 1, 224, 224), float32] {
   >   %2 = fn (%data: Tensor[(1, 3, 224, 224), float32], %weight: Tensor[(1, 
3, 3, 3), float32], %bias: Tensor[(1, 1, 1), float32], 
Composite="dnnl.conv_bias_relu") -> Tensor[(1, 1, 224, 224), float32] {
   > %0 = nn.conv2d(%data, %weight, padding=[1, 1, 1, 1], channels=1, 
kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   > %1 = add(%0, %bias) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   > nn.relu(%1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   >   };
   >   %2(%dnnl_0_i0, meta[relay.Constant][0] /* ty=Tensor[(1, 3, 3, 3), 
float32] */ /* ty=Tensor[(1, 3, 3, 3), float32] */, meta[relay.Constant][1] /* 
ty=Tensor[(1, 1, 1), float32] */ /* ty=Tensor[(1, 1, 1), float32] */) /* 
ty=Tensor[(1, 1, 224, 224), float32] */
   > }
   > 
   > def @main(%data1: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1, 224, 
224), float32] {
   >   @dnnl_0(%data1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   > }
   > ```
   > 
   > Is it possible to inline the composite function `%2` there into `dnnl_0`? 
What I want is this:
   > 
   > ```
   > def @dnnl_0(%dnnl_0_i0: Tensor[(1, 3, 224, 224), float32], Inline=1, 
Compiler="dnnl", global_symbol=runtime.String(0x5599b307c370), Primitive=1) -> 
Tensor[(1, 1, 224, 224), float32] {
   >   %0 = nn.conv2d(%dnnl_0_i0, meta[relay.Constant][0] /* ty=Tensor[(1, 3, 
3, 3), float32] */ /* ty=Tensor[(1, 3, 3, 3), float32] */, padding=[1, 1, 1, 
1], channels=1, kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   >   %1 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(1, 1, 1), float32] */ 
/* ty=Tensor[(1, 1, 1), float32] */) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   >   nn.relu(%1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   > }
   > 
   > def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1, 224, 
224), float32] {
   >   @dnnl_0(%data) /* ty=Tensor[(1, 1, 224, 224), float32] */
   > }
   > ```
   > 
   > Otherwise I have to support function calls in the DNNL codegen. @zhiics @mbaret
   > 
   > UPDATE: hmm, if I inline the composite function, I lose the composite 
attribute and hence cannot detect the fused call. Is supporting function calls in 
the DNNL codegen a better option?
   
   Hi @masahi, I think it is best to leave this to the codegen. That way we 
have the option to handle a composite function all at once at the call node, or 
individually by just doing VisitExpr(func->body) when we encounter one.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] masahi edited a comment on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi edited a comment on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610659753
 
 
   Using MergeComposite, AnnotateTarget and PartitionGraph, I get the following 
graph for the conv + bias + relu pattern:
   
   ```
   def @dnnl_0(%dnnl_0_i0: Tensor[(1, 3, 224, 224), float32], Inline=1, 
Compiler="dnnl", global_symbol=runtime.String(0x55a8d9cddbd0), Primitive=1) -> 
Tensor[(1, 1, 224, 224), float32] {
 %2 = fn (%data: Tensor[(1, 3, 224, 224), float32], %weight: Tensor[(1, 3, 
3, 3), float32], %bias: Tensor[(1, 1, 1), float32], 
Composite="dnnl.conv_bias_relu") -> Tensor[(1, 1, 224, 224), float32] {
   %0 = nn.conv2d(%data, %weight, padding=[1, 1, 1, 1], channels=1, 
kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   %1 = add(%0, %bias) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   nn.relu(%1) /* ty=Tensor[(1, 1, 224, 224), float32] */
 };
 %2(%dnnl_0_i0, meta[relay.Constant][0] /* ty=Tensor[(1, 3, 3, 3), float32] 
*/ /* ty=Tensor[(1, 3, 3, 3), float32] */, meta[relay.Constant][1] /* 
ty=Tensor[(1, 1, 1), float32] */ /* ty=Tensor[(1, 1, 1), float32] */) /* 
ty=Tensor[(1, 1, 224, 224), float32] */
   }
   
   def @main(%data1: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1, 224, 
224), float32] {
 @dnnl_0(%data1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   ```
   
   Is it possible to inline the composite function `%2` there into `dnnl_0`? 
What I want is this:
   ```
   def @dnnl_0(%dnnl_0_i0: Tensor[(1, 3, 224, 224), float32], Inline=1, 
Compiler="dnnl", global_symbol=runtime.String(0x5599b307c370), Primitive=1) -> 
Tensor[(1, 1, 224, 224), float32] {
 %0 = nn.conv2d(%dnnl_0_i0, meta[relay.Constant][0] /* ty=Tensor[(1, 3, 3, 
3), float32] */ /* ty=Tensor[(1, 3, 3, 3), float32] */, padding=[1, 1, 1, 1], 
channels=1, kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 224, 224), float32] */;
 %1 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(1, 1, 1), float32] */ 
/* ty=Tensor[(1, 1, 1), float32] */) /* ty=Tensor[(1, 1, 224, 224), float32] */;
 nn.relu(%1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   
   def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1, 224, 
224), float32] {
 @dnnl_0(%data) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   ```
   
   Otherwise I have to support function calls in the DNNL codegen. @zhiics @mbaret 
   
   UPDATE: hmm, if I inline the composite function, I lose the composite 
attribute and hence cannot detect the fused call. Is supporting function calls in 
the DNNL codegen a better option?
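For anyone trying to reproduce the module dumped above, a minimal sketch of the pass pipeline under discussion (the pattern helper mirrors the conv + bias + relu sketch earlier in this digest; names and shapes are illustrative):

```python
import tvm
from tvm import relay
from tvm.relay import transform

def conv_bias_relu_pattern():
    # illustrative Relay expression template: conv2d -> add(bias) -> relu
    data, weight, bias = relay.var("data"), relay.var("weight"), relay.var("bias")
    return relay.nn.relu(relay.add(relay.nn.conv2d(data, weight), bias))

def partition_for_dnnl(mod):
    # the three passes named in this thread, applied in order (sketch)
    seq = tvm.transform.Sequential([
        transform.MergeComposite([("dnnl.conv_bias_relu", conv_bias_relu_pattern())]),
        transform.AnnotateTarget("dnnl"),
        transform.PartitionGraph(),
    ])
    return seq(mod)
```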


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] masahi edited a comment on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi edited a comment on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610659753
 
 
   Using MergeComposite, AnnotateTarget and PartitionGraph, I get the following 
graph for the conv + bias + relu pattern:
   
   ```
   def @dnnl_0(%dnnl_0_i0: Tensor[(1, 3, 224, 224), float32], Inline=1, 
Compiler="dnnl", global_symbol=runtime.String(0x55a8d9cddbd0), Primitive=1) -> 
Tensor[(1, 1, 224, 224), float32] {
 %2 = fn (%data: Tensor[(1, 3, 224, 224), float32], %weight: Tensor[(1, 3, 
3, 3), float32], %bias: Tensor[(1, 1, 1), float32], 
Composite="dnnl.conv_bias_relu") -> Tensor[(1, 1, 224, 224), float32] {
   %0 = nn.conv2d(%data, %weight, padding=[1, 1, 1, 1], channels=1, 
kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   %1 = add(%0, %bias) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   nn.relu(%1) /* ty=Tensor[(1, 1, 224, 224), float32] */
 };
 %2(%dnnl_0_i0, meta[relay.Constant][0] /* ty=Tensor[(1, 3, 3, 3), float32] 
*/ /* ty=Tensor[(1, 3, 3, 3), float32] */, meta[relay.Constant][1] /* 
ty=Tensor[(1, 1, 1), float32] */ /* ty=Tensor[(1, 1, 1), float32] */) /* 
ty=Tensor[(1, 1, 224, 224), float32] */
   }
   
   def @main(%data1: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1, 224, 
224), float32] {
 @dnnl_0(%data1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   ```
   
   Is it possible to inline the composite function `%2` there into `dnnl_0`? 
What I want is this:
   ```
   def @dnnl_0(%dnnl_0_i0: Tensor[(1, 3, 224, 224), float32], Inline=1, 
Compiler="dnnl", global_symbol=runtime.String(0x5599b307c370), Primitive=1) -> 
Tensor[(1, 1, 224, 224), float32] {
 %0 = nn.conv2d(%dnnl_0_i0, meta[relay.Constant][0] /* ty=Tensor[(1, 3, 3, 
3), float32] */ /* ty=Tensor[(1, 3, 3, 3), float32] */, padding=[1, 1, 1, 1], 
channels=1, kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 224, 224), float32] */;
 %1 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(1, 1, 1), float32] */ 
/* ty=Tensor[(1, 1, 1), float32] */) /* ty=Tensor[(1, 1, 224, 224), float32] */;
 nn.relu(%1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   
   def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1, 224, 
224), float32] {
 @dnnl_0(%data) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   ```
   
   Otherwise I have to support function calls in the DNNL codegen. @zhiics @mbaret 
   
   UPDATE: hmm, if I inline the composite function, I lose the composite 
attribute and hence cannot detect the fused call. Is supporting function calls in 
DNNL a better option?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] masahi commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi commented on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610659753
 
 
   Using MergeComposite, AnnotateTarget and PartitionGraph, I get the following 
graph for the conv + bias + relu pattern:
   
   ```
   def @dnnl_0(%dnnl_0_i0: Tensor[(1, 3, 224, 224), float32], Inline=1, 
Compiler="dnnl", global_symbol=runtime.String(0x55a8d9cddbd0), Primitive=1) -> 
Tensor[(1, 1, 224, 224), float32] {
 %2 = fn (%data: Tensor[(1, 3, 224, 224), float32], %weight: Tensor[(1, 3, 
3, 3), float32], %bias: Tensor[(1, 1, 1), float32], 
Composite="dnnl.conv_bias_relu") -> Tensor[(1, 1, 224, 224), float32] {
   %0 = nn.conv2d(%data, %weight, padding=[1, 1, 1, 1], channels=1, 
kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   %1 = add(%0, %bias) /* ty=Tensor[(1, 1, 224, 224), float32] */;
   nn.relu(%1) /* ty=Tensor[(1, 1, 224, 224), float32] */
 };
 %2(%dnnl_0_i0, meta[relay.Constant][0] /* ty=Tensor[(1, 3, 3, 3), float32] 
*/ /* ty=Tensor[(1, 3, 3, 3), float32] */, meta[relay.Constant][1] /* 
ty=Tensor[(1, 1, 1), float32] */ /* ty=Tensor[(1, 1, 1), float32] */) /* 
ty=Tensor[(1, 1, 224, 224), float32] */
   }
   
   def @main(%data1: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1, 224, 
224), float32] {
 @dnnl_0(%data1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   ```
   
   Is it possible to inline the composite function `%2` there into `dnnl_0`? 
What I want is this:
   ```
   def @dnnl_0(%dnnl_0_i0: Tensor[(1, 3, 224, 224), float32], Inline=1, 
Compiler="dnnl", global_symbol=runtime.String(0x5599b307c370), Primitive=1) -> 
Tensor[(1, 1, 224, 224), float32] {
 %0 = nn.conv2d(%dnnl_0_i0, meta[relay.Constant][0] /* ty=Tensor[(1, 3, 3, 
3), float32] */ /* ty=Tensor[(1, 3, 3, 3), float32] */, padding=[1, 1, 1, 1], 
channels=1, kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 224, 224), float32] */;
 %1 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(1, 1, 1), float32] */ 
/* ty=Tensor[(1, 1, 1), float32] */) /* ty=Tensor[(1, 1, 224, 224), float32] */;
 nn.relu(%1) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   
   def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1, 224, 
224), float32] {
 @dnnl_0(%data) /* ty=Tensor[(1, 1, 224, 224), float32] */
   }
   ```
   
   Otherwise I have to support function calls in the DNNL codegen. @zhiics @mbaret 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated: [LLVM] Use llvm::ElementCount with LLVM 11+ when creating vectors (#5265)

2020-04-07 Thread tqchen
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
 new df8a6f3  [LLVM] Use llvm::ElementCount with LLVM 11+ when creating 
vectors (#5265)
df8a6f3 is described below

commit df8a6f3b85b2ae24560d6cac7587ce6a6f08033c
Author: Krzysztof Parzyszek 
AuthorDate: Tue Apr 7 17:49:07 2020 -0500

[LLVM] Use llvm::ElementCount with LLVM 11+ when creating vectors (#5265)

LLVM 11 added support for scalable vectors, and now the number of
elements in a vector is represented by a llvm::ElementCount class,
not just a number.
---
 src/target/llvm/codegen_llvm.cc | 5 +
 1 file changed, 5 insertions(+)

diff --git a/src/target/llvm/codegen_llvm.cc b/src/target/llvm/codegen_llvm.cc
index bd2cd9f..28f4efd 100644
--- a/src/target/llvm/codegen_llvm.cc
+++ b/src/target/llvm/codegen_llvm.cc
@@ -463,7 +463,12 @@ llvm::Value* CodeGenLLVM::CreateBroadcast(llvm::Value* 
value, int lanes) {
   llvm::VectorType::get(value->getType(), lanes));
   llvm::Constant* zero = ConstInt32(0);
   value = builder_->CreateInsertElement(undef, value, zero);
+#if TVM_LLVM_VERSION >= 110
+  llvm::Constant* mask =
+  llvm::ConstantVector::getSplat(llvm::ElementCount(lanes, 
/*Scalable=*/false), zero);
+#else
   llvm::Constant* mask = llvm::ConstantVector::getSplat(lanes, zero);
+#endif
   return builder_->CreateShuffleVector(value, undef, mask);
 }
 



[GitHub] [incubator-tvm] tqchen merged pull request #5265: [LLVM] Use llvm::ElementCount with LLVM 11+ when creating vectors

2020-04-07 Thread GitBox
tqchen merged pull request #5265: [LLVM] Use llvm::ElementCount with LLVM 11+ 
when creating vectors
URL: https://github.com/apache/incubator-tvm/pull/5265
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated (e11a609 -> 36ce2e2)

2020-04-07 Thread tqchen
This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from e11a609  [uTVM][Runtime] Introduce Virtual Memory Allocator to CRT 
(#5124)
 add 36ce2e2  [LLVM] Use llvm::Align with LLVM 11+ to avoid warnings (#5264)

No new revisions were added by this update.

Summary of changes:
 src/target/llvm/codegen_cpu.cc  | 32 
 src/target/llvm/codegen_llvm.cc | 31 +++
 2 files changed, 59 insertions(+), 4 deletions(-)



[GitHub] [incubator-tvm] tqchen merged pull request #5264: [LLVM] Use llvm::Align with LLVM 11+ to avoid warnings

2020-04-07 Thread GitBox
tqchen merged pull request #5264: [LLVM] Use llvm::Align with LLVM 11+ to avoid 
warnings
URL: https://github.com/apache/incubator-tvm/pull/5264
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tmoreau89 commented on issue #5124: [uTVM][Runtime] Introduce Virtual Memory Allocator to CRT

2020-04-07 Thread GitBox
tmoreau89 commented on issue #5124: [uTVM][Runtime] Introduce Virtual Memory 
Allocator to CRT
URL: https://github.com/apache/incubator-tvm/pull/5124#issuecomment-610632004
 
 
   Thanks @liangfu @tqchen, the PR has been merged. For the CRT CI tests, I 
suggest these get added in a follow-up PR.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated: [uTVM][Runtime] Introduce Virtual Memory Allocator to CRT (#5124)

2020-04-07 Thread moreau
This is an automated email from the ASF dual-hosted git repository.

moreau pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
 new e11a609  [uTVM][Runtime] Introduce Virtual Memory Allocator to CRT 
(#5124)
e11a609 is described below

commit e11a6092e629cadd34af1f48be47817ca9c65fd4
Author: Liangfu Chen 
AuthorDate: Wed Apr 8 05:33:05 2020 +0800

[uTVM][Runtime] Introduce Virtual Memory Allocator to CRT (#5124)

* initial crt_memory and memory leak fix in graph_runtime

Change-Id: I0f79f909a04d1c677aabb80f202f0612c5ce7f2a

* fix memory leak

Change-Id: I37104c09e28112b1974fa2b064c809d0a8d686c3

* clean up

Change-Id: I039b12015a1d56c8f4120867cd5a5292da34f3e3

* implement vrealloc

Change-Id: I35800470bcbfcf96652494f359711cb4c2d34398

* allocate from stack memory for most of the variables

Change-Id: I72071289843fff4031c0df8796868a0b9fbc57ee

* allocate from stack memory for all of the variables

Change-Id: I32dba85ac1660c77f51c2d0d8ab6436ed0c01c74

* lint

Change-Id: If12cd240685d7791fc60bc0cfb66389cdc186b73

* lint

Change-Id: I7c9d90c11b60b8edda2427ebd189ebe535af2100

* facilitate the growth of TVM_CRT_MAX_NDIM

Change-Id: I939fa43027a5c7529c5c7c6bd8d6e6beb91b7581

* extend test coverage of vmalloc

Change-Id: Ie4ff6b64fdfe6810836cf8fd44dace82a20c4581

* lint

Change-Id: Ibf3c06619ef296df5c49f3945cb642881d69

* move logging.h to src

* fix an error in macOS

* remove logging.h

* use cflags for gcc

* fix compilation error
---
 apps/bundle_deploy/Makefile|  16 +-
 apps/bundle_deploy/demo.cc |  10 +-
 apps/bundle_deploy/runtime.c   |  40 ++-
 apps/bundle_deploy/test.cc |  10 +-
 .../module.h => include/tvm/runtime/crt/memory.h   |  46 +--
 src/runtime/crt/crt_backend_api.c  |  12 +-
 src/runtime/crt/graph_runtime.c| 202 +++
 src/runtime/crt/graph_runtime.h|  43 +--
 src/runtime/crt/load_json.c|  24 +-
 src/runtime/crt/logging.h  |  73 
 src/runtime/crt/memory.c   | 393 +
 src/runtime/crt/module.h   |   5 +-
 src/runtime/crt/ndarray.c  |  10 +-
 src/runtime/crt/packed_func.h  |  10 +-
 .../crt/module.h => tests/cpp/crt_memory_test.cc   |  55 +--
 15 files changed, 757 insertions(+), 192 deletions(-)

diff --git a/apps/bundle_deploy/Makefile b/apps/bundle_deploy/Makefile
index c80765f..73f9d75 100644
--- a/apps/bundle_deploy/Makefile
+++ b/apps/bundle_deploy/Makefile
@@ -20,11 +20,11 @@
 # Setup build environment
 TVM_ROOT=$(shell cd ../..; pwd)
 DMLC_CORE=${TVM_ROOT}/3rdparty/dmlc-core
-PKG_CXXFLAGS = -std=c++14 -O2 -fPIC \
+PKG_CXXFLAGS = -Wall -std=c++14 -O2 -fPIC \
-I${TVM_ROOT}/include \
-I${DMLC_CORE}/include \
-I${TVM_ROOT}/3rdparty/dlpack/include
-PKG_CFLAGS = -std=c99 -O2 -fPIC \
+PKG_CFLAGS = -Wall -std=c99 -O2 -fPIC \
-I${TVM_ROOT}/include \
-I${DMLC_CORE}/include \
-I${TVM_ROOT}/3rdparty/dlpack/include
@@ -57,11 +57,11 @@ $(build_dir)/test_dynamic: test.cc 
${build_dir}/test_graph.json ${build_dir}/tes
 
 $(build_dir)/demo_static: demo_static.c ${build_dir}/bundle_static.o 
${build_dir}/model.o ${build_dir}/graph.json.c ${build_dir}/params.bin.c
@mkdir -p $(@D)
-   gcc $(PKG_CXXFLAGS) -o $@ demo_static.c ${build_dir}/bundle_static.o 
${build_dir}/model.o -lm
+   gcc $(PKG_CFLAGS) -o $@ demo_static.c ${build_dir}/bundle_static.o 
${build_dir}/model.o -lm
 
 $(build_dir)/test_static: test_static.c ${build_dir}/bundle_static.o 
${build_dir}/test_model.o
@mkdir -p $(@D)
-   gcc $(PKG_CXXFLAGS) -o $@ $^
+   gcc $(PKG_CFLAGS) -o $@ $^
 
 # Serialize our graph.json file.
 $(build_dir)/graph.json.c: $(build_dir)/graph.json
@@ -71,14 +71,6 @@ $(build_dir)/graph.json.c: $(build_dir)/graph.json
 $(build_dir)/params.bin.c: $(build_dir)/params.bin
xxd -i $^  > $@
 
-# # Serialize our test_graph.json file.
-# $(build_dir)/test_graph.json.c: $(build_dir)/test_graph.json
-#  xxd -i $^  > $@
-# 
-# # Serialize our test_params.bin file.
-# $(build_dir)/test_params.bin.c: $(build_dir)/test_params.bin
-#  xxd -i $^  > $@
-
 $(build_dir)/model.o $(build_dir)/graph.json $(build_dir)/params.bin 
$(build_dir)/cat.bin: build_model.py
python3 $< -o $(build_dir)
 
diff --git a/apps/bundle_deploy/demo.cc b/apps/bundle_deploy/demo.cc
index 34be279..0de10d7 100644
--- a/apps/bundle_deploy/demo.cc
+++ b/apps/bundle_deploy/demo.cc
@@ -109,11 +109,11 @@ int main(int 

[GitHub] [incubator-tvm] tmoreau89 merged pull request #5124: [uTVM][Runtime] Introduce Virtual Memory Allocator to CRT

2020-04-07 Thread GitBox
tmoreau89 merged pull request #5124: [uTVM][Runtime] Introduce Virtual Memory 
Allocator to CRT
URL: https://github.com/apache/incubator-tvm/pull/5124
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] icemelon9 opened a new pull request #5270: [CI] Change MxNet from MKL version to regular CPU version

2020-04-07 Thread GitBox
icemelon9 opened a new pull request #5270: [CI] Change MxNet from MKL version to 
regular CPU version
URL: https://github.com/apache/incubator-tvm/pull/5270
 
 
   Because mxnet-mkl doesn't support AMD cpu. see #5240
   
   cc @tqchen @anijain2305 @shoubhik 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] icemelon9 opened a new pull request #5269: [LINT] Remove scalalint from lint deps

2020-04-07 Thread GitBox
icemelon9 opened a new pull request #5269: [LINT] Remove scalalint from lint 
deps
URL: https://github.com/apache/incubator-tvm/pull/5269
 
 
   Thanks for contributing to TVM! Please refer to the guidelines at 
https://tvm.apache.org/docs/contribute/ for useful information and tips. After 
the pull request is submitted, please request code reviews from 
[Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers)
 by @-ing them in the pull request thread.
   
   cc @tqchen 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] kparzysz-quic opened a new pull request #5268: [LLVM] Include Support/Host.h for declaration of getDefaultTargetTriple

2020-04-07 Thread GitBox
kparzysz-quic opened a new pull request #5268: [LLVM] Include Support/Host.h 
for declaration of getDefaultTargetTriple
URL: https://github.com/apache/incubator-tvm/pull/5268
 
 
   In newer versions of LLVM, this header is no longer included by one of the 
already included headers in `llvm_common.h`, so include it explicitly.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] kparzysz-quic opened a new pull request #5267: [LLVM] Do not use x86_vcvtph2ps_256 intrinsic with LLVM 11+

2020-04-07 Thread GitBox
kparzysz-quic opened a new pull request #5267: [LLVM] Do not use 
x86_vcvtph2ps_256 intrinsic with LLVM 11+
URL: https://github.com/apache/incubator-tvm/pull/5267
 
 
   This intrinsic was removed in LLVM 11.
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen commented on issue #5264: [LLVM] Use llvm::Align with LLVM 11+ to avoid warnings

2020-04-07 Thread GitBox
tqchen commented on issue #5264: [LLVM] Use llvm::Align with LLVM 11+ to avoid 
warnings
URL: https://github.com/apache/incubator-tvm/pull/5264#issuecomment-610607243
 
 
   Thanks @kparzysz-quic 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen commented on a change in pull request #5251: [RUNTIME] Auto conversion from str to runtime::String in PackedFunc

2020-04-07 Thread GitBox
tqchen commented on a change in pull request #5251: [RUNTIME] Auto conversion 
from str to runtime::String in PackedFunc
URL: https://github.com/apache/incubator-tvm/pull/5251#discussion_r405095536
 
 

 ##
 File path: include/tvm/runtime/packed_func.h
 ##
 @@ -554,6 +512,10 @@ class TVMArgValue : public TVMPODValue_ {
   return std::string(value_.v_str);
 }
   }
+  operator tvm::runtime::String() const {
+// directly use the std::string constructor for now.
+return tvm::runtime::String(operator std::string());
 
 Review comment:
   https://github.com/apache/incubator-tvm/pull/5266
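For context, a sketch of the user-visible behavior these fixes concern, exercised from the Python side (the demo.echo name is illustrative):

```python
import tvm

# register a global PackedFunc that takes and returns a string; with the
# auto-conversion under discussion, a Python str argument should arrive on
# the C++ side as tvm::runtime::String rather than a raw char* (sketch)
@tvm.register_func("demo.echo")
def echo(s):
    return s

f = tvm.get_global_func("demo.echo")
assert f("hello") == "hello"
```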


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen opened a new pull request #5266: [RUNTIME] Quick fix PackedFunc String passing

2020-04-07 Thread GitBox
tqchen opened a new pull request #5266: [RUNTIME] Quick fix PackedFunc String 
passing
URL: https://github.com/apache/incubator-tvm/pull/5266
 
 
   cc @zhiics 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] zhiics commented on a change in pull request #5251: [RUNTIME] Auto conversion from str to runtime::String in PackedFunc

2020-04-07 Thread GitBox
zhiics commented on a change in pull request #5251: [RUNTIME] Auto conversion 
from str to runtime::String in PackedFunc
URL: https://github.com/apache/incubator-tvm/pull/5251#discussion_r405092183
 
 

 ##
 File path: include/tvm/runtime/packed_func.h
 ##
 @@ -554,6 +512,10 @@ class TVMArgValue : public TVMPODValue_ {
   return std::string(value_.v_str);
 }
   }
+  operator tvm::runtime::String() const {
+// directly use the std::string constructor for now.
+return tvm::runtime::String(operator std::string());
 
 Review comment:
   so let's just `return AsObjectRef()` for now?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] kparzysz-quic opened a new pull request #5265: [LLVM] Use llvm::ElementCount with LLVM 11+ when creating vectors

2020-04-07 Thread GitBox
kparzysz-quic opened a new pull request #5265: [LLVM] Use llvm::ElementCount 
with LLVM 11+ when creating vectors
URL: https://github.com/apache/incubator-tvm/pull/5265
 
 
   LLVM 11 added support for scalable vectors, and now the number of elements 
in a vector is represented by a `llvm::ElementCount` class, not just a number.
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] kparzysz-quic opened a new pull request #5264: [LLVM] Use llvm::Align with LLVM 11+ to avoid warnings

2020-04-07 Thread GitBox
kparzysz-quic opened a new pull request #5264: [LLVM] Use llvm::Align with LLVM 
11+ to avoid warnings
URL: https://github.com/apache/incubator-tvm/pull/5264
 
 
   LLVM 11 is introducing a separate class to represent alignment. The 
functions in IRBuilder that create aligned loads and stores, and which accept 
the alignment as an unsigned value, have been deprecated (and now cause warnings 
to be emitted).
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen commented on a change in pull request #5251: [RUNTIME] Auto conversion from str to runtime::String in PackedFunc

2020-04-07 Thread GitBox
tqchen commented on a change in pull request #5251: [RUNTIME] Auto conversion 
from str to runtime::String in PackedFunc
URL: https://github.com/apache/incubator-tvm/pull/5251#discussion_r405065312
 
 

 ##
 File path: include/tvm/runtime/packed_func.h
 ##
 @@ -554,6 +512,10 @@ class TVMArgValue : public TVMPODValue_ {
   return std::string(value_.v_str);
 }
   }
+  operator tvm::runtime::String() const {
+// directly use the std::string constructor for now.
+return tvm::runtime::String(operator std::string());
 
 Review comment:
   Ah, I see, good catch. We will need to add a patch that checks if the 
result is kStr and runs this; alternatively, use AsObjectRef.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen commented on a change in pull request #5251: [RUNTIME] Auto conversion from str to runtime::String in PackedFunc

2020-04-07 Thread GitBox
tqchen commented on a change in pull request #5251: [RUNTIME] Auto conversion 
from str to runtime::String in PackedFunc
URL: https://github.com/apache/incubator-tvm/pull/5251#discussion_r405065312
 
 

 ##
 File path: include/tvm/runtime/packed_func.h
 ##
 @@ -554,6 +512,10 @@ class TVMArgValue : public TVMPODValue_ {
   return std::string(value_.v_str);
 }
   }
+  operator tvm::runtime::String() const {
+// directly use the std::string constructor for now.
+return tvm::runtime::String(operator std::string());
 
 Review comment:
   Ah, I see. We will need to add a patch that checks if the result is kStr 
and runs this; alternatively, use AsObjectRef.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] zhiics commented on issue #5250: [REFACTOR] StringImm -> String

2020-04-07 Thread GitBox
zhiics commented on issue #5250: [REFACTOR] StringImm -> String
URL: https://github.com/apache/incubator-tvm/issues/5250#issuecomment-610567541
 
 
   @tqchen I will spend some time on this. I just ran a simple test based on 
#5251, and it looks like I hit a problem. Could you please take a look?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] zhiics edited a comment on issue #5250: [REFACTOR] StringImm -> String

2020-04-07 Thread GitBox
zhiics edited a comment on issue #5250: [REFACTOR] StringImm -> String
URL: https://github.com/apache/incubator-tvm/issues/5250#issuecomment-610567541
 
 
   @tqchen I will spend some time on this. I just ran a simple test based on 
#5251, and it looks like I hit a problem. Could you please take a look at the 
comment there?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] zhiics commented on a change in pull request #5251: [RUNTIME] Auto conversion from str to runtime::String in PackedFunc

2020-04-07 Thread GitBox
zhiics commented on a change in pull request #5251: [RUNTIME] Auto conversion 
from str to runtime::String in PackedFunc
URL: https://github.com/apache/incubator-tvm/pull/5251#discussion_r405046250
 
 

 ##
 File path: include/tvm/runtime/packed_func.h
 ##
 @@ -554,6 +512,10 @@ class TVMArgValue : public TVMPODValue_ {
   return std::string(value_.v_str);
 }
   }
+  operator tvm::runtime::String() const {
+// directly use the std::string constructor for now.
+return tvm::runtime::String(operator std::string());
 
 Review comment:
   @tqchen I found that line 511 above failed the check because the type_code_ 
for String is an object. Should we remove this and pass the String ObjectRef 
directly? Or do we need to handle String through the FFI?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] anijain2305 commented on issue #5241: [Relay][OP] Add fast_erf implementation

2020-04-07 Thread GitBox
anijain2305 commented on issue #5241: [Relay][OP] Add fast_erf implementation
URL: https://github.com/apache/incubator-tvm/pull/5241#issuecomment-610566829
 
 
   Thanks @icemelon9 @tqchen. This is merged.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated: [Relay][OP] Add fast_erf implementation (#5241)

2020-04-07 Thread anijain2305
This is an automated email from the ASF dual-hosted git repository.

anijain2305 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
 new f5b02fd  [Relay][OP] Add fast_erf implementation (#5241)
f5b02fd is described below

commit f5b02fdb1b5a7b6be79df97035ec1c3b80e3c665
Author: Haichen Shen 
AuthorDate: Tue Apr 7 12:05:33 2020 -0700

[Relay][OP] Add fast_erf implementation (#5241)

* add fast erf

* doc

* lint

* fix

* fix indent
---
 include/tvm/target/generic_func.h   |  2 +-
 python/tvm/relay/op/_tensor.py  |  2 +
 src/relay/op/tensor/unary.cc| 11 +
 src/relay/transforms/fast_math.cc   |  4 ++
 src/relay/transforms/pattern_util.h |  5 +++
 tests/python/relay/test_op_fast_math.py |  3 ++
 topi/include/topi/elemwise.h| 73 -
 topi/python/topi/math.py| 16 
 topi/src/elemwise.cc|  5 +++
 topi/tests/python/test_topi_math.py |  9 ++--
 10 files changed, 124 insertions(+), 6 deletions(-)

diff --git a/include/tvm/target/generic_func.h 
b/include/tvm/target/generic_func.h
index 89a7f57..f2a361b3 100644
--- a/include/tvm/target/generic_func.h
+++ b/include/tvm/target/generic_func.h
@@ -72,7 +72,7 @@ class GenericFunc : public ObjectRef {
*
* \code
*   // Example code on how to call generic function
-   *   void CallGeneirc(GenericFunc f) {
+   *   void CallGeneric(GenericFunc f) {
* // call like normal functions by pass in arguments
* // return value is automatically converted back
* int rvalue = f(1, 2.0);
diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py
index f24da05..a607a47 100644
--- a/python/tvm/relay/op/_tensor.py
+++ b/python/tvm/relay/op/_tensor.py
@@ -76,6 +76,7 @@ register_injective_schedule("shape_of")
 register_injective_schedule("ndarray_size")
 register_broadcast_schedule("fast_exp")
 register_broadcast_schedule("fast_tanh")
+register_broadcast_schedule("fast_erf")
 
 
 # zeros
@@ -222,3 +223,4 @@ register_shape_func("exp", False, elemwise_shape_func)
 register_shape_func("tan", False, elemwise_shape_func)
 register_shape_func("fast_exp", False, elemwise_shape_func)
 register_shape_func("fast_tanh", False, elemwise_shape_func)
+register_shape_func("fast_erf", False, elemwise_shape_func)
diff --git a/src/relay/op/tensor/unary.cc b/src/relay/op/tensor/unary.cc
index 3da77e9..4cca8b0 100644
--- a/src/relay/op/tensor/unary.cc
+++ b/src/relay/op/tensor/unary.cc
@@ -128,6 +128,17 @@ RELAY_REGISTER_UNARY_OP("erf")
 .set_attr("FTVMCompute", RELAY_UNARY_COMPUTE(topi::erf));
 
 
+RELAY_REGISTER_UNARY_OP("fast_erf")
+.describe(R"code(Returns the error function value for input array, computed 
element-wise.
+
+.. math::
+   \fast_erf(x)
+
+)code" TVM_ADD_FILELINE)
+.set_support_level(1)
+.set_attr("FTVMCompute", RELAY_UNARY_COMPUTE(topi::fast_erf));
+
+
 RELAY_REGISTER_UNARY_OP("sqrt")
 .describe(R"code(Returns the sqrt input array, computed element-wise.
 
diff --git a/src/relay/transforms/fast_math.cc 
b/src/relay/transforms/fast_math.cc
index 861566f..cf00a89 100644
--- a/src/relay/transforms/fast_math.cc
+++ b/src/relay/transforms/fast_math.cc
@@ -35,11 +35,14 @@ class FastMathMutator : public ExprRewriter {
  public:
   FastMathMutator()
   : exp_op_(Op::Get("exp")),
+erf_op_(Op::Get("erf")),
 tanh_op_(Op::Get("tanh")) {}
 
   Expr Rewrite_(const CallNode* pre, const Expr& post) override {
 if (pre->op == exp_op_) {
   return FastExp(post.as<CallNode>()->args[0]);
+} else if (pre->op == erf_op_) {
+  return FastErf(post.as<CallNode>()->args[0]);
 } else if (pre->op == tanh_op_) {
   return FastTanh(post.as<CallNode>()->args[0]);
 }
@@ -51,6 +54,7 @@ class FastMathMutator : public ExprRewriter {
   // operator equivalence checking so that the registry lookup overhead can be
   // reduced.
   const Op& exp_op_;
+  const Op& erf_op_;
   const Op& tanh_op_;
 };
 
diff --git a/src/relay/transforms/pattern_util.h 
b/src/relay/transforms/pattern_util.h
index 350d9e1..cd2af9f 100644
--- a/src/relay/transforms/pattern_util.h
+++ b/src/relay/transforms/pattern_util.h
@@ -322,6 +322,11 @@ inline Expr FastExp(Expr e) {
   return Call(op, {e});
 }
 
+inline Expr FastErf(Expr e) {
+  static const Op& op = Op::Get("fast_erf");
+  return Call(op, {e});
+}
+
 inline Expr FastTanh(Expr e) {
   static const Op& op = Op::Get("fast_tanh");
   return Call(op, {e});
diff --git a/tests/python/relay/test_op_fast_math.py 
b/tests/python/relay/test_op_fast_math.py
index 1d661c3..215b83e 100644
--- a/tests/python/relay/test_op_fast_math.py
+++ b/tests/python/relay/test_op_fast_math.py
@@ -15,6 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 import numpy as np
+import scipy
+from scipy import special
 import tvm
 

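The commit above wires fast_erf into the FastMath rewrite. A minimal sketch of invoking that pass from Python (shapes and the printout are illustrative):

```python
import tvm
from tvm import relay

# build a tiny function that calls erf, then let the FastMath pass from
# #5241 rewrite it to the fast_erf approximation (illustrative sketch)
x = relay.var("x", shape=(8,), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.erf(x)))
fast_mod = relay.transform.FastMath()(mod)
print(fast_mod)  # erf should now appear as fast_erf
```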
[GitHub] [incubator-tvm] anijain2305 merged pull request #5241: [Relay][OP] Add fast_erf implementation

2020-04-07 Thread GitBox
anijain2305 merged pull request #5241: [Relay][OP] Add fast_erf implementation
URL: https://github.com/apache/incubator-tvm/pull/5241
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] icemelon9 commented on issue #5241: [Relay][OP] Add fast_erf implementation

2020-04-07 Thread GitBox
icemelon9 commented on issue #5241: [Relay][OP] Add fast_erf implementation
URL: https://github.com/apache/incubator-tvm/pull/5241#issuecomment-610549195
 
 
   @anijain2305 CI is now green. Could you take another look?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] masahi commented on issue #5262: [RELAY][BYOC] Register pattern tables from external codegens

2020-04-07 Thread GitBox
masahi commented on issue #5262: [RELAY][BYOC] Register pattern tables from 
external codegens
URL: https://github.com/apache/incubator-tvm/pull/5262#issuecomment-610518146
 
 
   I'll revive my old PR https://github.com/apache/incubator-tvm/pull/4741, 
which should become the first use of composite functions in the code base.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated (7902f76 -> 869b718)

2020-04-07 Thread kevinthesun
This is an automated email from the ASF dual-hosted git repository.

kevinthesun pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git.


from 7902f76  Fixed typo and type mismatch (#5259)
 add 869b718  [TIR] Fix perf regression of tir refactor (#5258)

No new revisions were added by this update.

Summary of changes:
 python/tvm/driver/build_module.py   | 2 +-
 python/tvm/testing.py   | 2 +-
 src/driver/driver_api.cc| 2 +-
 tests/python/unittest/test_target_codegen_static_init.py| 2 +-
 tests/python/unittest/test_target_codegen_vm_basic.py   | 2 +-
 tests/python/unittest/test_tir_transform_make_packed_api.py | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)



[GitHub] [incubator-tvm] kevinthesun merged pull request #5258: [TIR] Fix perf regression of tir refactor

2020-04-07 Thread GitBox
kevinthesun merged pull request #5258: [TIR] Fix perf regression of tir refactor
URL: https://github.com/apache/incubator-tvm/pull/5258
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] alexbooth commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
alexbooth commented on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610509436
 
 
   Partitioning looks good to me with a simple pattern, conv2d_bias_relu. 
However, I'm seeing the check in IsOp fail for composite functions. Maybe that 
is one for another PR.
   
   ```c++
  bool IsOp(const CallNode* call, const std::string& op_name) const {
    const auto* op_node = call->op.as<OpNode>();
    CHECK(op_node) << "Expects a single op.";
    Op op = GetRef<Op>(op_node);
    return op == Op::Get(op_name);
  }
   ```
   
https://github.com/mbaret/tvm/blob/a197bca20a8effc48d06388190aa1b5f3b525ef6/src/relay/backend/contrib/codegen_c/codegen_c.h#L191-L205
   
   ```TVMError: Check failed: op_node: Expects a single op.```
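   
   A minimal composite-aware variant (a sketch under the assumption that the 
failure comes from the callee being a FunctionNode instead of an OpNode; the 
name IsOpOrComposite and the FunctionNode guard below are illustrative, not 
from this PR):
   
   ```c++
  bool IsOpOrComposite(const CallNode* call, const std::string& op_name) const {
    // A composite call's op is a FunctionNode, so the OpNode cast below
    // would return nullptr and trip the CHECK.
    if (call->op.as<FunctionNode>()) {
      return false;  // let callers dispatch composite callees separately
    }
    const auto* op_node = call->op.as<OpNode>();
    CHECK(op_node) << "Expects a single op.";
    return GetRef<Op>(op_node) == Op::Get(op_name);
  }
   ```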


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] siju-samuel opened a new pull request #5263: [PYTORCH]celu, gelu, selu activations

2020-04-07 Thread GitBox
siju-samuel opened a new pull request #5263: [PYTORCH]celu, gelu, selu 
activations
URL: https://github.com/apache/incubator-tvm/pull/5263
 
 
   @masahi please help to review these PyTorch activations. Thanks in advance.
   - celu
   - gelu
   - selu
   - some datatype conversion issues also fixed
   - test case coverage added
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] masahi commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
masahi commented on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610495383
 
 
   cc @soiferj 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] wpan11nv commented on a change in pull request #5226: [CODEGEN][CUDA] Fix vector load

2020-04-07 Thread GitBox
wpan11nv commented on a change in pull request #5226: [CODEGEN][CUDA] Fix 
vector load
URL: https://github.com/apache/incubator-tvm/pull/5226#discussion_r404948586
 
 

 ##
 File path: src/target/source/codegen_cuda.cc
 ##
 @@ -591,13 +591,17 @@ void CodeGenCUDA::VisitExpr_(const RampNode* op, std::ostream& os) {
 }
 
 void CodeGenCUDA::VisitExpr_(const BroadcastNode* op, std::ostream& os) {   // NOLINT(*)
-  if (op->dtype.is_int() && op->dtype.bits() == 8 && op->lanes == 4) {
+  if ((op->dtype.is_int() || op->dtype.is_uint()) && op->dtype.bits() == 8 && op->lanes == 4) {
     // make_int8x4
     const int64_t *p = as_const_int(op->value);
     CHECK(p);
     int64_t v = *p & 0xFF;
     v = (v << 24) | (v << 16) | (v << 8) | v;
-    os << "(int)" << v;
+    if (op->dtype.is_uint()) {
 
 Review comment:
   Why do we care about the signedness? This just downcasts to 32 bits.
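   
   To illustrate (a standalone sketch, not code from this PR): after the & 0xFF 
mask, the replicated 32-bit pattern is identical either way; signedness only 
changes how the packed value is rendered as a decimal.
   
   ```c++
#include <cinttypes>
#include <cstdio>

int main() {
  int64_t p = -1;                            // example 8-bit broadcast value
  int64_t v = p & 0xFF;                      // keep only the low byte: 0xFF
  v = (v << 24) | (v << 16) | (v << 8) | v;  // replicate into 32 bits: 0xFFFFFFFF
  // Same 32-bit pattern; only the decimal rendering differs
  // (the int32_t cast assumes the usual two's complement wrap).
  std::printf("int: %" PRId32 "  uint: %" PRIu32 "\n",
              static_cast<int32_t>(v), static_cast<uint32_t>(v));
  return 0;
}
   ```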


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen closed issue #5260: tvm 0.7 only work in python3.6

2020-04-07 Thread GitBox
tqchen closed issue #5260: tvm 0.7 only work in python3.6
URL: https://github.com/apache/incubator-tvm/issues/5260
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] tqchen commented on issue #5260: tvm 0.7 only work in python3.6

2020-04-07 Thread GitBox
tqchen commented on issue #5260: tvm 0.7 only work in python3.6
URL: https://github.com/apache/incubator-tvm/issues/5260#issuecomment-610468392
 
 
   This is expected behavior, as we will start to require python3.6 in 0.7. 
For other questions, please open a new thread on https://discuss.tvm.ai/


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-tvm] branch master updated: Fixed typo and type mismatch (#5259)

2020-04-07 Thread zhic
This is an automated email from the ASF dual-hosted git repository.

zhic pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
 new 7902f76  Fixed typo and type mismatch (#5259)
7902f76 is described below

commit 7902f7627fb364abc9c5913ab1838872d0dc1d7d
Author: Adrian Muresan 
AuthorDate: Tue Apr 7 17:21:23 2020 +0200

Fixed typo and type mismatch (#5259)

Co-authored-by: Adrian Muresan 
---
 tutorials/dev/relay_pass_infra.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tutorials/dev/relay_pass_infra.py 
b/tutorials/dev/relay_pass_infra.py
index b54ac13..6b844ff 100644
--- a/tutorials/dev/relay_pass_infra.py
+++ b/tutorials/dev/relay_pass_infra.py
@@ -216,13 +216,13 @@ class CustomPipeline:
         obj = self
 
         class ReplaceConstant(tvm.relay.ExprMutator):
-            def visit_const(self, c):
+            def visit_constant(self, c):
                 return relay.multiply(obj.multiplier, c)
         return ReplaceConstant().visit(func)
 
 f = example()
 mod = tvm.IRModule.from_expr(f)
-custom_pass = CustomPipeline(multiplier=relay.const(3, "float"))
+custom_pass = CustomPipeline(multiplier=relay.const(3, "float32"))
 assert custom_pass.info.name == "CustomPipeline"
 mod3 = custom_pass(mod)
 print(mod3)



[GitHub] [incubator-tvm] zhiics merged pull request #5259: [Relay][Tutorial][Fix] Fixed typo and type mismatch in relay infrastructure tutorial

2020-04-07 Thread GitBox
zhiics merged pull request #5259: [Relay][Tutorial][Fix] Fixed typo and type 
mismatch in relay infrastructure tutorial
URL: https://github.com/apache/incubator-tvm/pull/5259
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] kparzysz-quic commented on a change in pull request #5252: [RUNTIME] Initial implementation of Hexagon runtime support

2020-04-07 Thread GitBox
kparzysz-quic commented on a change in pull request #5252: [RUNTIME] Initial 
implementation of Hexagon runtime support
URL: https://github.com/apache/incubator-tvm/pull/5252#discussion_r404894728
 
 

 ##
 File path: src/runtime/hexagon/target/hexagon_device_target.cc
 ##
 @@ -0,0 +1,525 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifdef __ANDROID__
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <utility>
+
+#include "../hexagon_module.h"
+#include "AEEStdErr.h"
+#include "fastrpc/tvm_hexagon_remote.h"
+#include "hexagon_dsprpcapi.h"
+#include "hexagon_stubapi.h"
+#include "hexagon_target_log.h"
+#include "remote64.h"
+#include "rpcmem.h"
+
+#pragma weak remote_session_control
+
+#define RPCMEM_HEAP 25
+
+// All log messages start with "HexagonTarget::%s", where %s is replaced
+// with the function name, so create macros that add that to avoid repetition.
+// The downside is that the format string must be given as a string literal,
+// but it seems to be a minor issue.
+#define VA_EXPANDER(...) , ##__VA_ARGS__
+#define TVM_LOGD_HT(fmt, ...) \
+  TVM_LOGD("HexagonTarget::%s: " fmt, __func__ VA_EXPANDER(__VA_ARGS__))
+#define TVM_LOGE_HT(fmt, ...) \
+  TVM_LOGE("HexagonTarget::%s: " fmt, __func__ VA_EXPANDER(__VA_ARGS__))
+
+namespace tvm {
+namespace runtime {
+namespace hexagon {
+
+static constexpr int kStackSize = 128 * 1024;  // 128kB stack
+
+class HexagonTarget : public tvm::runtime::hexagon::Device {
+ public:
+  HexagonTarget() {}
+  ~HexagonTarget() final {}
+  void* Alloc(unsigned size, unsigned align) final;
+  void Free(void* ptr) final;
+  void* AllocVtcm(unsigned size, unsigned align) final;
+  void FreeVtcm(void* ptr) final;
+  void CopyDeviceToDevice(void* dst, const void* src, unsigned len) final;
+  void CopyDeviceToHost(void* host_dst, const void* src, unsigned len) final;
+  void CopyHostToDevice(void* dst, const void* host_src, unsigned len) final;
+  void* Load(const std::string& data, const std::string& fmt) final;
+  void Unload(void* mod) final;
+  void* Resolve(const std::string& sym) final;
+  void Call(void* func, uint32_t* scalar, unsigned scalar_num, uint32_t* stack,
+            unsigned stack_num) final;
+
+ private:
+  std::pair<void*, size_t> AddAddrMapping(const void* dsp_addr,
+                                          void* apps_addr, size_t size);
+  std::pair<void*, size_t> GetAppsAddr(const void* dsp_addr, bool exact) const;
+  void RemoveAddrMapping(const void* dsp_addr);
+  int OpenDomainChannel(bool set_unsigned_pd);
+  int CloseDomainChannel();
+  void ReleaseLibrary();
+  void FreeMemoryBeforeChannelClose();
+
+  // Mapping from a DSP address to a pair <apps address, size>.
+  // Using void* pointers is ok, since DSP pointers will always fit
+  // in apps's pointers, i.e. sizeof_dsp(void*) <= sizeof_apps(void*).
+  std::map<const void*, std::pair<void*, size_t>> dsp_to_apps_;
+  std::map<const void*, std::pair<void*, size_t>> vtcm_addr_;
+  remote_handle64 domain_channel_handle_ = AEE_EUNKNOWN;
+  tvm_hexagon_remote_handle_t module_pointer_ = AEE_EUNKNOWN;
+  uint64_t count_channel_open_ = 0;
+  // Global lock, used for all critical sections. This can be refined
+  // in the future.
+  mutable std::mutex crit_section_;
+};
+
+std::shared_ptr<Device> CreateHexagonTarget() {
+  return std::make_shared<HexagonTarget>();
+}
+
+std::pair<void*, size_t> HexagonTarget::AddAddrMapping(const void* dsp_addr,
+                                                       void* apps_addr,
+                                                       size_t size) {
+  crit_section_.lock();
+  auto p = dsp_to_apps_.insert({dsp_addr, {apps_addr, size}});
+  crit_section_.unlock();
+  if (!p.second) {
+    TVM_LOGE_HT(
+        "failed to insert address mapping: dsp:%p -> apps:%p, size:%zu",
+        dsp_addr, apps_addr, size);
+    return std::make_pair(nullptr, 0);
+  }
+  TVM_LOGD_HT("added address mapping: dsp:%p -> apps:%p, size:%zu", dsp_addr,
+              apps_addr, size);
+  return p.first->second;
+}
+
+void HexagonTarget::RemoveAddrMapping(const void* dsp_addr) {
+  crit_section_.lock();
+  auto f = dsp_to_apps_.find(dsp_addr);
+  if (f == dsp_to_apps_.end()) {
+TVM_LOGE_HT("failed to remove address mapping for dsp:%p", dsp_addr);
+crit_section_.unlock();
+return;
+  }
+  dsp_to_apps_.erase(f);
+  crit_section_.unlock();
+}
+
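
A possible refactor of the locking pattern quoted above (a sketch, not code 
from this PR): std::lock_guard releases the mutex on every exit path, so the 
manual crit_section_.unlock() calls before each early return are no longer 
needed.

```c++
void HexagonTarget::RemoveAddrMapping(const void* dsp_addr) {
  // Scope-based lock: unlocks crit_section_ automatically on all returns.
  std::lock_guard<std::mutex> lock(crit_section_);
  auto f = dsp_to_apps_.find(dsp_addr);
  if (f == dsp_to_apps_.end()) {
    TVM_LOGE_HT("failed to remove address mapping for dsp:%p", dsp_addr);
    return;
  }
  dsp_to_apps_.erase(f);
}
```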

[GitHub] [incubator-tvm] FrozenGene commented on a change in pull request #5252: [RUNTIME] Initial implementation of Hexagon runtime support

2020-04-07 Thread GitBox
FrozenGene commented on a change in pull request #5252: [RUNTIME] Initial 
implementation of Hexagon runtime support
URL: https://github.com/apache/incubator-tvm/pull/5252#discussion_r404855401
 
 

[GitHub] [incubator-tvm] mbaret commented on issue #5261: [RELAY][BYOC] Add support for composite functions in BYOC

2020-04-07 Thread GitBox
mbaret commented on issue #5261: [RELAY][BYOC] Add support for composite 
functions in BYOC
URL: https://github.com/apache/incubator-tvm/pull/5261#issuecomment-610392585
 
 
   cc @masahi @zhiics @trevor-m 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] mbaret commented on issue #5262: [RELAY][BYOC] Register pattern tables from external codegens

2020-04-07 Thread GitBox
mbaret commented on issue #5262: [RELAY][BYOC] Register pattern tables from 
external codegens
URL: https://github.com/apache/incubator-tvm/pull/5262#issuecomment-610392358
 
 
   cc @masahi @zhiics @trevor-m 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] kparzysz-quic commented on issue #5252: [RUNTIME] Initial implementation of Hexagon runtime support

2020-04-07 Thread GitBox
kparzysz-quic commented on issue #5252: [RUNTIME] Initial implementation of 
Hexagon runtime support
URL: https://github.com/apache/incubator-tvm/pull/5252#issuecomment-610391857
 
 
   The IDL file and the implementation of the Hexagon-side target code will be 
upstreamed in an upcoming PR.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-tvm] kparzysz-quic commented on a change in pull request #5252: [RUNTIME] Initial implementation of Hexagon runtime support

2020-04-07 Thread GitBox
kparzysz-quic commented on a change in pull request #5252: [RUNTIME] Initial 
implementation of Hexagon runtime support
URL: https://github.com/apache/incubator-tvm/pull/5252#discussion_r404814243
 
 

[GitHub] [incubator-tvm] kparzysz-quic commented on a change in pull request #5252: [RUNTIME] Initial implementation of Hexagon runtime support

2020-04-07 Thread GitBox
kparzysz-quic commented on a change in pull request #5252: [RUNTIME] Initial 
implementation of Hexagon runtime support
URL: https://github.com/apache/incubator-tvm/pull/5252#discussion_r404783723
 
 

  1   2   >