[GitHub] [incubator-tvm] junrushao1994 commented on a change in pull request #5601: [DataType] Add bfloat16

2020-06-18 Thread GitBox


junrushao1994 commented on a change in pull request #5601:
URL: https://github.com/apache/incubator-tvm/pull/5601#discussion_r442561558



##
File path: include/tvm/runtime/data_type.h
##
@@ -72,6 +73,9 @@ class DataType {
 data_.code = static_cast(code);
 data_.bits = static_cast(bits);
 data_.lanes = static_cast(lanes);
+if (code == kBFloat) {
+  CHECK_EQ(bits, 16);

Review comment:
   @tqchen This is just a nitpick. What do you think?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org




[GitHub] [incubator-tvm] junrushao1994 commented on a change in pull request #5601: [DataType] Add bfloat16

2020-06-16 Thread GitBox


junrushao1994 commented on a change in pull request #5601:
URL: https://github.com/apache/incubator-tvm/pull/5601#discussion_r441191343



##
File path: include/tvm/runtime/data_type.h
##
@@ -72,6 +73,9 @@ class DataType {
 data_.code = static_cast(code);
 data_.bits = static_cast(bits);
 data_.lanes = static_cast(lanes);
+if (code == kBFloat) {
+  CHECK_EQ(bits, 16);

Review comment:
   It is understandable that right now we only support bf16, but my concern 
is: should we put the check here?

##
File path: include/tvm/runtime/data_type.h
##
@@ -372,7 +372,7 @@ inline DLDataType String2DLDataType(std::string s) {
 t.lanes = 1;
 return t;
   } else if (s.substr(0, 6) == "bfloat") {
-t.code = kTVMBFloat;
+t.code = kDLBfloat;

Review comment:
   I agree with tq

##
File path: python/tvm/_ffi/_cython/base.pxi
##
@@ -27,7 +27,7 @@ cdef enum TVMTypeCode:
 kUInt = 1
 kFloat = 2
 kTVMOpaqueHandle = 3
-kTVMNullptr = 4
+kBFloat = 4

Review comment:
   shall we remove this?

##
File path: python/tvm/_ffi/runtime_ctypes.py
##
@@ -96,6 +98,9 @@ def __init__(self, type_str):
 self.type_code = DataTypeCode.HANDLE
 bits = 64
 head = ""
+elif head.startswith("bfloat"):
+self.type_code = 4

Review comment:
   not sure if it is good to hard code here





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org




[GitHub] [incubator-tvm] junrushao1994 commented on a change in pull request #5601: [DataType] Add bfloat16

2020-06-13 Thread GitBox


junrushao1994 commented on a change in pull request #5601:
URL: https://github.com/apache/incubator-tvm/pull/5601#discussion_r439716507



##
File path: tests/python/unittest/test_tir_transform_bf16_legalize.py
##
@@ -0,0 +1,152 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import tvm
+import topi
+from tvm import te
+from tvm.tir import const
+
+
+def lower_stmt(sche, params, passfunc):
+func = tvm.driver.build_module.form_irmodule(sche, params, "main", 
None)["main"]
+func = passfunc()(
+tvm.IRModule.from_expr(func))["main"]
+stmt = func.body
+return stmt
+
+
+def test_promote():
+def runpass(op, passfunc):
+a = te.placeholder((100,), dtype='bfloat16')
+b = te.placeholder((100,), dtype='bfloat16')
+c = te.compute((100,), lambda i: op(a[i], b[i]))
+s = te.create_schedule(c.op)
+return lower_stmt(s, [a, b, c], passfunc)
+
+def get_promoted(op):
+a = te.placeholder((100,), dtype='bfloat16')
+b = te.placeholder((100,), dtype='bfloat16')
+c = te.compute((100,), lambda i:
+topi.cast(op(topi.cast(a[i],'float'),
+topi.cast(b[i],'float')), 'bfloat16')
+)
+s = te.create_schedule(c.op)
+func = tvm.driver.build_module.form_irmodule(s, [a,b,c], "main", 
None)["main"]
+return func.body
+
+def test_promoted(op):
+stmt = runpass(op, tvm.tir.transform.BF16Promote)
+tvm.ir.assert_structural_equal(stmt, get_promoted(op))
+test_promoted(topi.add)
+test_promoted(topi.subtract)
+test_promoted(topi.multiply)
+test_promoted(topi.divide)
+
+def test_eliminate():
+def to32(v):
+return topi.cast(v, 'float')
+def to16(v):
+return topi.cast(v, 'bfloat16')
+def get_eliminated():
+a = te.placeholder((100,), dtype='bfloat16')
+b = te.placeholder((100,), dtype='bfloat16')
+c = te.compute((100,), lambda i: to16(
+topi.add(
+to32(
+to16(
+topi.add(
+to32(a[i]),
+to32(b[i]),
+)
+)
+),
+to32(
+to16(
+topi.add(
+to32(a[i]),
+to32(b[i]),
+)
+)
+)
+)
+))
+s = te.create_schedule(c.op)
+stmt = lower_stmt(s, [a, b, c], tvm.tir.transform.BF16CastElimination)
+return stmt
+
+def get_target():
+a = te.placeholder((100,), dtype='bfloat16')
+b = te.placeholder((100,), dtype='bfloat16')
+c = te.compute((100,), lambda i: to16(
+topi.add(topi.add(
+to32(a[i]),
+to32(b[i]),
+),
+topi.add(
+to32(a[i]),
+to32(b[i]),
+)
+)
+))
+s = te.create_schedule(c.op)
+func = tvm.driver.build_module.form_irmodule(s, [a,b,c], "main", 
None)["main"]
+return func.body
+tvm.ir.assert_structural_equal(get_eliminated(), get_target())
+
+def test_legalize():
+def to32(v):
+uint32_v = topi.cast(v, "uint32")
+uint32_v = tvm.tir.call_pure_intrin("uint32", "shift_left", uint32_v, 
tvm.tir.const(16, "uint32"))
+return tvm.tir.call_pure_intrin("float32", "reinterpret", uint32_v)
+def to16(v):
+uint32_v = tvm.tir.call_pure_intrin("uint32", "reinterpret", v)
+rounding_bias = tvm.tir.call_pure_intrin("uint32", "shift_right", 
uint32_v, tvm.tir.const(16, "uint32"))
+rounding_bias = tvm.tir.call_pure_intrin("uint32", "bitwise_and", 
rounding_bias, tvm.tir.const(1, "uint32"))
+rounding_bias = rounding_bias + tvm.tir.const(0x7FFF, "uint16")
+uint32_v = uint32_v + rounding_bias
+uint32_v = tvm.tir.call_pure_intrin("uint32", "shift_right", uint32_v, 
tvm.tir.const(16, "uint32"))
+return topi.cast(uint32_v, 'uint16')
+
+def 

[GitHub] [incubator-tvm] junrushao1994 commented on a change in pull request #5601: [DataType] Add bfloat16

2020-06-12 Thread GitBox


junrushao1994 commented on a change in pull request #5601:
URL: https://github.com/apache/incubator-tvm/pull/5601#discussion_r439551002



##
File path: tests/python/unittest/test_tir_transform_bf16_legalize.py
##
@@ -0,0 +1,152 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import tvm
+import topi
+from tvm import te
+from tvm.tir import const
+
+
+def lower_stmt(sche, params, passfunc):
+func = tvm.driver.build_module.form_irmodule(sche, params, "main", 
None)["main"]
+func = passfunc()(
+tvm.IRModule.from_expr(func))["main"]
+stmt = func.body
+return stmt
+
+
+def test_promote():
+def runpass(op, passfunc):
+a = te.placeholder((100,), dtype='bfloat16')
+b = te.placeholder((100,), dtype='bfloat16')
+c = te.compute((100,), lambda i: op(a[i], b[i]))
+s = te.create_schedule(c.op)
+return lower_stmt(s, [a, b, c], passfunc)
+
+def get_promoted(op):
+a = te.placeholder((100,), dtype='bfloat16')
+b = te.placeholder((100,), dtype='bfloat16')
+c = te.compute((100,), lambda i:
+topi.cast(op(topi.cast(a[i],'float'),
+topi.cast(b[i],'float')), 'bfloat16')
+)
+s = te.create_schedule(c.op)
+func = tvm.driver.build_module.form_irmodule(s, [a,b,c], "main", 
None)["main"]
+return func.body
+
+def test_promoted(op):
+stmt = runpass(op, tvm.tir.transform.BF16Promote)
+tvm.ir.assert_structural_equal(stmt, get_promoted(op))
+test_promoted(topi.add)
+test_promoted(topi.subtract)
+test_promoted(topi.multiply)
+test_promoted(topi.divide)
+
+def test_eliminate():
+def to32(v):
+return topi.cast(v, 'float')
+def to16(v):
+return topi.cast(v, 'bfloat16')
+def get_eliminated():
+a = te.placeholder((100,), dtype='bfloat16')
+b = te.placeholder((100,), dtype='bfloat16')
+c = te.compute((100,), lambda i: to16(
+topi.add(
+to32(
+to16(
+topi.add(
+to32(a[i]),
+to32(b[i]),
+)
+)
+),
+to32(
+to16(
+topi.add(
+to32(a[i]),
+to32(b[i]),
+)
+)
+)
+)
+))
+s = te.create_schedule(c.op)
+stmt = lower_stmt(s, [a, b, c], tvm.tir.transform.BF16CastElimination)
+return stmt
+
+def get_target():
+a = te.placeholder((100,), dtype='bfloat16')
+b = te.placeholder((100,), dtype='bfloat16')
+c = te.compute((100,), lambda i: to16(
+topi.add(topi.add(
+to32(a[i]),
+to32(b[i]),
+),
+topi.add(
+to32(a[i]),
+to32(b[i]),
+)
+)
+))
+s = te.create_schedule(c.op)
+func = tvm.driver.build_module.form_irmodule(s, [a,b,c], "main", 
None)["main"]
+return func.body
+tvm.ir.assert_structural_equal(get_eliminated(), get_target())
+
+def test_legalize():
+def to32(v):
+uint32_v = topi.cast(v, "uint32")
+uint32_v = tvm.tir.call_pure_intrin("uint32", "shift_left", uint32_v, 
tvm.tir.const(16, "uint32"))
+return tvm.tir.call_pure_intrin("float32", "reinterpret", uint32_v)
+def to16(v):
+uint32_v = tvm.tir.call_pure_intrin("uint32", "reinterpret", v)
+rounding_bias = tvm.tir.call_pure_intrin("uint32", "shift_right", 
uint32_v, tvm.tir.const(16, "uint32"))
+rounding_bias = tvm.tir.call_pure_intrin("uint32", "bitwise_and", 
rounding_bias, tvm.tir.const(1, "uint32"))
+rounding_bias = rounding_bias + tvm.tir.const(0x7FFF, "uint16")
+uint32_v = uint32_v + rounding_bias
+uint32_v = tvm.tir.call_pure_intrin("uint32", "shift_right", uint32_v, 
tvm.tir.const(16, "uint32"))
+return topi.cast(uint32_v, 'uint16')
+
+def