This is an automated email from the ASF dual-hosted git repository.
tqchen pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/tvm-site.git
The following commit(s) were added to refs/heads/asf-site by this push:
new 237801fbef Build at Mon Jan 27 10:32:43 EST 2025
237801fbef is described below
commit 237801fbef5abe970f802a28c6c16cd11f1ba626
Author: tqchen <[email protected]>
AuthorDate: Mon Jan 27 10:32:43 2025 -0500
Build at Mon Jan 27 10:32:43 EST 2025
---
2017/08/17/tvm-release-announcement.html | 3 +-
...s-with-TVM-A-Depthwise-Convolution-Example.html | 3 +-
2017/10/06/nnvm-compiler-announcement.html | 3 +-
...s-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html | 3 +-
2017/11/08/android-rpc-introduction.html | 3 +-
2018/01/16/opt-mali-gpu.html | 3 +-
2018/03/12/webgl.html | 3 +-
2018/03/23/nmt-transformer-optimize.html | 3 +-
2018/07/12/vta-release-announcement.html | 3 +-
2018/08/10/DLPack-Bridge.html | 3 +-
2018/10/03/auto-opt-all.html | 3 +-
2018/10/09/ml-in-tees.html | 3 +-
2018/12/18/lowprecision-conv.html | 3 +-
2019/01/19/Golang.html | 3 +-
2019/03/18/tvm-apache-announcement.html | 3 +-
2019/04/29/opt-cuda-quantized.html | 3 +-
2019/05/30/pytorch-frontend.html | 3 +-
...machine-learning-to-webassembly-and-webgpu.html | 3 +-
2020/06/04/tinyml-how-tvm-is-taming-tiny.html | 3 +-
2020/07/14/bert-pytorch-tvm.html | 3 +-
.../15/how-to-bring-your-own-codegen-to-tvm.html | 3 +-
2020/09/26/bring-your-own-datatypes.html | 3 +-
2021/03/03/intro-auto-scheduler.html | 3 +-
2021/12/15/tvm-unity.html | 3 +-
404.html | 1 -
asf.html | 1 -
atom.xml | 42 ++++++++++-----------
blog.html | 1 -
categories.html | 1 -
community.html | 1 -
download.html | 1 -
feed.xml | 20 +++++-----
history.html | 1 -
index.html | 1 -
rss.xml | 44 +++++++++++-----------
scripts/task_deploy_asf_site.zsh | 25 ++++++++++++
tags.html | 1 -
tvm | 1 +
38 files changed, 103 insertions(+), 110 deletions(-)
diff --git a/2017/08/17/tvm-release-announcement.html b/2017/08/17/tvm-release-announcement.html
index e3178483dd..cbbe3d8a56 100644
--- a/2017/08/17/tvm-release-announcement.html
+++ b/2017/08/17/tvm-release-announcement.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>TVM: An End to End IR Stack for Deploying Deep Learning Workloads on
Hardware Platforms </h1>
<p class="post-meta">
- <time datetime="2017-08-17T19:00:00+00:00" itemprop="datePublished">
+ <time datetime="2017-08-17T15:00:00-04:00" itemprop="datePublished">
Aug 17, 2017
</time>
@@ -281,7 +281,6 @@ that adopts the standard, such as MXNet, PyTorch, Caffe2
and tiny-dnn.</li>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example.html b/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example.html
index 8ff970134b..5529d39696 100644
--- a/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example.html
+++ b/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Optimize Deep Learning GPU Operators with TVM: A Depthwise
Convolution Example </h1>
<p class="post-meta">
- <time datetime="2017-08-22T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2017-08-22T00:00:00-04:00" itemprop="datePublished">
Aug 22, 2017
</time>
@@ -736,7 +736,6 @@ He is experiencing a gap year after obtaining a bachelor’s
degree in electrica
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2017/10/06/nnvm-compiler-announcement.html b/2017/10/06/nnvm-compiler-announcement.html
index 5e210da475..aa71f85331 100644
--- a/2017/10/06/nnvm-compiler-announcement.html
+++ b/2017/10/06/nnvm-compiler-announcement.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>NNVM Compiler: Open Compiler for AI Frameworks </h1>
<p class="post-meta">
- <time datetime="2017-10-06T15:30:00+00:00" itemprop="datePublished">
+ <time datetime="2017-10-06T11:30:00-04:00" itemprop="datePublished">
Oct 6, 2017
</time>
@@ -236,7 +236,6 @@ We also learns from Halide when implementing the lowering
pipeline in TVM.</li>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html b/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html
index d6329faac8..a58486b148 100644
--- a/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html
+++ b/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Bringing AMDGPUs to TVM Stack and NNVM Compiler with ROCm </h1>
<p class="post-meta">
- <time datetime="2017-10-30T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2017-10-30T00:00:00-04:00" itemprop="datePublished">
Oct 30, 2017
</time>
@@ -379,7 +379,6 @@ BB0_6:
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2017/11/08/android-rpc-introduction.html b/2017/11/08/android-rpc-introduction.html
index bba620f013..ca8023a5f5 100644
--- a/2017/11/08/android-rpc-introduction.html
+++ b/2017/11/08/android-rpc-introduction.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Remote Profile and Test Deep Learning Cross Compilation on Mobile
Phones with TVM RPC </h1>
<p class="post-meta">
- <time datetime="2017-11-08T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2017-11-08T00:00:00-05:00" itemprop="datePublished">
Nov 8, 2017
</time>
@@ -385,7 +385,6 @@ make jvminstall
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2018/01/16/opt-mali-gpu.html b/2018/01/16/opt-mali-gpu.html
index 3dcce78495..6b079119be 100644
--- a/2018/01/16/opt-mali-gpu.html
+++ b/2018/01/16/opt-mali-gpu.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Optimizing Mobile Deep Learning on ARM GPU with TVM </h1>
<p class="post-meta">
- <time datetime="2018-01-16T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2018-01-16T00:00:00-05:00" itemprop="datePublished">
Jan 16, 2018
</time>
@@ -731,7 +731,6 @@ advice and <a href="https://github.com/yzhliu">Yizhi
Liu</a> for his earlier wor
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2018/03/12/webgl.html b/2018/03/12/webgl.html
index 508dd679d6..dacd5df968 100644
--- a/2018/03/12/webgl.html
+++ b/2018/03/12/webgl.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Compiling Deep Learning Models to WebGL with TVM </h1>
<p class="post-meta">
- <time datetime="2018-03-12T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2018-03-12T00:00:00-04:00" itemprop="datePublished">
Mar 12, 2018
</time>
@@ -273,7 +273,6 @@ optimizations into the TVM stack.</p>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2018/03/23/nmt-transformer-optimize.html b/2018/03/23/nmt-transformer-optimize.html
index 3c0566413a..d7ab207ba7 100644
--- a/2018/03/23/nmt-transformer-optimize.html
+++ b/2018/03/23/nmt-transformer-optimize.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Bringing TVM into TensorFlow for Optimizing Neural Machine
Translation on GPU </h1>
<p class="post-meta">
- <time datetime="2018-03-23T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2018-03-23T00:00:00-04:00" itemprop="datePublished">
Mar 23, 2018
</time>
@@ -419,7 +419,6 @@ C = tvm.compute(
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2018/07/12/vta-release-announcement.html b/2018/07/12/vta-release-announcement.html
index 1fa11439ef..b74e34622f 100644
--- a/2018/07/12/vta-release-announcement.html
+++ b/2018/07/12/vta-release-announcement.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>VTA: An Open, Customizable Deep Learning Acceleration Stack </h1>
<p class="post-meta">
- <time datetime="2018-07-12T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2018-07-12T00:00:00-04:00" itemprop="datePublished">
Jul 12, 2018
</time>
@@ -295,7 +295,6 @@ This kind of high-level visibility is essential to system
designers who want to
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2018/08/10/DLPack-Bridge.html b/2018/08/10/DLPack-Bridge.html
index 387f4dd687..ba4b25205a 100644
--- a/2018/08/10/DLPack-Bridge.html
+++ b/2018/08/10/DLPack-Bridge.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Building a Cross-Framework Deep Learning Compiler via DLPack </h1>
<p class="post-meta">
- <time datetime="2018-08-10T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2018-08-10T00:00:00-04:00" itemprop="datePublished">
Aug 10, 2018
</time>
@@ -296,7 +296,6 @@ support, and can be used to implement convenient
converters, such as
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2018/10/03/auto-opt-all.html b/2018/10/03/auto-opt-all.html
index fffa2518a2..ae96fd910f 100644
--- a/2018/10/03/auto-opt-all.html
+++ b/2018/10/03/auto-opt-all.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Automatic Kernel Optimization for Deep Learning on All Hardware
Platforms </h1>
<p class="post-meta">
- <time datetime="2018-10-03T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2018-10-03T00:00:00-04:00" itemprop="datePublished">
Oct 3, 2018
</time>
@@ -551,7 +551,6 @@ for inference deployment. TVM just provides such a
solution.</p>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2018/10/09/ml-in-tees.html b/2018/10/09/ml-in-tees.html
index 07810353ec..baec1a12dd 100644
--- a/2018/10/09/ml-in-tees.html
+++ b/2018/10/09/ml-in-tees.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Efficient Privacy-Preserving ML Using TVM </h1>
<p class="post-meta">
- <time datetime="2018-10-09T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2018-10-09T00:00:00-04:00" itemprop="datePublished">
Oct 9, 2018
</time>
@@ -273,7 +273,6 @@ His research interest is in the general domain of ML on
shared private data, but
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2018/12/18/lowprecision-conv.html b/2018/12/18/lowprecision-conv.html
index 84a69d47e9..6a84dc31ac 100644
--- a/2018/12/18/lowprecision-conv.html
+++ b/2018/12/18/lowprecision-conv.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Automating Generation of Low Precision Deep Learning Operators </h1>
<p class="post-meta">
- <time datetime="2018-12-18T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2018-12-18T00:00:00-05:00" itemprop="datePublished">
Dec 18, 2018
</time>
@@ -318,7 +318,6 @@ Note: x86 doesn’t support a vectorized popcount for this
microarchitecture, so
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2019/01/19/Golang.html b/2019/01/19/Golang.html
index dd0223386f..6c8b0ccdd7 100644
--- a/2019/01/19/Golang.html
+++ b/2019/01/19/Golang.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>TVM Golang Runtime for Deep Learning Deployment </h1>
<p class="post-meta">
- <time datetime="2019-01-19T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2019-01-19T00:00:00-05:00" itemprop="datePublished">
Jan 19, 2019
</time>
@@ -327,7 +327,6 @@ closure as TVM packed function and invoke the same across
programming language b
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2019/03/18/tvm-apache-announcement.html b/2019/03/18/tvm-apache-announcement.html
index 4ea745935f..6119edc287 100644
--- a/2019/03/18/tvm-apache-announcement.html
+++ b/2019/03/18/tvm-apache-announcement.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>TVM Deep Learning Compiler Joins Apache Software Foundation </h1>
<p class="post-meta">
- <time datetime="2019-03-18T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2019-03-18T00:00:00-04:00" itemprop="datePublished">
Mar 18, 2019
</time>
@@ -180,7 +180,6 @@
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2019/04/29/opt-cuda-quantized.html b/2019/04/29/opt-cuda-quantized.html
index 9acea84899..a3df37e38a 100644
--- a/2019/04/29/opt-cuda-quantized.html
+++ b/2019/04/29/opt-cuda-quantized.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Automating Optimization of Quantized Deep Learning Models on CUDA
</h1>
<p class="post-meta">
- <time datetime="2019-04-29T16:00:00+00:00" itemprop="datePublished">
+ <time datetime="2019-04-29T12:00:00-04:00" itemprop="datePublished">
Apr 29, 2019
</time>
@@ -301,7 +301,6 @@ We show that automatic optimization in TVM makes it easy
and flexible to support
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2019/05/30/pytorch-frontend.html b/2019/05/30/pytorch-frontend.html
index 29a45b5561..fb4acbf346 100644
--- a/2019/05/30/pytorch-frontend.html
+++ b/2019/05/30/pytorch-frontend.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Integrating TVM into PyTorch </h1>
<p class="post-meta">
- <time datetime="2019-05-30T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2019-05-30T00:00:00-04:00" itemprop="datePublished">
May 30, 2019
</time>
@@ -259,7 +259,6 @@ relay_graph = torch_tvm.to_relay(mul, inputs)
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu.html b/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu.html
index a7e5cfca02..15255f696b 100644
--- a/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu.html
+++ b/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Compiling Machine Learning to WASM and WebGPU with Apache TVM </h1>
<p class="post-meta">
- <time datetime="2020-05-14T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2020-05-14T00:00:00-04:00" itemprop="datePublished">
May 14, 2020
</time>
@@ -244,7 +244,6 @@
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2020/06/04/tinyml-how-tvm-is-taming-tiny.html b/2020/06/04/tinyml-how-tvm-is-taming-tiny.html
index aa0297b59b..0754cfc923 100644
--- a/2020/06/04/tinyml-how-tvm-is-taming-tiny.html
+++ b/2020/06/04/tinyml-how-tvm-is-taming-tiny.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>TinyML - How TVM is Taming Tiny </h1>
<p class="post-meta">
- <time datetime="2020-06-04T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2020-06-04T00:00:00-04:00" itemprop="datePublished">
Jun 4, 2020
</time>
@@ -466,7 +466,6 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix
multiplication microkernel</p>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2020/07/14/bert-pytorch-tvm.html b/2020/07/14/bert-pytorch-tvm.html
index b48a23b437..4c4032c5c6 100644
--- a/2020/07/14/bert-pytorch-tvm.html
+++ b/2020/07/14/bert-pytorch-tvm.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Bridging PyTorch and TVM </h1>
<p class="post-meta">
- <time datetime="2020-07-14T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2020-07-14T00:00:00-04:00" itemprop="datePublished">
Jul 14, 2020
</time>
@@ -680,7 +680,6 @@ He is a PyTorch core developer and co-authored <a
href="https://www.manning.com/
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2020/07/15/how-to-bring-your-own-codegen-to-tvm.html b/2020/07/15/how-to-bring-your-own-codegen-to-tvm.html
index d5968f40dc..bde6c35489 100644
--- a/2020/07/15/how-to-bring-your-own-codegen-to-tvm.html
+++ b/2020/07/15/how-to-bring-your-own-codegen-to-tvm.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>How to Bring Your Own Codegen to TVM </h1>
<p class="post-meta">
- <time datetime="2020-07-15T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2020-07-15T00:00:00-04:00" itemprop="datePublished">
Jul 15, 2020
</time>
@@ -636,7 +636,6 @@ Figure 4: After Graph Partitioning.
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2020/09/26/bring-your-own-datatypes.html b/2020/09/26/bring-your-own-datatypes.html
index b38ff416a3..d8bf33be95 100644
--- a/2020/09/26/bring-your-own-datatypes.html
+++ b/2020/09/26/bring-your-own-datatypes.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Bring Your Own Datatypes: Enabling Custom Datatype Exploration in
TVM </h1>
<p class="post-meta">
- <time datetime="2020-09-26T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2020-09-26T00:00:00-04:00" itemprop="datePublished">
Sep 26, 2020
</time>
@@ -450,7 +450,6 @@ For more documentation about the Bring Your Own Datatypes
framework
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2021/03/03/intro-auto-scheduler.html b/2021/03/03/intro-auto-scheduler.html
index bfcb0df6a5..78e9567a8d 100644
--- a/2021/03/03/intro-auto-scheduler.html
+++ b/2021/03/03/intro-auto-scheduler.html
@@ -137,7 +137,7 @@
<div class="span14 w-100">
<h1>Introducing TVM Auto-scheduler (a.k.a. Ansor) </h1>
<p class="post-meta">
- <time datetime="2021-03-03T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2021-03-03T00:00:00-05:00" itemprop="datePublished">
Mar 3, 2021
</time>
@@ -287,7 +287,6 @@ sparse operators, low-precision operators, and dynamic
shape better.</p>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/2021/12/15/tvm-unity.html b/2021/12/15/tvm-unity.html
index b4d937e12c..19fa4f9c07 100644
--- a/2021/12/15/tvm-unity.html
+++ b/2021/12/15/tvm-unity.html
@@ -139,7 +139,7 @@
<div class="span14 w-100">
<h1>Apache TVM Unity: a vision for the ML software & hardware ecosystem
in 2022 </h1>
<p class="post-meta">
- <time datetime="2021-12-15T00:00:00+00:00" itemprop="datePublished">
+ <time datetime="2021-12-15T00:00:00-05:00" itemprop="datePublished">
Dec 15, 2021
</time>
@@ -273,7 +273,6 @@ This example shows all of these capabilities:</p>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/404.html b/404.html
index b311044e1f..32e69bec96 100644
--- a/404.html
+++ b/404.html
@@ -43,7 +43,6 @@
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/asf.html b/asf.html
index 0a780b4872..74251f7784 100644
--- a/asf.html
+++ b/asf.html
@@ -165,7 +165,6 @@
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/atom.xml b/atom.xml
index b45b4eb24c..a75f1cb898 100644
--- a/atom.xml
+++ b/atom.xml
@@ -4,7 +4,7 @@
<title>TVM</title>
<link href="https://tvm.apache.org" rel="self"/>
<link href="https://tvm.apache.org"/>
- <updated>2024-12-03T13:56:53+00:00</updated>
+ <updated>2025-01-27T10:32:32-05:00</updated>
<id>https://tvm.apache.org</id>
<author>
<name></name>
@@ -15,7 +15,7 @@
<entry>
<title>Apache TVM Unity: a vision for the ML software & hardware
ecosystem in 2022</title>
<link href="https://tvm.apache.org/2021/12/15/tvm-unity"/>
- <updated>2021-12-15T00:00:00+00:00</updated>
+ <updated>2021-12-15T00:00:00-05:00</updated>
<id>https://tvm.apache.org/2021/12/15/tvm-unity</id>
<content type="html"><p>Apache TVM Unity is a roadmap for the TVM
ecosystem in 2022. We see a broader shift coming in the way that machine
learning system stacks optimize for flexibility and agility in the face of a
rapidly changing hardware landscape. TVM will evolve to break down the
boundaries that constrain the ways current ML systems adapt to rapid changes in
ML models and the accelerators that implement them.</p>
@@ -129,7 +129,7 @@ This example shows all of these capabilities:</p>
<entry>
<title>Introducing TVM Auto-scheduler (a.k.a. Ansor)</title>
<link href="https://tvm.apache.org/2021/03/03/intro-auto-scheduler"/>
- <updated>2021-03-03T00:00:00+00:00</updated>
+ <updated>2021-03-03T00:00:00-05:00</updated>
<id>https://tvm.apache.org/2021/03/03/intro-auto-scheduler</id>
<content type="html"><p>Optimizing the execution speed of deep neural
networks is extremely hard with the growing
model size, operator diversity, and hardware heterogeneity.
@@ -259,7 +259,7 @@ sparse operators, low-precision operators, and dynamic
shape better.</p>
<entry>
<title>Bring Your Own Datatypes: Enabling Custom Datatype Exploration in
TVM</title>
<link href="https://tvm.apache.org/2020/09/26/bring-your-own-datatypes"/>
- <updated>2020-09-26T00:00:00+00:00</updated>
+ <updated>2020-09-26T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2020/09/26/bring-your-own-datatypes</id>
<content type="html"><p>In this post, we describe the Bring Your Own
Datatypes framework, which enables the use of custom datatypes within
TVM.</p>
@@ -552,7 +552,7 @@ For more documentation about the Bring Your Own Datatypes
framework
<entry>
<title>How to Bring Your Own Codegen to TVM</title>
<link
href="https://tvm.apache.org/2020/07/15/how-to-bring-your-own-codegen-to-tvm"/>
- <updated>2020-07-15T00:00:00+00:00</updated>
+ <updated>2020-07-15T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2020/07/15/how-to-bring-your-own-codegen-to-tvm</id>
<content type="html"><p>To free data scientists from worrying about
the performance when developing a new model, hardware backend providers (e.g.,
Intel, NVIDIA, ARM, etc) either provide kernel libraries such as cuBLAS or
cuDNN with many commonly used deep learning kernels, or provide frameworks such
as DNNL or TensorRT with a graph engine to let users describe their models in a
certain way to achieve high performance. In addition, emerging deep learning
accelerators also have t [...]
@@ -1031,7 +1031,7 @@ Figure 4: After Graph Partitioning.
<entry>
<title>Bridging PyTorch and TVM</title>
<link href="https://tvm.apache.org/2020/07/14/bert-pytorch-tvm"/>
- <updated>2020-07-14T00:00:00+00:00</updated>
+ <updated>2020-07-14T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2020/07/14/bert-pytorch-tvm</id>
<content type="html">
<p>(A more code-heavy variant is crossposted on the more PyTorch affine
<a
href="https://lernapparat.de/transformers-pytorch-tvm/">Lernapparat</a>,
@@ -1554,7 +1554,7 @@ He is a PyTorch core developer and co-authored <a
href="https://www.mann
<entry>
<title>TinyML - How TVM is Taming Tiny</title>
<link
href="https://tvm.apache.org/2020/06/04/tinyml-how-tvm-is-taming-tiny"/>
- <updated>2020-06-04T00:00:00+00:00</updated>
+ <updated>2020-06-04T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2020/06/04/tinyml-how-tvm-is-taming-tiny</id>
<content type="html">
<p><img src="/images/microtvm/logo.png" alt="microTVM
logo" width="30%" /><br /></p>
@@ -1863,7 +1863,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix
multiplication microkernel</
<entry>
<title>Compiling Machine Learning to WASM and WebGPU with Apache TVM</title>
<link
href="https://tvm.apache.org/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu"/>
- <updated>2020-05-14T00:00:00+00:00</updated>
+ <updated>2020-05-14T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu</id>
<content type="html"><p><strong>TLDR</strong></p>
@@ -1950,7 +1950,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix
multiplication microkernel</
<entry>
<title>Integrating TVM into PyTorch</title>
<link href="https://tvm.apache.org/2019/05/30/pytorch-frontend"/>
- <updated>2019-05-30T00:00:00+00:00</updated>
+ <updated>2019-05-30T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2019/05/30/pytorch-frontend</id>
<content type="html"><p>As TVM continuously demonstrates improvements
to the efficiency of deep learning execution,
it has become clear that PyTorch stands to benefit from directly leveraging
the compiler stack.
@@ -2052,7 +2052,7 @@ relay_graph = torch_tvm.to_relay(mul, inputs)
<entry>
<title>Automating Optimization of Quantized Deep Learning Models on
CUDA</title>
<link href="https://tvm.apache.org/2019/04/29/opt-cuda-quantized"/>
- <updated>2019-04-29T16:00:00+00:00</updated>
+ <updated>2019-04-29T12:00:00-04:00</updated>
<id>https://tvm.apache.org/2019/04/29/opt-cuda-quantized</id>
<content type="html"><p>Deep learning has been successfully applied
to a variety of tasks.
On real-time scenarios such as inference on autonomous vehicles, the inference
speed of the model is critical.
@@ -2196,7 +2196,7 @@ We show that automatic optimization in TVM makes it easy
and flexible to support
<entry>
<title>TVM Deep Learning Compiler Joins Apache Software Foundation</title>
<link href="https://tvm.apache.org/2019/03/18/tvm-apache-announcement"/>
- <updated>2019-03-18T00:00:00+00:00</updated>
+ <updated>2019-03-18T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2019/03/18/tvm-apache-announcement</id>
<content type="html"><p>There is an increasing need to bring machine
learning to a wide diversity of hardware devices. Current frameworks rely on
vendor-specific operator libraries and optimize for a narrow range of
server-class GPUs. Deploying workloads to new platforms – such as mobile
phones, embedded devices, and accelerators (e.g., FPGAs, ASICs) – requires
significant manual effort.</p>
@@ -2219,7 +2219,7 @@ We show that automatic optimization in TVM makes it easy
and flexible to support
<entry>
<title>TVM Golang Runtime for Deep Learning Deployment</title>
<link href="https://tvm.apache.org/2019/01/19/Golang"/>
- <updated>2019-01-19T00:00:00+00:00</updated>
+ <updated>2019-01-19T00:00:00-05:00</updated>
<id>https://tvm.apache.org/2019/01/19/Golang</id>
<content type="html"><h2
id="introduction">Introduction</h2>
@@ -2389,7 +2389,7 @@ closure as TVM packed function and invoke the same across
programming language b
<entry>
<title>Automating Generation of Low Precision Deep Learning
Operators</title>
<link href="https://tvm.apache.org/2018/12/18/lowprecision-conv"/>
- <updated>2018-12-18T00:00:00+00:00</updated>
+ <updated>2018-12-18T00:00:00-05:00</updated>
<id>https://tvm.apache.org/2018/12/18/lowprecision-conv</id>
<content type="html"><p>As deep learning models grow larger and more
complex, deploying them on low powered phone and IoT
devices becomes challenging because of their limited compute and energy
budgets. A recent trend
@@ -2550,7 +2550,7 @@ Note: x86 doesn’t support a vectorized popcount for this
microarchitecture, so
<entry>
<title>Efficient Privacy-Preserving ML Using TVM</title>
<link href="https://tvm.apache.org/2018/10/09/ml-in-tees"/>
- <updated>2018-10-09T00:00:00+00:00</updated>
+ <updated>2018-10-09T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2018/10/09/ml-in-tees</id>
<content type="html"><p>This post describes Myelin, a framework for
privacy-preserving machine learning in trusted hardware enclaves, and how TVM
makes Myelin fast.
The key idea is that TVM, unlike other popular ML frameworks, compiles models
into lightweight, optimized, and dependency-free libraries which can fit into
resource constrained enclaves.</p>
@@ -2666,7 +2666,7 @@ His research interest is in the general domain of ML on
shared private data, but
<entry>
<title>Automatic Kernel Optimization for Deep Learning on All Hardware
Platforms</title>
<link href="https://tvm.apache.org/2018/10/03/auto-opt-all"/>
- <updated>2018-10-03T00:00:00+00:00</updated>
+ <updated>2018-10-03T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2018/10/03/auto-opt-all</id>
<content type="html"><p>Optimizing the performance of deep neural
network on a diverse range of hardware platforms is still a hard
problem for AI developers. In terms of system support, we are facing a
many-to-many problem here:
@@ -3060,7 +3060,7 @@ for inference deployment. TVM just provides such a
solution.</p>
<entry>
<title>Building a Cross-Framework Deep Learning Compiler via DLPack</title>
<link href="https://tvm.apache.org/2018/08/10/DLPack-Bridge"/>
- <updated>2018-08-10T00:00:00+00:00</updated>
+ <updated>2018-08-10T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2018/08/10/DLPack-Bridge</id>
<content type="html"><p>Deep learning frameworks such as Tensorflow,
PyTorch, and ApacheMxNet provide a
powerful toolbox for quickly prototyping and deploying deep learning models.
@@ -3199,7 +3199,7 @@ support, and can be used to implement convenient
converters, such as
<entry>
<title>VTA: An Open, Customizable Deep Learning Acceleration Stack </title>
<link href="https://tvm.apache.org/2018/07/12/vta-release-announcement"/>
- <updated>2018-07-12T00:00:00+00:00</updated>
+ <updated>2018-07-12T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2018/07/12/vta-release-announcement</id>
<content type="html"><p style="text-align: center">Thierry
Moreau(VTA architect), Tianqi Chen(TVM stack), Ziheng Jiang†(graph
compilation), Luis Vega(cloud deployment)</p>
<p style="text-align: center">Advisors: Luis Ceze, Carlos
Guestrin, Arvind Krishnamurthy</p>
@@ -3341,7 +3341,7 @@ This kind of high-level visibility is essential to system
designers who want to
<entry>
<title>Bringing TVM into TensorFlow for Optimizing Neural Machine
Translation on GPU</title>
<link href="https://tvm.apache.org/2018/03/23/nmt-transformer-optimize"/>
- <updated>2018-03-23T00:00:00+00:00</updated>
+ <updated>2018-03-23T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2018/03/23/nmt-transformer-optimize</id>
<content type="html"><h2 id="author">Author</h2>
@@ -3607,7 +3607,7 @@ C = tvm.compute(
<entry>
<title>Compiling Deep Learning Models to WebGL with TVM</title>
<link href="https://tvm.apache.org/2018/03/12/webgl"/>
- <updated>2018-03-12T00:00:00+00:00</updated>
+ <updated>2018-03-12T00:00:00-04:00</updated>
<id>https://tvm.apache.org/2018/03/12/webgl</id>
<content type="html"><p>Now TVM comes with a brand-new OpenGL/WebGL
backend!
This blog post explains what it is, and what you can achieve with it.</p>
@@ -3723,7 +3723,7 @@ optimizations into the TVM stack.</p>
<entry>
<title>Optimizing Mobile Deep Learning on ARM GPU with TVM</title>
<link href="https://tvm.apache.org/2018/01/16/opt-mali-gpu"/>
- <updated>2018-01-16T00:00:00+00:00</updated>
+ <updated>2018-01-16T00:00:00-05:00</updated>
<id>https://tvm.apache.org/2018/01/16/opt-mali-gpu</id>
<content type="html"><p>With the great success of deep learning, the
demand for
deploying deep neural networks to mobile devices is growing rapidly.
@@ -4297,7 +4297,7 @@ advice and <a
href="https://github.com/yzhliu">Yizhi Liu</a&g
<entry>
<title>Remote Profile and Test Deep Learning Cross Compilation on Mobile
Phones with TVM RPC</title>
<link href="https://tvm.apache.org/2017/11/08/android-rpc-introduction"/>
- <updated>2017-11-08T00:00:00+00:00</updated>
+ <updated>2017-11-08T00:00:00-05:00</updated>
<id>https://tvm.apache.org/2017/11/08/android-rpc-introduction</id>
<content type="html"><p>TVM stack is an end to end compilation stack
to deploy deep learning workloads to all hardware backends.
Thanks to the NNVM compiler support of TVM stack, we can now directly compile
descriptions from deep learning frameworks and compile them to bare metal code.
diff --git a/blog.html b/blog.html
index e77a7ee0c3..61e46f3a5e 100644
--- a/blog.html
+++ b/blog.html
@@ -398,7 +398,6 @@
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/categories.html b/categories.html
index 08e60308a8..e2a65d4119 100644
--- a/categories.html
+++ b/categories.html
@@ -167,7 +167,6 @@
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/community.html b/community.html
index 41a612a4af..738f2a5682 100644
--- a/community.html
+++ b/community.html
@@ -323,7 +323,6 @@ This is a community maintained list of organizations using
and contributing to t
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/download.html b/download.html
index bd6c5172c1..9ba81bc096 100644
--- a/download.html
+++ b/download.html
@@ -272,7 +272,6 @@ Choose your flavor of download from the following links:</p>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/feed.xml b/feed.xml
index 0253c23410..b122b7296f 100644
--- a/feed.xml
+++ b/feed.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?><feed
xmlns="http://www.w3.org/2005/Atom" ><generator uri="https://jekyllrb.com/"
version="4.1.1">Jekyll</generator><link href="/feed.xml" rel="self"
type="application/atom+xml" /><link href="/" rel="alternate" type="text/html"
/><updated>2024-12-03T13:56:53+00:00</updated><id>/feed.xml</id><title
type="html">TVM</title><author><name>{"name"=>nil}</name></author><entry><title
type="html">Apache TVM Unity: a vision for the ML software &am [...]
+<?xml version="1.0" encoding="utf-8"?><feed
xmlns="http://www.w3.org/2005/Atom" ><generator uri="https://jekyllrb.com/"
version="4.1.1">Jekyll</generator><link href="/feed.xml" rel="self"
type="application/atom+xml" /><link href="/" rel="alternate" type="text/html"
/><updated>2025-01-27T10:32:32-05:00</updated><id>/feed.xml</id><title
type="html">TVM</title><author><name>{"name"=>nil}</name></author><entry><title
type="html">Apache TVM Unity: a vision for the ML software &am [...]
<h2 id="boundaries-in-the-modern-ml-system-stack">Boundaries in the Modern ML
System Stack</h2>
@@ -103,7 +103,7 @@ This example shows all of these capabilities:</p>
<p>Beyond TVM alone, the same forces that are driving TVM Unity exist across
the theory and practice of modern ML. Rapid changes to models, emerging
alternative hardware, and aging abstraction boundaries all point toward the
need for an integrated approach. We expect TVM to lead the way into the next
great industry-wide shift in ML systems.</p>
-<p>For more details about our vision for TVM, check out <a
href="https://www.tvmcon.org">TVMCon 2021</a> for more talks and
discussion.</p>]]></content><author><name>Adrian Sampson, Tianqi Chen, Jared
Roesch</name></author><summary type="html"><![CDATA[Apache TVM Unity is a
roadmap for the TVM ecosystem in 2022. We see a broader shift coming in the way
that machine learning system stacks optimize for flexibility and agility in the
face of a rapidly changing hardware landscape. TVM will e [...]
+<p>For more details about our vision for TVM, check out <a
href="https://www.tvmcon.org">TVMCon 2021</a> for more talks and
discussion.</p>]]></content><author><name>Adrian Sampson, Tianqi Chen, Jared
Roesch</name></author><summary type="html"><![CDATA[Apache TVM Unity is a
roadmap for the TVM ecosystem in 2022. We see a broader shift coming in the way
that machine learning system stacks optimize for flexibility and agility in the
face of a rapidly changing hardware landscape. TVM will e [...]
model size, operator diversity, and hardware heterogeneity.
From a computational perspective, deep neural networks are just layers and
layers of tensor computations.
These tensor computations, such as matmul and conv2d, can be easily described
by mathematical expressions.
@@ -223,7 +223,7 @@ sparse operators, low-precision operators, and dynamic
shape better.</p>
<p>[1] Tutorials: <a
href="https://tvm.apache.org/docs/tutorials/index.html#autoscheduler-template-free-auto-scheduling">https://tvm.apache.org/docs/tutorials/index.html#autoscheduler-template-free-auto-scheduling</a><br
/>
[2] Benchmark repo: <a
href="https://github.com/tlc-pack/TLCBench">https://github.com/tlc-pack/TLCBench</a><br
/>
[3] OSDI Paper: <a href="https://arxiv.org/abs/2006.06762">Ansor : Generating
High-Performance Tensor Programs for Deep Learning</a><br />
-[4] Results on Apple M1 chip: <a
href="https://medium.com/octoml/on-the-apple-m1-beating-apples-core-ml-4-with-30-model-performance-improvements-9d94af7d1b2d">https://medium.com/octoml/on-the-apple-m1-beating-apples-core-ml-4-with-30-model-performance-improvements-9d94af7d1b2d</a>.</p>]]></content><author><name>Lianmin
Zheng, Chengfan Jia, Minmin Sun, Zhao Wu, Cody Hao Yu</name></author><summary
type="html"><![CDATA[Optimizing the execution speed of deep neural networks is
extremely hard [...]
+[4] Results on Apple M1 chip: <a
href="https://medium.com/octoml/on-the-apple-m1-beating-apples-core-ml-4-with-30-model-performance-improvements-9d94af7d1b2d">https://medium.com/octoml/on-the-apple-m1-beating-apples-core-ml-4-with-30-model-performance-improvements-9d94af7d1b2d</a>.</p>]]></content><author><name>Lianmin
Zheng, Chengfan Jia, Minmin Sun, Zhao Wu, Cody Hao Yu</name></author><summary
type="html"><![CDATA[Optimizing the execution speed of deep neural networks is
extremely hard [...]
<h2 id="introduction">Introduction</h2>
@@ -507,7 +507,7 @@ For more documentation about the Bring Your Own Datatypes
framework
<p><a href="https://posithub.org/docs/BeatingFloatingPoint.pdf"
target="_blank">Beating Floating Point at its Own Game: Posit Arithmetic</a> <a
href="#fnref:posit" class="reversefootnote" role="doc-backlink">↩</a></p>
</li>
</ol>
-</div>]]></content><author><name>Gus Smith, Andrew Liu</name></author><summary
type="html"><![CDATA[In this post, we describe the Bring Your Own Datatypes
framework, which enables the use of custom datatypes within
TVM.]]></summary></entry><entry><title type="html">How to Bring Your Own
Codegen to TVM</title><link
href="/2020/07/15/how-to-bring-your-own-codegen-to-tvm" rel="alternate"
type="text/html" title="How to Bring Your Own Codegen to TVM"
/><published>2020-07-15T00:00:00+00:00</pu [...]
+</div>]]></content><author><name>Gus Smith, Andrew Liu</name></author><summary
type="html"><![CDATA[In this post, we describe the Bring Your Own Datatypes
framework, which enables the use of custom datatypes within
TVM.]]></summary></entry><entry><title type="html">How to Bring Your Own
Codegen to TVM</title><link
href="/2020/07/15/how-to-bring-your-own-codegen-to-tvm" rel="alternate"
type="text/html" title="How to Bring Your Own Codegen to TVM"
/><published>2020-07-15T00:00:00-04:00</pu [...]
<p>However, users have to learn a new programming interface when they attempt
to work on a new kernel library or a device. As a result, the demand for a
unified programming interface becomes more and more important to let all users
and hardware backend providers stand on the same page.</p>
@@ -976,7 +976,7 @@ Figure 4: After Graph Partitioning.
<h2 id="acknowledgment">Acknowledgment</h2>
-<p>We would like to thank our colleague Animesh Jain for valuable discussions
in the framework design; Tianqi Chen and Jared Roesch from OctoML for system
design discussions and prototyping; Masahiro Masuda from the TVM community to
help code review and improve the DNNL integration. We would also like to thank
Ramana Radhakrishnan, Matthew Barrett, Manupa Karunaratne, and Luke Hutton from
ARM, U.K. for contributing several helpful ideas, related Relay passes, and the
Arm Compute Library [...]
+<p>We would like to thank our colleague Animesh Jain for valuable discussions
in the framework design; Tianqi Chen and Jared Roesch from OctoML for system
design discussions and prototyping; Masahiro Masuda from the TVM community to
help code review and improve the DNNL integration. We would also like to thank
Ramana Radhakrishnan, Matthew Barrett, Manupa Karunaratne, and Luke Hutton from
ARM, U.K. for contributing several helpful ideas, related Relay passes, and the
Arm Compute Library [...]
the Jupyter Notebook to follow along is on <a
href="https://github.com/t-vi/pytorch-tvmisc/tree/master/transformers-pytorch-tvm/">github</a>.)</p>
<p>Some of the most intriguing applications of Artificial Intelligence have
been in Natural Language Processing.
@@ -1489,7 +1489,7 @@ one would want to re-do cheap computation, most
prominently point-wise computati
<h1 id="author">Author</h1>
<p><a href="https://lernapparat.de/">Thomas Viehmann</a> is the founder of <a
href="https://mathinf.eu/">MathInf GmbH</a>, Munich, Germany, a boutique
training and consultancy firm focusing on Machine Learning and PyTorch.
-He is a PyTorch core developer and co-authored <a
href="https://www.manning.com/books/deep-learning-with-pytorch">Deep Learning
with PyTorch</a>, which currently available as <a
href="https://pytorch.org/deep-learning-with-pytorch">free download from the
PyTorch website</a>.</p>]]></content><author><name>Thomas Viehmann, MathInf
GmbH</name></author><summary
type="html"><![CDATA[]]></summary></entry><entry><title type="html">TinyML -
How TVM is Taming Tiny</title><link href="/2020/06/04/t [...]
+He is a PyTorch core developer and co-authored <a
href="https://www.manning.com/books/deep-learning-with-pytorch">Deep Learning
with PyTorch</a>, which currently available as <a
href="https://pytorch.org/deep-learning-with-pytorch">free download from the
PyTorch website</a>.</p>]]></content><author><name>Thomas Viehmann, MathInf
GmbH</name></author><summary
type="html"><![CDATA[]]></summary></entry><entry><title type="html">TinyML -
How TVM is Taming Tiny</title><link href="/2020/06/04/t [...]
<p>The proliferation of low-cost, AI-powered consumer devices has led to
widespread interest in “bare-metal” (low-power, often without an operating
system) devices among ML researchers and practitioners. While it is already
possible for experts to run <em>some</em> models on <em>some</em> bare-metal
devices, optimizing models for diverse sets of devices is challenging, often
requiring manually optimized device-specific libraries. And for those
platforms without, say, Linux support, the [...]
@@ -1788,7 +1788,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix
multiplication microkernel</p>
<li><a href="https://homes.cs.washington.edu/~moreau/">Thierry Moreau</a>,
for mentoring me during my time at OctoML.</li>
<li><a href="https://homes.cs.washington.edu/~vegaluis/">Luis Vega</a>, for
teaching me the fundamentals of interacting with microcontrollers.</li>
<li><a
href="https://www.linkedin.com/in/themadrasi/?originalSubdomain=uk">Ramana
Radhakrishnan</a>, for supplying the Arm hardware used in our experiments and
for providing guidance on its usage.</li>
-</ul>]]></content><author><name>Logan Weber and Andrew Reusch,
OctoML</name></author><summary
type="html"><![CDATA[]]></summary></entry><entry><title type="html">Compiling
Machine Learning to WASM and WebGPU with Apache TVM</title><link
href="/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu"
rel="alternate" type="text/html" title="Compiling Machine Learning to WASM and
WebGPU with Apache TVM"
/><published>2020-05-14T00:00:00+00:00</published><updated>2020-05-14T00:00:00+0
[...]
+</ul>]]></content><author><name>Logan Weber and Andrew Reusch,
OctoML</name></author><summary
type="html"><![CDATA[]]></summary></entry><entry><title type="html">Compiling
Machine Learning to WASM and WebGPU with Apache TVM</title><link
href="/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu"
rel="alternate" type="text/html" title="Compiling Machine Learning to WASM and
WebGPU with Apache TVM"
/><published>2020-05-14T00:00:00-04:00</published><updated>2020-05-14T00:00:00-0
[...]
<p>We introduced support for WASM and WebGPU to the Apache TVM deep learning
compiler. Our experiments shows that TVM’s WebGPU backend can get
<strong>close to native</strong> <strong>GPU performance</strong> when
deploying models to the web.</p>
@@ -1866,7 +1866,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix
multiplication microkernel</p>
<h2 id="acknowledgement">Acknowledgement</h2>
-<p>We would like to thank the emscripten project for providing the WASM
compilation infrastructures as well as the JS library support on the web. We
would also like to thank the WebGPU community for various helpful discussions.
Thanks to Fletcher Haynes for valuable feedbacks to the
post.</p>]]></content><author><name>Tianqi Chen and Jared Roesch,
OctoML</name></author><summary
type="html"><![CDATA[TLDR]]></summary></entry><entry><title
type="html">Integrating TVM into PyTorch</title><li [...]
+<p>We would like to thank the emscripten project for providing the WASM
compilation infrastructures as well as the JS library support on the web. We
would also like to thank the WebGPU community for various helpful discussions.
Thanks to Fletcher Haynes for valuable feedbacks to the
post.</p>]]></content><author><name>Tianqi Chen and Jared Roesch,
OctoML</name></author><summary
type="html"><![CDATA[TLDR]]></summary></entry><entry><title
type="html">Integrating TVM into PyTorch</title><li [...]
it has become clear that PyTorch stands to benefit from directly leveraging
the compiler stack.
A major tenet of PyTorch is providing seamless and robust integrations that
don’t get in the user’s way.
To that end, PyTorch now has an official TVM-based backend, <a
href="https://github.com/pytorch/tvm">torch_tvm</a>.</p>
@@ -1958,7 +1958,7 @@ def mul(a, b, c):
# via script
relay_graph = torch_tvm.to_relay(mul, inputs)
-</code></pre></div></div>]]></content><author><name>Bram
Wasti</name></author><summary type="html"><![CDATA[As TVM continuously
demonstrates improvements to the efficiency of deep learning execution, it has
become clear that PyTorch stands to benefit from directly leveraging the
compiler stack. A major tenet of PyTorch is providing seamless and robust
integrations that don’t get in the user’s way. To that end, PyTorch now has an
official TVM-based backend, torch_tvm.]]></summary></entry> [...]
+</code></pre></div></div>]]></content><author><name>Bram
Wasti</name></author><summary type="html"><![CDATA[As TVM continuously
demonstrates improvements to the efficiency of deep learning execution, it has
become clear that PyTorch stands to benefit from directly leveraging the
compiler stack. A major tenet of PyTorch is providing seamless and robust
integrations that don’t get in the user’s way. To that end, PyTorch now has an
official TVM-based backend, torch_tvm.]]></summary></entry> [...]
In real-time scenarios such as inference on autonomous vehicles, the inference
speed of the model is critical.
Network quantization is an effective approach to accelerating deep learning
models.
In quantized models, both data and model parameters are represented with low
precision data types such as <code class="language-plaintext
highlighter-rouge">int8</code> and <code class="language-plaintext
highlighter-rouge">float16</code>.
@@ -2093,7 +2093,7 @@ We show that automatic optimization in TVM makes it easy
and flexible to support
</ul>
<h1 id="bio--acknowledgement">Bio & Acknowledgement</h1>
-<p><a href="https://wuwei.io/">Wuwei Lin</a> is an undergraduate student at
SJTU. He is currently an intern at TuSimple. The author would like to thank <a
href="https://homes.cs.washington.edu/~tqchen/">Tianqi Chen</a> and <a
href="https://homes.cs.washington.edu/~eqy/">Eddie Yan</a> for their
reviews.</p>]]></content><author><name>Wuwei Lin</name></author><summary
type="html"><![CDATA[Deep learning has been successfully applied to a variety
of tasks. In real-time scenarios such as infere [...]
+<p><a href="https://wuwei.io/">Wuwei Lin</a> is an undergraduate student at
SJTU. He is currently an intern at TuSimple. The author would like to thank <a
href="https://homes.cs.washington.edu/~tqchen/">Tianqi Chen</a> and <a
href="https://homes.cs.washington.edu/~eqy/">Eddie Yan</a> for their
reviews.</p>]]></content><author><name>Wuwei Lin</name></author><summary
type="html"><![CDATA[Deep learning has been successfully applied to a variety
of tasks. In real-time scenarios such as infere [...]
<p>TVM is an open source deep learning compiler stack that closes the gap
between productivity-focused deep learning frameworks and performance- or
efficiency-oriented hardware backends. Today, we are glad to announce that
the TVM community has decided to move to the Apache Incubator and become an
Apache (incubating) project.</p>
diff --git a/history.html b/history.html
index 95cb779fb3..ebcb23dc99 100644
--- a/history.html
+++ b/history.html
@@ -164,7 +164,6 @@ Then a tensor operator optimization and code generation
layer that optimizes ten
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/index.html b/index.html
index 1d9d8484d9..cd7480f674 100644
--- a/index.html
+++ b/index.html
@@ -232,7 +232,6 @@ any hardware platform. TVM provides the following main
features:</p>
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/rss.xml b/rss.xml
index 880add77a7..561d5a3f50 100644
--- a/rss.xml
+++ b/rss.xml
@@ -5,8 +5,8 @@
<description>TVM - </description>
<link>https://tvm.apache.org</link>
<atom:link href="https://tvm.apache.org" rel="self"
type="application/rss+xml" />
- <lastBuildDate>Tue, 03 Dec 2024 13:56:53 +0000</lastBuildDate>
- <pubDate>Tue, 03 Dec 2024 13:56:53 +0000</pubDate>
+ <lastBuildDate>Mon, 27 Jan 2025 10:32:32 -0500</lastBuildDate>
+ <pubDate>Mon, 27 Jan 2025 10:32:32 -0500</pubDate>
<ttl>60</ttl>
@@ -121,7 +121,7 @@ This example shows all of these capabilities:</p>
</description>
<link>https://tvm.apache.org/2021/12/15/tvm-unity</link>
<guid>https://tvm.apache.org/2021/12/15/tvm-unity</guid>
- <pubDate>Wed, 15 Dec 2021 00:00:00 +0000</pubDate>
+ <pubDate>Wed, 15 Dec 2021 00:00:00 -0500</pubDate>
</item>
<item>
@@ -251,7 +251,7 @@ sparse operators, low-precision operators, and dynamic
shape better.</p>
</description>
<link>https://tvm.apache.org/2021/03/03/intro-auto-scheduler</link>
<guid>https://tvm.apache.org/2021/03/03/intro-auto-scheduler</guid>
- <pubDate>Wed, 03 Mar 2021 00:00:00 +0000</pubDate>
+ <pubDate>Wed, 03 Mar 2021 00:00:00 -0500</pubDate>
</item>
<item>
@@ -544,7 +544,7 @@ For more documentation about the Bring Your Own Datatypes
framework
</description>
<link>https://tvm.apache.org/2020/09/26/bring-your-own-datatypes</link>
<guid>https://tvm.apache.org/2020/09/26/bring-your-own-datatypes</guid>
- <pubDate>Sat, 26 Sep 2020 00:00:00 +0000</pubDate>
+ <pubDate>Sat, 26 Sep 2020 00:00:00 -0400</pubDate>
</item>
<item>
@@ -1023,7 +1023,7 @@ Figure 4: After Graph Partitioning.
</description>
<link>https://tvm.apache.org/2020/07/15/how-to-bring-your-own-codegen-to-tvm</link>
<guid>https://tvm.apache.org/2020/07/15/how-to-bring-your-own-codegen-to-tvm</guid>
- <pubDate>Wed, 15 Jul 2020 00:00:00 +0000</pubDate>
+ <pubDate>Wed, 15 Jul 2020 00:00:00 -0400</pubDate>
</item>
<item>
@@ -1546,7 +1546,7 @@ He is a PyTorch core developer and co-authored <a
href="https://www.mann
</description>
<link>https://tvm.apache.org/2020/07/14/bert-pytorch-tvm</link>
<guid>https://tvm.apache.org/2020/07/14/bert-pytorch-tvm</guid>
- <pubDate>Tue, 14 Jul 2020 00:00:00 +0000</pubDate>
+ <pubDate>Tue, 14 Jul 2020 00:00:00 -0400</pubDate>
</item>
<item>
@@ -1855,7 +1855,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix
multiplication microkernel</
</description>
<link>https://tvm.apache.org/2020/06/04/tinyml-how-tvm-is-taming-tiny</link>
<guid>https://tvm.apache.org/2020/06/04/tinyml-how-tvm-is-taming-tiny</guid>
- <pubDate>Thu, 04 Jun 2020 00:00:00 +0000</pubDate>
+ <pubDate>Thu, 04 Jun 2020 00:00:00 -0400</pubDate>
</item>
<item>
@@ -1942,7 +1942,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix
multiplication microkernel</
</description>
<link>https://tvm.apache.org/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu</link>
<guid>https://tvm.apache.org/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu</guid>
- <pubDate>Thu, 14 May 2020 00:00:00 +0000</pubDate>
+ <pubDate>Thu, 14 May 2020 00:00:00 -0400</pubDate>
</item>
<item>
@@ -2044,7 +2044,7 @@ relay_graph = torch_tvm.to_relay(mul, inputs)
</description>
<link>https://tvm.apache.org/2019/05/30/pytorch-frontend</link>
<guid>https://tvm.apache.org/2019/05/30/pytorch-frontend</guid>
- <pubDate>Thu, 30 May 2019 00:00:00 +0000</pubDate>
+ <pubDate>Thu, 30 May 2019 00:00:00 -0400</pubDate>
</item>
<item>
@@ -2188,7 +2188,7 @@ We show that automatic optimization in TVM makes it easy
and flexible to support
</description>
<link>https://tvm.apache.org/2019/04/29/opt-cuda-quantized</link>
<guid>https://tvm.apache.org/2019/04/29/opt-cuda-quantized</guid>
- <pubDate>Mon, 29 Apr 2019 16:00:00 +0000</pubDate>
+ <pubDate>Mon, 29 Apr 2019 12:00:00 -0400</pubDate>
</item>
<item>
@@ -2211,7 +2211,7 @@ We show that automatic optimization in TVM makes it easy
and flexible to support
</description>
<link>https://tvm.apache.org/2019/03/18/tvm-apache-announcement</link>
<guid>https://tvm.apache.org/2019/03/18/tvm-apache-announcement</guid>
- <pubDate>Mon, 18 Mar 2019 00:00:00 +0000</pubDate>
+ <pubDate>Mon, 18 Mar 2019 00:00:00 -0400</pubDate>
</item>
<item>
@@ -2381,7 +2381,7 @@ closure as TVM packed function and invoke the same across
programming language b
</description>
<link>https://tvm.apache.org/2019/01/19/Golang</link>
<guid>https://tvm.apache.org/2019/01/19/Golang</guid>
- <pubDate>Sat, 19 Jan 2019 00:00:00 +0000</pubDate>
+ <pubDate>Sat, 19 Jan 2019 00:00:00 -0500</pubDate>
</item>
<item>
@@ -2542,7 +2542,7 @@ Note: x86 doesn’t support a vectorized popcount for this
microarchitecture, so
</description>
<link>https://tvm.apache.org/2018/12/18/lowprecision-conv</link>
<guid>https://tvm.apache.org/2018/12/18/lowprecision-conv</guid>
- <pubDate>Tue, 18 Dec 2018 00:00:00 +0000</pubDate>
+ <pubDate>Tue, 18 Dec 2018 00:00:00 -0500</pubDate>
</item>
<item>
@@ -2658,7 +2658,7 @@ His research interest is in the general domain of ML on
shared private data, but
</description>
<link>https://tvm.apache.org/2018/10/09/ml-in-tees</link>
<guid>https://tvm.apache.org/2018/10/09/ml-in-tees</guid>
- <pubDate>Tue, 09 Oct 2018 00:00:00 +0000</pubDate>
+ <pubDate>Tue, 09 Oct 2018 00:00:00 -0400</pubDate>
</item>
<item>
@@ -3052,7 +3052,7 @@ for inference deployment. TVM just provides such a
solution.</p>
</description>
<link>https://tvm.apache.org/2018/10/03/auto-opt-all</link>
<guid>https://tvm.apache.org/2018/10/03/auto-opt-all</guid>
- <pubDate>Wed, 03 Oct 2018 00:00:00 +0000</pubDate>
+ <pubDate>Wed, 03 Oct 2018 00:00:00 -0400</pubDate>
</item>
<item>
@@ -3191,7 +3191,7 @@ support, and can be used to implement convenient
converters, such as
</description>
<link>https://tvm.apache.org/2018/08/10/DLPack-Bridge</link>
<guid>https://tvm.apache.org/2018/08/10/DLPack-Bridge</guid>
- <pubDate>Fri, 10 Aug 2018 00:00:00 +0000</pubDate>
+ <pubDate>Fri, 10 Aug 2018 00:00:00 -0400</pubDate>
</item>
<item>
@@ -3333,7 +3333,7 @@ This kind of high-level visibility is essential to system
designers who want to
</description>
<link>https://tvm.apache.org/2018/07/12/vta-release-announcement</link>
<guid>https://tvm.apache.org/2018/07/12/vta-release-announcement</guid>
- <pubDate>Thu, 12 Jul 2018 00:00:00 +0000</pubDate>
+ <pubDate>Thu, 12 Jul 2018 00:00:00 -0400</pubDate>
</item>
<item>
@@ -3599,7 +3599,7 @@ C = tvm.compute(
</description>
<link>https://tvm.apache.org/2018/03/23/nmt-transformer-optimize</link>
<guid>https://tvm.apache.org/2018/03/23/nmt-transformer-optimize</guid>
- <pubDate>Fri, 23 Mar 2018 00:00:00 +0000</pubDate>
+ <pubDate>Fri, 23 Mar 2018 00:00:00 -0400</pubDate>
</item>
<item>
@@ -3715,7 +3715,7 @@ optimizations into the TVM stack.</p>
</description>
<link>https://tvm.apache.org/2018/03/12/webgl</link>
<guid>https://tvm.apache.org/2018/03/12/webgl</guid>
- <pubDate>Mon, 12 Mar 2018 00:00:00 +0000</pubDate>
+ <pubDate>Mon, 12 Mar 2018 00:00:00 -0400</pubDate>
</item>
<item>
@@ -4289,7 +4289,7 @@ advice and <a
href="https://github.com/yzhliu">Yizhi Liu</a&g
</description>
<link>https://tvm.apache.org/2018/01/16/opt-mali-gpu</link>
<guid>https://tvm.apache.org/2018/01/16/opt-mali-gpu</guid>
- <pubDate>Tue, 16 Jan 2018 00:00:00 +0000</pubDate>
+ <pubDate>Tue, 16 Jan 2018 00:00:00 -0500</pubDate>
</item>
<item>
@@ -4517,7 +4517,7 @@ make jvminstall
</description>
<link>https://tvm.apache.org/2017/11/08/android-rpc-introduction</link>
<guid>https://tvm.apache.org/2017/11/08/android-rpc-introduction</guid>
- <pubDate>Wed, 08 Nov 2017 00:00:00 +0000</pubDate>
+ <pubDate>Wed, 08 Nov 2017 00:00:00 -0500</pubDate>
</item>
diff --git a/scripts/task_deploy_asf_site.zsh b/scripts/task_deploy_asf_site.zsh
new file mode 100755
index 0000000000..76c57791fb
--- /dev/null
+++ b/scripts/task_deploy_asf_site.zsh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Deploy the website to the asf-site branch.
+set -e
+set -u
+
+echo "Start to generate and deploy site ..."
+jekyll b
+cp .gitignore .gitignore.bak
+cp .asf.yaml .asf.yaml.bak
+
+# check out the asf-site branch so the freshly built site can be copied in
+git fetch
+git checkout -B asf-site origin/asf-site
+
+# remove all existing files, excluding the docs
+git ls-files | grep -v ^docs | xargs rm -f
+cp .gitignore.bak .gitignore
+cp .asf.yaml.bak .asf.yaml
+
+cp -rf _site/* .
+DATE=`date`
+git add --all && git commit -am "Build at ${DATE}"
+git push origin asf-site
+git checkout main
+echo "Finish deployment at ${DATE}"
diff --git a/tags.html b/tags.html
index 769c221cc3..b52db0cb41 100644
--- a/tags.html
+++ b/tags.html
@@ -168,7 +168,6 @@
<script
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin="anonymous"></script>
<!-- <script src="./assets/js/slick.js"></script> -->
<script src="/assets/js/custome.js"></script>
- <script async
src="https://www.googletagmanager.com/gtag/js?id=UA-75982049-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
diff --git a/tvm b/tvm
new file mode 160000
index 0000000000..bb48a45bcf
--- /dev/null
+++ b/tvm
@@ -0,0 +1 @@
+Subproject commit bb48a45bcfc7d8a40dadca0ab7f589f59fdec374