This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 9ffffae896 fix: Complete ICHECK update across codebase (#18858)
9ffffae896 is described below

commit 9ffffae89617f8ead94cf599eaec25b62b5372ac
Author: Ruihang Lai <[email protected]>
AuthorDate: Sun Mar 1 06:54:06 2026 -0500

    fix: Complete ICHECK update across codebase (#18858)
    
    Replace all references to the old `ICHECK` / `ICHECK_EQ` macro names
    with their `TVM_FFI_ICHECK` / `TVM_FFI_ICHECK_EQ` equivalents across
    source files, comments, and documentation, completing the macro rename
    introduced by the TVM FFI refactor.
---
 3rdparty/tensorrt_llm/custom_allreduce_kernels.cu            | 8 ++++----
 apps/ios_rpc/tvmrpc/RPCArgs.mm                               | 2 +-
 apps/ios_rpc/tvmrpc/RPCServer.mm                             | 2 +-
 apps/ios_rpc/tvmrpc/TVMRuntime.mm                            | 2 +-
 docs/arch/pass_infra.rst                                     | 6 +++---
 docs/contribute/error_handling.rst                           | 4 ++--
 docs/errors.rst                                              | 4 ++--
 src/arith/iter_affine_map.cc                                 | 2 +-
 src/relax/transform/bind_params.cc                           | 2 +-
 src/runtime/contrib/coreml/coreml_runtime.mm                 | 7 ++++---
 src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm100.cuh | 2 +-
 src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm90.cuh  | 2 +-
 src/runtime/contrib/cutlass/gemm_runner.cuh                  | 2 +-
 src/target/llvm/codegen_hexagon.cc                           | 2 +-
 src/tir/transform/storage_rewrite.cc                         | 2 +-
 src/tir/transform/vectorize_loop.cc                          | 2 +-
 16 files changed, 26 insertions(+), 25 deletions(-)

diff --git a/3rdparty/tensorrt_llm/custom_allreduce_kernels.cu 
b/3rdparty/tensorrt_llm/custom_allreduce_kernels.cu
index d26baea342..dc5ff6d139 100644
--- a/3rdparty/tensorrt_llm/custom_allreduce_kernels.cu
+++ b/3rdparty/tensorrt_llm/custom_allreduce_kernels.cu
@@ -269,7 +269,7 @@ inline int divUp(int a, int b) { return (a + b - 1) / b; }
 
 std::tuple<int, int> kernelLaunchConfig(AllReduceStrategyType algo, 
AllReduceParams& param,
                                         size_t elts_per_thread) {
-  ICHECK(param.elts_total % elts_per_thread == 0);
+  TVM_FFI_ICHECK(param.elts_total % elts_per_thread == 0);
 
   int blocks_per_grid = 1, threads_per_block = DEFAULT_BLOCK_SIZE;
 
@@ -291,11 +291,11 @@ std::tuple<int, int> 
kernelLaunchConfig(AllReduceStrategyType algo, AllReducePar
     }
     case AllReduceStrategyType::TWOSHOT: {  // two stage all reduce algo
       const size_t elts_per_rank = param.elts_total / param.ranks_per_node;
-      ICHECK(elts_per_rank % elts_per_thread == 0);
+      TVM_FFI_ICHECK(elts_per_rank % elts_per_thread == 0);
 
       size_t total_threads = elts_per_rank / elts_per_thread;
       total_threads = WARP_SIZE * ((total_threads + WARP_SIZE - 1) / 
WARP_SIZE);
-      ICHECK(total_threads % WARP_SIZE == 0);
+      TVM_FFI_ICHECK(total_threads % WARP_SIZE == 0);
 
       while (total_threads % blocks_per_grid != 0 ||
              total_threads / blocks_per_grid > DEFAULT_BLOCK_SIZE) {
@@ -343,7 +343,7 @@ void dispatchARKernels(AllReduceStrategyType algo, 
AllReduceParams& param, int b
 template <typename T>
 void invokeOneOrTwoShotAllReduceKernel(AllReduceParams& param, 
AllReduceStrategyType strat,
                                        cudaStream_t stream) {
-  ICHECK(strat == AllReduceStrategyType::ONESHOT || strat == 
AllReduceStrategyType::TWOSHOT);
+  TVM_FFI_ICHECK(strat == AllReduceStrategyType::ONESHOT || strat == 
AllReduceStrategyType::TWOSHOT);
   auto last_error = cudaGetLastError();
   if (last_error != cudaSuccess) {
     LOG(INFO) << "cuda error:" << cudaGetErrorString(last_error);
diff --git a/apps/ios_rpc/tvmrpc/RPCArgs.mm b/apps/ios_rpc/tvmrpc/RPCArgs.mm
index 7f5d68d7dd..5368ac6178 100644
--- a/apps/ios_rpc/tvmrpc/RPCArgs.mm
+++ b/apps/ios_rpc/tvmrpc/RPCArgs.mm
@@ -114,7 +114,7 @@ string GetCmdOption(int argc, char* argv[], string option, 
bool key = false) {
         return cmd;
       }
       // We assume "=" is the end of option.
-      ICHECK_EQ(*option.rbegin(), '=');
+      TVM_FFI_ICHECK_EQ(*option.rbegin(), '=');
       cmd = arg.substr(arg.find('=') + 1);
       return cmd;
     }
diff --git a/apps/ios_rpc/tvmrpc/RPCServer.mm b/apps/ios_rpc/tvmrpc/RPCServer.mm
index 2fa0c0d473..3801d49441 100644
--- a/apps/ios_rpc/tvmrpc/RPCServer.mm
+++ b/apps/ios_rpc/tvmrpc/RPCServer.mm
@@ -63,7 +63,7 @@ using FEventHandler = ffi::Function;
 FEventHandler CreateServerEventHandler(NSOutputStream* outputStream, 
std::string name,
                                        std::string remote_key) {
   auto event_handler_factory = 
tvm::ffi::Function::GetGlobal("rpc.CreateEventDrivenServer");
-  ICHECK(event_handler_factory.has_value())
+  TVM_FFI_ICHECK(event_handler_factory.has_value())
       << "You are using tvm_runtime module built without RPC support. "
       << "Please rebuild it with USE_RPC flag.";
 
diff --git a/apps/ios_rpc/tvmrpc/TVMRuntime.mm 
b/apps/ios_rpc/tvmrpc/TVMRuntime.mm
index 5dfff0cd86..65d6d76303 100644
--- a/apps/ios_rpc/tvmrpc/TVMRuntime.mm
+++ b/apps/ios_rpc/tvmrpc/TVMRuntime.mm
@@ -100,7 +100,7 @@ class UnsignedDSOLoader final : public Library {
   }
   void Init(const std::string& name) {
     lib_handle_ = custom_dlopen(name.c_str(), RTLD_NOW | RTLD_LOCAL);
-    ICHECK(lib_handle_ != nullptr)
+    TVM_FFI_ICHECK(lib_handle_ != nullptr)
         << "Failed to load dynamic shared library " << name << " " << 
custom_dlerror();
   }
 
diff --git a/docs/arch/pass_infra.rst b/docs/arch/pass_infra.rst
index 6c840beeb4..a1019c88ad 100644
--- a/docs/arch/pass_infra.rst
+++ b/docs/arch/pass_infra.rst
@@ -277,12 +277,12 @@ order that they were appended to the pass list.
                                       const PassContext& pass_ctx) const {
       Module mod = module;
       for (const Pass& pass : passes) {
-        ICHECK(pass.defined()) << "Found undefined pass for optimization.";
+        TVM_FFI_ICHECK(pass.defined()) << "Found undefined pass for 
optimization.";
         const PassInfo& pass_info = pass->Info();
         if (!PassEnabled(pass_info))  continue;
         for (const auto& it : pass_info->required) {
           const auto* name = it.as<tvm::ir::StringImm>();
-          ICHECK(name);
+          TVM_FFI_ICHECK(name);
           mod = GetPass(name->value)(mod, pass_ctx);
         }
         mod = pass(mod, pass_ctx);
@@ -307,7 +307,7 @@ pass is registered with an API endpoint as we will show 
later.
       using tvm::runtime::Registry;
       std::string fpass_name = "relax.transform." + pass_name;
       const std::optional<tvm::ffi::Function> f = 
tvm::ffi::Function::GetGlobal(fpass_name);
-      ICHECK(f.has_value()) << "Cannot find " << fpass_name
+      TVM_FFI_ICHECK(f.has_value()) << "Cannot find " << fpass_name
                             << "to create the pass " << pass_name;
       return (*f)();
     }
diff --git a/docs/contribute/error_handling.rst 
b/docs/contribute/error_handling.rst
index ee5f0c100e..a6049419ad 100644
--- a/docs/contribute/error_handling.rst
+++ b/docs/contribute/error_handling.rst
@@ -42,14 +42,14 @@ raise an error of the corresponding type.
 Note that you do not have to add a new type
 :py:class:`tvm.error.TVMError` will be raised by default when
 there is no error type prefix in the message.
-This mechanism works for both ``LOG(FATAL)`` and ``ICHECK`` macros.
+This mechanism works for both ``LOG(FATAL)`` and ``TVM_FFI_ICHECK`` macros.
 The following code gives an example on how to do so.
 
 .. code:: c
 
   // src/api_test.cc
   void ErrorTest(int x, int y) {
-    ICHECK_EQ(x, y) << "ValueError: expect x and y to be equal."
+    TVM_FFI_ICHECK_EQ(x, y) << "ValueError: expect x and y to be equal."
     if (x == 1) {
       LOG(FATAL) << "InternalError: cannot reach here";
     }
diff --git a/docs/errors.rst b/docs/errors.rst
index 42ffc88c1b..188cb5811a 100644
--- a/docs/errors.rst
+++ b/docs/errors.rst
@@ -36,8 +36,8 @@ Where do these errors come from?
 
 This error is caused by an internal invariant being violated during TVM's
 execution. On a technical level, the message is generated by the
-``ICHECK`` macro, found in ``include/tvm/runtime/logging.h``.
-The ``ICHECK`` macro is used in many places in the TVM code to assert
+``TVM_FFI_ICHECK`` macro, found in ``include/tvm/ffi/error.h`` in `TVM-FFI 
<https://github.com/tlc-pack/tvm-ffi>`_.
+The ``TVM_FFI_ICHECK`` macro is used in many places in the TVM code to assert
 some condition is true during execution; any time the assertion fails, TVM
 will exit with the error message shown above.
 
diff --git a/src/arith/iter_affine_map.cc b/src/arith/iter_affine_map.cc
index 1779c42583..d522f6b617 100644
--- a/src/arith/iter_affine_map.cc
+++ b/src/arith/iter_affine_map.cc
@@ -376,7 +376,7 @@ class IterMapRewriter : public ExprMutator {
    * It is not an error for IterMapRewriter to receive an expression that
    * cannot be represented as an IterSumExpr.  In these cases,
    * IterMapRewriter returns the unrepresentable portions of the TIR graph
-   * without modification.  As a result, the usual ICHECK or 
TVM_FFI_THROW(InternalError)
+   * without modification.  As a result, the usual TVM_FFI_ICHECK or 
TVM_FFI_THROW(InternalError)
    * macros cannot be used.  Instead, ErrorLogger(this) can be used to
    * report an unrepresentable TIR graph, which may be used in error
    * messages at the calling scope.
diff --git a/src/relax/transform/bind_params.cc 
b/src/relax/transform/bind_params.cc
index ea93a77ee7..35d393a918 100644
--- a/src/relax/transform/bind_params.cc
+++ b/src/relax/transform/bind_params.cc
@@ -38,7 +38,7 @@ void MatchSymbolicVar(const Expr& arg, const Expr& constant,
       << "The struct info of the bound parameter is expected to be 
TensorStructInfo, but got: "
       << GetStructInfo(arg);
   auto opt_const_sinfo = MatchStructInfo<TensorStructInfo>(constant);
-  // As the constant is generated by internal codes, we use ICHECK here.
+  // As the constant is generated by internal codes, we use TVM_FFI_ICHECK 
here.
   TVM_FFI_ICHECK(opt_const_sinfo)
       << "The struct info of the bound weight is expected to be 
TensorStructInfo, but got: "
       << GetStructInfo(constant);
diff --git a/src/runtime/contrib/coreml/coreml_runtime.mm 
b/src/runtime/contrib/coreml/coreml_runtime.mm
index 045ac482a6..5c0234e772 100644
--- a/src/runtime/contrib/coreml/coreml_runtime.mm
+++ b/src/runtime/contrib/coreml/coreml_runtime.mm
@@ -61,7 +61,7 @@ void CoreMLModel::SetInput(const std::string& key, DLTensor* 
data_in) {
 
   MLMultiArray* dest = [[MLMultiArray alloc] initWithShape:shape 
dataType:dataType error:nil];
 
-  ICHECK(ffi::IsContiguous(*data_in));
+  TVM_FFI_ICHECK(ffi::IsContiguous(*data_in));
   memcpy(dest.dataPointer, data_in->data, size);
 
   NSString* nsKey = [NSString stringWithUTF8String:key.c_str()];
@@ -158,7 +158,8 @@ ffi::Optional<ffi::Function> 
CoreMLRuntime::GetFunction(const ffi::String& name)
 
       // Copy input tensors to corresponding data entries.
       for (auto i = 0; i < args.size() - 1; ++i) {
-        ICHECK(args[i].type_code() == kTVMDLTensorHandle || 
args[i].type_code() == kTVMTensorHandle)
+        TVM_FFI_ICHECK(args[i].type_code() == kTVMDLTensorHandle ||
+                       args[i].type_code() == kTVMTensorHandle)
             << "Expect Tensor or DLTensor as inputs\n";
         if (args[i].type_code() == kTVMDLTensorHandle || args[i].type_code() 
== kTVMTensorHandle) {
           model_->SetInput([input_names[i] UTF8String], args[i]);
@@ -247,7 +248,7 @@ ffi::Module CoreMLRuntimeLoadFromBytes(const ffi::Bytes& 
bytes) {
   NSString* model_path = [tempDir stringByAppendingPathComponent:dirname];
   NSURL* url = [NSURL fileURLWithPath:model_path];
   BOOL res = [dirWrapper writeToURL:url options:0 originalContentsURL:nil 
error:nil];
-  ICHECK(res) << "Failed to create model directory " << [model_path 
UTF8String];
+  TVM_FFI_ICHECK(res) << "Failed to create model directory " << [model_path 
UTF8String];
 
   auto exec = ffi::make_object<CoreMLRuntime>();
   exec->Init(symbol, [model_path UTF8String]);
diff --git a/src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm100.cuh 
b/src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm100.cuh
index f38664915d..34a50fdc54 100644
--- a/src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm100.cuh
+++ b/src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm100.cuh
@@ -132,7 +132,7 @@ struct CutlassGroupGemmRunner {
     typename Gemm::Arguments arguments;
     decltype(arguments.epilogue.thread) fusion_args;
     [&]() {
-      ICHECK(alpha.index() == beta.index()) << "alpha and beta must have the 
same type";
+      TVM_FFI_ICHECK(alpha.index() == beta.index()) << "alpha and beta must 
have the same type";
       if (std::holds_alternative<ElementAccumulator>(alpha)) {
         fusion_args.alpha = std::get<ElementAccumulator>(alpha);
         fusion_args.beta = std::get<ElementAccumulator>(beta);
diff --git a/src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm90.cuh 
b/src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm90.cuh
index 246063ca03..b894cbc84e 100644
--- a/src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm90.cuh
+++ b/src/runtime/contrib/cutlass/fp16_group_gemm_runner_sm90.cuh
@@ -132,7 +132,7 @@ struct CutlassGroupGemmRunner {
                       uint8_t* workspace, int64_t workspace_size, int 
num_groups, ScaleType alpha,
                       ScaleType beta, cudaStream_t stream) {
     typename Gemm::EpilogueOutputOp::Params epilogue_params = [&]() {
-      ICHECK(alpha.index() == beta.index()) << "alpha and beta must have the 
same type";
+      TVM_FFI_ICHECK(alpha.index() == beta.index()) << "alpha and beta must 
have the same type";
       if (std::holds_alternative<ElementAccumulator>(alpha)) {
         return typename 
Gemm::EpilogueOutputOp::Params{std::get<ElementAccumulator>(alpha),
                                                        
std::get<ElementAccumulator>(beta)};
diff --git a/src/runtime/contrib/cutlass/gemm_runner.cuh 
b/src/runtime/contrib/cutlass/gemm_runner.cuh
index c664f6cf6f..0ca1d1be02 100644
--- a/src/runtime/contrib/cutlass/gemm_runner.cuh
+++ b/src/runtime/contrib/cutlass/gemm_runner.cuh
@@ -116,7 +116,7 @@ struct CutlassGemmRunner {
                                        //  {epilogue_params, ptr_C, *stride_C, 
ptr_D, *stride_D},
                                        hw_info};
 
-    ICHECK(alpha.index() == beta.index()) << "alpha and beta must have the 
same type";
+    TVM_FFI_ICHECK(alpha.index() == beta.index()) << "alpha and beta must have 
the same type";
     if (std::holds_alternative<ElementAccumulator>(alpha)) {
       arguments.epilogue.thread.alpha = std::get<ElementAccumulator>(alpha);
       arguments.epilogue.thread.beta = std::get<ElementAccumulator>(beta);
diff --git a/src/target/llvm/codegen_hexagon.cc 
b/src/target/llvm/codegen_hexagon.cc
index 489cbd04e0..4babf4b733 100644
--- a/src/target/llvm/codegen_hexagon.cc
+++ b/src/target/llvm/codegen_hexagon.cc
@@ -536,7 +536,7 @@ ffi::Module BuildHexagon(IRModule mod, Target target) {
     TVM_FFI_ICHECK(!file.has_error()) << file.error().message();
     // If there is an error, execution will never get here, but return
     // {ec, name} anyway to allow caller to handle error conditions.
-    // This way the "ICHECK" above can be removed with minimal effort.
+    // This way the "TVM_FFI_ICHECK" above can be removed with minimal effort.
     return std::make_pair(file.error(), std::string(file_name.c_str()));
   };
 
diff --git a/src/tir/transform/storage_rewrite.cc 
b/src/tir/transform/storage_rewrite.cc
index e7acd28533..48f9cb67a0 100644
--- a/src/tir/transform/storage_rewrite.cc
+++ b/src/tir/transform/storage_rewrite.cc
@@ -1280,7 +1280,7 @@ class VectorTypeAccessChecker : public StmtExprVisitor {
     // PointerValueTypeRewrite in BuildSPIRV.  The rewrite_store_load = false 
is
     // necessary because the C-based codegens do not yet support vectorized
     // pointer types (e.g. float16x4*).  Once they do, this if statement should
-    // instead be replaced by the below ICHECK_EQ.
+    // instead be replaced by the below TVM_FFI_ICHECK_EQ.
     if (index_lanes * var_info.element_dtype.lanes() != value_dtype.lanes()) {
       TVM_FFI_ICHECK_EQ(index_lanes, value_dtype.lanes());
       lanes_used = 1;
diff --git a/src/tir/transform/vectorize_loop.cc 
b/src/tir/transform/vectorize_loop.cc
index 719d27e743..ec6e0447f4 100644
--- a/src/tir/transform/vectorize_loop.cc
+++ b/src/tir/transform/vectorize_loop.cc
@@ -785,7 +785,7 @@ class Vectorizer : public StmtMutator, public 
ExprFunctor<PrimExpr(const PrimExp
     bool cond_need_scalarize = false;
     std::swap(cond_need_scalarize, need_scalarize_);
     // temp clear need_scalarize flag, so VisitStmt
-    // won't trigger an ICHECK eror
+    // won't trigger a TVM_FFI_ICHECK error
     Stmt then_case = this->VisitStmt(op->then_case);
     ffi::Optional<Stmt> else_case = std::nullopt;
     if (op->else_case) {

Reply via email to