This is an automated email from the ASF dual-hosted git repository.

ekalda pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new ed8b82c5d6 Remove duplicate "the" word (#15524)
ed8b82c5d6 is described below

commit ed8b82c5d6ea91e70d374988b16b47f64cd83f63
Author: Max Base <maxbasec...@gmail.com>
AuthorDate: Fri Aug 11 09:11:38 2023 +0100

    Remove duplicate "the" word (#15524)
    
    Hi Apache Team,
    
    I'm thrilled to submit my first contribution to Apache! I've addressed the
    issue of duplicate words in the code. Paying attention to such details is vital
    for code quality, and I'm committed to enhancing the project.
    
    -- MAX
---
 gallery/how_to/work_with_microtvm/micro_mlperftiny.py | 2 +-
 include/tvm/relay/transform.h                         | 2 +-
 include/tvm/tir/analysis.h                            | 4 ++--
 include/tvm/tir/schedule/schedule.h                   | 2 +-
 python/tvm/relay/op/contrib/dnnl.py                   | 2 +-
 python/tvm/tir/schedule/schedule.py                   | 2 +-
 python/tvm/topi/hexagon/compute_poolarea.py           | 2 +-
 python/tvm/topi/hexagon/slice_ops/max_pool2d.py       | 2 +-
 src/runtime/hexagon/ops/conv2d_fp16_hvx.cc            | 2 +-
 src/tir/schedule/primitive.h                          | 2 +-
 tests/python/contrib/test_clml/infrastructure.py      | 2 +-
 tests/python/contrib/test_ethosn/test_codegen.py      | 4 ++--
 12 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/gallery/how_to/work_with_microtvm/micro_mlperftiny.py b/gallery/how_to/work_with_microtvm/micro_mlperftiny.py
index d9d178f3bf..6be61789f8 100644
--- a/gallery/how_to/work_with_microtvm/micro_mlperftiny.py
+++ b/gallery/how_to/work_with_microtvm/micro_mlperftiny.py
@@ -166,7 +166,7 @@ EXECUTOR = Executor(
 # Select a Zephyr board
 BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi")
 
-# Get the the full target description using the BOARD
+# Get the full target description using the BOARD
 TARGET = tvm.micro.testing.get_target("zephyr", BOARD)
 
 ######################################################################
diff --git a/include/tvm/relay/transform.h b/include/tvm/relay/transform.h
index 4f5b5d146d..41675001db 100644
--- a/include/tvm/relay/transform.h
+++ b/include/tvm/relay/transform.h
@@ -492,7 +492,7 @@ TVM_DLL Pass SimplifyExprPostAlterOp();
  * A typical custom pass will:
 *  - Find calls to "Compiler" attributed functions with matching compiler name.
 *  - Lower those functions to TIR PrimFuncs.
- *  - Bind those functions into the IRModule under the the functions' "global_symbol" attribute.
+ *  - Bind those functions into the IRModule under the functions' "global_symbol" attribute.
 *  - Replace all calls to those functions with 'call_lowered' to the matching global.
  * Care should be taken to handle multiple calls to the same function.
 * See src/relay/backend/contrib/example_target_hooks/relay_to_tir.cc for an example custom pass.
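
The steps listed above are what a RelayToTIR-style hook performs. As a rough
Python sketch of the surrounding machinery only (the pass name and body here
are hypothetical, not TVM's actual hook), a module-level custom pass is
registered like this:

    import tvm

    @tvm.transform.module_pass(opt_level=0, name="MyCustomLowering")
    def my_custom_lowering(mod, ctx):
        # A real hook would find "Compiler"-attributed functions, lower them
        # to TIR PrimFuncs, bind them under their "global_symbol" attribute,
        # and replace their call sites with call_lowered.
        return mod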
diff --git a/include/tvm/tir/analysis.h b/include/tvm/tir/analysis.h
index f4684231f0..701e2a5143 100644
--- a/include/tvm/tir/analysis.h
+++ b/include/tvm/tir/analysis.h
@@ -281,7 +281,7 @@ TVM_DLL size_t CalculateWorkspaceBytes(const PrimFunc& func,
 
 /*!
 * \brief Calculate the allocated memory per scope in bytes needed inside the TIR PrimFunc
- * \param func The TIR PrimFunc for which the the allocated memory size to be calculated
+ * \param func The TIR PrimFunc for which the allocated memory size to be calculated
 * \return Allocated memory size per scope in bytes inside the PrimFunc returned as a Map with
  * key "main" and a Map of allocated sizes as values.
  */
@@ -289,7 +289,7 @@ TVM_DLL tvm::Map<String, tvm::Map<String, Integer>> CalculateAllocatedBytes(cons
 
 /*!
 * \brief Calculate the allocated memory per scope in bytes for each function inside the module
- * \param mod The IRModule for which the the allocated memory size has to be calculated
+ * \param mod The IRModule for which the allocated memory size has to be calculated
 * \return Allocated memory size per scope in bytes for each function in the IRModule returned as a
            Map with function names as keys and a Map of allocated sizes as values.
  */
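
As a hedged usage sketch of this analysis from Python (the binding name
tvm.tir.analysis.calculate_allocated_bytes and the exact return shape are
assumed from the doc above; the workload is a toy one):

    import tvm
    from tvm import te

    # A trivial PrimFunc: B[i] = A[i] + 1.0
    A = te.placeholder((16,), dtype="float32", name="A")
    B = te.compute((16,), lambda i: A[i] + 1.0, name="B")
    func = te.create_prim_func([A, B])

    # Assumed binding; per the doc, the result is keyed by "main" with a map
    # of per-scope allocated byte counts as the value.
    sizes = tvm.tir.analysis.calculate_allocated_bytes(func)
    print(sizes)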
diff --git a/include/tvm/tir/schedule/schedule.h b/include/tvm/tir/schedule/schedule.h
index e50bbb779a..273912ed1f 100644
--- a/include/tvm/tir/schedule/schedule.h
+++ b/include/tvm/tir/schedule/schedule.h
@@ -480,7 +480,7 @@ class ScheduleNode : public runtime::Object {
                                    const String& storage_scope, const IndexMap& index_map) = 0;
   /*!
   * \brief Create 2 blocks that read&write a buffer region into a read/write cache.
-   * It requires the the target block both read & write the target buffer.
+   * It requires the target block both read & write the target buffer.
    * \param block_rv The target block operates on the target buffer.
    * \param read_buffer_index The index of the buffer in block's read region.
    * \param storage_scope The target storage scope
diff --git a/python/tvm/relay/op/contrib/dnnl.py b/python/tvm/relay/op/contrib/dnnl.py
index 71a126ae8f..aa54dc7c19 100644
--- a/python/tvm/relay/op/contrib/dnnl.py
+++ b/python/tvm/relay/op/contrib/dnnl.py
@@ -1165,7 +1165,7 @@ class ResNetV1Rewrite(DFPatternCallback):
 
 
 def rewrite_resnetv1(mod):
-    """Rewrite the the ResNetV1 downsize block to reduce the computation 
complexity."""
+    """Rewrite the ResNetV1 downsize block to reduce the computation 
complexity."""
     mod["main"] = rewrite(ResNetV1Rewrite(), mod["main"])
     return mod
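
rewrite_resnetv1 is driven by a DFPatternCallback. As a deliberately tiny
sketch of that mechanism (this pattern is illustrative, not the actual
ResNetV1 downsize pattern):

    from tvm import relay
    from tvm.relay.dataflow_pattern import DFPatternCallback, is_op, wildcard, rewrite

    class CollapseDoubleRelu(DFPatternCallback):
        """Rewrite relu(relu(x)) -> relu(x)."""

        def __init__(self):
            super().__init__()
            self.x = wildcard()
            self.pattern = is_op("nn.relu")(is_op("nn.relu")(self.x))

        def callback(self, pre, post, node_map):
            return relay.nn.relu(node_map[self.x][0])

    # Used the same way as ResNetV1Rewrite above:
    # new_expr = rewrite(CollapseDoubleRelu(), expr)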
 
diff --git a/python/tvm/tir/schedule/schedule.py b/python/tvm/tir/schedule/schedule.py
index 6c42f15a2f..e82cd251d3 100644
--- a/python/tvm/tir/schedule/schedule.py
+++ b/python/tvm/tir/schedule/schedule.py
@@ -1617,7 +1617,7 @@ class Schedule(Object):
         storage_scope: str,
     ) -> List[BlockRV]:
         """Create blocks that reads & write a buffer region into a cache block.
-        It requires the the target block both read & write the target buffer.
+        It requires the target block both read & write the target buffer.
         Mainly for inplace operation.
 
         Parameters
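
A hedged usage sketch of cache_inplace on a toy in-place workload (the
TVMScript module and block name are illustrative; as the docstring says, the
target block must both read and write the cached buffer):

    import tvm
    from tvm.script import tir as T

    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def main(A: T.Buffer((16,), "float32")):
            for i in range(16):
                with T.block("update"):
                    vi = T.axis.spatial(16, i)
                    A[vi] = A[vi] + 1.0

    sch = tvm.tir.Schedule(Module)
    block = sch.get_block("update")
    # A is both read and written by "update"; cache it in "local" scope.
    blocks = sch.cache_inplace(block, read_buffer_index=0, storage_scope="local")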
diff --git a/python/tvm/topi/hexagon/compute_poolarea.py b/python/tvm/topi/hexagon/compute_poolarea.py
index 6ba50c4a96..0e1130edd8 100644
--- a/python/tvm/topi/hexagon/compute_poolarea.py
+++ b/python/tvm/topi/hexagon/compute_poolarea.py
@@ -114,7 +114,7 @@ def compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left):
     # data boundary, we should move the edge to the right until we get to the first dilated kernel
     # point inside the input data boundary.
     # The third row of figures shows how this row adjustment can solve the problem.
-    # So the problem is reduced to finding the the first dilated kernel point inside the data
+    # So the problem is reduced to finding the first dilated kernel point inside the data
     # boundary. For that, we can find the number of dilated points which are mapped to the padded
     # area and find the location of the next one which should be inside the input data:
     #    num_of_prev_points = (pad_top - i * sh - 1) // dh
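
A worked instance of that formula with illustrative numbers (kernel point k
sits at input row k * dh - pad_top for output row i = 0, sh = 1):

    pad_top, i, sh, dh = 3, 0, 1, 2
    # Rows for k = 0, 1, 2 are -3, -1, 1: the first two fall in the padding.
    num_of_prev_points = (pad_top - i * sh - 1) // dh     # = 1
    first_inside = num_of_prev_points + 1                 # k = 2
    assert first_inside * dh - (pad_top - i * sh) == 1    # row 1, inside the data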
diff --git a/python/tvm/topi/hexagon/slice_ops/max_pool2d.py b/python/tvm/topi/hexagon/slice_ops/max_pool2d.py
index ec546e95ba..0691165795 100644
--- a/python/tvm/topi/hexagon/slice_ops/max_pool2d.py
+++ b/python/tvm/topi/hexagon/slice_ops/max_pool2d.py
@@ -157,7 +157,7 @@ def STIR_schedule_nhwc_8h2w32c2w_nhwc_8h8w32c(
     #
     # 3) Ideally, the innermost loop variable will iterate only over the output
     #    tensor's fastest-changing indices and nothing else.  But in our case,
-    #    our two innermost loops correspond to the the max operator's reduction axes.
+    #    our two innermost loops correspond to the max operator's reduction axes.
     #
     # Finding a good way to satisfy all of these requirements at the same time is
     # left for future work.
diff --git a/src/runtime/hexagon/ops/conv2d_fp16_hvx.cc b/src/runtime/hexagon/ops/conv2d_fp16_hvx.cc
index 53ea0868ad..6d4a4839fb 100644
--- a/src/runtime/hexagon/ops/conv2d_fp16_hvx.cc
+++ b/src/runtime/hexagon/ops/conv2d_fp16_hvx.cc
@@ -255,7 +255,7 @@ void conv_layer_fp16_hvx(DLTensor& cr_out, const DLTensor& cr_act,  // NOLINT(*)
    * height to finally get 32 elements representing 32 output channels.
    *
   * Since the output block also has the 8h2w32c2w format, the 32 elements of the next element
-   * along the width is also added into the the same vector such that the first 32 channel elements
+   * along the width is also added into the same vector such that the first 32 channel elements
   * occupy the even lanes and the next 32 occupy the odd lanes to form a single 64-element vector
    * which is then stored
    */
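
The even/odd lane interleaving described above can be pictured with a small
NumPy sketch (illustrative only; the real kernel does this with HVX vector
intrinsics):

    import numpy as np

    a = np.arange(32, dtype=np.float16)        # 32 channels at width w
    b = np.arange(32, 64, dtype=np.float16)    # 32 channels at width w + 1
    v = np.empty(64, dtype=np.float16)
    v[0::2] = a   # first 32 channel elements occupy the even lanes
    v[1::2] = b   # next 32 occupy the odd lanes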
diff --git a/src/tir/schedule/primitive.h b/src/tir/schedule/primitive.h
index 4ae65ddc17..fe6280e1c4 100644
--- a/src/tir/schedule/primitive.h
+++ b/src/tir/schedule/primitive.h
@@ -374,7 +374,7 @@ TVM_DLL StmtSRef ReindexCacheWrite(ScheduleState self, const StmtSRef& block_sre
 /*!
  *!
 * \brief Create 2 blocks that read&write a buffer region into a read/write cache.
- * It requires the the target block both read & write the target buffer.
+ * It requires the target block both read & write the target buffer.
  * \param self The state of the schedule
  * \param block_sref The target block operates on the target buffer.
  * \param read_buffer_index The index of the buffer in block's read region.
diff --git a/tests/python/contrib/test_clml/infrastructure.py b/tests/python/contrib/test_clml/infrastructure.py
index 42dcf083d0..f0a513cc17 100644
--- a/tests/python/contrib/test_clml/infrastructure.py
+++ b/tests/python/contrib/test_clml/infrastructure.py
@@ -45,7 +45,7 @@ class Device:
 
     Notes
     -----
-        The test configuration will be loaded once when the the class is created. If the configuration
+        The test configuration will be loaded once when the class is created. If the configuration
         changes between tests, any changes will not be picked up.
 
     Parameters
diff --git a/tests/python/contrib/test_ethosn/test_codegen.py b/tests/python/contrib/test_ethosn/test_codegen.py
index 4a40d062af..3759d83b1e 100644
--- a/tests/python/contrib/test_ethosn/test_codegen.py
+++ b/tests/python/contrib/test_ethosn/test_codegen.py
@@ -71,7 +71,7 @@ def test_experimental_compiler(capfd):
     tei.build(mod, {}, True, additional_config_args=additional_config_args)
 
     # Check for hints that the experimental compiler was activated.
-    # The support library logs a warning to say the the experimental
+    # The support library logs a warning to say the experimental
     # compiler is in use. Check that this warning was logged.
     captured = capfd.readouterr()
     assert (
@@ -98,7 +98,7 @@ def test_without_experimental_compiler(capfd):
     tei.build(mod, {}, True, additional_config_args=additional_config_args)
 
     # Check for hints that the experimental compiler was activated.
-    # The support library logs a warning to say the the experimental
+    # The support library logs a warning to say the experimental
     # compiler is in use. Check that this warning was logged.
     captured = capfd.readouterr()
     assert (
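
Both tests use pytest's capfd fixture to capture output written at the file
descriptor level. A minimal sketch of that pattern (the warning text here is
a stand-in, not the support library's actual message):

    def test_warning_is_logged(capfd):
        print("experimental compiler in use")  # stand-in for the library's log
        captured = capfd.readouterr()
        assert "experimental compiler" in captured.out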
