This is an automated email from the ASF dual-hosted git repository.

comaniac pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new a6cbe0d13e [python][docs] fix docstring / comment typos (#11608)
a6cbe0d13e is described below

commit a6cbe0d13eacbdcb6471caade4baa4b02926a490
Author: Christian Convey <ccon...@octoml.ai>
AuthorDate: Thu Jun 23 13:41:59 2022 -0400

    [python][docs] fix docstring / comment typos (#11608)
---
 python/tvm/auto_scheduler/cost_model/xgb_model.py | 10 +++++-----
 python/tvm/auto_scheduler/task_scheduler.py       | 12 ++++++------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/python/tvm/auto_scheduler/cost_model/xgb_model.py b/python/tvm/auto_scheduler/cost_model/xgb_model.py
index 3cf65954be..a4e39b9061 100644
--- a/python/tvm/auto_scheduler/cost_model/xgb_model.py
+++ b/python/tvm/auto_scheduler/cost_model/xgb_model.py
@@ -98,8 +98,8 @@ class XGBModel(PythonBasedModel):
         The random seed
     model_file: Optional[str]
         If is not None, save model to this file after every update.
-    adapative_training: bool = False
-        Whether to use adapatie training, which reduces the training frequency when there are
+    adaptive_training: bool = False
+        Whether to use adaptive training, which reduces the training frequency when there are
         too many logs.
     """
 
@@ -109,7 +109,7 @@ class XGBModel(PythonBasedModel):
         num_warmup_sample=100,
         seed=None,
         model_file=None,
-        adapative_training=False,
+        adaptive_training=False,
     ):
         global xgb
         try:
@@ -141,7 +141,7 @@ class XGBModel(PythonBasedModel):
         self.num_warmup_sample = num_warmup_sample
         self.verbose_eval = verbose_eval
         self.model_file = model_file
-        self.adapative_training = adapative_training
+        self.adaptive_training = adaptive_training
 
         super().__init__()
 
@@ -169,7 +169,7 @@ class XGBModel(PythonBasedModel):
         self.results.extend(results)
 
         if (
-            self.adapative_training
+            self.adaptive_training
             and len(self.inputs) - self.last_train_length < self.last_train_length / 5
         ):
             # Set a training threshold related to `last_train_length` to reduce the training
diff --git a/python/tvm/auto_scheduler/task_scheduler.py b/python/tvm/auto_scheduler/task_scheduler.py
index 762c507359..c23c9b3c0c 100644
--- a/python/tvm/auto_scheduler/task_scheduler.py
+++ b/python/tvm/auto_scheduler/task_scheduler.py
@@ -47,7 +47,7 @@ def make_search_policies(
     verbose,
     load_model_file=None,
     load_log_file=None,
-    adapative_training=False,
+    adaptive_training=False,
 ):
     """Make a list of search policies for a list of search tasks.
     It creates one policy per task.
@@ -71,7 +71,7 @@ def make_search_policies(
     load_log_file: Optional[str]
         Load measurement records from this file. If it is not None, the status of the
         task scheduler, search policies and cost models will be restored according to this file.
-    adapative_training: bool = False
+    adaptive_training: bool = False
         Option used by XGBModel to reduce the model training frequency when there're too
         many logs.
 
@@ -89,7 +89,7 @@ def make_search_policies(
             cost_model = XGBModel(
                 num_warmup_sample=len(tasks) * num_measures_per_round,
                 model_file=load_model_file,
-                adapative_training=adapative_training,
+                adaptive_training=adaptive_training,
             )
             if load_model_file and os.path.isfile(load_model_file):
                 logger.info("TaskScheduler: Load pretrained model...")
@@ -283,7 +283,7 @@ class TaskScheduler:
         tune_option,
         search_policy="default",
         search_policy_params=None,
-        adapative_training=False,
+        adaptive_training=False,
         per_task_early_stopping=None,
     ):
         """Tune a batch of tasks together.
@@ -300,7 +300,7 @@ class TaskScheduler:
             "sketch.random" for SketchPolicy + RandomModel.
         search_policy_params : Optional[Dict[str, Any]]
             The parameters of the search policy
-        adapative_training : bool = False
+        adaptive_training : bool = False
             Option used by XGBModel to reduce the model training frequency when there're
             too many logs.
         per_task_early_stopping : Optional[int]
@@ -347,7 +347,7 @@ class TaskScheduler:
             tune_option.verbose,
             self.load_model_file,
             self.load_log_file,
-            adapative_training,
+            adaptive_training,
         )
 
         # do a round robin first to warm up
