jcf94 commented on a change in pull request #6663:
URL: https://github.com/apache/incubator-tvm/pull/6663#discussion_r503867002



##########
File path: python/tvm/auto_scheduler/task_scheduler.py
##########
@@ -0,0 +1,452 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name
+
+""" The task scheduler that allocates the time resources when tuning multiple 
tasks together
+
+The details of the "gradient" strategy below can be found in the section 6 of 
this paper:
+L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating 
High-Performance Tensor
+Programs for Deep Learning." (OSDI 2020).
+"""
+
+import time
+import math
+import logging
+
+import numpy as np
+
+from .search_policy import SearchPolicy, SketchPolicy
+from .cost_model import RandomModel, XGBModel
+from .utils import array_mean, to_str_round
+from .measure import ProgramMeasurer
+from .measure_record import RecordReader
+
+logger = logging.getLogger("auto_scheduler")
+
+
+class TaskScheduler:
+    """Allocate the time resources when tuning multiple tasks together
+
+    Parameters
+    ----------
+    tasks: List[SearchTask]
+        The list of all tasks
+    objective_func: Callable[List[float] -> float]
+        The objective function to be optimized
+    """
+
+    def __init__(self, tasks, objective_func):

Review comment:
   ```suggestion
       def __init__(self, tasks, objective_func=None):
   ```
   In addition to Cody's last comment.
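   For illustration, a minimal usage sketch assuming the suggested default is applied (`tasks` and `weights` here are placeholders, not names from this PR):

```python
# With objective_func=None, __init__ falls back to `sum`, so the scheduler
# simply minimizes the total latency over all tasks.
scheduler = TaskScheduler(tasks)

# A custom objective can still be supplied, e.g. a weighted sum of task latencies.
weighted = TaskScheduler(
    tasks,
    objective_func=lambda costs: sum(c * w for c, w in zip(costs, weights)),
)
```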

##########
File path: python/tvm/auto_scheduler/task_scheduler.py
##########
@@ -0,0 +1,452 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name
+
+""" The task scheduler that allocates the time resources when tuning multiple 
tasks together
+
+The details of the "gradient" strategy below can be found in the section 6 of 
this paper:
+L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating 
High-Performance Tensor
+Programs for Deep Learning." (OSDI 2020).
+"""
+
+import time
+import math
+import logging
+
+import numpy as np
+
+from .search_policy import SearchPolicy, SketchPolicy
+from .cost_model import RandomModel, XGBModel
+from .utils import array_mean, to_str_round
+from .measure import ProgramMeasurer
+from .measure_record import RecordReader
+
+logger = logging.getLogger("auto_scheduler")
+
+
+class TaskScheduler:
+    """Allocate the time resources when tuning multiple tasks together
+
+    Parameters
+    ----------
+    tasks: List[SearchTask]
+        The list of all tasks
+    objective_func: Callable[List[float] -> float]
+        The objective function to be optimized
+    """
+
+    def __init__(self, tasks, objective_func):
+        self.tasks = tasks
+        self.objective_func = objective_func or sum
+
+    def compute_score(self, costs) -> float:
+        return self.objective_func(costs)
+
+
+def make_search_policies(
+    search_policy, tasks, num_measures_per_round, load_model_file=None, load_log_file=None
+):
+    """Make a list of search policies for a list of search tasks.
+    It creates one policy per task.
+
+    Parameters
+    ----------
+    search_policy: Union[str, List[SearchPolicy]]
+        The name of search policy.
+    tasks: List[SearchTask]
+        The list of all tasks
+    num_measures_per_round: int
+        The number of schedules to be measured at each search round.
+        This should be the same as `TuningOptions.num_measures_per_round`
+    load_model_file: Optional[str]
+        Load pre-trained model from this file
+    load_log_file: Optional[str]
+        Load measurement records from this file
+
+    Returns
+    -------
+    policies: List[SearchPolicy]
+        The list of search policies
+    """
+    if search_policy == "default":
+        search_policy = "sketch.xgb"
+
+    if isinstance(search_policy, str):
+        policy_type, model_type = search_policy.split(".")
+        if model_type == "xgb":
+            cost_model = XGBModel(num_warmup_sample=len(tasks) * num_measures_per_round)
+            if load_model_file:
+                logger.info("Load pretrained model...")
+                cost_model.load(load_model_file)
+            elif load_log_file:
+                cost_model.load_log_file(load_log_file)
+        elif model_type == "random":
+            cost_model = RandomModel()
+        else:
+            raise ValueError("Invalid search policy: " + search_policy)
+
+        if policy_type == "sketch":
+            search_policies = [SketchPolicy(task, cost_model) for task in tasks]
+        else:
+            raise ValueError("Invalid search policy: " + search_policy)
+    else:
+        # check type
+        assert isinstance(search_policy, (tuple, list))
+        for item in search_policy:
+            assert isinstance(item, SearchPolicy)
+        search_policies = search_policy
+
+    return search_policies
+
+
+def derive_similarity_tag(dag, log_base=1.618):
+    """Derive the tag for similarity check from one computational DAG.
+    The DAGs with the same tag are considered as similar tasks.
+
+    Parameters
+    ----------
+    dag: ComputeDAG
+        The input computational DAG
+    log_base: float = 1.618
+        The base of log to normalize FLOPS
+
+    Returns
+    -------
+    tag: str
+        The tag of this computational DAG.
+    """
+    ret = ""
+    for op in dag.ops:
+        tag = op.attrs.get("ansor_task_scheduler_tag", None)
+        if tag:
+            ret += op.attrs["ansor_task_scheduler_tag"] + "_"
+    if ret != "":
+        ret += "%d" % int(math.log(dag.flop_ct + 1, log_base))
+    return ret
+
+
+class SimpleTaskScheduler(TaskScheduler):
+    """The default task scheduler with several strategies
+
+    Parameters
+    ----------
+    tasks: List[SearchTask]
+        All tasks to tune
+    objective_func: Callable[List[float] -> float]
+        The objective function to be optimized
+    strategy: Optional[str]
+        The scheduling strategy.
+        "round-robin": Tune tasks in round robin order.
+        "gradient" : Tune tasks with gradient descent.
+    load_model_file: Optional[str]
+        Load pre-trained model from this file
+    load_log_file: Optional[str]
+        Load measurement records from this file
+    eps_random: float = 0.05
+        Always allocate this percent of n_trials to select tasks randomly.
+        This is for encouraging exploration.
+    verbose: int = 1
+        The level of verbosity. 0 means silent.
+    alpha: float = 0.2
+        The parameter used for 'gradient' strategy
+    beta: float = 2
+        The parameter used for 'gradient' strategy
+    backward_window_size: int = 3
+        The parameter used for 'gradient' strategy
+    """
+
+    def __init__(
+        self,
+        tasks,
+        objective_func,
+        strategy="gradient",
+        load_model_file: str = None,
+        load_log_file: str = None,
+        eps_random: float = 0.05,
+        verbose: int = 1,
+        alpha: float = 0.2,
+        beta: float = 2,
+        gamma: float = 0.5,
+        backward_window_size: int = 3,
+    ):
+        super().__init__(tasks, objective_func)
+        self.strategy = strategy
+        self.eps_random = eps_random
+        self.verbose = verbose
+        self.load_log_file = load_log_file
+        self.load_model_file = load_model_file
+        self.alpha = alpha
+        self.beta = beta
+        self.gamma = gamma
+        self.backward_window_size = backward_window_size
+
+        assert self.strategy in ["round-robin", "gradient"]
+
+        # task_cts[i] saves how many times task i is tuned
+        self.task_cts = [0 for _ in range(len(self.tasks))]
+
+        # task_costs_history[i] saves the latency history of task i
+        self.task_costs_history = [[] for _ in range(len(self.tasks))]
+
+        # best_costs[i] saves the best latency of task i
+        self.best_costs = 1e10 * np.ones(len(self.tasks))
+
+        self.tune_option = self.measurer = self.search_policies = self.ct = self.tic = None
+        self.num_measures_per_round = None
+        self.dead_tasks = set()
+        self.sequential_now_task_idx = 0
+        self.sequential_now_task_begin_ct = 0
+
+        assert len(tasks) != 0, "No tasks"
+
+        # Build similarity group
+        self.task_tags = []
+        self.tag_to_group_id = {}
+        self.group_task_ids = []
+        self.flop_cts = []
+        for i, task in enumerate(self.tasks):
+            tag = derive_similarity_tag(task.compute_dag)
+            self.task_tags.append(tag)
+            self.flop_cts.append(task.compute_dag.flop_ct)
+            if tag == "":
+                continue
+
+            if tag not in self.tag_to_group_id:
+                self.tag_to_group_id[tag] = len(self.tag_to_group_id)
+                self.group_task_ids.append([])
+            self.group_task_ids[self.tag_to_group_id[tag]].append(i)
+
+    def tune(self, tune_option, search_policy="default"):
+        """Tune a batch of tasks together.
+
+        Parameters
+        ----------
+        tune_option: TuningOptions
+            The options of tuning
+        search_policy: : Union[str, List[SearchPolicy]]
+            The list of search policies.
+            If it is str.
+            "sketch.xgb" for SketchPolicy + XGBModel
+            "sketch.random" for SketchPolicy + RandomModel
+        """
+        # init members
+        self.tune_option = tune_option
+        self.measurer = ProgramMeasurer(
+            tune_option.builder,
+            tune_option.runner,
+            tune_option.measure_callbacks,
+            tune_option.verbose,
+        )
+        self.ct = 0
+        self.tic = time.time()
+        self.sequential_now_task_idx = 0
+        self.sequential_now_task_begin_ct = 0
+        # reset num_measures_per_round to make sure every task is tuned at least once
+        self.num_measures_per_round = min(
+            tune_option.num_measures_per_round, tune_option.num_measure_trials // len(self.tasks)
+        )
+        assert self.num_measures_per_round > 0, "num_measure_trials is too small"
+
+        # restore the status of the task scheduler from a log file
+        self.restore_status(self.load_log_file, self.num_measures_per_round)
+
+        # make one search policy for one task
+        self.search_policies = make_search_policies(
+            search_policy,
+            self.tasks,
+            self.num_measures_per_round,
+            self.load_model_file,
+            self.load_log_file,
+        )
+        for i in range(len(self.tasks)):
+            search_policy = self.search_policies[i]
+            search_policy.set_verbose(tune_option.verbose)
+            # todo(merrymercy): call presearch callbacks?

Review comment:
   Since `SketchPolicy`'s constructor already takes "verbose" and "init_search_callbacks" as input parameters, it's better to just move these into `make_search_policies()`.
   The `set_verbose`, `set_task`, and `run_callbacks` from our original repo are no longer needed.
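   A rough sketch of what that could look like (illustrative only, not a final API; the extra parameters are an assumption about how the refactor might be wired up, and the elided body is unchanged from the current version):

```python
def make_search_policies(
    search_policy,
    tasks,
    num_measures_per_round,
    verbose=1,
    load_model_file=None,
    load_log_file=None,
    init_search_callbacks=None,
):
    ...
    if policy_type == "sketch":
        # Pass verbosity and init-search callbacks directly to each policy,
        # so the scheduler no longer needs set_verbose / run_callbacks afterwards.
        search_policies = [
            SketchPolicy(
                task,
                cost_model,
                verbose=verbose,
                init_search_callbacks=init_search_callbacks,
            )
            for task in tasks
        ]
```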

##########
File path: python/tvm/auto_scheduler/task_scheduler.py
##########
@@ -0,0 +1,452 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name
+
+""" The task scheduler that allocates the time resources when tuning multiple 
tasks together
+
+The details of the "gradient" strategy below can be found in the section 6 of 
this paper:
+L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating 
High-Performance Tensor
+Programs for Deep Learning." (OSDI 2020).
+"""
+
+import time
+import math
+import logging
+
+import numpy as np
+
+from .search_policy import SearchPolicy, SketchPolicy
+from .cost_model import RandomModel, XGBModel
+from .utils import array_mean, to_str_round
+from .measure import ProgramMeasurer
+from .measure_record import RecordReader
+
+logger = logging.getLogger("auto_scheduler")
+
+
+class TaskScheduler:
+    """Allocate the time resources when tuning multiple tasks together
+
+    Parameters
+    ----------
+    tasks: List[SearchTask]
+        The list of all tasks
+    objective_func: Callable[List[float] -> float]
+        The objective function to be optimized
+    """
+
+    def __init__(self, tasks, objective_func):
+        self.tasks = tasks
+        self.objective_func = objective_func or sum
+
+    def compute_score(self, costs) -> float:
+        return self.objective_func(costs)
+
+
+def make_search_policies(
+    search_policy, tasks, num_measures_per_round, load_model_file=None, load_log_file=None
+):
+    """Make a list of search policies for a list of search tasks.
+    It creates one policy per task.
+
+    Parameters
+    ----------
+    search_policy: Union[str, List[SearchPolicy]]
+        The name of search policy.
+    tasks: List[SearchTask]
+        The list of all tasks
+    num_measures_per_round: int
+        The number of schedules to be measured at each search round.
+        This should be the same as `TuningOptions.num_measures_per_round`
+    load_model_file: Optional[str]
+        Load pre-trained model from this file
+    load_log_file: Optional[str]
+        Load measurement records from this file
+
+    Returns
+    -------
+    policies: List[SearchPolicy]
+        The list of search policies
+    """
+    if search_policy == "default":
+        search_policy = "sketch.xgb"
+
+    if isinstance(search_policy, str):
+        policy_type, model_type = search_policy.split(".")
+        if model_type == "xgb":
+            cost_model = XGBModel(num_warmup_sample=len(tasks) * num_measures_per_round)
+            if load_model_file:
+                logger.info("Load pretrained model...")
+                cost_model.load(load_model_file)
+            elif load_log_file:
+                cost_model.load_log_file(load_log_file)
+        elif model_type == "random":
+            cost_model = RandomModel()
+        else:
+            raise ValueError("Invalid search policy: " + search_policy)
+
+        if policy_type == "sketch":
+            search_policies = [SketchPolicy(task, cost_model) for task in tasks]
+        else:
+            raise ValueError("Invalid search policy: " + search_policy)
+    else:
+        # check type
+        assert isinstance(search_policy, (tuple, list))
+        for item in search_policy:
+            assert isinstance(item, SearchPolicy)
+        search_policies = search_policy
+
+    return search_policies
+
+
+def derive_similarity_tag(dag, log_base=1.618):
+    """Derive the tag for similarity check from one computational DAG.
+    The DAGs with the same tag are considered as similar tasks.
+
+    Parameters
+    ----------
+    dag: ComputeDAG
+        The input computational DAG
+    log_base: float = 1.618
+        The base of log to normalize FLOPS
+
+    Returns
+    -------
+    tag: str
+        The tag of this computational DAG.
+    """
+    ret = ""
+    for op in dag.ops:
+        tag = op.attrs.get("ansor_task_scheduler_tag", None)
+        if tag:
+            ret += op.attrs["ansor_task_scheduler_tag"] + "_"
+    if ret != "":
+        ret += "%d" % int(math.log(dag.flop_ct + 1, log_base))
+    return ret
+
+
+class SimpleTaskScheduler(TaskScheduler):
+    """The default task scheduler with several strategies
+
+    Parameters
+    ----------
+    tasks: List[SearchTask]
+        All tasks to tune
+    objective_func: Callable[List[float] -> float]
+        The objective function to be optimized
+    strategy: Optional[str]
+        The scheduling strategy.
+        "round-robin": Tune tasks in round robin order.
+        "gradient" : Tune tasks with gradient descent.
+    load_model_file: Optional[str]
+        Load pre-trained model from this file
+    load_log_file: Optional[str]
+        Load measurement records from this file
+    eps_random: float = 0.05
+        Always allocate this percent of n_trials to select tasks randomly.
+        This is for encouraging exploration.
+    verbose: int = 1
+        The level of verbosity. 0 means silent.
+    alpha: float = 0.2
+        The parameter used for 'gradient' strategy
+    beta: float = 2
+        The parameter used for 'gradient' strategy
+    backward_window_size: int = 3
+        The parameter used for 'gradient' strategy
+    """
+
+    def __init__(
+        self,
+        tasks,
+        objective_func,
+        strategy="gradient",
+        load_model_file: str = None,
+        load_log_file: str = None,
+        eps_random: float = 0.05,
+        verbose: int = 1,
+        alpha: float = 0.2,
+        beta: float = 2,
+        gamma: float = 0.5,
+        backward_window_size: int = 3,
+    ):
+        super().__init__(tasks, objective_func)
+        self.strategy = strategy
+        self.eps_random = eps_random
+        self.verbose = verbose
+        self.load_log_file = load_log_file
+        self.load_model_file = load_model_file
+        self.alpha = alpha
+        self.beta = beta
+        self.gamma = gamma
+        self.backward_window_size = backward_window_size
+
+        assert self.strategy in ["round-robin", "gradient"]
+
+        # task_cts[i] saves how many times task i is tuned
+        self.task_cts = [0 for _ in range(len(self.tasks))]
+
+        # task_costs_history[i] saves the latency history of task i
+        self.task_costs_history = [[] for _ in range(len(self.tasks))]
+
+        # best_costs[i] saves the best latency of task i
+        self.best_costs = 1e10 * np.ones(len(self.tasks))
+
+        self.tune_option = self.measurer = self.search_policies = self.ct = self.tic = None
+        self.num_measures_per_round = None
+        self.dead_tasks = set()
+        self.sequential_now_task_idx = 0
+        self.sequential_now_task_begin_ct = 0
+
+        assert len(tasks) != 0, "No tasks"
+
+        # Build similarity group
+        self.task_tags = []
+        self.tag_to_group_id = {}
+        self.group_task_ids = []
+        self.flop_cts = []
+        for i, task in enumerate(self.tasks):
+            tag = derive_similarity_tag(task.compute_dag)
+            self.task_tags.append(tag)
+            self.flop_cts.append(task.compute_dag.flop_ct)
+            if tag == "":
+                continue
+
+            if tag not in self.tag_to_group_id:
+                self.tag_to_group_id[tag] = len(self.tag_to_group_id)
+                self.group_task_ids.append([])
+            self.group_task_ids[self.tag_to_group_id[tag]].append(i)
+
+    def tune(self, tune_option, search_policy="default"):
+        """Tune a batch of tasks together.
+
+        Parameters
+        ----------
+        tune_option: TuningOptions
+            The options of tuning
+        search_policy: : Union[str, List[SearchPolicy]]
+            The list of search policies.
+            If it is str.
+            "sketch.xgb" for SketchPolicy + XGBModel
+            "sketch.random" for SketchPolicy + RandomModel
+        """
+        # init members
+        self.tune_option = tune_option
+        self.measurer = ProgramMeasurer(
+            tune_option.builder,
+            tune_option.runner,
+            tune_option.measure_callbacks,
+            tune_option.verbose,
+        )
+        self.ct = 0
+        self.tic = time.time()
+        self.sequential_now_task_idx = 0
+        self.sequential_now_task_begin_ct = 0
+        # reset num_measures_per_round to make sure every task is tuned at least once
+        self.num_measures_per_round = min(
+            tune_option.num_measures_per_round, tune_option.num_measure_trials // len(self.tasks)
+        )
+        assert self.num_measures_per_round > 0, "num_measure_trials is too small"
+
+        # restore the status of the task scheduler from a log file
+        self.restore_status(self.load_log_file, self.num_measures_per_round)
+
+        # make one search policy for one task
+        self.search_policies = make_search_policies(
+            search_policy,
+            self.tasks,
+            self.num_measures_per_round,
+            self.load_model_file,
+            self.load_log_file,
+        )
+        for i in range(len(self.tasks)):
+            search_policy = self.search_policies[i]
+            search_policy.set_verbose(tune_option.verbose)
+            # todo(merrymercy): call presearch callbacks?
+
+        # do a round robin first
+        if self.strategy != "sequential":
+            for i in range(len(self.tasks)):
+                self.tune_task(i)
+
+        # use the specific strategy to choose workload to tune
+        task_idx = -1
+        while self.ct < tune_option.num_measure_trials and len(self.dead_tasks) < len(self.tasks):
+            if self.strategy == "sequential":
+                allocated_total_ct = (
+                    tune_option.num_measure_trials - self.sequential_now_task_begin_ct
+                ) / (len(self.tasks) - self.sequential_now_task_idx)
+                used_ct = self.ct - self.sequential_now_task_begin_ct
+
+                if self.sequential_now_task_idx in self.dead_tasks or used_ct >= allocated_total_ct:
+                    self.sequential_now_task_idx += 1
+                    self.sequential_now_task_begin_ct = self.ct
+                task_idx = self.sequential_now_task_idx
+                if task_idx >= len(self.tasks):
+                    break
+            elif self.strategy == "round-robin":
+                task_idx = (task_idx + 1) % len(self.tasks)
+                while task_idx in self.dead_tasks:
+                    task_idx = (task_idx + 1) % len(self.tasks)
+            elif self.strategy == "gradient":
+                gradients = []
+                for i in range(len(self.tasks)):
+                    if i in self.dead_tasks:
+                        gradients.append(0)
+                        continue
+
+                    # compute gradient from chain rule : (delta f / delta g_i)
+                    delta = 1e-7
+                    new_costs = list(self.best_costs)
+                    new_costs[i] -= delta
+                    chain_grad = (
+                        self.compute_score(self.best_costs) - self.compute_score(new_costs)
+                    ) / delta
+
+                    # compute (g_i(t_i) - g(t_i - \Delta t)) / (\Delta t)
+                    if (
+                        self.task_cts[i] - 1 < len(self.task_costs_history[i])
+                        and self.task_cts[i] - 1 - self.backward_window_size >= 0
+                    ):
+                        backward_grad = (
+                            self.task_costs_history[i][self.task_cts[i] - 1]
+                            - self.task_costs_history[i][
+                                self.task_cts[i] - 1 - self.backward_window_size
+                            ]
+                        ) / self.backward_window_size
+                    else:
+                        backward_grad = 0
+
+                    # compute (g_i(t_i + \Delta t) - g(t_i)) / (\Delta t)
+                    g_next_1 = self.best_costs[i] - (self.best_costs[i] / self.task_cts[i])
+
+                    g_next_2 = self.beta * 1e30
+                    group_id = self.tag_to_group_id.get(self.task_tags[i], None)
+                    if group_id is not None and len(self.group_task_ids[group_id]) > 1:
+                        best_flops = max(
+                            [
+                                self.flop_cts[j] / self.best_costs[j]
+                                for j in self.group_task_ids[group_id]
+                            ]
+                        )
+                        g_next_2 = self.beta * self.flop_cts[i] / best_flops
+
+                    g_next = min(g_next_1, g_next_2)
+                    forward_grad = g_next - self.best_costs[i]
+
+                    # combine all grads
+                    grad = chain_grad * (
+                        self.alpha * backward_grad + (1 - self.alpha) * forward_grad
+                    )
+                    assert grad <= 0
+                    gradients.append(grad)
+
+                if max(gradients) == min(gradients):
+                    task_idx = np.random.choice(len(gradients))
+                else:
+                    task_idx = np.argmin(gradients)
+            else:
+                raise ValueError("Invalid strategy: " + self.strategy)
+
+            self.tune_task(task_idx)
+            self.adjust_similarity_group(task_idx)
+
+    def tune_task(self, task_idx):

Review comment:
   ```suggestion
       def _tune_task(self, task_idx):
   ```

   This function is not likely to be called from outside, so it may be better to use a different name format (a leading underscore to mark it as internal). The same applies to the other internal functions.
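   For example (purely illustrative; the docstrings are placeholders, not from this PR), the internal helpers could follow Python's leading-underscore convention:

```python
class SimpleTaskScheduler(TaskScheduler):
    def _tune_task(self, task_idx):
        """Tune the selected task for one round (internal helper)."""
        ...

    def _adjust_similarity_group(self, task_idx):
        """Update the similarity group of the selected task (internal helper)."""
        ...
```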




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

