This is an automated email from the ASF dual-hosted git repository.

pabloem pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/beam.git


The following commit(s) were added to refs/heads/master by this push:
     new ad120dfe202 [Playground] [Infrastructure] format python code style 
(#22291)
ad120dfe202 is described below

commit ad120dfe202942dde81ac0f227e4193b48435c7e
Author: Vladislav Chunikhin <102509589+vchunik...@users.noreply.github.com>
AuthorDate: Wed Aug 10 20:17:17 2022 +0300

    [Playground] [Infrastructure] format python code style (#22291)
---
 playground/infrastructure/cd_helper.py        | 442 +++++++++---------
 playground/infrastructure/checker.py          |  52 +--
 playground/infrastructure/ci_cd.py            |  74 +--
 playground/infrastructure/ci_helper.py        | 184 ++++----
 playground/infrastructure/config.py           | 108 ++---
 playground/infrastructure/grpc_client.py      | 280 +++++------
 playground/infrastructure/helper.py           | 644 +++++++++++++-------------
 playground/infrastructure/logger.py           |  46 +-
 playground/infrastructure/test_cd_helper.py   | 232 +++++-----
 playground/infrastructure/test_ci_cd.py       |  12 +-
 playground/infrastructure/test_ci_helper.py   | 254 +++++-----
 playground/infrastructure/test_grpc_client.py |  80 ++--
 playground/infrastructure/test_helper.py      | 372 +++++++--------
 playground/infrastructure/test_logger.py      |   6 +-
 14 files changed, 1393 insertions(+), 1393 deletions(-)

diff --git a/playground/infrastructure/cd_helper.py 
b/playground/infrastructure/cd_helper.py
index 4b19d551485..f638caa8e7a 100644
--- a/playground/infrastructure/cd_helper.py
+++ b/playground/infrastructure/cd_helper.py
@@ -36,227 +36,227 @@ from helper import Example, get_statuses
 
 
 class CDHelper:
-  """
-  Helper for CD step.
-
-  It is used to save beam examples/katas/tests and their output on the GCS.
-  """
-  def store_examples(self, examples: List[Example]):
-    """
-    Store beam examples and their output in the Google Cloud.
-
-    Outputs for multifile examples are left empty.
-    """
-    single_file_examples = list(filter(
-      lambda example: example.tag.multifile is False, examples))
-    logging.info("Start of executing only single-file Playground examples ...")
-    asyncio.run(self._get_outputs(single_file_examples))
-    logging.info("Finish of executing single-file Playground examples")
-
-    logging.info("Start of sending Playground examples to the bucket ...")
-    self._save_to_cloud_storage(examples)
-    logging.info("Finish of sending Playground examples to the bucket")
-    self._clear_temp_folder()
-
-  async def _get_outputs(self, examples: List[Example]):
-    """
-    Run beam examples and keep their output.
-
-    Call the backend to start code processing for the examples.
-    Then receive code output.
-
-    Args:
-        examples: beam examples that should be run
-    """
-    await get_statuses(
-        examples)  # run examples code and wait until all are executed
-    client = GRPCClient()
-    tasks = [client.get_run_output(example.pipeline_id) for example in 
examples]
-    outputs = await asyncio.gather(*tasks)
-
-    tasks = [client.get_log(example.pipeline_id) for example in examples]
-    logs = await asyncio.gather(*tasks)
-
-    if len(examples) > 0 and (examples[0].sdk is SDK_PYTHON or
-                              examples[0].sdk is SDK_JAVA):
-      tasks = [
-          client.get_graph(example.pipeline_id, example.filepath)
-          for example in examples
-      ]
-      graphs = await asyncio.gather(*tasks)
-
-      for graph, example in zip(graphs, examples):
-        example.graph = graph
-
-    for output, example in zip(outputs, examples):
-      example.output = output
-
-    for log, example in zip(logs, examples):
-      example.logs = log
-
-  def _save_to_cloud_storage(self, examples: List[Example]):
-    """
-    Save examples, outputs and meta to bucket
-
-    Args:
-        examples: precompiled examples
-    """
-    self._storage_client = storage.Client()
-    self._bucket = self._storage_client.bucket(Config.BUCKET_NAME)
-
-    for example in tqdm(examples):
-      file_names = self._write_to_local_fs(example)
-
-      if example.tag.default_example:
-        default_example_path = str(Path([*file_names].pop()).parent)
-        cloud_path = self._write_default_example_path_to_local_fs(
-          default_example_path)
-
-        self._upload_blob(
-          source_file=os.path.join(Config.TEMP_FOLDER, cloud_path),
-          destination_blob_name=cloud_path)
-
-      for cloud_file_name, local_file_name in file_names.items():
-        self._upload_blob(
-            source_file=local_file_name, destination_blob_name=cloud_file_name)
-
-  def _write_default_example_path_to_local_fs(self, path: str) -> str:
     """
-    Write default example path to the file (in temp folder)
+    Helper for CD step.
 
-    Args:
-        path: path of the default example
-
-    Returns: name of the file
-
-    """
-    sdk = Path(path).parts[0]
-    cloud_path = os.path.join(sdk, Config.DEFAULT_PRECOMPILED_OBJECT)
-
-    path_to_file = os.path.join(Config.TEMP_FOLDER, sdk)
-    Path(path_to_file).mkdir(parents=True, exist_ok=True)
-
-    local_path = os.path.join(path_to_file, Config.DEFAULT_PRECOMPILED_OBJECT)
-
-    content = json.dumps({sdk: path})
-    with open(local_path, "w", encoding="utf-8") as file:
-      file.write(content)
-
-    return cloud_path
-
-  def _write_to_local_fs(self, example: Example):
-    """
-    Write code of an example, output and meta info
-    to the filesystem (in temp folder)
-
-    Args:
-        example: example object
-
-    Returns: dict {path_at_the_bucket:path_at_the_os}
-
-    """
-    path_to_object_folder = os.path.join(
-        Config.TEMP_FOLDER,
-        example.pipeline_id,
-        Sdk.Name(example.sdk),
-        PrecompiledObjectType.Name(example.type),
-        example.tag.name)
-    Path(path_to_object_folder).mkdir(parents=True, exist_ok=True)
-
-    file_names = {}
-    code_path = self._get_gcs_object_name(
-        sdk=example.sdk,
-        type=example.type,
-        base_folder_name=example.tag.name,
-        file_name=example.tag.name)
-    output_path = self._get_gcs_object_name(
-        sdk=example.sdk,
-        type=example.type,
-        base_folder_name=example.tag.name,
-        file_name=example.tag.name,
-        extension=PrecompiledExample.OUTPUT_EXTENSION)
-    log_path = self._get_gcs_object_name(
-        sdk=example.sdk,
-        type=example.type,
-        base_folder_name=example.tag.name,
-        file_name=example.tag.name,
-        extension=PrecompiledExample.LOG_EXTENSION)
-    graph_path = self._get_gcs_object_name(
-        sdk=example.sdk,
-        type=example.type,
-        base_folder_name=example.tag.name,
-        file_name=example.tag.name,
-        extension=PrecompiledExample.GRAPH_EXTENSION)
-    meta_path = self._get_gcs_object_name(
-        sdk=example.sdk,
-        type=example.type,
-        base_folder_name=example.tag.name,
-        file_name=PrecompiledExample.META_NAME,
-        extension=PrecompiledExample.META_EXTENSION)
-    file_names[code_path] = example.code
-    file_names[output_path] = example.output
-    meta = example.tag._asdict()
-    meta["link"] = example.link
-    file_names[meta_path] = json.dumps(meta)
-    file_names[log_path] = example.logs
-    if example.sdk == SDK_PYTHON or example.sdk == SDK_JAVA:
-      file_names[graph_path] = example.graph
-    for file_name, file_content in file_names.items():
-      local_file_path = os.path.join(
-          Config.TEMP_FOLDER, example.pipeline_id, file_name)
-      with open(local_file_path, "w", encoding="utf-8") as file:
-        file.write(file_content)
-      # don't need content anymore, instead save the local path
-      file_names[file_name] = local_file_path
-    return file_names
-
-  def _get_gcs_object_name(
-      self,
-      sdk: Sdk,
-      type: PrecompiledObjectType,
-      base_folder_name: str,
-      file_name: str,
-      extension: str = None):
-    """
-    Get the path where file will be stored at the bucket.
-
-    Args:
-      sdk: sdk of the example
-      type: type of the example
-      file_name: name of the example
-      base_folder_name: name of the folder where example is stored
-        (eq. to example name)
-      extension: extension of the file
-
-    Returns: file name
-    """
-    if extension is None:
-      extension = Config.SDK_TO_EXTENSION[sdk]
-    return os.path.join(
-        Sdk.Name(sdk),
-        PrecompiledObjectType.Name(type),
-        base_folder_name,
-        f"{file_name}.{extension}")
-
-  def _upload_blob(self, source_file: str, destination_blob_name: str):
-    """
-    Upload a file to the bucket.
-
-    Args:
-        source_file: name of the file to be stored
-        destination_blob_name: "storage-object-name"
-    """
-
-    blob = self._bucket.blob(destination_blob_name)
-    blob.upload_from_filename(source_file)
-    # change caching to no caching
-    blob.cache_control = Config.NO_STORE
-    blob.patch()
-    logging.info("File uploaded to %s", destination_blob_name)
-
-  def _clear_temp_folder(self):
-    """
-    Remove temporary folder with source files.
+    It is used to save beam examples/katas/tests and their output on the GCS.
     """
-    if os.path.exists(Config.TEMP_FOLDER):
-      shutil.rmtree(Config.TEMP_FOLDER)
+    def store_examples(self, examples: List[Example]):
+        """
+        Store beam examples and their output in the Google Cloud.
+
+        Outputs for multifile examples are left empty.
+        """
+        single_file_examples = list(filter(
+            lambda example: example.tag.multifile is False, examples))
+        logging.info("Start of executing only single-file Playground examples 
...")
+        asyncio.run(self._get_outputs(single_file_examples))
+        logging.info("Finish of executing single-file Playground examples")
+
+        logging.info("Start of sending Playground examples to the bucket ...")
+        self._save_to_cloud_storage(examples)
+        logging.info("Finish of sending Playground examples to the bucket")
+        self._clear_temp_folder()
+
+    async def _get_outputs(self, examples: List[Example]):
+        """
+        Run beam examples and keep their output.
+
+        Call the backend to start code processing for the examples.
+        Then receive code output.
+
+        Args:
+            examples: beam examples that should be run
+        """
+        await get_statuses(
+            examples)  # run examples code and wait until all are executed
+        client = GRPCClient()
+        tasks = [client.get_run_output(example.pipeline_id) for example in 
examples]
+        outputs = await asyncio.gather(*tasks)
+
+        tasks = [client.get_log(example.pipeline_id) for example in examples]
+        logs = await asyncio.gather(*tasks)
+
+        if len(examples) > 0 and (examples[0].sdk is SDK_PYTHON or
+                                  examples[0].sdk is SDK_JAVA):
+            tasks = [
+                client.get_graph(example.pipeline_id, example.filepath)
+                for example in examples
+            ]
+            graphs = await asyncio.gather(*tasks)
+
+            for graph, example in zip(graphs, examples):
+                example.graph = graph
+
+        for output, example in zip(outputs, examples):
+            example.output = output
+
+        for log, example in zip(logs, examples):
+            example.logs = log
+
+    def _save_to_cloud_storage(self, examples: List[Example]):
+        """
+        Save examples, outputs and meta to bucket
+
+        Args:
+            examples: precompiled examples
+        """
+        self._storage_client = storage.Client()
+        self._bucket = self._storage_client.bucket(Config.BUCKET_NAME)
+
+        for example in tqdm(examples):
+            file_names = self._write_to_local_fs(example)
+
+            if example.tag.default_example:
+                default_example_path = str(Path([*file_names].pop()).parent)
+                cloud_path = self._write_default_example_path_to_local_fs(
+                    default_example_path)
+
+                self._upload_blob(
+                    source_file=os.path.join(Config.TEMP_FOLDER, cloud_path),
+                    destination_blob_name=cloud_path)
+
+            for cloud_file_name, local_file_name in file_names.items():
+                self._upload_blob(
+                    source_file=local_file_name, 
destination_blob_name=cloud_file_name)
+
+    def _write_default_example_path_to_local_fs(self, path: str) -> str:
+        """
+        Write default example path to the file (in temp folder)
+
+        Args:
+            path: path of the default example
+
+        Returns: name of the file
+
+        """
+        sdk = Path(path).parts[0]
+        cloud_path = os.path.join(sdk, Config.DEFAULT_PRECOMPILED_OBJECT)
+
+        path_to_file = os.path.join(Config.TEMP_FOLDER, sdk)
+        Path(path_to_file).mkdir(parents=True, exist_ok=True)
+
+        local_path = os.path.join(path_to_file, 
Config.DEFAULT_PRECOMPILED_OBJECT)
+
+        content = json.dumps({sdk: path})
+        with open(local_path, "w", encoding="utf-8") as file:
+            file.write(content)
+
+        return cloud_path
+
+    def _write_to_local_fs(self, example: Example):
+        """
+        Write code of an example, output and meta info
+        to the filesystem (in temp folder)
+
+        Args:
+            example: example object
+
+        Returns: dict {path_at_the_bucket:path_at_the_os}
+
+        """
+        path_to_object_folder = os.path.join(
+            Config.TEMP_FOLDER,
+            example.pipeline_id,
+            Sdk.Name(example.sdk),
+            PrecompiledObjectType.Name(example.type),
+            example.tag.name)
+        Path(path_to_object_folder).mkdir(parents=True, exist_ok=True)
+
+        file_names = {}
+        code_path = self._get_gcs_object_name(
+            sdk=example.sdk,
+            type=example.type,
+            base_folder_name=example.tag.name,
+            file_name=example.tag.name)
+        output_path = self._get_gcs_object_name(
+            sdk=example.sdk,
+            type=example.type,
+            base_folder_name=example.tag.name,
+            file_name=example.tag.name,
+            extension=PrecompiledExample.OUTPUT_EXTENSION)
+        log_path = self._get_gcs_object_name(
+            sdk=example.sdk,
+            type=example.type,
+            base_folder_name=example.tag.name,
+            file_name=example.tag.name,
+            extension=PrecompiledExample.LOG_EXTENSION)
+        graph_path = self._get_gcs_object_name(
+            sdk=example.sdk,
+            type=example.type,
+            base_folder_name=example.tag.name,
+            file_name=example.tag.name,
+            extension=PrecompiledExample.GRAPH_EXTENSION)
+        meta_path = self._get_gcs_object_name(
+            sdk=example.sdk,
+            type=example.type,
+            base_folder_name=example.tag.name,
+            file_name=PrecompiledExample.META_NAME,
+            extension=PrecompiledExample.META_EXTENSION)
+        file_names[code_path] = example.code
+        file_names[output_path] = example.output
+        meta = example.tag._asdict()
+        meta["link"] = example.link
+        file_names[meta_path] = json.dumps(meta)
+        file_names[log_path] = example.logs
+        if example.sdk == SDK_PYTHON or example.sdk == SDK_JAVA:
+            file_names[graph_path] = example.graph
+        for file_name, file_content in file_names.items():
+            local_file_path = os.path.join(
+                Config.TEMP_FOLDER, example.pipeline_id, file_name)
+            with open(local_file_path, "w", encoding="utf-8") as file:
+                file.write(file_content)
+            # don't need content anymore, instead save the local path
+            file_names[file_name] = local_file_path
+        return file_names
+
+    def _get_gcs_object_name(
+          self,
+          sdk: Sdk,
+          type: PrecompiledObjectType,
+          base_folder_name: str,
+          file_name: str,
+          extension: str = None):
+        """
+        Get the path where file will be stored at the bucket.
+
+        Args:
+          sdk: sdk of the example
+          type: type of the example
+          file_name: name of the example
+          base_folder_name: name of the folder where example is stored
+            (eq. to example name)
+          extension: extension of the file
+
+        Returns: file name
+        """
+        if extension is None:
+            extension = Config.SDK_TO_EXTENSION[sdk]
+        return os.path.join(
+            Sdk.Name(sdk),
+            PrecompiledObjectType.Name(type),
+            base_folder_name,
+            f"{file_name}.{extension}")
+
+    def _upload_blob(self, source_file: str, destination_blob_name: str):
+        """
+        Upload a file to the bucket.
+
+        Args:
+            source_file: name of the file to be stored
+            destination_blob_name: "storage-object-name"
+        """
+
+        blob = self._bucket.blob(destination_blob_name)
+        blob.upload_from_filename(source_file)
+        # change caching to no caching
+        blob.cache_control = Config.NO_STORE
+        blob.patch()
+        logging.info("File uploaded to %s", destination_blob_name)
+
+    def _clear_temp_folder(self):
+        """
+        Remove temporary folder with source files.
+        """
+        if os.path.exists(Config.TEMP_FOLDER):
+            shutil.rmtree(Config.TEMP_FOLDER)
diff --git a/playground/infrastructure/checker.py 
b/playground/infrastructure/checker.py
index f8e3bd77f64..e0499f0f44c 100644
--- a/playground/infrastructure/checker.py
+++ b/playground/infrastructure/checker.py
@@ -27,36 +27,36 @@ root_dir = os.getenv("BEAM_ROOT_DIR")
 
 
 def _check_envs():
-  if root_dir is None:
-    raise KeyError(
-      "BEAM_ROOT_DIR environment variable should be specified in os")
+    if root_dir is None:
+        raise KeyError(
+            "BEAM_ROOT_DIR environment variable should be specified in os")
 
 
 def check(paths) -> bool:
-  pathsArr = []
-  startInd = 0
-  lastInd = 0
-  while lastInd < len(paths):
-    if paths[lastInd] == ".":
-      lastInd += 1
-      while lastInd < len(paths) and paths[lastInd] != " ":
+    pathsArr = []
+    startInd = 0
+    lastInd = 0
+    while lastInd < len(paths):
+        if paths[lastInd] == ".":
+            lastInd += 1
+            while lastInd < len(paths) and paths[lastInd] != " ":
+                lastInd += 1
+            pathsArr.append(paths[startInd:lastInd])
+            lastInd += 1
+            startInd = lastInd
         lastInd += 1
-      pathsArr.append(paths[startInd:lastInd])
-      lastInd += 1
-      startInd = lastInd
-    lastInd += 1
-  for filepath in pathsArr:
-    extension = filepath.split(os.extsep)[-1]
-    if extension not in Config.SDK_TO_EXTENSION.values():
-      continue
-    filepath = root_dir + filepath
-    if get_tag(filepath) is not None:
-      return True
-  return False
+    for filepath in pathsArr:
+        extension = filepath.split(os.extsep)[-1]
+        if extension not in Config.SDK_TO_EXTENSION.values():
+            continue
+        filepath = root_dir + filepath
+        if get_tag(filepath) is not None:
+            return True
+    return False
 
 
 if __name__ == "__main__":
-  paths = " ".join(sys.argv[1:])
-  if paths == "":
-    print(False)
-  print(check(paths))
+    paths = " ".join(sys.argv[1:])
+    if paths == "":
+        print(False)
+    print(check(paths))
diff --git a/playground/infrastructure/ci_cd.py 
b/playground/infrastructure/ci_cd.py
index 3517113d3ad..15ed27c944e 100644
--- a/playground/infrastructure/ci_cd.py
+++ b/playground/infrastructure/ci_cd.py
@@ -36,7 +36,7 @@ parser.add_argument(
     dest="step",
     required=True,
     help="CI step to verify all beam examples/tests/katas. CD step to save all 
"
-    "beam examples/tests/katas and their outputs on the GCS",
+         "beam examples/tests/katas and their outputs on the GCS",
     choices=[config.Config.CI_STEP_NAME, config.Config.CD_STEP_NAME])
 parser.add_argument(
     "--sdk",
@@ -50,52 +50,52 @@ categories_file = os.getenv("BEAM_EXAMPLE_CATEGORIES")
 
 
 def _ci_step(examples: List[Example]):
-  """
-  CI step to verify single-file beam examples/tests/katas
-  """
+    """
+    CI step to verify single-file beam examples/tests/katas
+    """
 
-  ci_helper = CIHelper()
-  asyncio.run(ci_helper.verify_examples(examples))
+    ci_helper = CIHelper()
+    asyncio.run(ci_helper.verify_examples(examples))
 
 
 def _cd_step(examples: List[Example]):
-  """
-  CD step to save all beam examples/tests/katas and their outputs on the GCS
-  """
-  cd_helper = CDHelper()
-  cd_helper.store_examples(examples)
+    """
+    CD step to save all beam examples/tests/katas and their outputs on the GCS
+    """
+    cd_helper = CDHelper()
+    cd_helper.store_examples(examples)
 
 
 def _check_envs():
-  if root_dir is None:
-    raise KeyError(
-        "BEAM_ROOT_DIR environment variable should be specified in os")
-  if categories_file is None:
-    raise KeyError(
-        "BEAM_EXAMPLE_CATEGORIES environment variable should be specified in 
os"
-    )
+    if root_dir is None:
+        raise KeyError(
+            "BEAM_ROOT_DIR environment variable should be specified in os")
+    if categories_file is None:
+        raise KeyError(
+            "BEAM_EXAMPLE_CATEGORIES environment variable should be specified 
in os"
+        )
 
 
 def _run_ci_cd(step: config.Config.CI_CD_LITERAL, sdk: Sdk):
-  supported_categories = get_supported_categories(categories_file)
-  logging.info("Start of searching Playground examples ...")
-  examples = find_examples(root_dir, supported_categories, sdk)
-  logging.info("Finish of searching Playground examples")
-  logging.info("Number of found Playground examples: %s", len(examples))
-
-  if step == config.Config.CI_STEP_NAME:
-    logging.info(
-        "Start of verification only single_file Playground examples ...")
-    _ci_step(examples=examples)
-    logging.info("Finish of verification single_file Playground examples")
-  if step == config.Config.CD_STEP_NAME:
-    logging.info("Start of storing Playground examples ...")
-    _cd_step(examples=examples)
-    logging.info("Finish of storing Playground examples")
+    supported_categories = get_supported_categories(categories_file)
+    logging.info("Start of searching Playground examples ...")
+    examples = find_examples(root_dir, supported_categories, sdk)
+    logging.info("Finish of searching Playground examples")
+    logging.info("Number of found Playground examples: %s", len(examples))
+
+    if step == config.Config.CI_STEP_NAME:
+        logging.info(
+            "Start of verification only single_file Playground examples ...")
+        _ci_step(examples=examples)
+        logging.info("Finish of verification single_file Playground examples")
+    if step == config.Config.CD_STEP_NAME:
+        logging.info("Start of storing Playground examples ...")
+        _cd_step(examples=examples)
+        logging.info("Finish of storing Playground examples")
 
 
 if __name__ == "__main__":
-  parser = parser.parse_args()
-  _check_envs()
-  setup_logger()
-  _run_ci_cd(parser.step, Sdk.Value(parser.sdk))
+    parser = parser.parse_args()
+    _check_envs()
+    setup_logger()
+    _run_ci_cd(parser.step, Sdk.Value(parser.sdk))
diff --git a/playground/infrastructure/ci_helper.py 
b/playground/infrastructure/ci_helper.py
index 1a5b40bec2d..5f85cbffed6 100644
--- a/playground/infrastructure/ci_helper.py
+++ b/playground/infrastructure/ci_helper.py
@@ -23,108 +23,108 @@ import logging
 from typing import List
 
 from api.v1.api_pb2 import STATUS_COMPILE_ERROR, STATUS_ERROR, 
STATUS_RUN_ERROR, \
-  STATUS_RUN_TIMEOUT, \
-  STATUS_VALIDATION_ERROR, STATUS_PREPARATION_ERROR
+    STATUS_RUN_TIMEOUT, \
+    STATUS_VALIDATION_ERROR, STATUS_PREPARATION_ERROR
 from config import Config
 from grpc_client import GRPCClient
 from helper import Example, get_statuses
 
 
 class VerifyException(Exception):
-  def __init__(self, error: str):
-    super().__init__()
-    self.msg = error
+    def __init__(self, error: str):
+        super().__init__()
+        self.msg = error
 
-  def __str__(self):
-    return self.msg
+    def __str__(self):
+        return self.msg
 
 
 class CIHelper:
-  """
-  Helper for CI step.
-
-  It is used to find and verify correctness if beam examples/katas/tests.
-  """
-  async def verify_examples(self, examples: List[Example]):
-    """
-    Verify correctness of beam examples.
-
-    1. Find all beam examples starting from directory 
os.getenv("BEAM_ROOT_DIR")
-    2. Group code of examples by their SDK.
-    3. Run processing for single-file examples to verify examples' code.
     """
-    single_file_examples = list(filter(
-      lambda example: example.tag.multifile is False, examples))
-    await get_statuses(single_file_examples)
-    await self._verify_examples(single_file_examples)
+    Helper for CI step.
 
-  async def _verify_examples(self, examples: List[Example]):
-    """
-    Verify statuses of beam examples and the number of found default examples.
-
-    Check example.status for each examples. If the status of the example is:
-    - STATUS_VALIDATION_ERROR/STATUS_PREPARATION_ERROR
-      /STATUS_ERROR/STATUS_RUN_TIMEOUT: log error
-    - STATUS_COMPILE_ERROR: get logs using GetCompileOutput request and
-      log them with error.
-    - STATUS_RUN_ERROR: get logs using GetRunError request and
-      log them with error.
-
-    Args:
-        examples: beam examples that should be verified
+    It is used to find and verify correctness if beam examples/katas/tests.
     """
-    count_of_verified = 0
-    client = GRPCClient()
-    verify_status_failed = False
-    default_examples = []
-
-    for example in examples:
-      if example.tag.default_example:
-        default_examples.append(example)
-      if example.status not in Config.ERROR_STATUSES:
-        count_of_verified += 1
-        continue
-      if example.status == STATUS_VALIDATION_ERROR:
-        logging.error("Example: %s has validation error", example.filepath)
-      elif example.status == STATUS_PREPARATION_ERROR:
-        logging.error("Example: %s has preparation error", example.filepath)
-      elif example.status == STATUS_ERROR:
-        logging.error(
-            "Example: %s has error during setup run builder", example.filepath)
-      elif example.status == STATUS_RUN_TIMEOUT:
-        logging.error("Example: %s failed because of timeout", 
example.filepath)
-      elif example.status == STATUS_COMPILE_ERROR:
-        err = await client.get_compile_output(example.pipeline_id)
-        logging.error(
-            "Example: %s has compilation error: %s", example.filepath, err)
-      elif example.status == STATUS_RUN_ERROR:
-        err = await client.get_run_error(example.pipeline_id)
-        logging.error(
-            "Example: %s has execution error: %s", example.filepath, err)
-      verify_status_failed = True
-
-    logging.info(
-        "Number of verified Playground examples: %s / %s",
-        count_of_verified,
-        len(examples))
-    logging.info(
-        "Number of Playground examples with some error: %s / %s",
-        len(examples) - count_of_verified,
-        len(examples))
-
-    if len(default_examples) == 0:
-      logging.error("Default example not found")
-      raise VerifyException(
-          "CI step failed due to finding an incorrect number "
-          "of default examples. Default example not found")
-    if len(default_examples) > 1:
-      logging.error("Many default examples found")
-      logging.error("Examples where the default_example field is true:")
-      for example in default_examples:
-        logging.error(example.filepath)
-      raise VerifyException(
-          "CI step failed due to finding an incorrect number "
-          "of default examples. Many default examples found")
-
-    if verify_status_failed:
-      raise VerifyException("CI step failed due to errors in the examples")
+    async def verify_examples(self, examples: List[Example]):
+        """
+        Verify correctness of beam examples.
+
+        1. Find all beam examples starting from directory 
os.getenv("BEAM_ROOT_DIR")
+        2. Group code of examples by their SDK.
+        3. Run processing for single-file examples to verify examples' code.
+        """
+        single_file_examples = list(filter(
+            lambda example: example.tag.multifile is False, examples))
+        await get_statuses(single_file_examples)
+        await self._verify_examples(single_file_examples)
+
+    async def _verify_examples(self, examples: List[Example]):
+        """
+        Verify statuses of beam examples and the number of found default 
examples.
+
+        Check example.status for each examples. If the status of the example 
is:
+        - STATUS_VALIDATION_ERROR/STATUS_PREPARATION_ERROR
+          /STATUS_ERROR/STATUS_RUN_TIMEOUT: log error
+        - STATUS_COMPILE_ERROR: get logs using GetCompileOutput request and
+          log them with error.
+        - STATUS_RUN_ERROR: get logs using GetRunError request and
+          log them with error.
+
+        Args:
+            examples: beam examples that should be verified
+        """
+        count_of_verified = 0
+        client = GRPCClient()
+        verify_status_failed = False
+        default_examples = []
+
+        for example in examples:
+            if example.tag.default_example:
+                default_examples.append(example)
+            if example.status not in Config.ERROR_STATUSES:
+                count_of_verified += 1
+                continue
+            if example.status == STATUS_VALIDATION_ERROR:
+                logging.error("Example: %s has validation error", 
example.filepath)
+            elif example.status == STATUS_PREPARATION_ERROR:
+                logging.error("Example: %s has preparation error", 
example.filepath)
+            elif example.status == STATUS_ERROR:
+                logging.error(
+                    "Example: %s has error during setup run builder", 
example.filepath)
+            elif example.status == STATUS_RUN_TIMEOUT:
+                logging.error("Example: %s failed because of timeout", 
example.filepath)
+            elif example.status == STATUS_COMPILE_ERROR:
+                err = await client.get_compile_output(example.pipeline_id)
+                logging.error(
+                    "Example: %s has compilation error: %s", example.filepath, 
err)
+            elif example.status == STATUS_RUN_ERROR:
+                err = await client.get_run_error(example.pipeline_id)
+                logging.error(
+                    "Example: %s has execution error: %s", example.filepath, 
err)
+            verify_status_failed = True
+
+        logging.info(
+            "Number of verified Playground examples: %s / %s",
+            count_of_verified,
+            len(examples))
+        logging.info(
+            "Number of Playground examples with some error: %s / %s",
+            len(examples) - count_of_verified,
+            len(examples))
+
+        if len(default_examples) == 0:
+            logging.error("Default example not found")
+            raise VerifyException(
+                "CI step failed due to finding an incorrect number "
+                "of default examples. Default example not found")
+        if len(default_examples) > 1:
+            logging.error("Many default examples found")
+            logging.error("Examples where the default_example field is true:")
+            for example in default_examples:
+                logging.error(example.filepath)
+            raise VerifyException(
+                "CI step failed due to finding an incorrect number "
+                "of default examples. Many default examples found")
+
+        if verify_status_failed:
+            raise VerifyException("CI step failed due to errors in the 
examples")
diff --git a/playground/infrastructure/config.py 
b/playground/infrastructure/config.py
index e072297486d..f729253e840 100644
--- a/playground/infrastructure/config.py
+++ b/playground/infrastructure/config.py
@@ -22,77 +22,77 @@ from dataclasses import dataclass
 from typing import Literal
 
 from api.v1.api_pb2 import STATUS_VALIDATION_ERROR, STATUS_ERROR, \
-  STATUS_PREPARATION_ERROR, STATUS_COMPILE_ERROR, \
-  STATUS_RUN_TIMEOUT, STATUS_RUN_ERROR, SDK_JAVA, SDK_GO, SDK_PYTHON, \
-  SDK_SCIO, Sdk
+    STATUS_PREPARATION_ERROR, STATUS_COMPILE_ERROR, \
+    STATUS_RUN_TIMEOUT, STATUS_RUN_ERROR, SDK_JAVA, SDK_GO, SDK_PYTHON, \
+    SDK_SCIO, Sdk
 
 
 @dataclass(frozen=True)
 class Config:
-  """
-  General configuration for CI/CD steps
-  """
-  SERVER_ADDRESS = os.getenv("SERVER_ADDRESS", "localhost:8080")
-  EXTENSION_TO_SDK = {
-      "java": SDK_JAVA, "go": SDK_GO, "py": SDK_PYTHON, "scala": SDK_SCIO
-  }
-  SUPPORTED_SDK = (
-      Sdk.Name(SDK_JAVA),
-      Sdk.Name(SDK_GO),
-      Sdk.Name(SDK_PYTHON),
-      Sdk.Name(SDK_SCIO))
-  BUCKET_NAME = "playground-precompiled-objects"
-  TEMP_FOLDER = "temp"
-  DEFAULT_PRECOMPILED_OBJECT = "defaultPrecompiledObject.info"
-  SDK_TO_EXTENSION = {
-      SDK_JAVA: "java", SDK_GO: "go", SDK_PYTHON: "py", SDK_SCIO: "scala"
-  }
-  NO_STORE = "no-store"
-  ERROR_STATUSES = [
-      STATUS_VALIDATION_ERROR,
-      STATUS_ERROR,
-      STATUS_PREPARATION_ERROR,
-      STATUS_COMPILE_ERROR,
-      STATUS_RUN_TIMEOUT,
-      STATUS_RUN_ERROR
-  ]
-  BEAM_PLAYGROUND_TITLE = "beam-playground:\n"
-  BEAM_PLAYGROUND = "beam-playground"
-  PAUSE_DELAY = 10
-  CI_STEP_NAME = "CI"
-  CD_STEP_NAME = "CD"
-  CI_CD_LITERAL = Literal["CI", "CD"]
-  LINK_PREFIX = "https://github.com/apache/beam/blob/master"
+    """
+    General configuration for CI/CD steps
+    """
+    SERVER_ADDRESS = os.getenv("SERVER_ADDRESS", "localhost:8080")
+    EXTENSION_TO_SDK = {
+        "java": SDK_JAVA, "go": SDK_GO, "py": SDK_PYTHON, "scala": SDK_SCIO
+    }
+    SUPPORTED_SDK = (
+        Sdk.Name(SDK_JAVA),
+        Sdk.Name(SDK_GO),
+        Sdk.Name(SDK_PYTHON),
+        Sdk.Name(SDK_SCIO))
+    BUCKET_NAME = "playground-precompiled-objects"
+    TEMP_FOLDER = "temp"
+    DEFAULT_PRECOMPILED_OBJECT = "defaultPrecompiledObject.info"
+    SDK_TO_EXTENSION = {
+        SDK_JAVA: "java", SDK_GO: "go", SDK_PYTHON: "py", SDK_SCIO: "scala"
+    }
+    NO_STORE = "no-store"
+    ERROR_STATUSES = [
+        STATUS_VALIDATION_ERROR,
+        STATUS_ERROR,
+        STATUS_PREPARATION_ERROR,
+        STATUS_COMPILE_ERROR,
+        STATUS_RUN_TIMEOUT,
+        STATUS_RUN_ERROR
+    ]
+    BEAM_PLAYGROUND_TITLE = "beam-playground:\n"
+    BEAM_PLAYGROUND = "beam-playground"
+    PAUSE_DELAY = 10
+    CI_STEP_NAME = "CI"
+    CD_STEP_NAME = "CD"
+    CI_CD_LITERAL = Literal["CI", "CD"]
+    LINK_PREFIX = "https://github.com/apache/beam/blob/master"
 
 
 @dataclass(frozen=True)
 class TagFields:
-  name: str = "name"
-  description: str = "description"
-  multifile: str = "multifile"
-  categories: str = "categories"
-  pipeline_options: str = "pipeline_options"
-  default_example: str = "default_example"
-  context_line: int = "context_line"
+    name: str = "name"
+    description: str = "description"
+    multifile: str = "multifile"
+    categories: str = "categories"
+    pipeline_options: str = "pipeline_options"
+    default_example: str = "default_example"
+    context_line: int = "context_line"
 
 
 @dataclass(frozen=True)
 class PrecompiledExample:
-  OUTPUT_EXTENSION = "output"
-  LOG_EXTENSION = "log"
-  GRAPH_EXTENSION = "graph"
-  META_NAME = "meta"
-  META_EXTENSION = "info"
+    OUTPUT_EXTENSION = "output"
+    LOG_EXTENSION = "log"
+    GRAPH_EXTENSION = "graph"
+    META_NAME = "meta"
+    META_EXTENSION = "info"
 
 
 @dataclass(frozen=True)
 class PrecompiledExampleType:
-  examples = "examples"
-  katas = "katas"
-  test_ends = ("test", "it")
+    examples = "examples"
+    katas = "katas"
+    test_ends = ("test", "it")
 
 
 @dataclass(frozen=True)
 class OptionalTagFields:
-  pipeline_options: str = "pipeline_options"
-  default_example: str = "default_example"
+    pipeline_options: str = "pipeline_options"
+    default_example: str = "default_example"
diff --git a/playground/infrastructure/grpc_client.py b/playground/infrastructure/grpc_client.py
index 40543d5dd36..860bc2cf524 100644
--- a/playground/infrastructure/grpc_client.py
+++ b/playground/infrastructure/grpc_client.py
@@ -26,143 +26,143 @@ from config import Config
 
 
 class GRPCClient:
-  """GRPCClient is gRPC client for sending a request to the backend."""
-
-  def __init__(self):
-    self._channel = grpc.aio.insecure_channel(Config.SERVER_ADDRESS)
-    self._stub = api_pb2_grpc.PlaygroundServiceStub(self._channel)
-
-  async def run_code(
-      self, code: str, sdk: api_pb2.Sdk, pipeline_options: str) -> str:
-    """
-    Run example by his code and SDK
-
-    Args:
-        code: code of the example.
-        sdk: SDK of the example.
-        pipeline_options: pipeline options of the example.
-
-    Returns:
-        pipeline_uuid: uuid of the pipeline
-    """
-    if sdk not in api_pb2.Sdk.values():
-      sdks = api_pb2.Sdk.keys()
-      sdks.remove(api_pb2.Sdk.Name(0))  # del SDK_UNSPECIFIED
-      raise Exception(
-          f'Incorrect sdk: must be from this pool: {", ".join(sdks)}')
-    request = api_pb2.RunCodeRequest(
-        code=code, sdk=sdk, pipeline_options=pipeline_options)
-    response = await self._stub.RunCode(request)
-    return response.pipeline_uuid
-
-  async def check_status(self, pipeline_uuid: str) -> api_pb2.Status:
-    """
-    Get status of the pipeline by his pipeline
-
-    Args:
-        pipeline_uuid: uuid of the pipeline
-
-    Returns:
-        status: status of the pipeline
-    """
-    self._verify_pipeline_uuid(pipeline_uuid)
-    request = api_pb2.CheckStatusRequest(pipeline_uuid=pipeline_uuid)
-    response = await self._stub.CheckStatus(request)
-    return response.status
-
-  async def get_run_error(self, pipeline_uuid: str) -> str:
-    """
-    Get the error of pipeline execution.
-
-    Args:
-        pipeline_uuid: uuid of the pipeline
-
-    Returns:
-        output: contain an error of pipeline execution
-    """
-    self._verify_pipeline_uuid(pipeline_uuid)
-    request = api_pb2.GetRunErrorRequest(pipeline_uuid=pipeline_uuid)
-    response = await self._stub.GetRunError(request)
-    return response.output
-
-  async def get_run_output(self, pipeline_uuid: str) -> str:
-    """
-    Get the result of pipeline execution.
-
-    Args:
-        pipeline_uuid: uuid of the pipeline
-
-    Returns:
-        output: contain the result of pipeline execution
-    """
-    self._verify_pipeline_uuid(pipeline_uuid)
-    request = api_pb2.GetRunOutputRequest(pipeline_uuid=pipeline_uuid)
-    response = await self._stub.GetRunOutput(request)
-    return response.output
-
-  async def get_log(self, pipeline_uuid: str) -> str:
-    """
-    Get the result of pipeline execution.
-
-    Args:
-        pipeline_uuid: uuid of the pipeline
-
-    Returns:
-        output: contain the result of pipeline execution
-    """
-    self._verify_pipeline_uuid(pipeline_uuid)
-    request = api_pb2.GetLogsRequest(pipeline_uuid=pipeline_uuid)
-    response = await self._stub.GetLogs(request)
-    return response.output
-
-  async def get_compile_output(self, pipeline_uuid: str) -> str:
-    """
-    Get the result of pipeline compilation.
-
-    Args:
-        pipeline_uuid: uuid of the pipeline
-
-    Returns:
-        output: contain the result of pipeline compilation
-    """
-    self._verify_pipeline_uuid(pipeline_uuid)
-    request = api_pb2.GetCompileOutputRequest(pipeline_uuid=pipeline_uuid)
-    response = await self._stub.GetCompileOutput(request)
-    return response.output
-
-  async def get_graph(self, pipeline_uuid: str, example_filepath: str) -> str:
-    """
-    Get the graph of pipeline execution.
-
-    Args:
-        pipeline_uuid: uuid of the pipeline
-        example_filepath: path to the file of the example
-
-    Returns:
-        graph: contain the graph of pipeline execution as a string
-    """
-    self._verify_pipeline_uuid(pipeline_uuid)
-    request = api_pb2.GetGraphRequest(pipeline_uuid=pipeline_uuid)
-    try:
-      response = await self._stub.GetGraph(request)
-      if response.graph == "":
-        logging.warning("Graph for %s wasn't generated", example_filepath)
-      return response.graph
-    except grpc.RpcError:
-      logging.warning("Graph for %s wasn't generated", example_filepath)
-      return ""
-
-  def _verify_pipeline_uuid(self, pipeline_uuid):
-    """
-    Verify the received pipeline_uuid format
-
-    Args:
-        pipeline_uuid: uuid of the pipeline
-
-    Returns:
-        If pipeline ID is not verified, will raise an exception
-    """
-    try:
-      uuid.UUID(pipeline_uuid)
-    except ValueError as ve:
-      raise ValueError(f"Incorrect pipeline uuid: '{pipeline_uuid}'") from ve
+    """GRPCClient is gRPC client for sending a request to the backend."""
+
+    def __init__(self):
+        self._channel = grpc.aio.insecure_channel(Config.SERVER_ADDRESS)
+        self._stub = api_pb2_grpc.PlaygroundServiceStub(self._channel)
+
+    async def run_code(
+          self, code: str, sdk: api_pb2.Sdk, pipeline_options: str) -> str:
+        """
+        Run example by his code and SDK
+
+        Args:
+            code: code of the example.
+            sdk: SDK of the example.
+            pipeline_options: pipeline options of the example.
+
+        Returns:
+            pipeline_uuid: uuid of the pipeline
+        """
+        if sdk not in api_pb2.Sdk.values():
+            sdks = api_pb2.Sdk.keys()
+            sdks.remove(api_pb2.Sdk.Name(0))  # del SDK_UNSPECIFIED
+            raise Exception(
+                f'Incorrect sdk: must be from this pool: {", ".join(sdks)}')
+        request = api_pb2.RunCodeRequest(
+            code=code, sdk=sdk, pipeline_options=pipeline_options)
+        response = await self._stub.RunCode(request)
+        return response.pipeline_uuid
+
+    async def check_status(self, pipeline_uuid: str) -> api_pb2.Status:
+        """
+        Get status of the pipeline by his pipeline
+
+        Args:
+            pipeline_uuid: uuid of the pipeline
+
+        Returns:
+            status: status of the pipeline
+        """
+        self._verify_pipeline_uuid(pipeline_uuid)
+        request = api_pb2.CheckStatusRequest(pipeline_uuid=pipeline_uuid)
+        response = await self._stub.CheckStatus(request)
+        return response.status
+
+    async def get_run_error(self, pipeline_uuid: str) -> str:
+        """
+        Get the error of pipeline execution.
+
+        Args:
+            pipeline_uuid: uuid of the pipeline
+
+        Returns:
+            output: contain an error of pipeline execution
+        """
+        self._verify_pipeline_uuid(pipeline_uuid)
+        request = api_pb2.GetRunErrorRequest(pipeline_uuid=pipeline_uuid)
+        response = await self._stub.GetRunError(request)
+        return response.output
+
+    async def get_run_output(self, pipeline_uuid: str) -> str:
+        """
+        Get the result of pipeline execution.
+
+        Args:
+            pipeline_uuid: uuid of the pipeline
+
+        Returns:
+            output: contain the result of pipeline execution
+        """
+        self._verify_pipeline_uuid(pipeline_uuid)
+        request = api_pb2.GetRunOutputRequest(pipeline_uuid=pipeline_uuid)
+        response = await self._stub.GetRunOutput(request)
+        return response.output
+
+    async def get_log(self, pipeline_uuid: str) -> str:
+        """
+        Get the result of pipeline execution.
+
+        Args:
+            pipeline_uuid: uuid of the pipeline
+
+        Returns:
+            output: contain the result of pipeline execution
+        """
+        self._verify_pipeline_uuid(pipeline_uuid)
+        request = api_pb2.GetLogsRequest(pipeline_uuid=pipeline_uuid)
+        response = await self._stub.GetLogs(request)
+        return response.output
+
+    async def get_compile_output(self, pipeline_uuid: str) -> str:
+        """
+        Get the result of pipeline compilation.
+
+        Args:
+            pipeline_uuid: uuid of the pipeline
+
+        Returns:
+            output: contain the result of pipeline compilation
+        """
+        self._verify_pipeline_uuid(pipeline_uuid)
+        request = api_pb2.GetCompileOutputRequest(pipeline_uuid=pipeline_uuid)
+        response = await self._stub.GetCompileOutput(request)
+        return response.output
+
+    async def get_graph(self, pipeline_uuid: str, example_filepath: str) -> str:
+        """
+        Get the graph of pipeline execution.
+
+        Args:
+            pipeline_uuid: uuid of the pipeline
+            example_filepath: path to the file of the example
+
+        Returns:
+            graph: contain the graph of pipeline execution as a string
+        """
+        self._verify_pipeline_uuid(pipeline_uuid)
+        request = api_pb2.GetGraphRequest(pipeline_uuid=pipeline_uuid)
+        try:
+            response = await self._stub.GetGraph(request)
+            if response.graph == "":
+                logging.warning("Graph for %s wasn't generated", example_filepath)
+            return response.graph
+        except grpc.RpcError:
+            logging.warning("Graph for %s wasn't generated", example_filepath)
+            return ""
+
+    def _verify_pipeline_uuid(self, pipeline_uuid):
+        """
+        Verify the received pipeline_uuid format
+
+        Args:
+            pipeline_uuid: uuid of the pipeline
+
+        Returns:
+            If pipeline ID is not verified, will raise an exception
+        """
+        try:
+            uuid.UUID(pipeline_uuid)
+        except ValueError as ve:
+            raise ValueError(f"Incorrect pipeline uuid: '{pipeline_uuid}'") from ve
diff --git a/playground/infrastructure/helper.py b/playground/infrastructure/helper.py
index f2a8eb04a70..3780465e105 100644
--- a/playground/infrastructure/helper.py
+++ b/playground/infrastructure/helper.py
@@ -29,10 +29,10 @@ import yaml
 from yaml import YAMLError
 
 from api.v1.api_pb2 import SDK_UNSPECIFIED, STATUS_UNSPECIFIED, Sdk, \
-  STATUS_VALIDATING, STATUS_PREPARING, \
-  STATUS_COMPILING, STATUS_EXECUTING, PRECOMPILED_OBJECT_TYPE_UNIT_TEST, \
-  PRECOMPILED_OBJECT_TYPE_KATA, PRECOMPILED_OBJECT_TYPE_UNSPECIFIED, \
-  PRECOMPILED_OBJECT_TYPE_EXAMPLE, PrecompiledObjectType
+    STATUS_VALIDATING, STATUS_PREPARING, \
+    STATUS_COMPILING, STATUS_EXECUTING, PRECOMPILED_OBJECT_TYPE_UNIT_TEST, \
+    PRECOMPILED_OBJECT_TYPE_KATA, PRECOMPILED_OBJECT_TYPE_UNSPECIFIED, \
+    PRECOMPILED_OBJECT_TYPE_EXAMPLE, PrecompiledObjectType
 from config import Config, TagFields, PrecompiledExampleType, OptionalTagFields
 from grpc_client import GRPCClient
 
@@ -52,369 +52,369 @@ Tag = namedtuple(
 
 @dataclass
 class Example:
-  """
-  Class which contains all information about beam example
-  """
-  name: str
-  sdk: SDK_UNSPECIFIED
-  filepath: str
-  code: str
-  status: STATUS_UNSPECIFIED
-  tag: Tag
-  link: str
-  logs: str = ""
-  type: PrecompiledObjectType = PRECOMPILED_OBJECT_TYPE_UNSPECIFIED
-  pipeline_id: str = ""
-  output: str = ""
-  graph: str = ""
+    """
+    Class which contains all information about beam example
+    """
+    name: str
+    sdk: SDK_UNSPECIFIED
+    filepath: str
+    code: str
+    status: STATUS_UNSPECIFIED
+    tag: Tag
+    link: str
+    logs: str = ""
+    type: PrecompiledObjectType = PRECOMPILED_OBJECT_TYPE_UNSPECIFIED
+    pipeline_id: str = ""
+    output: str = ""
+    graph: str = ""
 
 
 @dataclass
 class ExampleTag:
-  """
-  Class which contains all information about beam playground tag
-  """
-  tag_as_dict: Dict[str, str]
-  tag_as_string: str
+    """
+    Class which contains all information about beam playground tag
+    """
+    tag_as_dict: Dict[str, str]
+    tag_as_string: str
 
 
 def find_examples(work_dir: str, supported_categories: List[str],
                   sdk: Sdk) -> List[Example]:
-  """
-  Find and return beam examples.
-
-  Search throws all child files of work_dir directory files with beam tag:
-  Beam-playground:
-      name: NameOfExample
-      description: Description of NameOfExample.
-      multifile: false
-      default_example: false
-      context_line: 10
-      categories:
-          - category-1
-          - category-2
-      pipeline_options: --inputFile your_file --outputFile your_output_file
-  If some example contain beam tag with incorrect format raise an error.
-
-  Args:
-      work_dir: directory where to search examples.
-      supported_categories: list of supported categories.
-      sdk: sdk that using to find examples for the specific sdk.
-
-  Returns:
-      List of Examples.
-  """
-  has_error = False
-  examples = []
-  for root, _, files in os.walk(work_dir):
-    for filename in files:
-      filepath = os.path.join(root, filename)
-      error_during_check_file = _check_file(
-          examples=examples,
-          filename=filename,
-          filepath=filepath,
-          supported_categories=supported_categories,
-          sdk=sdk)
-      has_error = has_error or error_during_check_file
-  if has_error:
-    raise ValueError(
-        "Some of the beam examples contain beam playground tag with "
-        "an incorrect format")
-  return examples
+    """
+    Find and return beam examples.
+
+    Search throws all child files of work_dir directory files with beam tag:
+    Beam-playground:
+        name: NameOfExample
+        description: Description of NameOfExample.
+        multifile: false
+        default_example: false
+        context_line: 10
+        categories:
+            - category-1
+            - category-2
+        pipeline_options: --inputFile your_file --outputFile your_output_file
+    If some example contain beam tag with incorrect format raise an error.
+
+    Args:
+        work_dir: directory where to search examples.
+        supported_categories: list of supported categories.
+        sdk: sdk that using to find examples for the specific sdk.
+
+    Returns:
+        List of Examples.
+    """
+    has_error = False
+    examples = []
+    for root, _, files in os.walk(work_dir):
+        for filename in files:
+            filepath = os.path.join(root, filename)
+            error_during_check_file = _check_file(
+                examples=examples,
+                filename=filename,
+                filepath=filepath,
+                supported_categories=supported_categories,
+                sdk=sdk)
+            has_error = has_error or error_during_check_file
+    if has_error:
+        raise ValueError(
+            "Some of the beam examples contain beam playground tag with "
+            "an incorrect format")
+    return examples
 
 
 async def get_statuses(examples: List[Example]):
-  """
-  Receive status and update example.status and example.pipeline_id for
-  each example
+    """
+    Receive status and update example.status and example.pipeline_id for
+    each example
 
-  Args:
-      examples: beam examples for processing and updating statuses and
-      pipeline_id values.
-  """
-  tasks = []
-  client = GRPCClient()
-  for example in examples:
-    tasks.append(_update_example_status(example, client))
-  await tqdm.gather(*tasks)
+    Args:
+        examples: beam examples for processing and updating statuses and
+        pipeline_id values.
+    """
+    tasks = []
+    client = GRPCClient()
+    for example in examples:
+        tasks.append(_update_example_status(example, client))
+    await tqdm.gather(*tasks)
 
 
 def get_tag(filepath) -> Optional[ExampleTag]:
-  """
-  Parse file by filepath and find beam tag
-
-  Args:
-      filepath: path of the file
-
-  Returns:
-      If file contains tag, returns tag as a map.
-      If file doesn't contain tag, returns None
-  """
-  add_to_yaml = False
-  yaml_string = ""
-  tag_string = ""
-
-  with open(filepath, encoding="utf-8") as parsed_file:
-    lines = parsed_file.readlines()
-
-  for line in lines:
-    formatted_line = line.replace("//", "").replace("#",
-                                                    "").replace("\t", "    ")
-    if add_to_yaml is False:
-      if formatted_line.lstrip() == Config.BEAM_PLAYGROUND_TITLE:
-        add_to_yaml = True
-        yaml_string += formatted_line.lstrip()
-        tag_string += line
-    else:
-      yaml_with_new_string = yaml_string + formatted_line
-      try:
-        yaml.load(yaml_with_new_string, Loader=yaml.SafeLoader)
-        yaml_string += formatted_line
-        tag_string += line
-      except YAMLError:
-        break
-
-  if add_to_yaml:
-    tag_object = yaml.load(yaml_string, Loader=yaml.SafeLoader)
-    return ExampleTag(tag_object[Config.BEAM_PLAYGROUND], tag_string)
-
-  return None
+    """
+    Parse file by filepath and find beam tag
+
+    Args:
+        filepath: path of the file
+
+    Returns:
+        If file contains tag, returns tag as a map.
+        If file doesn't contain tag, returns None
+    """
+    add_to_yaml = False
+    yaml_string = ""
+    tag_string = ""
+
+    with open(filepath, encoding="utf-8") as parsed_file:
+        lines = parsed_file.readlines()
+
+    for line in lines:
+        formatted_line = line.replace("//", "").replace("#",
+                                                        "").replace("\t", "    ")
+        if add_to_yaml is False:
+            if formatted_line.lstrip() == Config.BEAM_PLAYGROUND_TITLE:
+                add_to_yaml = True
+                yaml_string += formatted_line.lstrip()
+                tag_string += line
+        else:
+            yaml_with_new_string = yaml_string + formatted_line
+            try:
+                yaml.load(yaml_with_new_string, Loader=yaml.SafeLoader)
+                yaml_string += formatted_line
+                tag_string += line
+            except YAMLError:
+                break
+
+    if add_to_yaml:
+        tag_object = yaml.load(yaml_string, Loader=yaml.SafeLoader)
+        return ExampleTag(tag_object[Config.BEAM_PLAYGROUND], tag_string)
+
+    return None
 
 
 def _check_file(examples, filename, filepath, supported_categories, sdk: Sdk):
-  """
-  Check file by filepath for matching to beam example. If file is beam example,
-  then add it to list of examples
-
-  Args:
-      examples: list of examples.
-      filename: name of the file.
-      filepath: path to the file.
-      supported_categories: list of supported categories.
-      sdk: sdk that using to find examples for the specific sdk.
-
-  Returns:
-      True if file has beam playground tag with incorrect format.
-      False if file has correct beam playground tag.
-      False if file doesn't contains beam playground tag.
-  """
-  if filepath.endswith("infrastructure/helper.py"):
-    return False
-
-  has_error = False
-  extension = filepath.split(os.extsep)[-1]
-  if extension == Config.SDK_TO_EXTENSION[sdk]:
-    tag = get_tag(filepath)
-    if tag is not None:
-      if _validate(tag.tag_as_dict, supported_categories) is False:
-        logging.error(
-            "%s contains beam playground tag with incorrect format", filepath)
-        has_error = True
-      else:
-        examples.append(_get_example(filepath, filename, tag))
-  return has_error
+    """
+    Check file by filepath for matching to beam example. If file is beam example,
+    then add it to list of examples
+
+    Args:
+        examples: list of examples.
+        filename: name of the file.
+        filepath: path to the file.
+        supported_categories: list of supported categories.
+        sdk: sdk that using to find examples for the specific sdk.
+
+    Returns:
+        True if file has beam playground tag with incorrect format.
+        False if file has correct beam playground tag.
+        False if file doesn't contains beam playground tag.
+    """
+    if filepath.endswith("infrastructure/helper.py"):
+        return False
+
+    has_error = False
+    extension = filepath.split(os.extsep)[-1]
+    if extension == Config.SDK_TO_EXTENSION[sdk]:
+        tag = get_tag(filepath)
+        if tag is not None:
+            if _validate(tag.tag_as_dict, supported_categories) is False:
+                logging.error(
+                    "%s contains beam playground tag with incorrect format", filepath)
+                has_error = True
+            else:
+                examples.append(_get_example(filepath, filename, tag))
+    return has_error
 
 
 def get_supported_categories(categories_path: str) -> List[str]:
-  """
-  Return list of supported categories from categories_path file
+    """
+    Return list of supported categories from categories_path file
 
-  Args:
-      categories_path: path to the file with categories.
+    Args:
+        categories_path: path to the file with categories.
 
-  Returns:
-      All supported categories as a list.
-  """
-  with open(categories_path, encoding="utf-8") as supported_categories:
-    yaml_object = yaml.load(supported_categories.read(), Loader=yaml.SafeLoader)
-    return yaml_object[TagFields.categories]
+    Returns:
+        All supported categories as a list.
+    """
+    with open(categories_path, encoding="utf-8") as supported_categories:
+        yaml_object = yaml.load(supported_categories.read(), Loader=yaml.SafeLoader)
+        return yaml_object[TagFields.categories]
 
 
 def _get_example(filepath: str, filename: str, tag: ExampleTag) -> Example:
-  """
-  Return an Example by filepath and filename.
-
-  Args:
-       tag: tag of the example.
-       filepath: path of the example's file.
-       filename: name of the example's file.
-
-  Returns:
-      Parsed Example object.
-  """
-  name = _get_name(filename)
-  sdk = Config.EXTENSION_TO_SDK[filename.split(os.extsep)[-1]]
-  object_type = _get_object_type(filename, filepath)
-  with open(filepath, encoding="utf-8") as parsed_file:
-    content = parsed_file.read()
-  content = content.replace(tag.tag_as_string, "")
-  tag.tag_as_dict[TagFields.context_line] -= tag.tag_as_string.count("\n")
-  root_dir = os.getenv("BEAM_ROOT_DIR", "")
-  file_path_without_root = filepath.replace(root_dir, "", 1)
-  if file_path_without_root.startswith("/"):
-    link = "{}{}".format(Config.LINK_PREFIX, file_path_without_root)
-  else:
-    link = "{}/{}".format(Config.LINK_PREFIX, file_path_without_root)
-
-  return Example(
-      name=name,
-      sdk=sdk,
-      filepath=filepath,
-      code=content,
-      status=STATUS_UNSPECIFIED,
-      tag=Tag(**tag.tag_as_dict),
-      type=object_type,
-      link=link)
+    """
+    Return an Example by filepath and filename.
+
+    Args:
+         tag: tag of the example.
+         filepath: path of the example's file.
+         filename: name of the example's file.
+
+    Returns:
+        Parsed Example object.
+    """
+    name = _get_name(filename)
+    sdk = Config.EXTENSION_TO_SDK[filename.split(os.extsep)[-1]]
+    object_type = _get_object_type(filename, filepath)
+    with open(filepath, encoding="utf-8") as parsed_file:
+        content = parsed_file.read()
+    content = content.replace(tag.tag_as_string, "")
+    tag.tag_as_dict[TagFields.context_line] -= tag.tag_as_string.count("\n")
+    root_dir = os.getenv("BEAM_ROOT_DIR", "")
+    file_path_without_root = filepath.replace(root_dir, "", 1)
+    if file_path_without_root.startswith("/"):
+        link = "{}{}".format(Config.LINK_PREFIX, file_path_without_root)
+    else:
+        link = "{}/{}".format(Config.LINK_PREFIX, file_path_without_root)
+
+    return Example(
+        name=name,
+        sdk=sdk,
+        filepath=filepath,
+        code=content,
+        status=STATUS_UNSPECIFIED,
+        tag=Tag(**tag.tag_as_dict),
+        type=object_type,
+        link=link)
 
 
 def _validate(tag: dict, supported_categories: List[str]) -> bool:
-  """
-  Validate all tag's fields
-
-  Validate that tag contains all required fields and all fields have required
-  format.
-
-  Args:
-      tag: beam tag to validate.
-      supported_categories: list of supported categories.
-
-  Returns:
-      In case tag is valid, True
-      In case tag is not valid, False
-  """
-  valid = True
-  required_tag_fields = {
-      f.default
-      for f in fields(TagFields)
-      if f.default not in {o_f.default
-                           for o_f in fields(OptionalTagFields)}
-  }
-  # check that all fields exist and they have no empty value
-  for field in required_tag_fields:
-    if field not in tag:
-      logging.error(
-          "tag doesn't contain %s field: %s \n"
-          "Please, check that this field exists in the beam playground tag."
-          "If you are sure that this field exists in the tag"
-          " check the format of indenting.",
-          field,
-          tag)
-      valid = False
-    if valid is True:
-      value = tag.get(field)
-      if (value == "" or value is None) and field != TagFields.pipeline_options:
+    """
+    Validate all tag's fields
+
+    Validate that tag contains all required fields and all fields have required
+    format.
+
+    Args:
+        tag: beam tag to validate.
+        supported_categories: list of supported categories.
+
+    Returns:
+        In case tag is valid, True
+        In case tag is not valid, False
+    """
+    valid = True
+    required_tag_fields = {
+        f.default
+        for f in fields(TagFields)
+        if f.default not in {o_f.default
+                             for o_f in fields(OptionalTagFields)}
+    }
+    # check that all fields exist and they have no empty value
+    for field in required_tag_fields:
+        if field not in tag:
+            logging.error(
+                "tag doesn't contain %s field: %s \n"
+                "Please, check that this field exists in the beam playground tag."
+                "If you are sure that this field exists in the tag"
+                " check the format of indenting.",
+                field,
+                tag)
+            valid = False
+        if valid is True:
+            value = tag.get(field)
+            if (value == "" or value is None) and field != TagFields.pipeline_options:
+                logging.error(
+                    "tag's value is incorrect: %s\n%s field can not be empty.",
+                    tag,
+                    field)
+                valid = False
+
+    if valid is False:
+        return valid
+
+    # check that multifile's value is boolean
+    multifile = tag.get(TagFields.multifile)
+    if str(multifile).lower() not in ["true", "false"]:
         logging.error(
-            "tag's value is incorrect: %s\n%s field can not be empty.",
+            "tag's field multifile is incorrect: %s \n"
+            "multifile variable should be boolean format, but tag contains: %s",
             tag,
-            field)
+            multifile)
         valid = False
 
-  if valid is False:
-    return valid
-
-  # check that multifile's value is boolean
-  multifile = tag.get(TagFields.multifile)
-  if str(multifile).lower() not in ["true", "false"]:
-    logging.error(
-        "tag's field multifile is incorrect: %s \n"
-        "multifile variable should be boolean format, but tag contains: %s",
-        tag,
-        multifile)
-    valid = False
-
-  # check that categories' value is a list of supported categories
-  categories = tag.get(TagFields.categories)
-  if not isinstance(categories, list):
-    logging.error(
-        "tag's field categories is incorrect: %s \n"
-        "categories variable should be list format, but tag contains: %s",
-        tag,
-        type(categories))
-    valid = False
-  else:
-    for category in categories:
-      if category not in supported_categories:
+    # check that categories' value is a list of supported categories
+    categories = tag.get(TagFields.categories)
+    if not isinstance(categories, list):
         logging.error(
-            "tag contains unsupported category: %s \n"
-            "If you are sure that %s category should be placed in "
-            "Beam Playground, you can add it to the "
-            "`playground/categories.yaml` file",
-            category,
-            category)
+            "tag's field categories is incorrect: %s \n"
+            "categories variable should be list format, but tag contains: %s",
+            tag,
+            type(categories))
         valid = False
-
-  # check that context line's value is integer
-  context_line = tag.get(TagFields.context_line)
-  if not isinstance(context_line, int):
-    logging.error(
-        "Tag's field context_line is incorrect: %s \n"
-        "context_line variable should be integer format, "
-        "but tag contains: %s",
-        tag,
-        context_line)
-    valid = False
-  return valid
+    else:
+        for category in categories:
+            if category not in supported_categories:
+                logging.error(
+                    "tag contains unsupported category: %s \n"
+                    "If you are sure that %s category should be placed in "
+                    "Beam Playground, you can add it to the "
+                    "`playground/categories.yaml` file",
+                    category,
+                    category)
+                valid = False
+
+    # check that context line's value is integer
+    context_line = tag.get(TagFields.context_line)
+    if not isinstance(context_line, int):
+        logging.error(
+            "Tag's field context_line is incorrect: %s \n"
+            "context_line variable should be integer format, "
+            "but tag contains: %s",
+            tag,
+            context_line)
+        valid = False
+    return valid
 
 
 def _get_name(filename: str) -> str:
-  """
-  Return name of the example by his filepath.
+    """
+    Return name of the example by his filepath.
 
-  Get name of the example by his filename.
+    Get name of the example by his filename.
 
-  Args:
-      filename: filename of the beam example file.
+    Args:
+        filename: filename of the beam example file.
 
-  Returns:
-      example's name.
-  """
-  return filename.split(os.extsep)[0]
+    Returns:
+        example's name.
+    """
+    return filename.split(os.extsep)[0]
 
 
 async def _update_example_status(example: Example, client: GRPCClient):
-  """
-  Receive status for examples and update example.status and pipeline_id
-
-  Use client to send requests to the backend:
-  1. Start code processing.
-  2. Ping the backend while status is STATUS_VALIDATING/
-    STATUS_PREPARING/STATUS_COMPILING/STATUS_EXECUTING
-  Update example.status with resulting status.
-
-  Args:
-      example: beam example for processing and updating status and pipeline_id.
-      client: client to send requests to the server.
-  """
-  pipeline_id = await client.run_code(
-      example.code, example.sdk, example.tag.pipeline_options)
-  example.pipeline_id = pipeline_id
-  status = await client.check_status(pipeline_id)
-  while status in [STATUS_VALIDATING,
-                   STATUS_PREPARING,
-                   STATUS_COMPILING,
-                   STATUS_EXECUTING]:
-    await asyncio.sleep(Config.PAUSE_DELAY)
+    """
+    Receive status for examples and update example.status and pipeline_id
+
+    Use client to send requests to the backend:
+    1. Start code processing.
+    2. Ping the backend while status is STATUS_VALIDATING/
+      STATUS_PREPARING/STATUS_COMPILING/STATUS_EXECUTING
+    Update example.status with resulting status.
+
+    Args:
+        example: beam example for processing and updating status and pipeline_id.
+        client: client to send requests to the server.
+    """
+    pipeline_id = await client.run_code(
+        example.code, example.sdk, example.tag.pipeline_options)
+    example.pipeline_id = pipeline_id
     status = await client.check_status(pipeline_id)
-  example.status = status
+    while status in [STATUS_VALIDATING,
+                     STATUS_PREPARING,
+                     STATUS_COMPILING,
+                     STATUS_EXECUTING]:
+        await asyncio.sleep(Config.PAUSE_DELAY)
+        status = await client.check_status(pipeline_id)
+    example.status = status
 
 
 def _get_object_type(filename, filepath):
-  """
-  Get type of an object based on it filename/filepath
-
-  Args:
-      filename: object's filename
-      filepath: object's filepath
-
-  Returns: type of the object (example, kata, unit-test)
-  """
-  filename_no_ext = (os.path.splitext(filename)[0]).lower()
-  if filename_no_ext.endswith(PrecompiledExampleType.test_ends):
-    object_type = PRECOMPILED_OBJECT_TYPE_UNIT_TEST
-  elif PrecompiledExampleType.katas in filepath.split(os.sep):
-    object_type = PRECOMPILED_OBJECT_TYPE_KATA
-  elif PrecompiledExampleType.examples in filepath.split(os.sep):
-    object_type = PRECOMPILED_OBJECT_TYPE_EXAMPLE
-  else:
-    object_type = PRECOMPILED_OBJECT_TYPE_UNSPECIFIED
-  return object_type
+    """
+    Get type of an object based on it filename/filepath
+
+    Args:
+        filename: object's filename
+        filepath: object's filepath
+
+    Returns: type of the object (example, kata, unit-test)
+    """
+    filename_no_ext = (os.path.splitext(filename)[0]).lower()
+    if filename_no_ext.endswith(PrecompiledExampleType.test_ends):
+        object_type = PRECOMPILED_OBJECT_TYPE_UNIT_TEST
+    elif PrecompiledExampleType.katas in filepath.split(os.sep):
+        object_type = PRECOMPILED_OBJECT_TYPE_KATA
+    elif PrecompiledExampleType.examples in filepath.split(os.sep):
+        object_type = PRECOMPILED_OBJECT_TYPE_EXAMPLE
+    else:
+        object_type = PRECOMPILED_OBJECT_TYPE_UNSPECIFIED
+    return object_type
diff --git a/playground/infrastructure/logger.py b/playground/infrastructure/logger.py
index 6ee2f373690..18afd8a1b7e 100644
--- a/playground/infrastructure/logger.py
+++ b/playground/infrastructure/logger.py
@@ -23,26 +23,26 @@ from logging import INFO, WARNING, ERROR, CRITICAL
 
 
 def setup_logger():
-  """
-  Setup logging.
-
-  Add 2 handler in root logger:
-      StreamHandler - for logs(INFO and WARNING levels) to the stdout
-      StreamHandler - for logs(ERROR and CRITICAL levels) to the stderr
-  """
-  log = logging.getLogger()
-  log.setLevel(logging.INFO)
-  formatter = logging.Formatter(
-    '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s'
-  )
-
-  stdout_handler = logging.StreamHandler(sys.stdout)
-  stdout_handler.addFilter(lambda record: record.levelno in (INFO, WARNING))
-  stdout_handler.setFormatter(formatter)
-
-  stderr_handler = logging.StreamHandler(sys.stderr)
-  stderr_handler.addFilter(lambda record: record.levelno in (ERROR, CRITICAL))
-  stderr_handler.setFormatter(formatter)
-
-  log.addHandler(stdout_handler)
-  log.addHandler(stderr_handler)
+    """
+    Setup logging.
+
+    Add 2 handler in root logger:
+        StreamHandler - for logs(INFO and WARNING levels) to the stdout
+        StreamHandler - for logs(ERROR and CRITICAL levels) to the stderr
+    """
+    log = logging.getLogger()
+    log.setLevel(logging.INFO)
+    formatter = logging.Formatter(
+        '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s'
+    )
+
+    stdout_handler = logging.StreamHandler(sys.stdout)
+    stdout_handler.addFilter(lambda record: record.levelno in (INFO, WARNING))
+    stdout_handler.setFormatter(formatter)
+
+    stderr_handler = logging.StreamHandler(sys.stderr)
+    stderr_handler.addFilter(lambda record: record.levelno in (ERROR, CRITICAL))
+    stderr_handler.setFormatter(formatter)
+
+    log.addHandler(stdout_handler)
+    log.addHandler(stderr_handler)
diff --git a/playground/infrastructure/test_cd_helper.py b/playground/infrastructure/test_cd_helper.py
index afa31adbaad..b597185b8c7 100644
--- a/playground/infrastructure/test_cd_helper.py
+++ b/playground/infrastructure/test_cd_helper.py
@@ -28,138 +28,138 @@ from helper import Example, Tag
 
 @pytest.fixture
 def delete_temp_folder():
-  """
-  Create temp folder for tests with storing files
-  """
-  yield delete_temp_folder
-  if os.path.exists(Config.TEMP_FOLDER):
-    shutil.rmtree(Config.TEMP_FOLDER)
+    """
+    Create temp folder for tests with storing files
+    """
+    yield delete_temp_folder
+    if os.path.exists(Config.TEMP_FOLDER):
+        shutil.rmtree(Config.TEMP_FOLDER)
 
 
 @pytest.fixture
 def upload_blob():
-  """
-  Fake method for mocking
-  Returns: None
-  """
-  pass
+    """
+    Fake method for mocking
+    Returns: None
+    """
+    pass
 
 
 def test__get_gcs_object_name():
-  """
-    Test getting the path where file will be stored at the bucket
-  """
-  expected_path = "SDK_JAVA/PRECOMPILED_OBJECT_TYPE_UNIT_TEST/base_folder"
-  expected_result = "%s/%s" % (expected_path, "file.java")
-  expected_result_with_extension = "%s/%s" % (expected_path, "file.output")
-  assert CDHelper()._get_gcs_object_name(
-      SDK_JAVA, PRECOMPILED_OBJECT_TYPE_UNIT_TEST, "base_folder",
-      "file") == expected_result
-  assert CDHelper()._get_gcs_object_name(
-      SDK_JAVA,
-      PRECOMPILED_OBJECT_TYPE_UNIT_TEST,
-      "base_folder",
-      "file",
-      "output") == expected_result_with_extension
+    """
+      Test getting the path where file will be stored at the bucket
+    """
+    expected_path = "SDK_JAVA/PRECOMPILED_OBJECT_TYPE_UNIT_TEST/base_folder"
+    expected_result = "%s/%s" % (expected_path, "file.java")
+    expected_result_with_extension = "%s/%s" % (expected_path, "file.output")
+    assert CDHelper()._get_gcs_object_name(
+        SDK_JAVA, PRECOMPILED_OBJECT_TYPE_UNIT_TEST, "base_folder",
+        "file") == expected_result
+    assert CDHelper()._get_gcs_object_name(
+        SDK_JAVA,
+        PRECOMPILED_OBJECT_TYPE_UNIT_TEST,
+        "base_folder",
+        "file",
+        "output") == expected_result_with_extension
 
 
 def test__write_to_local_fs(delete_temp_folder):
-  """
-  Test writing code of an example, output and meta info to
-  the filesystem (in temp folder)
-  Args:
-      delete_temp_folder: python fixture to clean up temp folder
-      after method execution
-  """
-  object_meta = {
-      "name": "name",
-      "description": "description",
-      "multifile": False,
-      "categories": ["category-1", "category-2"],
-      "pipeline_options": "--option option"
-  }
-  example = Example(
-      name="name",
-      pipeline_id="pipeline_id",
-      sdk=SDK_JAVA,
-      filepath="filepath",
-      code="code_of_example",
-      output="output_of_example",
-      status=STATUS_UNSPECIFIED,
-      tag=Tag(**object_meta),
-      link="link")
-  result_filepath = "SDK_JAVA/PRECOMPILED_OBJECT_TYPE_UNSPECIFIED/name"
-  expected_result = {
-      "%s/%s" % (result_filepath, "name.java"): "%s/%s/%s" %
-      ("temp/pipeline_id", result_filepath, "name.java"),
-      "%s/%s" % (result_filepath, "name.output"): "%s/%s/%s" %
-      ("temp/pipeline_id", result_filepath, "name.output"),
-      "%s/%s" % (result_filepath, "name.log"): "%s/%s/%s" %
-      ("temp/pipeline_id", result_filepath, "name.log"),
-      "%s/%s" % (result_filepath, "name.graph"): "%s/%s/%s" %
-      ("temp/pipeline_id", result_filepath, "name.graph"),
-      "%s/%s" % (result_filepath, "meta.info"): "%s/%s/%s" %
-      ("temp/pipeline_id", result_filepath, "meta.info"),
-  }
-  assert CDHelper()._write_to_local_fs(example) == expected_result
+    """
+    Test writing code of an example, output and meta info to
+    the filesystem (in temp folder)
+    Args:
+        delete_temp_folder: python fixture to clean up temp folder
+        after method execution
+    """
+    object_meta = {
+        "name": "name",
+        "description": "description",
+        "multifile": False,
+        "categories": ["category-1", "category-2"],
+        "pipeline_options": "--option option"
+    }
+    example = Example(
+        name="name",
+        pipeline_id="pipeline_id",
+        sdk=SDK_JAVA,
+        filepath="filepath",
+        code="code_of_example",
+        output="output_of_example",
+        status=STATUS_UNSPECIFIED,
+        tag=Tag(**object_meta),
+        link="link")
+    result_filepath = "SDK_JAVA/PRECOMPILED_OBJECT_TYPE_UNSPECIFIED/name"
+    expected_result = {
+        "%s/%s" % (result_filepath, "name.java"): "%s/%s/%s" %
+                                                  ("temp/pipeline_id", result_filepath, "name.java"),
+        "%s/%s" % (result_filepath, "name.output"): "%s/%s/%s" %
+                                                    ("temp/pipeline_id", result_filepath, "name.output"),
+        "%s/%s" % (result_filepath, "name.log"): "%s/%s/%s" %
+                                                 ("temp/pipeline_id", result_filepath, "name.log"),
+        "%s/%s" % (result_filepath, "name.graph"): "%s/%s/%s" %
+                                                   ("temp/pipeline_id", result_filepath, "name.graph"),
+        "%s/%s" % (result_filepath, "meta.info"): "%s/%s/%s" %
+                                                  ("temp/pipeline_id", result_filepath, "meta.info"),
+    }
+    assert CDHelper()._write_to_local_fs(example) == expected_result
 
 
 def test__save_to_cloud_storage(mocker):
-  """
-  Test saving examples, outputs and meta to bucket
-  Args:
-      mocker: mocker fixture from pytest-mocker
-  """
-  expected_cloud_path = "SDK_JAVA/Example/example.java"
-  object_meta = {
-      "name": "name",
-      "description": "description",
-      "multifile": False,
-      "categories": ["category-1", "category-2"],
-      "pipeline_options": "--option option",
-      "default_example": True
-  }
-  upload_blob_mock = mocker.patch(
-      "cd_helper.CDHelper._upload_blob", return_value=upload_blob)
-  write_to_os_mock = mocker.patch(
-      "cd_helper.CDHelper._write_to_local_fs",
-      return_value={expected_cloud_path: ""})
-  example = Example(
-      name="name",
-      pipeline_id="pipeline_id",
-      sdk=SDK_JAVA,
-      filepath="filepath",
-      code="code_of_example",
-      output="output_of_example",
-      status=STATUS_UNSPECIFIED,
-      tag=Tag(**object_meta),
-      link="link")
-
-  CDHelper()._save_to_cloud_storage([example])
-  write_to_os_mock.assert_called_with(example)
-  upload_blob_mock.assert_called_with(
-      source_file="", destination_blob_name=expected_cloud_path)
+    """
+    Test saving examples, outputs and meta to bucket
+    Args:
+        mocker: mocker fixture from pytest-mocker
+    """
+    expected_cloud_path = "SDK_JAVA/Example/example.java"
+    object_meta = {
+        "name": "name",
+        "description": "description",
+        "multifile": False,
+        "categories": ["category-1", "category-2"],
+        "pipeline_options": "--option option",
+        "default_example": True
+    }
+    upload_blob_mock = mocker.patch(
+        "cd_helper.CDHelper._upload_blob", return_value=upload_blob)
+    write_to_os_mock = mocker.patch(
+        "cd_helper.CDHelper._write_to_local_fs",
+        return_value={expected_cloud_path: ""})
+    example = Example(
+        name="name",
+        pipeline_id="pipeline_id",
+        sdk=SDK_JAVA,
+        filepath="filepath",
+        code="code_of_example",
+        output="output_of_example",
+        status=STATUS_UNSPECIFIED,
+        tag=Tag(**object_meta),
+        link="link")
+
+    CDHelper()._save_to_cloud_storage([example])
+    write_to_os_mock.assert_called_with(example)
+    upload_blob_mock.assert_called_with(
+        source_file="", destination_blob_name=expected_cloud_path)
 
 
 def test__write_default_example_path_to_local_fs(delete_temp_folder):
-  """
-    Test writing default example link of sdk to
-    the filesystem (in temp folder)
-    Args:
-        delete_temp_folder: python fixture to clean up temp folder
-        after method execution
-  """
-  sdk = Sdk.Name(SDK_GO)
-  default_example_path = "SDK_GO/PRECOMPILED_OBJECT_TYPE_EXAMPLE/WordCount"
-  expected_result = str(pathlib.Path(sdk, Config.DEFAULT_PRECOMPILED_OBJECT))
-  cloud_path = CDHelper()._write_default_example_path_to_local_fs(
-      default_example_path)
-  assert cloud_path == expected_result
-  assert os.path.exists(os.path.join("temp", cloud_path))
+    """
+      Test writing default example link of sdk to
+      the filesystem (in temp folder)
+      Args:
+          delete_temp_folder: python fixture to clean up temp folder
+          after method execution
+    """
+    sdk = Sdk.Name(SDK_GO)
+    default_example_path = "SDK_GO/PRECOMPILED_OBJECT_TYPE_EXAMPLE/WordCount"
+    expected_result = str(pathlib.Path(sdk, Config.DEFAULT_PRECOMPILED_OBJECT))
+    cloud_path = CDHelper()._write_default_example_path_to_local_fs(
+        default_example_path)
+    assert cloud_path == expected_result
+    assert os.path.exists(os.path.join("temp", cloud_path))
 
 
 def test__clear_temp_folder():
-  if not os.path.exists(Config.TEMP_FOLDER):
-    os.mkdir(Config.TEMP_FOLDER)
-  CDHelper()._clear_temp_folder()
-  assert os.path.exists(Config.TEMP_FOLDER) is False
+    if not os.path.exists(Config.TEMP_FOLDER):
+        os.mkdir(Config.TEMP_FOLDER)
+    CDHelper()._clear_temp_folder()
+    assert os.path.exists(Config.TEMP_FOLDER) is False
diff --git a/playground/infrastructure/test_ci_cd.py b/playground/infrastructure/test_ci_cd.py
index 87bbe5ee591..9a3b4a890fa 100644
--- a/playground/infrastructure/test_ci_cd.py
+++ b/playground/infrastructure/test_ci_cd.py
@@ -21,16 +21,16 @@ from ci_cd import _ci_step, _cd_step, _check_envs
 
 @mock.patch("ci_helper.CIHelper.verify_examples")
 def test_ci_step(mock_verify_examples):
-  _ci_step([])
-  mock_verify_examples.assert_called_once_with([])
+    _ci_step([])
+    mock_verify_examples.assert_called_once_with([])
 
 
 @mock.patch("cd_helper.CDHelper.store_examples")
 def test_cd_step(mock_store_examples):
-  _cd_step([])
-  mock_store_examples.assert_called_once_with([])
+    _cd_step([])
+    mock_store_examples.assert_called_once_with([])
 
 
 def test__check_envs():
-  with pytest.raises(KeyError):
-    _check_envs()
+    with pytest.raises(KeyError):
+        _check_envs()
diff --git a/playground/infrastructure/test_ci_helper.py b/playground/infrastructure/test_ci_helper.py
index ff426285f61..e1e8dfabf81 100644
--- a/playground/infrastructure/test_ci_helper.py
+++ b/playground/infrastructure/test_ci_helper.py
@@ -30,138 +30,138 @@ from helper import Example, Tag
 @mock.patch("ci_helper.CIHelper._verify_examples")
 @mock.patch("ci_helper.get_statuses")
 async def test_verify_examples(mock_get_statuses, mock_verify_examples):
-  helper = CIHelper()
-  await helper.verify_examples([])
+    helper = CIHelper()
+    await helper.verify_examples([])
 
-  mock_get_statuses.assert_called_once_with([])
-  mock_verify_examples.assert_called_once_with([])
+    mock_get_statuses.assert_called_once_with([])
+    mock_verify_examples.assert_called_once_with([])
 
 
 @pytest.mark.asyncio
 @mock.patch("grpc_client.GRPCClient.get_run_error")
 @mock.patch("grpc_client.GRPCClient.get_compile_output")
 async def test__verify_examples(mock_get_compile_output, mock_get_run_output):
-  helper = CIHelper()
-  object_meta = {
-      "name": "name",
-      "description": "description",
-      "multifile": False,
-      "categories": ["category-1", "category-2"],
-      "pipeline_options": "--option option",
-      "default_example": False
-  }
-  object_meta_def_ex = copy.copy(object_meta)
-  object_meta_def_ex["default_example"] = True
-  pipeline_id = str(uuid.uuid4())
-  default_example = Example(
-      name="name",
-      pipeline_id=pipeline_id,
-      sdk=SDK_JAVA,
-      filepath="filepath",
-      code="code_of_example",
-      output="output_of_example",
-      status=STATUS_FINISHED,
-      tag=Tag(**object_meta_def_ex),
-      link="link")
-  finished_example = Example(
-      name="name",
-      pipeline_id=pipeline_id,
-      sdk=SDK_JAVA,
-      filepath="filepath",
-      code="code_of_example",
-      output="output_of_example",
-      status=STATUS_FINISHED,
-      tag=Tag(**object_meta),
-      link="link")
-  examples_without_def_ex = [
-      finished_example,
-      finished_example,
-  ]
-  examples_with_several_def_ex = [
-      default_example,
-      default_example,
-  ]
-  examples_without_errors = [
-      default_example,
-      finished_example,
-  ]
-  examples_with_errors = [
-      Example(
-          name="name",
-          pipeline_id=pipeline_id,
-          sdk=SDK_JAVA,
-          filepath="filepath",
-          code="code_of_example",
-          output="output_of_example",
-          status=STATUS_VALIDATION_ERROR,
-          tag=Tag(**object_meta_def_ex),
-          link="link"),
-      Example(
-          name="name",
-          pipeline_id=pipeline_id,
-          sdk=SDK_JAVA,
-          filepath="filepath",
-          code="code_of_example",
-          output="output_of_example",
-          status=STATUS_ERROR,
-          tag=Tag(**object_meta),
-          link="link"),
-      Example(
-          name="name",
-          pipeline_id=pipeline_id,
-          sdk=SDK_JAVA,
-          filepath="filepath",
-          code="code_of_example",
-          output="output_of_example",
-          status=STATUS_COMPILE_ERROR,
-          tag=Tag(**object_meta),
-          link="link"),
-      Example(
-          name="name",
-          pipeline_id=pipeline_id,
-          sdk=SDK_JAVA,
-          filepath="filepath",
-          code="code_of_example",
-          output="output_of_example",
-          status=STATUS_PREPARATION_ERROR,
-          tag=Tag(**object_meta),
-          link="link"),
-      Example(
-          name="name",
-          pipeline_id=pipeline_id,
-          sdk=SDK_JAVA,
-          filepath="filepath",
-          code="code_of_example",
-          output="output_of_example",
-          status=STATUS_RUN_TIMEOUT,
-          tag=Tag(**object_meta),
-          link="link"),
-      Example(
-          name="name",
-          pipeline_id=pipeline_id,
-          sdk=SDK_JAVA,
-          filepath="filepath",
-          code="code_of_example",
-          output="output_of_example",
-          status=STATUS_VALIDATION_ERROR,
-          tag=Tag(**object_meta),
-          link="link"),
-      Example(
-          name="name",
-          pipeline_id=pipeline_id,
-          sdk=SDK_JAVA,
-          filepath="filepath",
-          code="code_of_example",
-          output="output_of_example",
-          status=STATUS_RUN_ERROR,
-          tag=Tag(**object_meta),
-          link="link"),
-  ]
+    helper = CIHelper()
+    object_meta = {
+        "name": "name",
+        "description": "description",
+        "multifile": False,
+        "categories": ["category-1", "category-2"],
+        "pipeline_options": "--option option",
+        "default_example": False
+    }
+    object_meta_def_ex = copy.copy(object_meta)
+    object_meta_def_ex["default_example"] = True
+    pipeline_id = str(uuid.uuid4())
+    default_example = Example(
+        name="name",
+        pipeline_id=pipeline_id,
+        sdk=SDK_JAVA,
+        filepath="filepath",
+        code="code_of_example",
+        output="output_of_example",
+        status=STATUS_FINISHED,
+        tag=Tag(**object_meta_def_ex),
+        link="link")
+    finished_example = Example(
+        name="name",
+        pipeline_id=pipeline_id,
+        sdk=SDK_JAVA,
+        filepath="filepath",
+        code="code_of_example",
+        output="output_of_example",
+        status=STATUS_FINISHED,
+        tag=Tag(**object_meta),
+        link="link")
+    examples_without_def_ex = [
+        finished_example,
+        finished_example,
+    ]
+    examples_with_several_def_ex = [
+        default_example,
+        default_example,
+    ]
+    examples_without_errors = [
+        default_example,
+        finished_example,
+    ]
+    examples_with_errors = [
+        Example(
+            name="name",
+            pipeline_id=pipeline_id,
+            sdk=SDK_JAVA,
+            filepath="filepath",
+            code="code_of_example",
+            output="output_of_example",
+            status=STATUS_VALIDATION_ERROR,
+            tag=Tag(**object_meta_def_ex),
+            link="link"),
+        Example(
+            name="name",
+            pipeline_id=pipeline_id,
+            sdk=SDK_JAVA,
+            filepath="filepath",
+            code="code_of_example",
+            output="output_of_example",
+            status=STATUS_ERROR,
+            tag=Tag(**object_meta),
+            link="link"),
+        Example(
+            name="name",
+            pipeline_id=pipeline_id,
+            sdk=SDK_JAVA,
+            filepath="filepath",
+            code="code_of_example",
+            output="output_of_example",
+            status=STATUS_COMPILE_ERROR,
+            tag=Tag(**object_meta),
+            link="link"),
+        Example(
+            name="name",
+            pipeline_id=pipeline_id,
+            sdk=SDK_JAVA,
+            filepath="filepath",
+            code="code_of_example",
+            output="output_of_example",
+            status=STATUS_PREPARATION_ERROR,
+            tag=Tag(**object_meta),
+            link="link"),
+        Example(
+            name="name",
+            pipeline_id=pipeline_id,
+            sdk=SDK_JAVA,
+            filepath="filepath",
+            code="code_of_example",
+            output="output_of_example",
+            status=STATUS_RUN_TIMEOUT,
+            tag=Tag(**object_meta),
+            link="link"),
+        Example(
+            name="name",
+            pipeline_id=pipeline_id,
+            sdk=SDK_JAVA,
+            filepath="filepath",
+            code="code_of_example",
+            output="output_of_example",
+            status=STATUS_VALIDATION_ERROR,
+            tag=Tag(**object_meta),
+            link="link"),
+        Example(
+            name="name",
+            pipeline_id=pipeline_id,
+            sdk=SDK_JAVA,
+            filepath="filepath",
+            code="code_of_example",
+            output="output_of_example",
+            status=STATUS_RUN_ERROR,
+            tag=Tag(**object_meta),
+            link="link"),
+    ]
 
-  with pytest.raises(VerifyException):
-    await helper._verify_examples(examples_with_errors)
-  with pytest.raises(VerifyException):
-    await helper._verify_examples(examples_without_def_ex)
-  with pytest.raises(VerifyException):
-    await helper._verify_examples(examples_with_several_def_ex)
-  await helper._verify_examples(examples_without_errors)
+    with pytest.raises(VerifyException):
+        await helper._verify_examples(examples_with_errors)
+    with pytest.raises(VerifyException):
+        await helper._verify_examples(examples_without_def_ex)
+    with pytest.raises(VerifyException):
+        await helper._verify_examples(examples_with_several_def_ex)
+    await helper._verify_examples(examples_without_errors)
diff --git a/playground/infrastructure/test_grpc_client.py b/playground/infrastructure/test_grpc_client.py
index 113cbb23251..4cc13ea569f 100644
--- a/playground/infrastructure/test_grpc_client.py
+++ b/playground/infrastructure/test_grpc_client.py
@@ -24,63 +24,63 @@ from grpc_client import GRPCClient
 
 @pytest.fixture()
 def mock_run_code(mocker):
-  async_mock = AsyncMock(return_value=str(uuid.uuid4()))
-  mocker.patch("grpc_client.GRPCClient.run_code", side_effect=async_mock)
-  return async_mock
+    async_mock = AsyncMock(return_value=str(uuid.uuid4()))
+    mocker.patch("grpc_client.GRPCClient.run_code", side_effect=async_mock)
+    return async_mock
 
 
 @pytest.fixture()
 def mock_check_status(mocker):
-  async_mock = AsyncMock(return_value=api_pb2.STATUS_FINISHED)
-  mocker.patch("grpc_client.GRPCClient.check_status", side_effect=async_mock)
-  return async_mock
+    async_mock = AsyncMock(return_value=api_pb2.STATUS_FINISHED)
+    mocker.patch("grpc_client.GRPCClient.check_status", side_effect=async_mock)
+    return async_mock
 
 
 @pytest.fixture()
 def mock_get_run_error(mocker):
-  async_mock = AsyncMock(return_value="MOCK_ERROR")
-  mocker.patch("grpc_client.GRPCClient.get_run_error", side_effect=async_mock)
-  return async_mock
+    async_mock = AsyncMock(return_value="MOCK_ERROR")
+    mocker.patch("grpc_client.GRPCClient.get_run_error", side_effect=async_mock)
+    return async_mock
 
 
 @pytest.fixture()
 def mock_get_run_output(mocker):
-  async_mock = AsyncMock(return_value="MOCK_RUN_OUTPUT")
-  mocker.patch("grpc_client.GRPCClient.get_run_output", side_effect=async_mock)
-  return async_mock
+    async_mock = AsyncMock(return_value="MOCK_RUN_OUTPUT")
+    mocker.patch("grpc_client.GRPCClient.get_run_output", side_effect=async_mock)
+    return async_mock
 
 
 @pytest.fixture()
 def mock_get_compile_output(mocker):
-  async_mock = AsyncMock(return_value="MOCK_COMPILE_OUTPUT")
-  mocker.patch(
-      "grpc_client.GRPCClient.get_compile_output", side_effect=async_mock)
-  return async_mock
+    async_mock = AsyncMock(return_value="MOCK_COMPILE_OUTPUT")
+    mocker.patch(
+        "grpc_client.GRPCClient.get_compile_output", side_effect=async_mock)
+    return async_mock
 
 
 class TestGRPCClient:
 
-  @pytest.mark.asyncio
-  async def test_run_code(self, mock_run_code):
-    result = await GRPCClient().run_code("", api_pb2.SDK_GO, "")
-    assert isinstance(result, str)
-
-  @pytest.mark.asyncio
-  async def test_check_status(self, mock_check_status):
-    result = await GRPCClient().check_status(str(uuid.uuid4()))
-    assert result == api_pb2.STATUS_FINISHED
-
-  @pytest.mark.asyncio
-  async def test_get_run_error(self, mock_get_run_error):
-    result = await GRPCClient().get_run_error(str(uuid.uuid4()))
-    assert result == "MOCK_ERROR"
-
-  @pytest.mark.asyncio
-  async def test_get_run_output(self, mock_get_run_output):
-    result = await GRPCClient().get_run_output(str(uuid.uuid4()))
-    assert result == "MOCK_RUN_OUTPUT"
-
-  @pytest.mark.asyncio
-  async def test_get_compile_output(self, mock_get_compile_output):
-    result = await GRPCClient().get_compile_output(str(uuid.uuid4()))
-    assert result == "MOCK_COMPILE_OUTPUT"
+    @pytest.mark.asyncio
+    async def test_run_code(self, mock_run_code):
+        result = await GRPCClient().run_code("", api_pb2.SDK_GO, "")
+        assert isinstance(result, str)
+
+    @pytest.mark.asyncio
+    async def test_check_status(self, mock_check_status):
+        result = await GRPCClient().check_status(str(uuid.uuid4()))
+        assert result == api_pb2.STATUS_FINISHED
+
+    @pytest.mark.asyncio
+    async def test_get_run_error(self, mock_get_run_error):
+        result = await GRPCClient().get_run_error(str(uuid.uuid4()))
+        assert result == "MOCK_ERROR"
+
+    @pytest.mark.asyncio
+    async def test_get_run_output(self, mock_get_run_output):
+        result = await GRPCClient().get_run_output(str(uuid.uuid4()))
+        assert result == "MOCK_RUN_OUTPUT"
+
+    @pytest.mark.asyncio
+    async def test_get_compile_output(self, mock_get_compile_output):
+        result = await GRPCClient().get_compile_output(str(uuid.uuid4()))
+        assert result == "MOCK_COMPILE_OUTPUT"
diff --git a/playground/infrastructure/test_helper.py b/playground/infrastructure/test_helper.py
index dc9a66b7bbb..d8e02ce714c 100644
--- a/playground/infrastructure/test_helper.py
+++ b/playground/infrastructure/test_helper.py
@@ -19,77 +19,77 @@ import mock
 import pytest
 
 from api.v1.api_pb2 import SDK_UNSPECIFIED, STATUS_UNSPECIFIED, \
-  STATUS_VALIDATING, \
-  STATUS_FINISHED, SDK_JAVA, \
-  PRECOMPILED_OBJECT_TYPE_EXAMPLE, PRECOMPILED_OBJECT_TYPE_KATA, \
-  PRECOMPILED_OBJECT_TYPE_UNIT_TEST
+    STATUS_VALIDATING, \
+    STATUS_FINISHED, SDK_JAVA, \
+    PRECOMPILED_OBJECT_TYPE_EXAMPLE, PRECOMPILED_OBJECT_TYPE_KATA, \
+    PRECOMPILED_OBJECT_TYPE_UNIT_TEST
 from grpc_client import GRPCClient
 from helper import find_examples, Example, _get_example, _get_name, get_tag, \
-  _validate, Tag, get_statuses, \
-  _update_example_status, get_supported_categories, _check_file, \
-  _get_object_type, ExampleTag
+    _validate, Tag, get_statuses, \
+    _update_example_status, get_supported_categories, _check_file, \
+    _get_object_type, ExampleTag
 
 
 @mock.patch("helper._check_file")
 @mock.patch("helper.os.walk")
 def test_find_examples_with_valid_tag(mock_os_walk, mock_check_file):
-  mock_os_walk.return_value = [("/root", (), ("file.java", ))]
-  mock_check_file.return_value = False
-  sdk = SDK_UNSPECIFIED
-  result = find_examples(work_dir="", supported_categories=[], sdk=sdk)
+    mock_os_walk.return_value = [("/root", (), ("file.java",))]
+    mock_check_file.return_value = False
+    sdk = SDK_UNSPECIFIED
+    result = find_examples(work_dir="", supported_categories=[], sdk=sdk)
 
-  assert not result
-  mock_os_walk.assert_called_once_with("")
-  mock_check_file.assert_called_once_with(
-      examples=[],
-      filename="file.java",
-      filepath="/root/file.java",
-      supported_categories=[],
-      sdk=sdk)
+    assert not result
+    mock_os_walk.assert_called_once_with("")
+    mock_check_file.assert_called_once_with(
+        examples=[],
+        filename="file.java",
+        filepath="/root/file.java",
+        supported_categories=[],
+        sdk=sdk)
 
 
 @mock.patch("helper._check_file")
 @mock.patch("helper.os.walk")
 def test_find_examples_with_invalid_tag(mock_os_walk, mock_check_file):
-  mock_os_walk.return_value = [("/root", (), ("file.java", ))]
-  mock_check_file.return_value = True
-  sdk = SDK_UNSPECIFIED
-  with pytest.raises(
-      ValueError,
-      match="Some of the beam examples contain beam playground tag with "
-      "an incorrect format"):
-    find_examples("", [], sdk=sdk)
-
-  mock_os_walk.assert_called_once_with("")
-  mock_check_file.assert_called_once_with(
-      examples=[],
-      filename="file.java",
-      filepath="/root/file.java",
-      supported_categories=[],
-      sdk=sdk)
+    mock_os_walk.return_value = [("/root", (), ("file.java",))]
+    mock_check_file.return_value = True
+    sdk = SDK_UNSPECIFIED
+    with pytest.raises(
+          ValueError,
+          match="Some of the beam examples contain beam playground tag with "
+                "an incorrect format"):
+        find_examples("", [], sdk=sdk)
+
+    mock_os_walk.assert_called_once_with("")
+    mock_check_file.assert_called_once_with(
+        examples=[],
+        filename="file.java",
+        filepath="/root/file.java",
+        supported_categories=[],
+        sdk=sdk)
 
 
 @pytest.mark.asyncio
 @mock.patch("helper.GRPCClient")
 @mock.patch("helper._update_example_status")
 async def test_get_statuses(mock_update_example_status, mock_grpc_client):
-  example = Example(
-      name="file",
-      pipeline_id="pipeline_id",
-      sdk=SDK_UNSPECIFIED,
-      filepath="root/file.extension",
-      code="code",
-      output="output",
-      status=STATUS_UNSPECIFIED,
-      tag={"name": "Name"},
-      link="link")
-  client = None
+    example = Example(
+        name="file",
+        pipeline_id="pipeline_id",
+        sdk=SDK_UNSPECIFIED,
+        filepath="root/file.extension",
+        code="code",
+        output="output",
+        status=STATUS_UNSPECIFIED,
+        tag={"name": "Name"},
+        link="link")
+    client = None
 
-  mock_grpc_client.return_value = client
+    mock_grpc_client.return_value = client
 
-  await get_statuses([example])
+    await get_statuses([example])
 
-  mock_update_example_status.assert_called_once_with(example, client)
+    mock_update_example_status.assert_called_once_with(example, client)
 
 
 @mock.patch(
@@ -97,206 +97,206 @@ async def test_get_statuses(mock_update_example_status, 
mock_grpc_client):
     mock_open(
         read_data="...\n# beam-playground:\n#     name: Name\n\nimport ..."))
 def test_get_tag_when_tag_is_exists():
-  result = get_tag("")
+    result = get_tag("")
 
-  assert result.tag_as_dict.get("name") == "Name"
-  assert result.tag_as_string == "# beam-playground:\n#     name: Name\n\n"
+    assert result.tag_as_dict.get("name") == "Name"
+    assert result.tag_as_string == "# beam-playground:\n#     name: Name\n\n"
 
 
 @mock.patch("builtins.open", mock_open(read_data="...\n..."))
 def test_get_tag_when_tag_does_not_exist():
-  result = get_tag("")
+    result = get_tag("")
 
-  assert result is None
+    assert result is None
 
 
 @mock.patch("helper._get_example")
 @mock.patch("helper._validate")
 @mock.patch("helper.get_tag")
 def test__check_file_with_correct_tag(
-    mock_get_tag, mock_validate, mock_get_example):
-  tag = ExampleTag({"name": "Name"}, "")
-  example = Example(
-      name="filename",
-      sdk=SDK_JAVA,
-      filepath="/root/filename.java",
-      code="data",
-      status=STATUS_UNSPECIFIED,
-      tag=Tag("Name", "Description", False, [], '--option option'),
-      link="link")
-  examples = []
-
-  mock_get_tag.return_value = tag
-  mock_validate.return_value = True
-  mock_get_example.return_value = example
-
-  result = _check_file(
-      examples, "filename.java", "/root/filename.java", [], sdk=SDK_JAVA)
-
-  assert result is False
-  assert len(examples) == 1
-  assert examples[0] == example
-  mock_get_tag.assert_called_once_with("/root/filename.java")
-  mock_validate.assert_called_once_with(tag.tag_as_dict, [])
-  mock_get_example.assert_called_once_with(
-      "/root/filename.java", "filename.java", tag)
+      mock_get_tag, mock_validate, mock_get_example):
+    tag = ExampleTag({"name": "Name"}, "")
+    example = Example(
+        name="filename",
+        sdk=SDK_JAVA,
+        filepath="/root/filename.java",
+        code="data",
+        status=STATUS_UNSPECIFIED,
+        tag=Tag("Name", "Description", False, [], '--option option'),
+        link="link")
+    examples = []
+
+    mock_get_tag.return_value = tag
+    mock_validate.return_value = True
+    mock_get_example.return_value = example
+
+    result = _check_file(
+        examples, "filename.java", "/root/filename.java", [], sdk=SDK_JAVA)
+
+    assert result is False
+    assert len(examples) == 1
+    assert examples[0] == example
+    mock_get_tag.assert_called_once_with("/root/filename.java")
+    mock_validate.assert_called_once_with(tag.tag_as_dict, [])
+    mock_get_example.assert_called_once_with(
+        "/root/filename.java", "filename.java", tag)
 
 
 @mock.patch("helper._validate")
 @mock.patch("helper.get_tag")
 def test__check_file_with_incorrect_tag(mock_get_tag, mock_validate):
-  tag = ExampleTag({"name": "Name"}, "")
-  examples = []
-  sdk = SDK_JAVA
-  mock_get_tag.return_value = tag
-  mock_validate.return_value = False
+    tag = ExampleTag({"name": "Name"}, "")
+    examples = []
+    sdk = SDK_JAVA
+    mock_get_tag.return_value = tag
+    mock_validate.return_value = False
 
-  result = _check_file(
-      examples, "filename.java", "/root/filename.java", [], sdk)
+    result = _check_file(
+        examples, "filename.java", "/root/filename.java", [], sdk)
 
-  assert result is True
-  assert len(examples) == 0
-  mock_get_tag.assert_called_once_with("/root/filename.java")
-  mock_validate.assert_called_once_with(tag.tag_as_dict, [])
+    assert result is True
+    assert len(examples) == 0
+    mock_get_tag.assert_called_once_with("/root/filename.java")
+    mock_validate.assert_called_once_with(tag.tag_as_dict, [])
 
 
 @mock.patch("builtins.open", mock_open(read_data="categories:\n    - 
category"))
 def test_get_supported_categories():
-  result = get_supported_categories("")
+    result = get_supported_categories("")
 
-  assert len(result) == 1
-  assert result[0] == "category"
+    assert len(result) == 1
+    assert result[0] == "category"
 
 
 @mock.patch("builtins.open", mock_open(read_data="data"))
 @mock.patch("helper._get_name")
 def test__get_example(mock_get_name):
-  mock_get_name.return_value = "filepath"
-  tag = ExampleTag({
-      "name": "Name",
-      "description": "Description",
-      "multifile": "False",
-      "categories": [""],
-      "pipeline_options": "--option option",
-      "context_line": 1
-  },
-                   "")
-
-  result = _get_example("/root/filepath.java", "filepath.java", tag)
-
-  assert result == Example(
-      name="filepath",
-      sdk=SDK_JAVA,
-      filepath="/root/filepath.java",
-      code="data",
-      status=STATUS_UNSPECIFIED,
-      tag=Tag(
-          "Name", "Description", "False", [""], "--option option", False, 1),
-      link="https://github.com/apache/beam/blob/master/root/filepath.java";)
-  mock_get_name.assert_called_once_with("filepath.java")
+    mock_get_name.return_value = "filepath"
+    tag = ExampleTag({
+        "name": "Name",
+        "description": "Description",
+        "multifile": "False",
+        "categories": [""],
+        "pipeline_options": "--option option",
+        "context_line": 1
+    },
+        "")
+
+    result = _get_example("/root/filepath.java", "filepath.java", tag)
+
+    assert result == Example(
+        name="filepath",
+        sdk=SDK_JAVA,
+        filepath="/root/filepath.java",
+        code="data",
+        status=STATUS_UNSPECIFIED,
+        tag=Tag(
+            "Name", "Description", "False", [""], "--option option", False, 1),
+        link="https://github.com/apache/beam/blob/master/root/filepath.java";)
+    mock_get_name.assert_called_once_with("filepath.java")
 
 
 def test__validate_without_name_field():
-  tag = {}
-  assert _validate(tag, []) is False
+    tag = {}
+    assert _validate(tag, []) is False
 
 
 def test__validate_without_description_field():
-  tag = {"name": "Name"}
-  assert _validate(tag, []) is False
+    tag = {"name": "Name"}
+    assert _validate(tag, []) is False
 
 
 def test__validate_without_multifile_field():
-  tag = {"name": "Name", "description": "Description"}
-  assert _validate(tag, []) is False
+    tag = {"name": "Name", "description": "Description"}
+    assert _validate(tag, []) is False
 
 
 def test__validate_with_incorrect_multifile_field():
-  tag = {"name": "Name", "description": "Description", "multifile": 
"Multifile"}
-  assert _validate(tag, []) is False
+    tag = {"name": "Name", "description": "Description", "multifile": 
"Multifile"}
+    assert _validate(tag, []) is False
 
 
 def test__validate_without_categories_field():
-  tag = {"name": "Name", "description": "Description", "multifile": "true"}
-  assert _validate(tag, []) is False
+    tag = {"name": "Name", "description": "Description", "multifile": "true"}
+    assert _validate(tag, []) is False
 
 
 def test__validate_without_incorrect_categories_field():
-  tag = {
-      "name": "Name",
-      "description": "Description",
-      "multifile": "true",
-      "categories": "Categories"
-  }
-  assert _validate(tag, []) is False
+    tag = {
+        "name": "Name",
+        "description": "Description",
+        "multifile": "true",
+        "categories": "Categories"
+    }
+    assert _validate(tag, []) is False
 
 
 def test__validate_with_not_supported_category():
-  tag = {
-      "name": "Name",
-      "description": "Description",
-      "multifile": "true",
-      "categories": ["category1"]
-  }
-  assert _validate(tag, ["category"]) is False
+    tag = {
+        "name": "Name",
+        "description": "Description",
+        "multifile": "true",
+        "categories": ["category1"]
+    }
+    assert _validate(tag, ["category"]) is False
 
 
 def test__validate_with_all_fields():
-  tag = {
-      "name": "Name",
-      "description": "Description",
-      "multifile": "true",
-      "categories": ["category"],
-      "pipeline_options": "--option option",
-      "context_line": 1
-  }
-  assert _validate(tag, ["category"]) is True
+    tag = {
+        "name": "Name",
+        "description": "Description",
+        "multifile": "true",
+        "categories": ["category"],
+        "pipeline_options": "--option option",
+        "context_line": 1
+    }
+    assert _validate(tag, ["category"]) is True
 
 
 def test__get_name():
-  result = _get_name("filepath.extension")
+    result = _get_name("filepath.extension")
 
-  assert result == "filepath"
+    assert result == "filepath"
 
 
 @pytest.mark.asyncio
 @mock.patch("grpc_client.GRPCClient.check_status")
 @mock.patch("grpc_client.GRPCClient.run_code")
 async def test__update_example_status(
-    mock_grpc_client_run_code, mock_grpc_client_check_status):
-  example = Example(
-      name="file",
-      pipeline_id="pipeline_id",
-      sdk=SDK_UNSPECIFIED,
-      filepath="root/file.extension",
-      code="code",
-      output="output",
-      status=STATUS_UNSPECIFIED,
-      tag=Tag(**{"pipeline_options": "--key value"}),
-      link="link")
-
-  mock_grpc_client_run_code.return_value = "pipeline_id"
-  mock_grpc_client_check_status.side_effect = [
-      STATUS_VALIDATING, STATUS_FINISHED
-  ]
-
-  await _update_example_status(example, GRPCClient())
-
-  assert example.pipeline_id == "pipeline_id"
-  assert example.status == STATUS_FINISHED
-  mock_grpc_client_run_code.assert_called_once_with(
-      example.code, example.sdk, "--key value")
-  mock_grpc_client_check_status.assert_has_calls([mock.call("pipeline_id")])
+      mock_grpc_client_run_code, mock_grpc_client_check_status):
+    example = Example(
+        name="file",
+        pipeline_id="pipeline_id",
+        sdk=SDK_UNSPECIFIED,
+        filepath="root/file.extension",
+        code="code",
+        output="output",
+        status=STATUS_UNSPECIFIED,
+        tag=Tag(**{"pipeline_options": "--key value"}),
+        link="link")
+
+    mock_grpc_client_run_code.return_value = "pipeline_id"
+    mock_grpc_client_check_status.side_effect = [
+        STATUS_VALIDATING, STATUS_FINISHED
+    ]
+
+    await _update_example_status(example, GRPCClient())
+
+    assert example.pipeline_id == "pipeline_id"
+    assert example.status == STATUS_FINISHED
+    mock_grpc_client_run_code.assert_called_once_with(
+        example.code, example.sdk, "--key value")
+    mock_grpc_client_check_status.assert_has_calls([mock.call("pipeline_id")])
 
 
 def test__get_object_type():
-  result_example = _get_object_type(
-      "filename.extension", "filepath/examples/filename.extension")
-  result_kata = _get_object_type(
-      "filename.extension", "filepath/katas/filename.extension")
-  result_test = _get_object_type(
-      "filename_test.extension", "filepath/examples/filename_test.extension")
-
-  assert result_example == PRECOMPILED_OBJECT_TYPE_EXAMPLE
-  assert result_kata == PRECOMPILED_OBJECT_TYPE_KATA
-  assert result_test == PRECOMPILED_OBJECT_TYPE_UNIT_TEST
+    result_example = _get_object_type(
+        "filename.extension", "filepath/examples/filename.extension")
+    result_kata = _get_object_type(
+        "filename.extension", "filepath/katas/filename.extension")
+    result_test = _get_object_type(
+        "filename_test.extension", "filepath/examples/filename_test.extension")
+
+    assert result_example == PRECOMPILED_OBJECT_TYPE_EXAMPLE
+    assert result_kata == PRECOMPILED_OBJECT_TYPE_KATA
+    assert result_test == PRECOMPILED_OBJECT_TYPE_UNIT_TEST
diff --git a/playground/infrastructure/test_logger.py 
b/playground/infrastructure/test_logger.py
index 26cf159e21e..847f120f72f 100644
--- a/playground/infrastructure/test_logger.py
+++ b/playground/infrastructure/test_logger.py
@@ -19,6 +19,6 @@ from logger import setup_logger
 
 
 def test_setup_logger():
-  count_default_handlers = len(logging.getLogger().handlers)
-  setup_logger()  # adding 2 handlers in root logger
-  assert (len(logging.getLogger().handlers) - count_default_handlers) == 2
+    count_default_handlers = len(logging.getLogger().handlers)
+    setup_logger()  # adding 2 handlers in root logger
+    assert (len(logging.getLogger().handlers) - count_default_handlers) == 2

Reply via email to