diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/foreign_key/sql/tests/test_foreign_key_properties.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/foreign_key/sql/tests/test_foreign_key_properties.py
index faef8d2..b1f4e17 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/foreign_key/sql/tests/test_foreign_key_properties.py
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/foreign_key/sql/tests/test_foreign_key_properties.py
@@ -22,6 +22,11 @@ if sys.version_info[0] >= 3:
 
 
 class TestColumnForeignKeyGetConstraintCols(BaseTestGenerator):
+    scenarios = [
+        ("Test foreign key get constraint with no foreign key properties on"
+         " the column", dict())
+    ]
+
     def runTest(self):
         """ When there are no foreign key properties on the column, it returns an empty result """
         with test_utils.Database(self.server) as (connection, database_name):
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_acl.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_acl.py
index d95246c..a0f7f94 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_acl.py
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_acl.py
@@ -22,6 +22,11 @@ if sys.version_info[0] >= 3:
 
 
 class TestTablesAcl(BaseTestGenerator):
+    scenarios = [
+        ("Test query returns the permissions when there are permissions set up"
+         " on the table", dict())
+    ]
+
     def runTest(self):
         """ This tests that when there are permissions set up on the table, acl query returns the permissions"""
         with test_utils.Database(self.server) as (connection, database_name):
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_node.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_node.py
index 20a645a..21ec327 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_node.py
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_node.py
@@ -19,7 +19,13 @@ from regression.python_test_utils import test_utils
 if sys.version_info[0] >= 3:
     long = int
 
+
 class TestTablesNode(BaseTestGenerator):
+    scenarios = [
+        ("This scenario tests that all applicable sql template versions can "
+         "fetch table names", dict())
+    ]
+
     def runTest(self):
         """ This tests that all applicable sql template versions can fetch table names """
         with test_utils.Database(self.server) as (connection, database_name):
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_properties.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_properties.py
index a553bd9..5984bda 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_properties.py
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/tests/test_tables_properties.py
@@ -22,6 +22,11 @@ if sys.version_info[0] >= 3:
 
 
 class TestTablesProperties(BaseTestGenerator):
+    scenarios = [
+        ("This scenario tests that all applicable sql template versions can "
+         "fetch some ddl", dict())
+    ]
+
     def runTest(self):
         """ This tests that all applicable sql template versions can fetch some ddl """
         with test_utils.Database(self.server) as (connection, database_name):
diff --git a/web/pgadmin/browser/server_groups/servers/templates/connect/sql/tests/test_check_recovery.py b/web/pgadmin/browser/server_groups/servers/templates/connect/sql/tests/test_check_recovery.py
index 90b2e52..954e20b 100644
--- a/web/pgadmin/browser/server_groups/servers/templates/connect/sql/tests/test_check_recovery.py
+++ b/web/pgadmin/browser/server_groups/servers/templates/connect/sql/tests/test_check_recovery.py
@@ -8,6 +8,10 @@ class TestCheckRecovery(BaseTestGenerator):
 
     versions_to_test = ["default", "9.0_plus"]
 
+    scenarios = [
+        ("Test for check recovery", dict())
+    ]
+
     def runTest(self):
 
         cursor = test_utils.get_db_connection(self.server['db'],
diff --git a/web/pgadmin/utils/tests/test_versioned_template_loader.py b/web/pgadmin/utils/tests/test_versioned_template_loader.py
index 6d1d943..5374b87 100644
--- a/web/pgadmin/utils/tests/test_versioned_template_loader.py
+++ b/web/pgadmin/utils/tests/test_versioned_template_loader.py
@@ -18,6 +18,10 @@ from pgadmin.utils.route import BaseTestGenerator
 
 
 class TestVersionedTemplateLoader(BaseTestGenerator):
+    scenarios = [
+        ("Test versioned template loader", dict())
+    ]
+
     def setUp(self):
         self.loader = VersionedTemplateLoader(FakeApp())
 
diff --git a/web/regression/python_test_utils/test_utils.py b/web/regression/python_test_utils/test_utils.py
index c388025..03b5258 100644
--- a/web/regression/python_test_utils/test_utils.py
+++ b/web/regression/python_test_utils/test_utils.py
@@ -425,6 +425,25 @@ def apply_scenario(scenario, test):
     return newtest
 
 
+# Overrides unittest.TextTestResult.addSuccess to record passed test cases
+def add_success(self, test):
+    """
+    This function adds the passed test cases to the TextTestResult.passed list
+    :param self: TextTestResult class
+    :type self: TextTestResult object
+    :param test: test case
+    :type test: test case object
+    :return: None
+    """
+    if self.showAll:
+        # store (test, None) tuples to mirror the failures/errors list shape
+        self.passed.append((test, None))
+        self.stream.writeln("ok")
+    elif self.dots:
+        self.stream.write('.')
+        self.stream.flush()
+
+
 def get_scenario_name(cases):
     """
     This function filters the test cases from list of test cases and returns
@@ -451,7 +470,6 @@ def get_scenario_name(cases):
     return test_cases_dict, test_cases_dict_json
 
 
-
 class Database:
     """
     Temporarily create and connect to a database, tear it down at exit
diff --git a/web/regression/runtests.py b/web/regression/runtests.py
index 0e3f3dc..3349957 100644
--- a/web/regression/runtests.py
+++ b/web/regression/runtests.py
@@ -73,7 +73,7 @@ if pgadmin_credentials:
                 'login_password']
 
 # Execute the setup file
-exec(open("setup.py").read())
+exec(open("setup.py").read())
 
 # Get the config database schema version. We store this in pgadmin.model
 # as it turns out that putting it in the config files isn't a great idea
@@ -98,6 +98,13 @@ driver = None
 app_starter = None
 handle_cleanup = None
 
+from unittest.result import TestResult
+from unittest.runner import TextTestResult
+
+setattr(TestResult, "passed", [])
+
+TextTestResult.addSuccess = test_utils.add_success
+
 # Override apply_scenario method as we need custom test description/name
 scenarios.apply_scenario = test_utils.apply_scenario
 
@@ -207,49 +214,50 @@ def sig_handler(signo, frame):
         handle_cleanup()
 
 
+def update_test_result(test_cases, test_result_dict):
+    """
+    This function updates the test result into the appropriate bucket, i.e.
+    passed/failed/skipped.
+    :param test_cases: test cases
+    :type test_cases: list of (test case, result) tuples
+    :param test_result_dict: test result to be stored
+    :type test_result_dict: dict
+    :return: None
+    """
+    for test_case in test_cases:
+        test_class_name = test_case[0].__class__.__name__
+        if test_class_name in test_result_dict:
+            test_result_dict[test_class_name].append(
+                {test_case[0].scenario_name: test_case[1]})
+        else:
+            test_result_dict[test_class_name] = [
+                {test_case[0].scenario_name: test_case[1]}
+            ]
+
+
 def get_tests_result(test_suite):
-    """This function returns the total ran and total failed test cases count"""
+    """This function returns the ran, failed, skipped and passed test counts"""
     try:
         total_ran = test_suite.testsRun
+        passed_cases_result = {}
         failed_cases_result = {}
         skipped_cases_result = {}
         if total_ran:
-            if test_suite.failures:
-                for failed_case in test_suite.failures:
-                    if hasattr(failed_case[0], "scenario_name"):
-                        class_name = str(
-                            failed_case[0]).split('.')[-1].split()[0].strip(
-                            ')')
-                        if class_name in failed_cases_result:
-                            failed_cases_result[class_name].append(
-                                {failed_case[0].scenario_name: failed_case[1]})
-                        else:
-                            failed_cases_result[class_name] = \
-                                [{failed_case[0].scenario_name: failed_case[
-                                    1]}]
-            if test_suite.errors:
-                for error_case in test_suite.errors:
-                    if hasattr(error_case[0], "scenario_name"):
-                        class_name = str(
-                            error_case[0]).split('.')[-1].split()[0].strip(')')
-                        if class_name in failed_cases_result:
-                            failed_cases_result[class_name].append(
-                                {error_case[0].scenario_name: error_case[1]})
-                        else:
-                            failed_cases_result[class_name] = \
-                                [{error_case[0].scenario_name: error_case[1]}]
-            if test_suite.skipped:
-                for skip_test in test_suite.skipped:
-                    # if hasattr(skip_test[0], "scenario_name"):
-                    class_name = str(
-                        skip_test[0]).split('.')[-1].split()[0].strip(')')
-                    if class_name in skipped_cases_result:
-                        skipped_cases_result[class_name].append(
-                            {skip_test[0].scenario_name: skip_test[1]})
-                    else:
-                        skipped_cases_result[class_name] = \
-                            [{skip_test[0].scenario_name: skip_test[1]}]
-        return total_ran, failed_cases_result, skipped_cases_result
+            passed = test_suite.passed
+            failures = test_suite.failures
+            errors = test_suite.errors
+            skipped = test_suite.skipped
+            if passed:
+                update_test_result(passed, passed_cases_result)
+            if failures:
+                update_test_result(failures, failed_cases_result)
+            if errors:
+                update_test_result(errors, failed_cases_result)
+            if skipped:
+                update_test_result(skipped, skipped_cases_result)
+
+        return (total_ran, failed_cases_result, skipped_cases_result,
+                passed_cases_result)
     except Exception:
         traceback.print_exc(file=sys.stderr)
 
@@ -325,10 +333,14 @@ if __name__ == '__main__':
                                             descriptions=True,
                                             verbosity=2).run(suite)
 
-            ran_tests, failed_cases, skipped_cases = \
+            ran_tests, failed_cases, skipped_cases, passed_cases = \
                 get_tests_result(tests)
             test_result[server['name']] = [ran_tests, failed_cases,
-                                           skipped_cases]
+                                           skipped_cases, passed_cases]
+
+            # Reset the shared 'passed' list after each test run so that
+            # earlier servers' passed test cases are not counted again.
+            TestResult.passed = []
 
             if len(failed_cases) > 0:
                 failure = True
@@ -350,10 +362,11 @@ if __name__ == '__main__':
     for server_res in test_result:
         failed_cases = test_result[server_res][1]
         skipped_cases = test_result[server_res][2]
+        passed_cases = test_result[server_res][3]
         skipped_cases, skipped_cases_json = test_utils.get_scenario_name(
-                skipped_cases)
+            skipped_cases)
         failed_cases, failed_cases_json = test_utils.get_scenario_name(
-                failed_cases)
+            failed_cases)
         total_failed = sum({key: len(value) for key, value in
                             failed_cases.items()}.values())
         total_skipped = sum({key: len(value) for key, value in
@@ -377,7 +390,7 @@ if __name__ == '__main__':
             file=sys.stderr)
 
         temp_dict_for_server = {
-            server_res: {"tests_passed": total_passed_cases,
+            server_res: {"tests_passed": [total_passed_cases, passed_cases],
                          "tests_failed": [total_failed, failed_cases_json],
                          "tests_skipped": [total_skipped, skipped_cases_json]
                          }
