diff --git a/web/regression/runtests.py b/web/regression/runtests.py
index c225208..3622a18 100644
--- a/web/regression/runtests.py
+++ b/web/regression/runtests.py
@@ -18,6 +18,7 @@ import os
 import signal
 import sys
 import traceback
+import json
 
 from selenium import webdriver
 
@@ -29,7 +30,7 @@ else:
 logger = logging.getLogger(__name__)
 file_name = os.path.basename(__file__)
 
-from testscenarios.scenarios import generate_scenarios
+from testscenarios import scenarios
 
 CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
 
@@ -51,8 +52,8 @@ if os.path.isfile(config.TEST_SQLITE_PATH):
 
 config.TESTING_MODE = True
 
-# Disable upgrade checks - no need during testing, and it'll cause an error if there's
-# no network connection when it runs.
+# Disable upgrade checks - no need during testing, and it'll cause an error
+# if there's no network connection when it runs.
 config.UPGRADE_CHECK_ENABLED = False
 
 pgadmin_credentials = test_setup.config_data
@@ -72,7 +73,7 @@ if pgadmin_credentials:
                 'login_password']
 
 # Execute the setup file
-exec (open("setup.py").read())
+exec(open("setup.py").read())
 
 # Get the config database schema version. We store this in pgadmin.model
 # as it turns out that putting it in the config files isn't a great idea
@@ -97,6 +98,10 @@ driver = None
 app_starter = None
 handle_cleanup = None
 
+# Override the apply_scenario method as we need a custom test description/name
+scenarios.apply_scenario = test_utils.apply_scenario
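+# The custom version clones each test, uses the scenario name as its short
+# description and stores it in a 'name' attribute used when reporting results.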
+
+
 def get_suite(module_list, test_server, test_app_client):
     """
      This function add the tests to test suite and return modified test suite
@@ -125,7 +130,7 @@ def get_suite(module_list, test_server, test_app_client):
         obj.setTestClient(test_app_client)
         obj.setTestServer(test_server)
         obj.setDriver(driver)
-        scenario = generate_scenarios(obj)
+        scenario = scenarios.generate_scenarios(obj)
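+        # One cloned test is produced per scenario declared on the test class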
         pgadmin_suite.addTests(scenario)
 
     return pgadmin_suite
@@ -138,7 +143,7 @@ def get_test_modules(arguments):
 
     :param arguments: this is command line arguments for module name to
     which test suite will run
-    :type arguments: str
+    :type arguments: dict
     :return module list: test module list
     :rtype: list
     """
@@ -158,10 +163,6 @@ def get_test_modules(arguments):
         app_starter = AppStarter(driver, config)
         app_starter.start_app()
 
-    handle_cleanup = test_utils.get_cleanup_handler(test_client, app_starter)
-    # Register cleanup function to cleanup on exit
-    atexit.register(handle_cleanup)
-
     # Load the test modules which are in given package(i.e. in arguments.pkg)
     if arguments['pkg'] is None or arguments['pkg'] == "all":
         TestsGeneratorRegistry.load_generators('pgadmin', exclude_pkgs)
@@ -205,26 +206,36 @@ def get_tests_result(test_suite):
     """This function returns the total ran and total failed test cases count"""
     try:
         total_ran = test_suite.testsRun
-        failed_cases_result = []
-        skipped_cases_result = []
+        failed_cases_result = {}
+        skipped_cases_result = {}
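+        # Both dicts map a test class name to the list of scenario names
+        # that failed/erred or were skipped for that class.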
         if total_ran:
             if test_suite.failures:
                 for failed_case in test_suite.failures:
                     class_name = str(
                         failed_case[0]).split('.')[-1].split()[0].strip(')')
-                    failed_cases_result.append(class_name)
+                    if class_name in failed_cases_result:
+                        failed_cases_result[class_name].append(
+                            failed_case[0].name)
+                    else:
+                        failed_cases_result[class_name] = [failed_case[0].name]
             if test_suite.errors:
                 for error_case in test_suite.errors:
                     class_name = str(
                         error_case[0]).split('.')[-1].split()[0].strip(')')
-                    if class_name not in failed_cases_result:
-                        failed_cases_result.append(class_name)
+                    if class_name in failed_cases_result:
+                        failed_cases_result[class_name].append(
+                            error_case[0].name)
+                    else:
+                        failed_cases_result[class_name] = [error_case[0].name]
             if test_suite.skipped:
                 for skip_test in test_suite.skipped:
                     class_name = str(
                         skip_test[0]).split('.')[-1].split()[0].strip(')')
-                    if class_name not in failed_cases_result:
-                        skipped_cases_result.append(class_name)
+                    if class_name in skipped_cases_result:
+                        skipped_cases_result[class_name].append(
+                            skip_test[0].name)
+                    else:
+                        skipped_cases_result[class_name] = [skip_test[0].name]
         return total_ran, failed_cases_result, skipped_cases_result
     except Exception:
         traceback.print_exc(file=sys.stderr)
@@ -257,8 +268,12 @@ class StreamToLogger(object):
 if __name__ == '__main__':
     # Failure detected?
     failure = False
-
     test_result = dict()
+
+    handle_cleanup = test_utils.get_cleanup_handler(test_client, app_starter)
+    # Register cleanup function to clean up on exit
+    atexit.register(handle_cleanup)
+
     # Set signal handler for cleanup
     signal_list = dir(signal)
     required_signal_list = ['SIGTERM', 'SIGABRT', 'SIGQUIT', 'SIGINT']
@@ -321,11 +336,15 @@ if __name__ == '__main__':
     print(
         "==================================================================="
         "===\n", file=sys.stderr)
+
+    test_result_json = {}
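+    # Collects per-server results that are written to tests_result.json.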
     for server_res in test_result:
-        failed_cases = "\n\t\t".join(test_result[server_res][1])
-        skipped_cases = "\n\t\t".join(test_result[server_res][2])
-        total_failed = len(test_result[server_res][1])
-        total_skipped = len(test_result[server_res][2])
+        failed_cases = test_result[server_res][1]
+        skipped_cases = test_result[server_res][2]
+        total_failed = sum(len(value) for value in failed_cases.values())
+        total_skipped = sum(len(value) for value in skipped_cases.values())
         total_passed_cases = int(
             test_result[server_res][0]) - total_failed - total_skipped
 
@@ -335,18 +354,35 @@ if __name__ == '__main__':
             (server_res, total_passed_cases,
              (total_passed_cases != 1 and "s" or ""),
              total_failed, (total_failed != 1 and "s" or ""),
-             (total_failed != 0 and ":\n\t\t" or ""), failed_cases,
+             (total_failed != 0 and ":\n\t\t" or ""),
+             "\n\t\t".join("{} ({})".format(k, ",\n\t\t\t\t\t".join(map(
+                 str, v))) for k, v in failed_cases.items()),
              total_skipped, (total_skipped != 1 and "s" or ""),
-             (total_skipped != 0 and ":\n\t\t" or ""), skipped_cases),
+             (total_skipped != 0 and ":\n\t\t" or ""),
+             "\n\t\t".join("{} ({})".format(k, ",\n\t\t\t\t\t".join(map(
+                 str, v))) for k, v in skipped_cases.items())),
             file=sys.stderr)
 
+        test_result_json[server_res] = {
+            "passed_tests": total_passed_cases,
+            "failed_tests": [total_failed, failed_cases],
+            "skipped_tests": [total_skipped, skipped_cases]
+        }
+
+    # Dump the test results into a JSON file
+    json_file_path = CURRENT_PATH + "/tests_result.json"
+    with open(json_file_path, 'w') as outfile:
+        json.dump(test_result_json, outfile, indent=2)
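+    # tests_result.json now holds, for every server, the passed-test count
+    # plus the failed and skipped counts with their per-class scenario names.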
+
     print(
         "==================================================================="
         "===\n", file=sys.stderr)
 
     print("Please check output in file: %s/regression.log\n" % CURRENT_PATH)
 
-    if failure == True:
+    if failure:
         sys.exit(1)
     else:
         sys.exit(0)
diff --git a/web/regression/test_utils.py b/web/regression/test_utils.py
index fd0cb2a..41b656e 100644
--- a/web/regression/test_utils.py
+++ b/web/regression/test_utils.py
@@ -15,6 +15,7 @@ import uuid
 import psycopg2
 import sqlite3
 from functools import partial
+from testtools.testcase import clone_test_with_new_id
 
 import config
 import regression
@@ -48,9 +49,10 @@ def login_tester_account(tester):
         tester.post('/login', data=dict(email=email, password=password),
                     follow_redirects=True)
     else:
+        from regression.runtests import app_starter
         print("Unable to login test client, email and password not found.",
               file=sys.stderr)
-        _drop_objects(tester)
+        _cleanup(tester, app_starter)
         sys.exit(1)
 
 
@@ -118,7 +120,8 @@ def create_database(server, db_name):
         old_isolation_level = connection.isolation_level
         connection.set_isolation_level(0)
         pg_cursor = connection.cursor()
-        pg_cursor.execute('''CREATE DATABASE "%s" TEMPLATE template0''' % db_name)
+        pg_cursor.execute(
+            '''CREATE DATABASE "%s" TEMPLATE template0''' % db_name)
         connection.set_isolation_level(old_isolation_level)
         connection.commit()
 
@@ -145,27 +148,10 @@ def create_table(server, db_name, table_name):
         old_isolation_level = connection.isolation_level
         connection.set_isolation_level(0)
         pg_cursor = connection.cursor()
-        pg_cursor.execute('''CREATE TABLE "%s" (name VARCHAR, value NUMERIC)''' % table_name)
-        pg_cursor.execute('''INSERT INTO "%s" VALUES ('Some-Name', 6)''' % table_name)
-        connection.set_isolation_level(old_isolation_level)
-        connection.commit()
-
-    except Exception:
-        traceback.print_exc(file=sys.stderr)
-
-
-def create_table(server, db_name, table_name):
-    try:
-        connection = get_db_connection(db_name,
-                                       server['username'],
-                                       server['db_password'],
-                                       server['host'],
-                                       server['port'])
-        old_isolation_level = connection.isolation_level
-        connection.set_isolation_level(0)
-        pg_cursor = connection.cursor()
-        pg_cursor.execute('''CREATE TABLE "%s" (name VARCHAR, value NUMERIC)''' % table_name)
-        pg_cursor.execute('''INSERT INTO "%s" VALUES ('Some-Name', 6)''' % table_name)
+        pg_cursor.execute(
+            '''CREATE TABLE "%s" (name VARCHAR, value NUMERIC)''' % table_name)
+        pg_cursor.execute(
+            '''INSERT INTO "%s" VALUES ('Some-Name', 6)''' % table_name)
         connection.set_isolation_level(old_isolation_level)
         connection.commit()
 
@@ -181,7 +167,7 @@ def drop_database(connection, database_name):
         pg_cursor.execute(
             "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity "
             "WHERE pg_stat_activity.datname ='%s' and pid <> pg_backend_pid();" % database_name
-                          )
+        )
         pg_cursor.execute("SELECT * FROM pg_database db WHERE"
                           " db.datname='%s'" % database_name)
         if pg_cursor.fetchall():
@@ -414,6 +400,29 @@ def get_cleanup_handler(tester, app_starter):
     return partial(_cleanup, tester, app_starter)
 
 
+def apply_scenario(scenario, test):
+    """Apply scenario to test.
+    :param scenario: A tuple (name, parameters) to apply to the test. The test
+        is cloned, its id adjusted to have (name) after it, and the parameters
+        dict is used to update the new test.
+    :param test: The test to apply the scenario to. This test is unaltered.
+    :return: A new test cloned from test, with the scenario applied.
+    """
+    name, parameters = scenario
+    parameters["name"] = name
+    scenario_suffix = '(' + name + ')'
+    newtest = clone_test_with_new_id(test,
+                                     test.id() + scenario_suffix)
+    # Replace the test description with the scenario name
+    test_desc = name
+    if test_desc is not None:
+        newtest.shortDescription = (lambda: test_desc)
+    for key, value in parameters.items():
+        setattr(newtest, key, value)
+    return newtest
+
+
 class Database:
     """
     Temporarily create and connect to a database, tear it down at exit
@@ -434,8 +443,10 @@ class Database:
     def __enter__(self):
         self.name = "test_db_{0}".format(str(uuid.uuid4())[0:7])
         self.maintenance_connection = get_db_connection(self.server['db'],
-                                                        self.server['username'],
-                                                        self.server['db_password'],
+                                                        self.server[
+                                                            'username'],
+                                                        self.server[
+                                                            'db_password'],
                                                         self.server['host'],
                                                         self.server['port'])
         create_database(self.server, self.name)
