diff --git a/web/regression/python_test_utils/test_utils.py b/web/regression/python_test_utils/test_utils.py
index c388025..96e9504 100644
--- a/web/regression/python_test_utils/test_utils.py
+++ b/web/regression/python_test_utils/test_utils.py
@@ -425,6 +425,24 @@ def apply_scenario(scenario, test):
     return newtest
 
 
+# This method overrides TextTestResult.addSuccess to record passed test cases
+def addSuccess(self, test):
+    """
+    This function adds the passed test cases to the list TextTestResult.passed
+    :param self: TextTestResult instance
+    :type self: TextTestResult object
+    :param test: test case
+    :type test: test case object
+    :return: None
+    """
+    if self.showAll:
+        self.passed.append(test)
+        self.stream.writeln("ok")
+    elif self.dots:
+        self.stream.write('.')
+        self.stream.flush()
+
+
 def get_scenario_name(cases):
     """
     This function filters the test cases from list of test cases and returns
@@ -451,7 +469,6 @@ def get_scenario_name(cases):
     return test_cases_dict, test_cases_dict_json
 
 
-
 class Database:
     """
     Temporarily create and connect to a database, tear it down at exit
diff --git a/web/regression/runtests.py b/web/regression/runtests.py
index 0e3f3dc..6c6e6dc 100644
--- a/web/regression/runtests.py
+++ b/web/regression/runtests.py
@@ -98,6 +98,13 @@ driver = None
 app_starter = None
 handle_cleanup = None
 
+from unittest.result import TestResult
+from unittest.runner import TextTestResult
+
+setattr(TestResult, "passed", [])
+
+TextTestResult.addSuccess = test_utils.addSuccess
+
 # Override apply_scenario method as we need custom test description/name
 scenarios.apply_scenario = test_utils.apply_scenario
 
@@ -211,9 +218,23 @@ def get_tests_result(test_suite):
     """This function returns the total ran and total failed test cases count"""
     try:
         total_ran = test_suite.testsRun
+        passed_cases_result = {}
         failed_cases_result = {}
         skipped_cases_result = {}
+
         if total_ran:
+            if test_suite.passed:
+                for passed_case in test_suite.passed:
+                    if hasattr(passed_case, "scenario_name"):
+                        class_name = str(
+                            passed_case).split('.')[-1].split()[0].strip(
+                            ')')
+                        if class_name in passed_cases_result:
+                            passed_cases_result[class_name].append(
+                                passed_case.scenario_name)
+                        else:
+                            passed_cases_result[class_name] = \
+                                [passed_case.scenario_name]
             if test_suite.failures:
                 for failed_case in test_suite.failures:
                     if hasattr(failed_case[0], "scenario_name"):
@@ -249,7 +270,9 @@ def get_tests_result(test_suite):
                     else:
                         skipped_cases_result[class_name] = \
                             [{skip_test[0].scenario_name: skip_test[1]}]
-        return total_ran, failed_cases_result, skipped_cases_result
+        print(passed_cases_result)
+        return total_ran, failed_cases_result, skipped_cases_result, \
+               passed_cases_result
     except Exception:
         traceback.print_exc(file=sys.stderr)
 
@@ -325,10 +348,14 @@ if __name__ == '__main__':
                                             descriptions=True,
                                             verbosity=2).run(suite)
 
-            ran_tests, failed_cases, skipped_cases = \
+            ran_tests, failed_cases, skipped_cases, passed_cases = \
                 get_tests_result(tests)
             test_result[server['name']] = [ran_tests, failed_cases,
-                                           skipped_cases]
+                                           skipped_cases, passed_cases]
+
+            # Reset the 'passed' list before the next test run so that the
+            # same test case names are not appended again.
+            TestResult.passed = []
 
             if len(failed_cases) > 0:
                 failure = True
@@ -350,6 +377,7 @@ if __name__ == '__main__':
     for server_res in test_result:
         failed_cases = test_result[server_res][1]
         skipped_cases = test_result[server_res][2]
+        passed_cases = test_result[server_res][3]
         skipped_cases, skipped_cases_json = test_utils.get_scenario_name(
                 skipped_cases)
         failed_cases, failed_cases_json = test_utils.get_scenario_name(
@@ -377,7 +405,7 @@ if __name__ == '__main__':
             file=sys.stderr)
 
         temp_dict_for_server = {
-            server_res: {"tests_passed": total_passed_cases,
+            server_res: {"tests_passed": [total_passed_cases, passed_cases],
                          "tests_failed": [total_failed, failed_cases_json],
                          "tests_skipped": [total_skipped, skipped_cases_json]
                          }
