D3701: run-tests: extract onStart and onEnd into the test result

2018-07-09 Thread lothiraldan (Boris Feld)
This revision was automatically updated to reflect the committed changes.
Closed by commit rHG948691ea92a9: run-tests: extract onStart and onEnd into the test result (authored by lothiraldan, committed by ).

REPOSITORY
  rHG Mercurial

CHANGES SINCE LAST UPDATE
  https://phab.mercurial-scm.org/D3701?vs=9417&id=9479

REVISION DETAIL
  https://phab.mercurial-scm.org/D3701

AFFECTED FILES
  tests/run-tests.py

CHANGE DETAILS

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1710,6 +1710,14 @@
         else: # 'always', for testing purposes
             self.color = pygmentspresent
 
+    def onStart(self, test):
+        """ Can be overriden by custom TestResult
+        """
+
+    def onEnd(self):
+        """ Can be overriden by custom TestResult
+        """
+
     def addFailure(self, test, reason):
         self.failures.append((test, reason))
 
@@ -2098,71 +2106,73 @@
         super(TextTestRunner, self).__init__(*args, **kwargs)
 
         self._runner = runner
+        self._result = getTestResult()(self._runner.options, self.stream,
+                                       self.descriptions, 0)
 
     def listtests(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, 0)
         test = sorted(test, key=lambda t: t.name)
+
+        self._result.onStart(test)
+
         for t in test:
             print(t.name)
-            result.addSuccess(t)
+            self._result.addSuccess(t)
 
         if self._runner.options.xunit:
             with open(self._runner.options.xunit, "wb") as xuf:
-                self._writexunit(result, xuf)
+                self._writexunit(self._result, xuf)
 
         if self._runner.options.json:
             jsonpath = os.path.join(self._runner._outputdir, b'report.json')
             with open(jsonpath, 'w') as fp:
-                self._writejson(result, fp)
-
-        return result
+                self._writejson(self._result, fp)
+
+        return self._result
 
     def run(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, self.verbosity)
-        test(result)
-
-        failed = len(result.failures)
-        skipped = len(result.skipped)
-        ignored = len(result.ignored)
+        self._result.onStart(test)
+        test(self._result)
+
+        failed = len(self._result.failures)
+        skipped = len(self._result.skipped)
+        ignored = len(self._result.ignored)
 
         with iolock:
             self.stream.writeln('')
 
             if not self._runner.options.noskips:
-                for test, msg in result.skipped:
+                for test, msg in self._result.skipped:
                     formatted = 'Skipped %s: %s\n' % (test.name, msg)
-                    self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.failures:
+                    self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.failures:
                 formatted = 'Failed %s: %s\n' % (test.name, msg)
-                self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.errors:
+                self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.errors:
                 self.stream.writeln('Errored %s: %s' % (test.name, msg))
 
             if self._runner.options.xunit:
                 with open(self._runner.options.xunit, "wb") as xuf:
-                    self._writexunit(result, xuf)
+                    self._writexunit(self._result, xuf)
 
             if self._runner.options.json:
                 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                 with open(jsonpath, 'w') as fp:
-                    self._writejson(result, fp)
+                    self._writejson(self._result, fp)
 
             self._runner._checkhglib('Tested')
 
-            savetimes(self._runner._outputdir, result)
+            savetimes(self._runner._outputdir, self._result)
 
             if failed and self._runner.options.known_good_rev:
-                self._bisecttests(t for t, m in result.failures)
+                self._bisecttests(t for t, m in self._result.failures)
             self.stream.writeln(
                 '# Ran %d tests, %d skipped, %d failed.'
-                % (result.testsRun, skipped + ignored, failed))
+                % (self._result.testsRun, skipped + ignored, failed))
             if failed:
                 self.stream.writeln('python hash seed: %s' %
                                     os.environ['PYTHONHASHSEED'])
             if self._runner.options.time:
-                self.printtimes(result.times)
+                self.printtimes(self._result.times)
 
             if self._runner.options.exceptions:
                 exceptions = aggregateexceptions(
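
For readers following the thread: getTestResult() (visible in the diff
above) returns the TestResult class that run-tests.py instantiates, so a
custom result class can override the new hooks to emit its own messages.
A minimal hypothetical sketch; the subclass name and its output are
illustrative only, not part of this patch:

    # Hypothetical sketch, not part of D3701: a TestResult subclass that
    # overrides the new hooks to print its own start/end messages.
    class AnnouncingTestResult(TestResult):
        def onStart(self, test):
            # invoked once with the test collection, before any test runs
            self.stream.writeln('starting test run')

        def onEnd(self):
            # intended to run once after the whole run has finished
            self.stream.writeln('test run complete')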

D3701: run-tests: extract onStart and onEnd into the test result

2018-07-03 Thread lothiraldan (Boris Feld)
lothiraldan updated this revision to Diff 9417.

REPOSITORY
  rHG Mercurial

CHANGES SINCE LAST UPDATE
  https://phab.mercurial-scm.org/D3701?vs=9032&id=9417

REVISION DETAIL
  https://phab.mercurial-scm.org/D3701

AFFECTED FILES
  tests/run-tests.py

CHANGE DETAILS

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1711,6 +1711,14 @@
         else: # 'always', for testing purposes
             self.color = pygmentspresent
 
+    def onStart(self, test):
+        """ Can be overriden by custom TestResult
+        """
+
+    def onEnd(self):
+        """ Can be overriden by custom TestResult
+        """
+
     def addFailure(self, test, reason):
         self.failures.append((test, reason))
 
@@ -2099,71 +2107,73 @@
         super(TextTestRunner, self).__init__(*args, **kwargs)
 
         self._runner = runner
+        self._result = getTestResult()(self._runner.options, self.stream,
+                                       self.descriptions, 0)
 
     def listtests(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, 0)
         test = sorted(test, key=lambda t: t.name)
+
+        self._result.onStart(test)
+
         for t in test:
             print(t.name)
-            result.addSuccess(t)
+            self._result.addSuccess(t)
 
         if self._runner.options.xunit:
             with open(self._runner.options.xunit, "wb") as xuf:
-                self._writexunit(result, xuf)
+                self._writexunit(self._result, xuf)
 
         if self._runner.options.json:
             jsonpath = os.path.join(self._runner._outputdir, b'report.json')
             with open(jsonpath, 'w') as fp:
-                self._writejson(result, fp)
-
-        return result
+                self._writejson(self._result, fp)
+
+        return self._result
 
     def run(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, self.verbosity)
-        test(result)
-
-        failed = len(result.failures)
-        skipped = len(result.skipped)
-        ignored = len(result.ignored)
+        self._result.onStart(test)
+        test(self._result)
+
+        failed = len(self._result.failures)
+        skipped = len(self._result.skipped)
+        ignored = len(self._result.ignored)
 
         with iolock:
             self.stream.writeln('')
 
             if not self._runner.options.noskips:
-                for test, msg in result.skipped:
+                for test, msg in self._result.skipped:
                     formatted = 'Skipped %s: %s\n' % (test.name, msg)
-                    self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.failures:
+                    self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.failures:
                 formatted = 'Failed %s: %s\n' % (test.name, msg)
-                self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.errors:
+                self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.errors:
                 self.stream.writeln('Errored %s: %s' % (test.name, msg))
 
             if self._runner.options.xunit:
                 with open(self._runner.options.xunit, "wb") as xuf:
-                    self._writexunit(result, xuf)
+                    self._writexunit(self._result, xuf)
 
             if self._runner.options.json:
                 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                 with open(jsonpath, 'w') as fp:
-                    self._writejson(result, fp)
+                    self._writejson(self._result, fp)
 
             self._runner._checkhglib('Tested')
 
-            savetimes(self._runner._outputdir, result)
+            savetimes(self._runner._outputdir, self._result)
 
             if failed and self._runner.options.known_good_rev:
-                self._bisecttests(t for t, m in result.failures)
+                self._bisecttests(t for t, m in self._result.failures)
             self.stream.writeln(
                 '# Ran %d tests, %d skipped, %d failed.'
-                % (result.testsRun, skipped + ignored, failed))
+                % (self._result.testsRun, skipped + ignored, failed))
             if failed:
                 self.stream.writeln('python hash seed: %s' %
                                     os.environ['PYTHONHASHSEED'])
             if self._runner.options.time:
-                self.printtimes(result.times)
+                self.printtimes(self._result.times)
 
             if self._runner.options.exceptions:
                 exceptions = aggregateexceptions(
@@ -2186,7 +2196,7 @@
 
         self.stream.flush()
 
-        return result
+        return self._result
 
     def _bisecttests(self, tests):
         bisectcmd = ['hg', 'bisect']

D3701: run-tests: extract onStart and onEnd into the test result

2018-07-03 Thread durin42 (Augie Fackler)
durin42 added inline comments.

INLINE COMMENTS

> run-tests.py:1717
> +        """
> +        pass
> +

omit superfluous pass
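
A docstring by itself is a valid function body in Python, so the trailing
pass is redundant; the updated diff 9417 (above) drops it. A tiny
illustrative example:

    class Example(object):
        def hook(self):
            """Subclasses may override; the docstring alone suffices."""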

REPOSITORY
  rHG Mercurial

REVISION DETAIL
  https://phab.mercurial-scm.org/D3701

To: lothiraldan, #hg-reviewers
Cc: durin42, mercurial-devel


D3701: run-tests: extract onStart and onEnd into the test result

2018-06-12 Thread lothiraldan (Boris Feld)
lothiraldan updated this revision to Diff 9032.

REPOSITORY
  rHG Mercurial

CHANGES SINCE LAST UPDATE
  https://phab.mercurial-scm.org/D3701?vs=8993&id=9032

REVISION DETAIL
  https://phab.mercurial-scm.org/D3701

AFFECTED FILES
  tests/run-tests.py

CHANGE DETAILS

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1711,6 +1711,11 @@
         else: # 'always', for testing purposes
             self.color = pygmentspresent
 
+    def onStart(self, test):
+        """ Can be overriden by custom TestResult
+        """
+        pass
+
     def addFailure(self, test, reason):
         self.failures.append((test, reason))
 
@@ -2099,71 +2104,73 @@
         super(TextTestRunner, self).__init__(*args, **kwargs)
 
         self._runner = runner
+        self._result = getTestResult()(self._runner.options, self.stream,
+                                       self.descriptions, 0)
 
     def listtests(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, 0)
         test = sorted(test, key=lambda t: t.name)
+
+        self._result.onStart(test)
+
         for t in test:
             print(t.name)
-            result.addSuccess(t)
+            self._result.addSuccess(t)
 
         if self._runner.options.xunit:
             with open(self._runner.options.xunit, "wb") as xuf:
-                self._writexunit(result, xuf)
+                self._writexunit(self._result, xuf)
 
         if self._runner.options.json:
             jsonpath = os.path.join(self._runner._outputdir, b'report.json')
             with open(jsonpath, 'w') as fp:
-                self._writejson(result, fp)
-
-        return result
+                self._writejson(self._result, fp)
+
+        return self._result
 
     def run(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, self.verbosity)
-        test(result)
-
-        failed = len(result.failures)
-        skipped = len(result.skipped)
-        ignored = len(result.ignored)
+        self._result.onStart(test)
+        test(self._result)
+
+        failed = len(self._result.failures)
+        skipped = len(self._result.skipped)
+        ignored = len(self._result.ignored)
 
         with iolock:
             self.stream.writeln('')
 
             if not self._runner.options.noskips:
-                for test, msg in result.skipped:
+                for test, msg in self._result.skipped:
                     formatted = 'Skipped %s: %s\n' % (test.name, msg)
-                    self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.failures:
+                    self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.failures:
                 formatted = 'Failed %s: %s\n' % (test.name, msg)
-                self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.errors:
+                self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.errors:
                 self.stream.writeln('Errored %s: %s' % (test.name, msg))
 
             if self._runner.options.xunit:
                 with open(self._runner.options.xunit, "wb") as xuf:
-                    self._writexunit(result, xuf)
+                    self._writexunit(self._result, xuf)
 
             if self._runner.options.json:
                 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                 with open(jsonpath, 'w') as fp:
-                    self._writejson(result, fp)
+                    self._writejson(self._result, fp)
 
             self._runner._checkhglib('Tested')
 
-            savetimes(self._runner._outputdir, result)
+            savetimes(self._runner._outputdir, self._result)
 
             if failed and self._runner.options.known_good_rev:
-                self._bisecttests(t for t, m in result.failures)
+                self._bisecttests(t for t, m in self._result.failures)
             self.stream.writeln(
                 '# Ran %d tests, %d skipped, %d failed.'
-                % (result.testsRun, skipped + ignored, failed))
+                % (self._result.testsRun, skipped + ignored, failed))
             if failed:
                 self.stream.writeln('python hash seed: %s' %
                                     os.environ['PYTHONHASHSEED'])
             if self._runner.options.time:
-                self.printtimes(result.times)
+                self.printtimes(self._result.times)
 
             if self._runner.options.exceptions:
                 exceptions = aggregateexceptions(
@@ -2186,7 +2193,7 @@
 
         self.stream.flush()
 
-        return result
+        return self._result
 
     def _bisecttests(self, tests):
         bisectcmd = ['hg', 'bisect']
@@ -2752,6 +2759,8 @@
 

D3701: run-tests: extract onStart and onEnd into the test result

2018-06-07 Thread lothiraldan (Boris Feld)
lothiraldan created this revision.
Herald added a subscriber: mercurial-devel.
Herald added a reviewer: hg-reviewers.

REVISION SUMMARY
  It would allow a custom test result to display custom messages.

REPOSITORY
  rHG Mercurial

REVISION DETAIL
  https://phab.mercurial-scm.org/D3701

AFFECTED FILES
  tests/run-tests.py

CHANGE DETAILS

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1711,6 +1711,11 @@
         else: # 'always', for testing purposes
             self.color = pygmentspresent
 
+    def onStart(self, test):
+        """ Can be overriden by custom TestResult
+        """
+        pass
+
     def addFailure(self, test, reason):
         self.failures.append((test, reason))
 
@@ -2099,71 +2104,73 @@
         super(TextTestRunner, self).__init__(*args, **kwargs)
 
         self._runner = runner
+        self._result = getTestResult()(self._runner.options, self.stream,
+                                       self.descriptions, 0)
 
     def listtests(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, 0)
         test = sorted(test, key=lambda t: t.name)
+
+        self._result.onStart(test)
+
         for t in test:
             print(t.name)
-            result.addSuccess(t)
+            self._result.addSuccess(t)
 
         if self._runner.options.xunit:
             with open(self._runner.options.xunit, "wb") as xuf:
-                self._writexunit(result, xuf)
+                self._writexunit(self._result, xuf)
 
         if self._runner.options.json:
             jsonpath = os.path.join(self._runner._outputdir, b'report.json')
             with open(jsonpath, 'w') as fp:
-                self._writejson(result, fp)
-
-        return result
+                self._writejson(self._result, fp)
+
+        return self._result
 
     def run(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, self.verbosity)
-        test(result)
-
-        failed = len(result.failures)
-        skipped = len(result.skipped)
-        ignored = len(result.ignored)
+        self._result.onStart(test)
+        test(self._result)
+
+        failed = len(self._result.failures)
+        skipped = len(self._result.skipped)
+        ignored = len(self._result.ignored)
 
         with iolock:
             self.stream.writeln('')
 
             if not self._runner.options.noskips:
-                for test, msg in result.skipped:
+                for test, msg in self._result.skipped:
                     formatted = 'Skipped %s: %s\n' % (test.name, msg)
-                    self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.failures:
+                    self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.failures:
                 formatted = 'Failed %s: %s\n' % (test.name, msg)
-                self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.errors:
+                self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.errors:
                 self.stream.writeln('Errored %s: %s' % (test.name, msg))
 
             if self._runner.options.xunit:
                 with open(self._runner.options.xunit, "wb") as xuf:
-                    self._writexunit(result, xuf)
+                    self._writexunit(self._result, xuf)
 
             if self._runner.options.json:
                 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                 with open(jsonpath, 'w') as fp:
-                    self._writejson(result, fp)
+                    self._writejson(self._result, fp)
 
             self._runner._checkhglib('Tested')
 
-            savetimes(self._runner._outputdir, result)
+            savetimes(self._runner._outputdir, self._result)
 
             if failed and self._runner.options.known_good_rev:
-                self._bisecttests(t for t, m in result.failures)
+                self._bisecttests(t for t, m in self._result.failures)
             self.stream.writeln(
                 '# Ran %d tests, %d skipped, %d failed.'
-                % (result.testsRun, skipped + ignored, failed))
+                % (self._result.testsRun, skipped + ignored, failed))
             if failed:
                 self.stream.writeln('python hash seed: %s' %
                                     os.environ['PYTHONHASHSEED'])
             if self._runner.options.time:
-                self.printtimes(result.times)
+                self.printtimes(self._result.times)
 
             if self._runner.options.exceptions:
                 exceptions = aggregateexceptions(
@@ -2186,7 +2193,7 @@
 
         self.stream.flush()
 
-        return result
+        return self._result
 
     def _bisecttests(self, tests):
         bisectcmd = ['hg', 'bisect']