https://github.com/python/cpython/commit/ad39f017881e0bd8ffd809755ebf76380b928ad3
commit: ad39f017881e0bd8ffd809755ebf76380b928ad3
branch: main
author: Serhiy Storchaka <[email protected]>
committer: serhiy-storchaka <[email protected]>
date: 2025-05-31T13:01:46+03:00
summary:
gh-108885: Use subtests for doctest examples run by unittest (GH-134890)
Run each example as a subtest in unit tests synthesized by
doctest.DocFileSuite() and doctest.DocTestSuite().
Add the doctest.DocTestRunner.report_skip() method.
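
For context, a minimal sketch (not part of the commit) of the new behavior,
using only public doctest/unittest APIs; the add() function and the printed
names below are illustrative:

    import doctest
    import unittest

    def add(a, b):
        """
        >>> add(2, 2)   # fails, and is reported as its own subtest
        5
        >>> add(1, 0)   # still runs even though the example above failed
        1
        """
        return a + b

    # DocTestSuite() with no arguments collects doctests from the calling
    # module (__main__ when run as a script).
    suite = doctest.DocTestSuite()
    result = unittest.TestResult()
    suite.run(result)
    # Each failing example is now a subtest named by its example index,
    # e.g. "add (__main__) [0]", instead of one aggregated failure per
    # docstring.
    for test, _ in result.failures:
        print(test)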
files:
A Misc/NEWS.d/next/Library/2025-05-29-17-39-13.gh-issue-108885.MegCRA.rst
M Doc/library/doctest.rst
M Lib/doctest.py
M Lib/test/test_doctest/test_doctest.py
M Lib/test/test_regrtest.py
diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst
index 8236d703fc1e45..fb43cf918b84dd 100644
--- a/Doc/library/doctest.rst
+++ b/Doc/library/doctest.rst
@@ -1046,12 +1046,15 @@ from text files and modules with doctests:
Convert doctest tests from one or more text files to a
:class:`unittest.TestSuite`.
- The returned :class:`unittest.TestSuite` is to be run by the unittest framework
- and runs the interactive examples in each file. If an example in any file
- fails, then the synthesized unit test fails, and a :exc:`~unittest.TestCase.failureException`
- exception is raised showing the name of the file containing the test and a
- (sometimes approximate) line number. If all the examples in a file are
- skipped, then the synthesized unit test is also marked as skipped.
+ The returned :class:`unittest.TestSuite` is to be run by the unittest
+ framework and runs the interactive examples in each file.
+ Each file is run as a separate unit test, and each example in a file
+ is run as a :ref:`subtest <subtests>`.
+ If any example in a file fails, then the synthesized unit test fails.
+ The traceback for failure or error contains the name of the file
+ containing the test and a (sometimes approximate) line number.
+ If all the examples in a file are skipped, then the synthesized unit
+ test is also marked as skipped.
Pass one or more paths (as strings) to text files to be examined.
@@ -1109,18 +1112,23 @@ from text files and modules with doctests:
The global ``__file__`` is added to the globals provided to doctests loaded
from a text file using :func:`DocFileSuite`.
+ .. versionchanged:: next
+ Run each example as a :ref:`subtest <subtests>`.
+
.. function:: DocTestSuite(module=None, globs=None, extraglobs=None,
test_finder=None, setUp=None, tearDown=None, optionflags=0, checker=None)
Convert doctest tests for a module to a :class:`unittest.TestSuite`.
- The returned :class:`unittest.TestSuite` is to be run by the unittest framework
- and runs each doctest in the module.
- Each docstring is run as a separate unit test.
- If any of the doctests fail, then the synthesized unit test fails,
- and a :exc:`unittest.TestCase.failureException` exception is raised
- showing the name of the file containing the test and a (sometimes approximate)
- line number. If all the examples in a docstring are skipped, then the
+ The returned :class:`unittest.TestSuite` is to be run by the unittest
+ framework and runs each doctest in the module.
+ Each docstring is run as a separate unit test, and each example in
+ a docstring is run as a :ref:`subtest <subtests>`.
+ If any of the doctests fail, then the synthesized unit test fails.
+ The traceback for failure or error contains the name of the file
+ containing the test and a (sometimes approximate) line number.
+ If all the examples in a docstring are skipped, then the
+ synthesized unit test is also marked as skipped.
Optional argument *module* provides the module to be tested. It can be a module
object or a (possibly dotted) module name. If not specified, the module calling
@@ -1145,6 +1153,9 @@ from text files and modules with doctests:
:func:`DocTestSuite` returns an empty :class:`unittest.TestSuite` if *module*
contains no docstrings instead of raising :exc:`ValueError`.
+ .. versionchanged:: next
+ Run each example as a :ref:`subtest <subtests>`.
+
Under the covers, :func:`DocTestSuite` creates a :class:`unittest.TestSuite` out
of :class:`!doctest.DocTestCase` instances, and :class:`!DocTestCase` is a
subclass of :class:`unittest.TestCase`. :class:`!DocTestCase` isn't documented
@@ -1507,7 +1518,7 @@ DocTestRunner objects
with strings that should be displayed. It defaults to ``sys.stdout.write``. If
capturing the output is not sufficient, then the display output can be also
customized by subclassing DocTestRunner, and overriding the methods
- :meth:`report_start`, :meth:`report_success`,
+ :meth:`report_skip`, :meth:`report_start`, :meth:`report_success`,
:meth:`report_unexpected_exception`, and :meth:`report_failure`.
The optional keyword argument *checker* specifies the :class:`OutputChecker`
@@ -1532,6 +1543,19 @@ DocTestRunner objects
:class:`DocTestRunner` defines the following methods:
+ .. method:: report_skip(out, test, example)
+
+ Report that the given example was skipped. This method is provided to
+ allow subclasses of :class:`DocTestRunner` to customize their output; it
+ should not be called directly.
+
+ *example* is the example about to be processed. *test* is the test
+ containing *example*. *out* is the output function that was passed to
+ :meth:`DocTestRunner.run`.
+
+ .. versionadded:: next
+
+
.. method:: report_start(out, test, example)
Report that the test runner is about to process the given example. This method
diff --git a/Lib/doctest.py b/Lib/doctest.py
index dec10a345165da..c8c95ecbb273b2 100644
--- a/Lib/doctest.py
+++ b/Lib/doctest.py
@@ -101,6 +101,7 @@ def _test():
import re
import sys
import traceback
+import types
import unittest
from io import StringIO, IncrementalNewlineDecoder
from collections import namedtuple
@@ -108,8 +109,6 @@ def _test():
from _colorize import ANSIColors, can_colorize
-__unittest = True
-
class TestResults(namedtuple('TestResults', 'failed attempted')):
def __new__(cls, failed, attempted, *, skipped=0):
results = super().__new__(cls, failed, attempted)
@@ -387,7 +386,7 @@ def __init__(self, out):
self.__out = out
self.__debugger_used = False
# do not play signal games in the pdb
- pdb.Pdb.__init__(self, stdout=out, nosigint=True)
+ super().__init__(stdout=out, nosigint=True)
# still use input() to get user input
self.use_rawinput = 1
@@ -1280,6 +1279,11 @@ def __init__(self, checker=None, verbose=None, optionflags=0):
# Reporting methods
#/////////////////////////////////////////////////////////////////
+ def report_skip(self, out, test, example):
+ """
+ Report that the given example was skipped.
+ """
+
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
@@ -1377,6 +1381,8 @@ def __run(self, test, compileflags, out):
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
+ if not quiet:
+ self.report_skip(out, test, example)
skips += 1
continue
@@ -2274,12 +2280,63 @@ def set_unittest_reportflags(flags):
return old
+class _DocTestCaseRunner(DocTestRunner):
+
+ def __init__(self, *args, test_case, test_result, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._test_case = test_case
+ self._test_result = test_result
+ self._examplenum = 0
+
+ def _subTest(self):
+ subtest = unittest.case._SubTest(self._test_case, str(self._examplenum), {})
+ self._examplenum += 1
+ return subtest
+
+ def report_skip(self, out, test, example):
+ unittest.case._addSkip(self._test_result, self._subTest(), '')
+
+ def report_success(self, out, test, example, got):
+ self._test_result.addSubTest(self._test_case, self._subTest(), None)
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ tb = self._add_traceback(exc_info[2], test, example)
+ exc_info = (*exc_info[:2], tb)
+ self._test_result.addSubTest(self._test_case, self._subTest(), exc_info)
+
+ def report_failure(self, out, test, example, got):
+ msg = ('Failed example:\n' + _indent(example.source) +
+ self._checker.output_difference(example, got, self.optionflags).rstrip('\n'))
+ exc = self._test_case.failureException(msg)
+ tb = self._add_traceback(None, test, example)
+ exc_info = (type(exc), exc, tb)
+ self._test_result.addSubTest(self._test_case, self._subTest(), exc_info)
+
+ def _add_traceback(self, traceback, test, example):
+ if test.lineno is None or example.lineno is None:
+ lineno = None
+ else:
+ lineno = test.lineno + example.lineno + 1
+ return types.SimpleNamespace(
+ tb_frame = types.SimpleNamespace(
+ f_globals=test.globs,
+ f_code=types.SimpleNamespace(
+ co_filename=test.filename,
+ co_name=test.name,
+ ),
+ ),
+ tb_next = traceback,
+ tb_lasti = -1,
+ tb_lineno = lineno,
+ )
+
+
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
- unittest.TestCase.__init__(self)
+ super().__init__()
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
@@ -2303,30 +2360,28 @@ def tearDown(self):
test.globs.clear()
test.globs.update(self._dt_globs)
+ def run(self, result=None):
+ self._test_result = result
+ return super().run(result)
+
def runTest(self):
test = self._dt_test
- old = sys.stdout
- new = StringIO()
optionflags = self._dt_optionflags
+ result = self._test_result
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
+ if getattr(result, 'failfast', False):
+ optionflags |= FAIL_FAST
- runner = DocTestRunner(optionflags=optionflags,
- checker=self._dt_checker, verbose=False)
-
- try:
- runner.DIVIDER = "-"*70
- results = runner.run(test, out=new.write, clear_globs=False)
- if results.skipped == results.attempted:
- raise unittest.SkipTest("all examples were skipped")
- finally:
- sys.stdout = old
-
- if results.failed:
- raise self.failureException(self.format_failure(new.getvalue().rstrip('\n')))
+ runner = _DocTestCaseRunner(optionflags=optionflags,
+ checker=self._dt_checker, verbose=False,
+ test_case=self, test_result=result)
+ results = runner.run(test, clear_globs=False)
+ if results.skipped == results.attempted:
+ raise unittest.SkipTest("all examples were skipped")
def format_failure(self, err):
test = self._dt_test
@@ -2441,7 +2496,7 @@ def shortDescription(self):
class SkipDocTestCase(DocTestCase):
def __init__(self, module):
self.module = module
- DocTestCase.__init__(self, None)
+ super().__init__(None)
def setUp(self):
self.skipTest("DocTestSuite will not work with -O2 and above")
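
The _add_traceback() helper above fakes just enough of a traceback for
unittest's reporting to point at the doctest's file and line. A standalone
sketch of the same duck-typing trick (the file name and line number below
are made up):

    import traceback
    import types

    fake_tb = types.SimpleNamespace(
        tb_frame=types.SimpleNamespace(
            f_globals={},
            f_code=types.SimpleNamespace(
                co_filename="example.txt",  # hypothetical doctest file
                co_name="example_test",
            ),
        ),
        tb_next=None,   # a real traceback can be chained here, as above
        tb_lasti=-1,    # -1 avoids bytecode position-table lookups
        tb_lineno=42,
    )
    # The traceback module only reads attributes from the objects it is
    # given, so the namespace passes for a real traceback.
    print("".join(traceback.format_tb(fake_tb)))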
diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py
index 2bfaa6c599cd47..72763d4a0132d0 100644
--- a/Lib/test/test_doctest/test_doctest.py
+++ b/Lib/test/test_doctest/test_doctest.py
@@ -2269,20 +2269,22 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite(test.test_doctest.sample_doctest)
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
>>> for tst, _ in result.failures:
... print(tst)
- bad (test.test_doctest.sample_doctest.__test__)
- foo (test.test_doctest.sample_doctest)
- test_silly_setup (test.test_doctest.sample_doctest)
- y_is_one (test.test_doctest.sample_doctest)
+ bad (test.test_doctest.sample_doctest.__test__) [0]
+ foo (test.test_doctest.sample_doctest) [0]
+ >>> for tst, _ in result.errors:
+ ... print(tst)
+ test_silly_setup (test.test_doctest.sample_doctest) [1]
+ y_is_one (test.test_doctest.sample_doctest) [0]
We can also supply the module by name:
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest')
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
The module need not contain any doctest examples:
@@ -2304,21 +2306,26 @@ def test_DocTestSuite():
>>> result
<unittest.result.TestResult run=6 errors=0 failures=2>
>>> len(result.skipped)
- 2
+ 7
>>> for tst, _ in result.skipped:
... print(tst)
+ double_skip (test.test_doctest.sample_doctest_skip) [0]
+ double_skip (test.test_doctest.sample_doctest_skip) [1]
double_skip (test.test_doctest.sample_doctest_skip)
+ partial_skip_fail (test.test_doctest.sample_doctest_skip) [0]
+ partial_skip_pass (test.test_doctest.sample_doctest_skip) [0]
+ single_skip (test.test_doctest.sample_doctest_skip) [0]
single_skip (test.test_doctest.sample_doctest_skip)
>>> for tst, _ in result.failures:
... print(tst)
- no_skip_fail (test.test_doctest.sample_doctest_skip)
- partial_skip_fail (test.test_doctest.sample_doctest_skip)
+ no_skip_fail (test.test_doctest.sample_doctest_skip) [0]
+ partial_skip_fail (test.test_doctest.sample_doctest_skip) [1]
We can use the current module:
>>> suite = test.test_doctest.sample_doctest.test_suite()
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
We can also provide a DocTestFinder:
@@ -2326,7 +2333,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... test_finder=finder)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
The DocTestFinder need not return any tests:
@@ -2342,7 +2349,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest', globs={})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=5>
+ <unittest.result.TestResult run=9 errors=3 failures=2>
Alternatively, we can provide extra globals. Here we'll make an
error go away by providing an extra global variable:
@@ -2350,7 +2357,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... extraglobs={'y': 1})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
You can pass option flags. Here we'll cause an extra error
by disabling the blank-line feature:
@@ -2358,7 +2365,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=5>
+ <unittest.result.TestResult run=9 errors=2 failures=3>
You can supply setUp and tearDown functions:
@@ -2375,7 +2382,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
But the tearDown restores sanity:
@@ -2393,7 +2400,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest', setUp=setUp)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
Here, we didn't need to use a tearDown function because we
modified the test globals, which are a copy of the
@@ -2409,115 +2416,97 @@ def test_DocTestSuite_errors():
>>> suite = doctest.DocTestSuite(mod)
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=4 errors=0 failures=4>
+ <unittest.result.TestResult run=4 errors=6 failures=3>
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- AssertionError: Failed doctest test for test.test_doctest.sample_doctest_errors
- File "...sample_doctest_errors.py", line 0, in sample_doctest_errors
- <BLANKLINE>
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line 5, in test.test_doctest.sample_doctest_errors
- Failed example:
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 5, in test.test_doctest.sample_doctest_errors
+ >...>> 2 + 2
+ AssertionError: Failed example:
2 + 2
Expected:
5
Got:
4
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line 7, in test.test_doctest.sample_doctest_errors
- Failed example:
- 1/0
- Exception raised:
- Traceback (most recent call last):
- File "<doctest test.test_doctest.sample_doctest_errors[1]>", line 1, in <module>
- 1/0
- ~^~
- ZeroDivisionError: division by zero
<BLANKLINE>
>>> print(result.failures[1][1]) # doctest: +ELLIPSIS
- AssertionError: Failed doctest test for test.test_doctest.sample_doctest_errors.__test__.bad
- File "...sample_doctest_errors.py", line unknown line number, in bad
- <BLANKLINE>
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line ?, in test.test_doctest.sample_doctest_errors.__test__.bad
- Failed example:
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line None, in test.test_doctest.sample_doctest_errors.__test__.bad
+ AssertionError: Failed example:
2 + 2
Expected:
5
Got:
4
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line ?, in test.test_doctest.sample_doctest_errors.__test__.bad
- Failed example:
- 1/0
- Exception raised:
- Traceback (most recent call last):
- File "<doctest test.test_doctest.sample_doctest_errors.__test__.bad[1]>", line 1, in <module>
- 1/0
- ~^~
- ZeroDivisionError: division by zero
<BLANKLINE>
>>> print(result.failures[2][1]) # doctest: +ELLIPSIS
- AssertionError: Failed doctest test for test.test_doctest.sample_doctest_errors.errors
- File "...sample_doctest_errors.py", line 14, in errors
- <BLANKLINE>
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line 16, in test.test_doctest.sample_doctest_errors.errors
- Failed example:
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 16, in test.test_doctest.sample_doctest_errors.errors
+ >...>> 2 + 2
+ AssertionError: Failed example:
2 + 2
Expected:
5
Got:
4
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line 18, in test.test_doctest.sample_doctest_errors.errors
- Failed example:
+ <BLANKLINE>
+ >>> print(result.errors[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 7, in test.test_doctest.sample_doctest_errors
+ >...>> 1/0
+ File "<doctest test.test_doctest.sample_doctest_errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[1][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line None, in test.test_doctest.sample_doctest_errors.__test__.bad
+ File "<doctest test.test_doctest.sample_doctest_errors.__test__.bad[1]>", line 1, in <module>
1/0
- Exception raised:
- Traceback (most recent call last):
- File "<doctest test.test_doctest.sample_doctest_errors.errors[1]>", line 1, in <module>
- 1/0
- ~^~
- ZeroDivisionError: division by zero
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line 23, in test.test_doctest.sample_doctest_errors.errors
- Failed example:
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[2][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 18, in test.test_doctest.sample_doctest_errors.errors
+ >...>> 1/0
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[3][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 23, in test.test_doctest.sample_doctest_errors.errors
+ >...>> f()
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[3]>", line 1, in <module>
f()
- Exception raised:
- Traceback (most recent call last):
- File "<doctest test.test_doctest.sample_doctest_errors.errors[3]>", line 1, in <module>
- f()
- ~^^
- File "<doctest test.test_doctest.sample_doctest_errors.errors[2]>", line 2, in f
- 2 + '2'
- ~~^~~~~
- TypeError: ...
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line 25, in test.test_doctest.sample_doctest_errors.errors
- Failed example:
- g()
- Exception raised:
- Traceback (most recent call last):
- File "<doctest test.test_doctest.sample_doctest_errors.errors[4]>", line 1, in <module>
- g()
- ~^^
- File "...sample_doctest_errors.py", line 12, in g
- [][0] # line 12
- ~~^^^
- IndexError: list index out of range
+ ~^^
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
<BLANKLINE>
- >>> print(result.failures[3][1]) # doctest: +ELLIPSIS
- AssertionError: Failed doctest test for test.test_doctest.sample_doctest_errors.syntax_error
- File "...sample_doctest_errors.py", line 29, in syntax_error
+ >>> print(result.errors[4][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 25, in test.test_doctest.sample_doctest_errors.errors
+ >...>> g()
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[4]>", line 1, in <module>
+ g()
+ ~^^
+ File "...sample_doctest_errors.py", line 12, in g
+ [][0] # line 12
+ ~~^^^
+ IndexError: list index out of range
<BLANKLINE>
- ----------------------------------------------------------------------
- File "...sample_doctest_errors.py", line 31, in test.test_doctest.sample_doctest_errors.syntax_error
- Failed example:
+ >>> print(result.errors[5][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 31, in test.test_doctest.sample_doctest_errors.syntax_error
+ >...>> 2+*3
+ File "<doctest test.test_doctest.sample_doctest_errors.syntax_error[0]>", line 1
2+*3
- Exception raised:
- File "<doctest test.test_doctest.sample_doctest_errors.syntax_error[0]>", line 1
- 2+*3
- ^
- SyntaxError: invalid syntax
+ ^
+ SyntaxError: invalid syntax
<BLANKLINE>
"""
@@ -2532,7 +2521,7 @@ def test_DocFileSuite():
... 'test_doctest2.txt',
... 'test_doctest4.txt')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
The test files are looked for in the directory containing the
calling module. A package keyword argument can be provided to
@@ -2544,14 +2533,14 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... package='test.test_doctest')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
'/' should be used as a path separator. It will be converted
to a native separator at run time:
>>> suite = doctest.DocFileSuite('../test_doctest/test_doctest.txt')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=1 failures=0>
If DocFileSuite is used from an interactive session, then files
are resolved relative to the directory of sys.argv[0]:
@@ -2577,7 +2566,7 @@ def test_DocFileSuite():
>>> suite = doctest.DocFileSuite(test_file, module_relative=False)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=1 failures=0>
It is an error to specify `package` when `module_relative=False`:
@@ -2595,12 +2584,15 @@ def test_DocFileSuite():
... 'test_doctest_skip2.txt')
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=4 errors=0 failures=1>
+ <unittest.result.TestResult run=4 errors=1 failures=0>
>>> len(result.skipped)
- 1
+ 4
>>> for tst, _ in result.skipped: # doctest: +ELLIPSIS
... print('=', tst)
+ = ...test_doctest_skip.txt [0]
+ = ...test_doctest_skip.txt [1]
= ...test_doctest_skip.txt
+ = ...test_doctest_skip2.txt [0]
You can specify initial global variables:
@@ -2609,7 +2601,7 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=1>
+ <unittest.result.TestResult run=3 errors=1 failures=0>
In this case, we supplied a missing favorite color. You can
provide doctest options:
@@ -2620,7 +2612,7 @@ def test_DocFileSuite():
... optionflags=doctest.DONT_ACCEPT_BLANKLINE,
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=1 failures=1>
And, you can provide setUp and tearDown functions:
@@ -2639,7 +2631,7 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=1>
+ <unittest.result.TestResult run=3 errors=1 failures=0>
But the tearDown restores sanity:
@@ -2681,7 +2673,7 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... encoding='utf-8')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
"""
def test_DocFileSuite_errors():
@@ -2691,52 +2683,49 @@ def test_DocFileSuite_errors():
>>> suite = doctest.DocFileSuite('test_doctest_errors.txt')
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=3 failures=1>
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- AssertionError: Failed doctest test for test_doctest_errors.txt
- File "...test_doctest_errors.txt", line 0
- <BLANKLINE>
- ----------------------------------------------------------------------
- File "...test_doctest_errors.txt", line 4, in test_doctest_errors.txt
- Failed example:
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 4, in test_doctest_errors.txt
+ >...>> 2 + 2
+ AssertionError: Failed example:
2 + 2
Expected:
5
Got:
4
- ----------------------------------------------------------------------
- File "...test_doctest_errors.txt", line 6, in test_doctest_errors.txt
- Failed example:
+ <BLANKLINE>
+ >>> print(result.errors[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 6, in test_doctest_errors.txt
+ >...>> 1/0
+ File "<doctest test_doctest_errors.txt[1]>", line 1, in <module>
1/0
- Exception raised:
- Traceback (most recent call last):
- File "<doctest test_doctest_errors.txt[1]>", line 1, in <module>
- 1/0
- ~^~
- ZeroDivisionError: division by zero
- ----------------------------------------------------------------------
- File "...test_doctest_errors.txt", line 11, in test_doctest_errors.txt
- Failed example:
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[1][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 11, in test_doctest_errors.txt
+ >...>> f()
+ File "<doctest test_doctest_errors.txt[3]>", line 1, in <module>
f()
- Exception raised:
- Traceback (most recent call last):
- File "<doctest test_doctest_errors.txt[3]>", line 1, in <module>
- f()
- ~^^
- File "<doctest test_doctest_errors.txt[2]>", line 2, in f
- 2 + '2'
- ~~^~~~~
- TypeError: ...
- ----------------------------------------------------------------------
- File "...test_doctest_errors.txt", line 13, in test_doctest_errors.txt
- Failed example:
+ ~^^
+ File "<doctest test_doctest_errors.txt[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ <BLANKLINE>
+ >>> print(result.errors[2][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 13, in test_doctest_errors.txt
+ >...>> 2+*3
+ File "<doctest test_doctest_errors.txt[4]>", line 1
2+*3
- Exception raised:
- File "<doctest test_doctest_errors.txt[4]>", line 1
- 2+*3
- ^
- SyntaxError: invalid syntax
+ ^
+ SyntaxError: invalid syntax
<BLANKLINE>
+
"""
def test_trailing_space_in_test():
@@ -2807,16 +2796,25 @@ def test_unittest_reportflags():
>>> import unittest
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=1 failures=1>
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- AssertionError: Failed doctest test for test_doctest.txt
- ...
- Failed example:
- favorite_color
- ...
- Failed example:
+ Traceback (most recent call last):
+ File ...
+ >...>> if 1:
+ AssertionError: Failed example:
if 1:
- ...
+ print('a')
+ print()
+ print('b')
+ Expected:
+ a
+ <BLANKLINE>
+ b
+ Got:
+ a
+ <BLANKLINE>
+ b
+ <BLANKLINE>
Note that we see both failures displayed.
@@ -2825,18 +2823,8 @@ def test_unittest_reportflags():
Now, when we run the test:
- >>> result = suite.run(unittest.TestResult())
- >>> result
- <unittest.result.TestResult run=1 errors=0 failures=1>
- >>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- AssertionError: Failed doctest test for test_doctest.txt
- ...
- Failed example:
- favorite_color
- Exception raised:
- ...
- NameError: name 'favorite_color' is not defined
- <BLANKLINE>
+ >>> suite.run(unittest.TestResult())
+ <unittest.result.TestResult run=1 errors=1 failures=0>
We get only the first failure.
@@ -2846,22 +2834,20 @@ def test_unittest_reportflags():
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF)
- Then the default eporting options are ignored:
+ Then the default reporting options are ignored:
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=1 failures=1>
*NOTE*: These doctest are intentionally not placed in raw string to depict
the trailing whitespace using `\x20` in the diff below.
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- AssertionError: Failed doctest test for test_doctest.txt
- ...
- Failed example:
- favorite_color
- ...
- Failed example:
+ Traceback ...
+ File ...
+ >...>> if 1:
+ AssertionError: Failed example:
if 1:
print('a')
print()
@@ -3669,9 +3655,9 @@ def test_run_doctestsuite_multiple_times():
>>> import test.test_doctest.sample_doctest
>>> suite = doctest.DocTestSuite(test.test_doctest.sample_doctest)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
"""
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 8f4fc09442e083..f3ac301686b9fc 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -2067,7 +2067,7 @@ def load_tests(loader, tests, pattern):
self.check_executed_tests(output, [testname],
failed=[testname],
parallel=True,
- stats=TestStats(1, 1, 0))
+ stats=TestStats(1, 2, 1))
def _check_random_seed(self, run_workers: bool):
# gh-109276: When -r/--randomize is used, random.seed() is called
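
One more consequence of the new runTest() (see the Lib/doctest.py hunk
above): unittest's failfast flag now maps onto doctest's FAIL_FAST option.
A hedged sketch of the effect (frac() is illustrative):

    import doctest
    import unittest

    def frac(x):
        """
        >>> frac(0)   # raises ZeroDivisionError; the run stops here
        0.0
        >>> frac(2)   # never attempted under failfast
        0.5
        """
        return 1 / x

    result = unittest.TestResult()
    result.failfast = True          # what unittest's -f/--failfast sets
    doctest.DocTestSuite().run(result)
    print(len(result.errors))       # 1: only the first example ran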
diff --git a/Misc/NEWS.d/next/Library/2025-05-29-17-39-13.gh-issue-108885.MegCRA.rst b/Misc/NEWS.d/next/Library/2025-05-29-17-39-13.gh-issue-108885.MegCRA.rst
new file mode 100644
index 00000000000000..e37cf121f5f529
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-05-29-17-39-13.gh-issue-108885.MegCRA.rst
@@ -0,0 +1,3 @@
+Run each example as a subtest in unit tests synthesized by
+:func:`doctest.DocFileSuite` and :func:`doctest.DocTestSuite`.
+Add the :meth:`doctest.DocTestRunner.report_skip` method.