Mirror of https://github.com/python/cpython.git
gh-108794: doctest counts skipped tests (#108795)
* Add 'skipped' attribute to TestResults.
* Add 'skips' attribute to DocTestRunner.
* Rename private DocTestRunner._name2ft attribute to DocTestRunner._stats.
* Use f-string for string formatting.
* Add some tests.
* Document DocTestRunner attributes and its API for statistics.
* Document TestResults class.

Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com>
parent 4ba18099b7
commit 4f9b706c6f
Doc/library/doctest.rst

@@ -1409,6 +1409,27 @@ DocTestParser objects

   identifying this string, and is only used for error messages.

+TestResults objects
+^^^^^^^^^^^^^^^^^^^
+
+.. class:: TestResults(failed, attempted)
+
+   .. attribute:: failed
+
+      Number of failed tests.
+
+   .. attribute:: attempted
+
+      Number of attempted tests.
+
+   .. attribute:: skipped
+
+      Number of skipped tests.
+
+      .. versionadded:: 3.13
+
+
.. _doctest-doctestrunner:

DocTestRunner objects

@@ -1427,7 +1448,7 @@ DocTestRunner objects

   passing a subclass of :class:`OutputChecker` to the constructor.

   The test runner's display output can be controlled in two ways. First, an output
-   function can be passed to :meth:`TestRunner.run`; this function will be called
+   function can be passed to :meth:`run`; this function will be called
   with strings that should be displayed. It defaults to ``sys.stdout.write``. If
   capturing the output is not sufficient, then the display output can be also
   customized by subclassing DocTestRunner, and overriding the methods

@@ -1448,6 +1469,10 @@ DocTestRunner objects

   runner compares expected output to actual output, and how it displays failures.
   For more information, see section :ref:`doctest-options`.

+   The test runner accumulates statistics. The aggregated number of attempted,
+   failed and skipped examples is also available via the :attr:`tries`,
+   :attr:`failures` and :attr:`skips` attributes. The :meth:`run` and
+   :meth:`summarize` methods return a :class:`TestResults` instance.
+
   :class:`DocTestParser` defines the following methods:

@@ -1500,7 +1525,8 @@ DocTestRunner objects

   .. method:: run(test, compileflags=None, out=None, clear_globs=True)

      Run the examples in *test* (a :class:`DocTest` object), and display the
-      results using the writer function *out*.
+      results using the writer function *out*. Return a :class:`TestResults`
+      instance.

      The examples are run in the namespace ``test.globs``. If *clear_globs* is
      true (the default), then this namespace will be cleared after the test runs,

@@ -1519,12 +1545,29 @@ DocTestRunner objects

   .. method:: summarize(verbose=None)

      Print a summary of all the test cases that have been run by this DocTestRunner,
-      and return a :term:`named tuple` ``TestResults(failed, attempted)``.
+      and return a :class:`TestResults` instance.

      The optional *verbose* argument controls how detailed the summary is. If the
      verbosity is not specified, then the :class:`DocTestRunner`'s verbosity is
      used.

+   :class:`DocTestParser` has the following attributes:
+
+   .. attribute:: tries
+
+      Number of attempted examples.
+
+   .. attribute:: failures
+
+      Number of failed examples.
+
+   .. attribute:: skips
+
+      Number of skipped examples.
+
+      .. versionadded:: 3.13
+
+
.. _doctest-outputchecker:

OutputChecker objects
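For illustration, a minimal sketch of the TestResults behaviour documented above (not part of the diff; assumes Python 3.13+, where the ``skipped`` field exists):

from doctest import TestResults

results = TestResults(failed=0, attempted=3, skipped=1)
print(results.failed, results.attempted, results.skipped)   # 0 3 1

# Backward compatibility: the result still behaves like the old 2-tuple.
failed, attempted = results
assert (failed, attempted) == (0, 3)
assert tuple(results) == (0, 3)

# repr() mentions `skipped` only when it is non-zero.
print(TestResults(1, 2))              # TestResults(failed=1, attempted=2)
print(TestResults(1, 2, skipped=1))   # TestResults(failed=1, attempted=2, skipped=1)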
Doc/whatsnew/3.13.rst

@@ -122,6 +122,14 @@ dbm

  from the database.
  (Contributed by Dong-hee Na in :gh:`107122`.)

+doctest
+-------
+
+* The :meth:`doctest.DocTestRunner.run` method now counts the number of skipped
+  tests. Add :attr:`doctest.DocTestRunner.skips` and
+  :attr:`doctest.TestResults.skipped` attributes.
+  (Contributed by Victor Stinner in :gh:`108794`.)
+
io
--
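A small usage sketch of the behaviour described in the entry above (not part of the diff; assumes Python 3.13+): examples marked with the SKIP directive are now counted separately.

import doctest

def sample():
    """
    >>> 1 + 1
    2
    >>> 2 + 3   # doctest: +SKIP
    5
    """

test = doctest.DocTestFinder().find(sample)[0]
runner = doctest.DocTestRunner(verbose=False)
results = runner.run(test)

print(results)       # TestResults(failed=0, attempted=2, skipped=1)
print(runner.skips)  # 1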
Lib/doctest.py (150 changed lines)
@@ -105,7 +105,23 @@ import unittest
from io import StringIO, IncrementalNewlineDecoder
from collections import namedtuple

-TestResults = namedtuple('TestResults', 'failed attempted')
+
+class TestResults(namedtuple('TestResults', 'failed attempted')):
+    def __new__(cls, failed, attempted, *, skipped=0):
+        results = super().__new__(cls, failed, attempted)
+        results.skipped = skipped
+        return results
+
+    def __repr__(self):
+        if self.skipped:
+            return (f'TestResults(failed={self.failed}, '
+                    f'attempted={self.attempted}, '
+                    f'skipped={self.skipped})')
+        else:
+            # Leave the repr() unchanged for backward compatibility
+            # if skipped is zero
+            return super().__repr__()
+

# There are 4 basic classes:
#  - Example: a <source, want> pair, plus an intra-docstring line number.
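The subclass above keeps ``skipped`` as a plain instance attribute rather than a third tuple field, so existing callers that unpack the result as a 2-tuple keep working. A short sketch of the difference (Broken3Tuple is hypothetical, only for contrast; assumes Python 3.13+):

from collections import namedtuple
from doctest import TestResults

# A real third field would break the 2-tuple unpacking used by older code:
Broken3Tuple = namedtuple('Broken3Tuple', 'failed attempted skipped')
try:
    failed, attempted = Broken3Tuple(0, 3, 1)
except ValueError:
    pass   # "too many values to unpack"

# The chosen design keeps the 2-tuple shape and carries `skipped` on the side:
failed, attempted = TestResults(0, 3, skipped=1)
assert (failed, attempted) == (0, 3)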
@@ -1150,8 +1166,7 @@ class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
-    returns a tuple `(f, t)`, where `t` is the number of test cases
-    tried, and `f` is the number of test cases that failed.
+    returns a TestResults instance.

        >>> tests = DocTestFinder().find(_TestClass)
        >>> runner = DocTestRunner(verbose=False)

@@ -1164,8 +1179,8 @@ class DocTestRunner:
        _TestClass.square -> TestResults(failed=0, attempted=1)

    The `summarize` method prints a summary of all the test cases that
-    have been run by the runner, and returns an aggregated `(f, t)`
-    tuple:
+    have been run by the runner, and returns an aggregated TestResults
+    instance:

        >>> runner.summarize(verbose=1)
        4 items passed all tests:

@@ -1178,13 +1193,15 @@ class DocTestRunner:
        Test passed.
        TestResults(failed=0, attempted=7)

-    The aggregated number of tried examples and failed examples is
-    also available via the `tries` and `failures` attributes:
+    The aggregated number of tried examples and failed examples is also
+    available via the `tries`, `failures` and `skips` attributes:

        >>> runner.tries
        7
        >>> runner.failures
        0
+        >>> runner.skips
+        0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a

@@ -1233,7 +1250,8 @@ class DocTestRunner:
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
-        self._name2ft = {}
+        self.skips = 0
+        self._stats = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

@@ -1302,13 +1320,11 @@ class DocTestRunner:
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
-        flags that should be used to execute examples.  Return a tuple
-        `(f, t)`, where `t` is the number of examples tried, and `f`
-        is the number of examples that failed.  The examples are run
-        in the namespace `test.globs`.
+        flags that should be used to execute examples.  Return a TestResults
+        instance.  The examples are run in the namespace `test.globs`.
        """
-        # Keep track of the number of failures and tries.
-        failures = tries = 0
+        # Keep track of the number of failed, attempted, skipped examples.
+        failures = attempted = skips = 0

        # Save the option flags (since option directives can be used
        # to modify them).

@@ -1320,6 +1336,7 @@ class DocTestRunner:

        # Process each example.
        for examplenum, example in enumerate(test.examples):
+            attempted += 1

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.

@@ -1337,10 +1354,10 @@ class DocTestRunner:

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
+                skips += 1
                continue

            # Record that we started this example.
-            tries += 1
            if not quiet:
                self.report_start(out, test, example)
@@ -1418,19 +1435,22 @@ class DocTestRunner:
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

-        # Record and return the number of failures and tries.
-        self.__record_outcome(test, failures, tries)
-        return TestResults(failures, tries)
+        # Record and return the number of failures and attempted.
+        self.__record_outcome(test, failures, attempted, skips)
+        return TestResults(failures, attempted, skipped=skips)

-    def __record_outcome(self, test, f, t):
+    def __record_outcome(self, test, failures, tries, skips):
        """
-        Record the fact that the given DocTest (`test`) generated `f`
-        failures out of `t` tried examples.
+        Record the fact that the given DocTest (`test`) generated `failures`
+        failures out of `tries` tried examples.
        """
-        f2, t2 = self._name2ft.get(test.name, (0,0))
-        self._name2ft[test.name] = (f+f2, t+t2)
-        self.failures += f
-        self.tries += t
+        failures2, tries2, skips2 = self._stats.get(test.name, (0, 0, 0))
+        self._stats[test.name] = (failures + failures2,
+                                  tries + tries2,
+                                  skips + skips2)
+        self.failures += failures
+        self.tries += tries
+        self.skips += skips

    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>.+)'
@@ -1519,9 +1539,7 @@ class DocTestRunner:
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
-        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
-        the total number of failed examples, and `t` is the total
-        number of tried examples.
+        this DocTestRunner, and return a TestResults instance.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the

@@ -1532,59 +1550,61 @@ class DocTestRunner:
        notests = []
        passed = []
        failed = []
-        totalt = totalf = 0
-        for x in self._name2ft.items():
-            name, (f, t) = x
-            assert f <= t
-            totalt += t
-            totalf += f
-            if t == 0:
+        total_tries = total_failures = total_skips = 0
+        for item in self._stats.items():
+            name, (failures, tries, skips) = item
+            assert failures <= tries
+            total_tries += tries
+            total_failures += failures
+            total_skips += skips
+            if tries == 0:
                notests.append(name)
-            elif f == 0:
-                passed.append( (name, t) )
+            elif failures == 0:
+                passed.append((name, tries))
            else:
-                failed.append(x)
+                failed.append(item)
        if verbose:
            if notests:
-                print(len(notests), "items had no tests:")
+                print(f"{len(notests)} items had no tests:")
                notests.sort()
-                for thing in notests:
-                    print(" ", thing)
+                for name in notests:
+                    print(f" {name}")
            if passed:
-                print(len(passed), "items passed all tests:")
+                print(f"{len(passed)} items passed all tests:")
                passed.sort()
-                for thing, count in passed:
-                    print(" %3d tests in %s" % (count, thing))
+                for name, count in passed:
+                    print(f" {count:3d} tests in {name}")
        if failed:
            print(self.DIVIDER)
-            print(len(failed), "items had failures:")
+            print(f"{len(failed)} items had failures:")
            failed.sort()
-            for thing, (f, t) in failed:
-                print(" %3d of %3d in %s" % (f, t, thing))
+            for name, (failures, tries, skips) in failed:
+                print(f" {failures:3d} of {tries:3d} in {name}")
        if verbose:
-            print(totalt, "tests in", len(self._name2ft), "items.")
-            print(totalt - totalf, "passed and", totalf, "failed.")
-        if totalf:
-            print("***Test Failed***", totalf, "failures.")
+            print(f"{total_tries} tests in {len(self._stats)} items.")
+            print(f"{total_tries - total_failures} passed and {total_failures} failed.")
+        if total_failures:
+            msg = f"***Test Failed*** {total_failures} failures"
+            if total_skips:
+                msg = f"{msg} and {total_skips} skipped tests"
+            print(f"{msg}.")
        elif verbose:
            print("Test passed.")
-        return TestResults(totalf, totalt)
+        return TestResults(total_failures, total_tries, skipped=total_skips)

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
-        d = self._name2ft
-        for name, (f, t) in other._name2ft.items():
+        d = self._stats
+        for name, (failures, tries, skips) in other._stats.items():
            if name in d:
                # Don't print here by default, since doing
                # so breaks some of the buildbots
                #print("*** DocTestRunner.merge: '" + name + "' in both" \
                # " testers; summing outcomes.")
-                f2, t2 = d[name]
-                f = f + f2
-                t = t + t2
-                d[name] = f, t
+                failures2, tries2, skips2 = d[name]
+                failures = failures + failures2
+                tries = tries + tries2
+                skips = skips + skips2
+                d[name] = (failures, tries, skips)


class OutputChecker:
    """
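A rough sketch (not part of the diff; assumes Python 3.13+) of how the new summary message reads when a run has both failures and skipped examples:

import doctest

def sample():
    """
    >>> 1 + 1
    3
    >>> 2 + 2   # doctest: +SKIP
    4
    """

runner = doctest.DocTestRunner(verbose=False)
for test in doctest.DocTestFinder().find(sample):
    runner.run(test, out=lambda text: None)   # silence the per-example failure report
print(runner.summarize(verbose=False))
# Expected output, roughly:
#   **********************************************************************
#   1 items had failures:
#      1 of   2 in ...sample
#   ***Test Failed*** 1 failures and 1 skipped tests.
#   TestResults(failed=1, attempted=2, skipped=1)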
@@ -1984,7 +2004,8 @@ def testmod(m=None, name=None, globs=None, verbose=None,
    else:
        master.merge(runner)

-    return TestResults(runner.failures, runner.tries)
+    return TestResults(runner.failures, runner.tries, skipped=runner.skips)


def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,

@@ -2107,7 +2128,8 @@ def testfile(filename, module_relative=True, name=None, package=None,
    else:
        master.merge(runner)

-    return TestResults(runner.failures, runner.tries)
+    return TestResults(runner.failures, runner.tries, skipped=runner.skips)


def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
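Since testmod() and testfile() now forward runner.skips, module-level callers can read the skip count from the returned TestResults as well; a minimal sketch (assumes Python 3.13+):

import doctest

if __name__ == "__main__":
    results = doctest.testmod(verbose=False)
    print(f"failed={results.failed} attempted={results.attempted} "
          f"skipped={results.skipped}")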
Lib/test/test_doctest.py

@@ -748,6 +748,38 @@ and 'int' is a type.
    """


+class TestDocTest(unittest.TestCase):
+
+    def test_run(self):
+        test = '''
+            >>> 1 + 1
+            11
+            >>> 2 + 3 # doctest: +SKIP
+            "23"
+            >>> 5 + 7
+            57
+        '''
+
+        def myfunc():
+            pass
+        myfunc.__doc__ = test
+
+        # test DocTestFinder.run()
+        test = doctest.DocTestFinder().find(myfunc)[0]
+        with support.captured_stdout():
+            with support.captured_stderr():
+                results = doctest.DocTestRunner(verbose=False).run(test)
+
+        # test TestResults
+        self.assertIsInstance(results, doctest.TestResults)
+        self.assertEqual(results.failed, 2)
+        self.assertEqual(results.attempted, 3)
+        self.assertEqual(results.skipped, 1)
+        self.assertEqual(tuple(results), (2, 3))
+        x, y = results
+        self.assertEqual((x, y), (2, 3))
+
+
class TestDocTestFinder(unittest.TestCase):

    def test_issue35753(self):
Misc/NEWS.d entry (new file)

@@ -0,0 +1,3 @@
+The :meth:`doctest.DocTestRunner.run` method now counts the number of skipped
+tests. Add :attr:`doctest.DocTestRunner.skips` and
+:attr:`doctest.TestResults.skipped` attributes. Patch by Victor Stinner.