Mirror of https://github.com/python/cpython.git, synced 2024-11-25 02:44:06 +08:00, commit b48af54ff7
svn+ssh://pythondev@svn.python.org/python/trunk
........
r79464 | michael.foord | 2010-03-27 07:55:19 -0500 (Sat, 27 Mar 2010) | 1 line
A fix for running unittest tests on platforms without the audioop module (e.g. jython and IronPython)
........
r79471 | michael.foord | 2010-03-27 14:10:11 -0500 (Sat, 27 Mar 2010) | 4 lines
Addition of delta keyword argument to unittest.TestCase.assertAlmostEquals and assertNotAlmostEquals
This allows the comparison of objects by specifying a maximum difference; this includes the comparing of non-numeric objects that don't support rounding.
........
r79623 | michael.foord | 2010-04-02 16:42:47 -0500 (Fri, 02 Apr 2010) | 1 line
Addition of -b command line option to unittest for buffering stdout and stderr during test runs.
........
r79626 | michael.foord | 2010-04-02 17:08:29 -0500 (Fri, 02 Apr 2010) | 1 line
TestResult stores original sys.stdout and tests no longer use sys.__stdout__ (etc) in tests for unittest -b command line option
........
r79630 | michael.foord | 2010-04-02 17:30:56 -0500 (Fri, 02 Apr 2010) | 1 line
unittest tests no longer replace the sys.stdout put in place by regrtest
........
r79632 | michael.foord | 2010-04-02 17:55:59 -0500 (Fri, 02 Apr 2010) | 1 line
Issue #8038: Addition of unittest.TestCase.assertNotRegexpMatches
........
r79643 | michael.foord | 2010-04-02 20:15:21 -0500 (Fri, 02 Apr 2010) | 1 line
Support dotted module names for test discovery paths in unittest. Issue 8038.
........
r79648 | michael.foord | 2010-04-02 21:21:39 -0500 (Fri, 02 Apr 2010) | 1 line
Cross platform unittest.TestResult newline handling when buffering stdout / stderr.
........
r79649 | michael.foord | 2010-04-02 21:33:55 -0500 (Fri, 02 Apr 2010) | 1 line
Another attempt at a fix for unittest.test.test_result for windows line endings
........
r79679 | michael.foord | 2010-04-03 09:52:18 -0500 (Sat, 03 Apr 2010) | 1 line
Adding -b command line option to the unittest usage message.
........
r79685 | michael.foord | 2010-04-03 10:20:00 -0500 (Sat, 03 Apr 2010) | 1 line
Minor tweak to unittest command line usage message
........
r79711 | michael.foord | 2010-04-03 12:03:11 -0500 (Sat, 03 Apr 2010) | 1 line
Documenting new features in unittest
........
r79761 | michael.foord | 2010-04-04 17:41:54 -0500 (Sun, 04 Apr 2010) | 1 line
unittest documentation formatting changes
........
r79774 | michael.foord | 2010-04-04 18:28:44 -0500 (Sun, 04 Apr 2010) | 1 line
Adding documentation for new unittest.main() parameters
........
r79777 | michael.foord | 2010-04-04 19:39:50 -0500 (Sun, 04 Apr 2010) | 1 line
Document signal handling functions in unittest.rst
........
r79792 | michael.foord | 2010-04-05 05:26:26 -0500 (Mon, 05 Apr 2010) | 1 line
Documentation fixes for unittest
........
r79793 | michael.foord | 2010-04-05 05:28:27 -0500 (Mon, 05 Apr 2010) | 1 line
Furterh documentation fix for unittest.rst
........
r79794 | michael.foord | 2010-04-05 05:30:14 -0500 (Mon, 05 Apr 2010) | 1 line
Further documentation fix for unittest.rst
........
r79877 | michael.foord | 2010-04-06 18:18:16 -0500 (Tue, 06 Apr 2010) | 1 line
Fix module directory finding logic for dotted paths in unittest test discovery.
........
r79898 | michael.foord | 2010-04-07 18:04:22 -0500 (Wed, 07 Apr 2010) | 1 line
unittest.result.TestResult does not create its buffers until they're used. It uses StringIO not cStringIO. Issue 8333.
........
r79899 | michael.foord | 2010-04-07 19:04:24 -0500 (Wed, 07 Apr 2010) | 1 line
Switch regrtest to use StringIO instead of cStringIO for test_multiprocessing on Windows. Issue 8333.
........
r79900 | michael.foord | 2010-04-07 23:33:20 -0500 (Wed, 07 Apr 2010) | 1 line
Correction of unittest documentation typos and omissions
........
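A minimal sketch of the two headline changes above, the delta keyword and the -b buffering option, assuming Python 2.7's unittest (the class and module names below are illustrative, not from the source):

    import datetime
    import unittest

    class DeltaExample(unittest.TestCase):
        def test_times_are_close(self):
            a = datetime.datetime(2010, 4, 7, 12, 0, 0)
            b = datetime.datetime(2010, 4, 7, 12, 0, 30)
            # Passes when abs(a - b) <= delta; the operands only need to
            # support subtraction, not rounding, so datetimes work too.
            self.assertAlmostEqual(a, b, delta=datetime.timedelta(minutes=1))

    # Buffer stdout/stderr during the run; captured output is replayed
    # only for failing tests (the -b option added in r79623):
    #     python -m unittest -b path.to.test_module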
196 lines
6.3 KiB
Python
"""Running tests"""
|
|
|
|
import sys
|
|
import time
|
|
|
|
from . import result
|
|
from .signals import registerResult
|
|
|
|
__unittest = True
|
|
|
|
|
|
class _WritelnDecorator(object):
    """Used to decorate file-like objects with a handy 'writeln' method"""
    def __init__(self, stream):
        self.stream = stream

    def __getattr__(self, attr):
        if attr in ('stream', '__getstate__'):
            raise AttributeError(attr)
        return getattr(self.stream, attr)

    def writeln(self, arg=None):
        if arg:
            self.write(arg)
        self.write('\n') # text-mode streams translate to \r\n if needed

class TextTestResult(result.TestResult):
    """A test result class that can print formatted text results to a stream.

    Used by TextTestRunner.
    """
    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream, descriptions, verbosity):
        super(TextTestResult, self).__init__()
        self.stream = stream
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions

    def getDescription(self, test):
        doc_first_line = test.shortDescription()
        if self.descriptions and doc_first_line:
            return '\n'.join((str(test), doc_first_line))
        else:
            return str(test)

    def startTest(self, test):
        super(TextTestResult, self).startTest(test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()

    def addSuccess(self, test):
        super(TextTestResult, self).addSuccess(test)
        if self.showAll:
            self.stream.writeln("ok")
        elif self.dots:
            self.stream.write('.')
            self.stream.flush()

    def addError(self, test, err):
        super(TextTestResult, self).addError(test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
        elif self.dots:
            self.stream.write('E')
            self.stream.flush()

    def addFailure(self, test, err):
        super(TextTestResult, self).addFailure(test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
        elif self.dots:
            self.stream.write('F')
            self.stream.flush()

    def addSkip(self, test, reason):
        super(TextTestResult, self).addSkip(test, reason)
        if self.showAll:
            self.stream.writeln("skipped {0!r}".format(reason))
        elif self.dots:
            self.stream.write("s")
            self.stream.flush()

    def addExpectedFailure(self, test, err):
        super(TextTestResult, self).addExpectedFailure(test, err)
        if self.showAll:
            self.stream.writeln("expected failure")
        elif self.dots:
            self.stream.write("x")
            self.stream.flush()

    def addUnexpectedSuccess(self, test):
        super(TextTestResult, self).addUnexpectedSuccess(test)
        if self.showAll:
            self.stream.writeln("unexpected success")
        elif self.dots:
            self.stream.write("u")
            self.stream.flush()

    def printErrors(self):
        if self.dots or self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)

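# --- Illustrative sketch, not part of the original module -----------------
# TextTestResult can be subclassed to customise how each outcome is shown;
# a minimal example using only the hooks defined above (the class name is
# hypothetical):
#
#     class TimestampedTextTestResult(TextTestResult):
#         def startTest(self, test):
#             if self.showAll:
#                 self.stream.write(time.strftime("[%H:%M:%S] "))
#             super(TimestampedTextTestResult, self).startTest(test)
#
# Pass such a class to TextTestRunner below via its resultclass argument.
# ---------------------------------------------------------------------------
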
class TextTestRunner(object):
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    resultclass = TextTestResult

    def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
                 failfast=False, buffer=False, resultclass=None):
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.failfast = failfast
        self.buffer = buffer
        if resultclass is not None:
            self.resultclass = resultclass

    def _makeResult(self):
        return self.resultclass(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        startTime = time.time()
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()
        try:
            test(result)
        finally:
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
        stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()

        # Older TestResult objects may not track skips and expected
        # failures; fall back to zero counts if the attributes are missing.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,
                                result.skipped))
            expectedFails, unexpectedSuccesses, skipped = results
        except AttributeError:
            pass

        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        return result
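A minimal usage sketch for the runner above (my_tests is a hypothetical module name):

    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromName('my_tests')
    # failfast and buffer mirror the -f and -b command line options.
    runner = unittest.TextTestRunner(verbosity=2, failfast=True, buffer=True)
    result = runner.run(suite)
    # result.wasSuccessful() reflects the OK/FAILED summary printed above.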