Add a new option --clean_build to validate_failures.py
This is useful when you have two builds of the same compiler: one with
your changes, the other a clean build at the same revision. Instead of
using a manifest file, --clean_build will compare the results it
gathers from the patched build against those it gathers from the clean
build.

Usage:

$ cd /top/of/patched/gcc/bld
$ validate_failures.py --clean_build=clean/bld-gcc
Source directory: /usr/local/google/home/dnovillo/gcc/trunk
Build target: x86_64-unknown-linux-gnu
Getting actual results from build directory .
	./x86_64-unknown-linux-gnu/libstdc++-v3/testsuite/libstdc++.sum
	./x86_64-unknown-linux-gnu/libffi/testsuite/libffi.sum
	./x86_64-unknown-linux-gnu/libgomp/testsuite/libgomp.sum
	./x86_64-unknown-linux-gnu/libgo/libgo.sum
	./x86_64-unknown-linux-gnu/boehm-gc/testsuite/boehm-gc.sum
	./x86_64-unknown-linux-gnu/libatomic/testsuite/libatomic.sum
	./x86_64-unknown-linux-gnu/libmudflap/testsuite/libmudflap.sum
	./x86_64-unknown-linux-gnu/libitm/testsuite/libitm.sum
	./x86_64-unknown-linux-gnu/libjava/testsuite/libjava.sum
	./gcc/testsuite/g++/g++.sum
	./gcc/testsuite/gnat/gnat.sum
	./gcc/testsuite/ada/acats/acats.sum
	./gcc/testsuite/gcc/gcc.sum
	./gcc/testsuite/gfortran/gfortran.sum
	./gcc/testsuite/obj-c++/obj-c++.sum
	./gcc/testsuite/go/go.sum
	./gcc/testsuite/objc/objc.sum
Getting actual results from build directory clean/bld-gcc
	clean/bld-gcc/x86_64-unknown-linux-gnu/libstdc++-v3/testsuite/libstdc++.sum
	clean/bld-gcc/x86_64-unknown-linux-gnu/libffi/testsuite/libffi.sum
	clean/bld-gcc/x86_64-unknown-linux-gnu/libgomp/testsuite/libgomp.sum
	clean/bld-gcc/x86_64-unknown-linux-gnu/libgo/libgo.sum
	clean/bld-gcc/x86_64-unknown-linux-gnu/boehm-gc/testsuite/boehm-gc.sum
	clean/bld-gcc/x86_64-unknown-linux-gnu/libatomic/testsuite/libatomic.sum
	clean/bld-gcc/x86_64-unknown-linux-gnu/libmudflap/testsuite/libmudflap.sum
	clean/bld-gcc/x86_64-unknown-linux-gnu/libitm/testsuite/libitm.sum
	clean/bld-gcc/x86_64-unknown-linux-gnu/libjava/testsuite/libjava.sum
	clean/bld-gcc/gcc/testsuite/g++/g++.sum
	clean/bld-gcc/gcc/testsuite/gnat/gnat.sum
	clean/bld-gcc/gcc/testsuite/ada/acats/acats.sum
	clean/bld-gcc/gcc/testsuite/gcc/gcc.sum
	clean/bld-gcc/gcc/testsuite/gfortran/gfortran.sum
	clean/bld-gcc/gcc/testsuite/obj-c++/obj-c++.sum
	clean/bld-gcc/gcc/testsuite/go/go.sum
	clean/bld-gcc/gcc/testsuite/objc/objc.sum
SUCCESS: No unexpected failures.

2012-11-02  Diego Novillo  <dnovillo@google.com>

	* testsuite-management/validate_failures.py: Add option
	--clean_build to compare test results against another
	build.

From-SVN: r193105
This commit is contained in:
parent 73ddf95bf1
commit b436bf3805
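
The long directory listings in the commit message above are the .sum
summary files that DejaGnu writes for each testsuite; the script
collects one such set per build directory. CollectSumFiles itself is
not part of this diff; as a rough sketch, assuming an os.walk-based
scan (the function name and body here are a hypothetical stand-in, not
the script's actual code):

    import os

    def collect_sum_files(build_dir):
        # Hypothetical stand-in for the script's CollectSumFiles:
        # walk the build tree and gather DejaGnu .sum summary files.
        sum_files = []
        for root, dirs, files in os.walk(build_dir):
            sum_files.extend(os.path.join(root, name)
                             for name in files if name.endswith('.sum'))
        return sum_files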
contrib/ChangeLog
@@ -1,3 +1,9 @@
+2012-11-02  Diego Novillo  <dnovillo@google.com>
+
+	* testsuite-management/validate_failures.py: Add option
+	--clean_build to compare test results against another
+	build.
+
 2012-10-31  Diego Novillo  <dnovillo@google.com>
 
 	* testsuite-management/validate_failures.py: Fix parsing
contrib/testsuite-management/validate_failures.py
@@ -292,7 +292,7 @@ def PrintSummary(msg, summary):
 
 def GetSumFiles(results, build_dir):
   if not results:
-    print 'Getting actual results from build'
+    print 'Getting actual results from build directory %s' % build_dir
     sum_files = CollectSumFiles(build_dir)
   else:
     print 'Getting actual results from user-provided results'
@@ -300,6 +300,27 @@ def GetSumFiles(results, build_dir):
   return sum_files
 
 
+def PerformComparison(expected, actual, ignore_missing_failures):
+  actual_vs_expected, expected_vs_actual = CompareResults(expected, actual)
+
+  tests_ok = True
+  if len(actual_vs_expected) > 0:
+    PrintSummary('Unexpected results in this build (new failures)',
+                 actual_vs_expected)
+    tests_ok = False
+
+  if not ignore_missing_failures and len(expected_vs_actual) > 0:
+    PrintSummary('Expected results not present in this build (fixed tests)'
+                 '\n\nNOTE: This is not a failure. It just means that these '
+                 'tests were expected\nto fail, but they worked in this '
+                 'configuration.\n', expected_vs_actual)
+
+  if tests_ok:
+    print '\nSUCCESS: No unexpected failures.'
+
+  return tests_ok
+
+
 def CheckExpectedResults(options):
   if not options.manifest:
     (srcdir, target, valid_build) = GetBuildData(options)
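
PerformComparison factors out the comparison logic that previously
lived only in CheckExpectedResults, so the new clean-build mode below
can reuse it. As a minimal standalone sketch of the semantics, with
plain sets of failure strings standing in for the script's result
objects (CompareResults itself is not shown in this diff, and the test
names here are invented):

    expected = set(['FAIL: gcc.dg/foo.c', 'FAIL: gcc.dg/bar.c'])
    actual = set(['FAIL: gcc.dg/foo.c', 'FAIL: gcc.dg/baz.c'])

    actual_vs_expected = actual - expected  # new failures; the run fails
    expected_vs_actual = expected - actual  # fixed tests; informational only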
@@ -320,24 +341,7 @@ def CheckExpectedResults(options):
     PrintSummary('Tests expected to fail', manifest)
     PrintSummary('\nActual test results', actual)
 
-  actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)
-
-  tests_ok = True
-  if len(actual_vs_manifest) > 0:
-    PrintSummary('Build results not in the manifest', actual_vs_manifest)
-    tests_ok = False
-
-  if not options.ignore_missing_failures and len(manifest_vs_actual) > 0:
-    PrintSummary('Manifest results not present in the build'
-                 '\n\nNOTE: This is not a failure. It just means that the '
-                 'manifest expected\nthese tests to fail, '
-                 'but they worked in this configuration.\n',
-                 manifest_vs_actual)
-
-  if tests_ok:
-    print '\nSUCCESS: No unexpected failures.'
-
-  return tests_ok
+  return PerformComparison(manifest, actual, options.ignore_missing_failures)
 
 
 def ProduceManifest(options):
@@ -361,6 +365,20 @@ def ProduceManifest(options):
   return True
 
 
+def CompareBuilds(options):
+  (srcdir, target, valid_build) = GetBuildData(options)
+  if not valid_build:
+    return False
+
+  sum_files = GetSumFiles(options.results, options.build_dir)
+  actual = GetResults(sum_files)
+
+  clean_sum_files = GetSumFiles(None, options.clean_build)
+  clean = GetResults(clean_sum_files)
+
+  return PerformComparison(clean, actual, options.ignore_missing_failures)
 
 
 def Main(argv):
   parser = optparse.OptionParser(usage=__doc__)
 
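
CompareBuilds gathers two sets of .sum results, one from the build
under test (options.build_dir, '.' by default) and one from the clean
build, then treats the clean build's failures as the expected set.
This is the code path exercised by the invocation shown in the commit
message:

    $ cd /top/of/patched/gcc/bld
    $ validate_failures.py --clean_build=clean/bld-gcc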
@@ -368,6 +386,14 @@ def Main(argv):
   parser.add_option('--build_dir', action='store', type='string',
                     dest='build_dir', default='.',
                     help='Build directory to check (default = .)')
+  parser.add_option('--clean_build', action='store', type='string',
+                    dest='clean_build', default=None,
+                    help='Compare test results from this build against '
+                    'those of another (clean) build. Use this option '
+                    'when comparing the test results of your patch versus '
+                    'the test results of a clean build without your patch. '
+                    'You must provide the path to the top directory of your '
+                    'clean build.')
   parser.add_option('--force', action='store_true', dest='force',
                     default=False, help='When used with --produce_manifest, '
                     'it will overwrite an existing manifest file '
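
The multi-line help= value relies on Python's compile-time
concatenation of adjacent string literals; the trailing space inside
each fragment is what keeps the words separated when the pieces are
joined. A tiny standalone illustration (the variable name here is
ours, not the script's):

    help_text = ('You must provide the path to the top '
                 'directory of your clean build.')
    assert help_text == 'You must provide the path to the top directory of your clean build.'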
@@ -400,6 +426,8 @@ def Main(argv):
 
   if options.produce_manifest:
     retval = ProduceManifest(options)
+  elif options.clean_build:
+    retval = CompareBuilds(options)
   else:
     retval = CheckExpectedResults(options)
 
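
As wired above, --produce_manifest takes precedence over --clean_build,
and the default manifest-based check runs only when neither option is
given.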