patman: test_util: Use unittest text runner to print test results
The Python tools' test utilities handle printing test results, but the
output is quite bare compared to an ordinary unittest run. Delegate
printing the results to a unittest text runner, which gives us niceties
like clear separation between each test's result and how long it took
to run the test suite.

Unfortunately, the text runner does not print info for skipped tests by
default, but this can be handled later by a custom test result subclass.
It also does not print the tool name; manually print a heading that
includes the tool name so that the outputs of each tool's tests are
distinguishable in the CI output.

Signed-off-by: Alper Nebi Yasak <alpernebiyasak@gmail.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
committed by Simon Glass
parent ce12c47b92
commit d8318feba1
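Before reading the diff, the pattern it introduces can be shown in miniature: print a heading so each tool's tests stand out in combined CI output, hand the suite to a text runner, and turn the returned result into an exit code. A minimal, self-contained sketch, where the ExampleTest case and the 'demo' tool name are hypothetical stand-ins rather than anything from this commit:

import sys
import unittest

class ExampleTest(unittest.TestCase):
    """Hypothetical stand-in for a real tool's test cases."""
    def test_addition(self):
        self.assertEqual(1 + 1, 2)

suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTest)

# A heading keeps this tool's tests distinguishable in combined CI logs
print(" Running demo tests ".center(70, "="))
# The text runner prints each test's result and how long the suite took
result = unittest.TextTestRunner(stream=sys.stdout, verbosity=1).run(suite)
sys.exit(0 if result.wasSuccessful() else 1)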
--- a/tools/binman/main.py
+++ b/tools/binman/main.py
@@ -13,7 +13,6 @@ import os
 import site
 import sys
 import traceback
-import unittest

 # Get the absolute path to this file at run-time
 our_path = os.path.dirname(os.path.realpath(__file__))
@@ -73,19 +72,18 @@ def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
     from binman import image_test
     import doctest

-    result = unittest.TestResult()
     test_name = args and args[0] or None

     # Run the entry tests first ,since these need to be the first to import the
     # 'entry' module.
-    test_util.run_test_suites(
-        result, debug, verbosity, test_preserve_dirs, processes, test_name,
+    result = test_util.run_test_suites(
+        'binman', debug, verbosity, test_preserve_dirs, processes, test_name,
         toolpath,
         [bintool_test.TestBintool, entry_test.TestEntry, ftest.TestFunctional,
          fdt_test.TestFdt, elf_test.TestElf, image_test.TestImage,
          cbfs_util_test.TestCbfs, fip_util_test.TestFip])

-    return test_util.report_result('binman', test_name, result)
+    return (0 if result.wasSuccessful() else 1)

 def RunTestCoverage(toolpath):
     """Run the tests and check that we get 100% coverage"""
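The binman hunk above establishes the calling convention that the buildman, dtoc, test_fdt and patman hunks below repeat: rather than constructing an empty unittest.TestResult and passing it in to be filled, each caller takes the result returned by run_test_suites() and derives its own exit status. A sketch of just that contract, with the helper stubbed out (run_test_suites_stub is hypothetical):

import unittest

def run_test_suites_stub(toolname):
    """Hypothetical stand-in for test_util.run_test_suites()."""
    suite = unittest.TestSuite()  # empty suite, for illustration only
    return unittest.TextTestRunner(verbosity=0).run(suite)

# New convention: consume the returned result; no shared mutable state
result = run_test_suites_stub('binman')
exit_code = 0 if result.wasSuccessful() else 1
print('exit code:', exit_code)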
--- a/tools/buildman/main.py
+++ b/tools/buildman/main.py
@@ -11,7 +11,6 @@ import multiprocessing
 import os
 import re
 import sys
-import unittest

 # Bring in the patman libraries
 our_path = os.path.dirname(os.path.realpath(__file__))
@@ -34,19 +33,18 @@ def RunTests(skip_net_tests, verboose, args):
     from buildman import test
     import doctest

-    result = unittest.TestResult()
     test_name = args and args[0] or None
     if skip_net_tests:
         test.use_network = False

     # Run the entry tests first ,since these need to be the first to import the
     # 'entry' module.
-    test_util.run_test_suites(
-        result, False, verboose, False, None, test_name, [],
+    result = test_util.run_test_suites(
+        'buildman', False, verboose, False, None, test_name, [],
         [test.TestBuild, func_test.TestFunctional,
          'buildman.toolchain', 'patman.gitutil'])

-    return test_util.report_result('buildman', test_name, result)
+    return (0 if result.wasSuccessful() else 1)

 options, args = cmdline.ParseArgs()

--- a/tools/dtoc/main.py
+++ b/tools/dtoc/main.py
@@ -24,7 +24,6 @@ see doc/driver-model/of-plat.rst
 from argparse import ArgumentParser
 import os
 import sys
-import unittest

 # Bring in the patman libraries
 our_path = os.path.dirname(os.path.realpath(__file__))
@@ -49,18 +48,18 @@ def run_tests(processes, args):
     from dtoc import test_src_scan
     from dtoc import test_dtoc

-    result = unittest.TestResult()
     sys.argv = [sys.argv[0]]
     test_name = args.files and args.files[0] or None

     test_dtoc.setup()

-    test_util.run_test_suites(
-        result, debug=True, verbosity=1, test_preserve_dirs=False,
+    result = test_util.run_test_suites(
+        toolname='dtoc', debug=True, verbosity=1, test_preserve_dirs=False,
         processes=processes, test_name=test_name, toolpath=[],
         class_and_module_list=[test_dtoc.TestDtoc,test_src_scan.TestSrcScan])

-    return test_util.report_result('binman', test_name, result)
+    return (0 if result.wasSuccessful() else 1)


 def RunTestCoverage():
     """Run the tests and check that we get 100% coverage"""
--- a/tools/dtoc/test_fdt.py
+++ b/tools/dtoc/test_fdt.py
@@ -784,13 +784,13 @@ def RunTests(args):
     Returns:
         Return code, 0 on success
     """
-    result = unittest.TestResult()
     test_name = args and args[0] or None
-    test_util.run_test_suites(
-        result, False, False, False, None, test_name, None,
+    result = test_util.run_test_suites(
+        'test_fdt', False, False, False, None, test_name, None,
         [TestFdt, TestNode, TestProp, TestFdtUtil])

-    return test_util.report_result('fdt', test_name, result)
+    return (0 if result.wasSuccessful() else 1)


 if __name__ != '__main__':
     sys.exit(1)
--- a/tools/patman/main.py
+++ b/tools/patman/main.py
@@ -12,7 +12,6 @@ import re
 import shutil
 import sys
 import traceback
-import unittest

 if __name__ == "__main__":
     # Allow 'from patman import xxx to work'
@@ -134,13 +133,12 @@ if args.cmd == 'test':
     import doctest
     from patman import func_test

-    result = unittest.TestResult()
-    test_util.run_test_suites(
-        result, False, False, False, None, None, None,
+    result = test_util.run_test_suites(
+        'patman', False, False, False, None, None, None,
         [test_checkpatch.TestPatch, func_test.TestFunctional,
          'gitutil', 'settings', 'terminal'])

-    sys.exit(test_util.report_result('patman', args.testname, result))
+    sys.exit(0 if result.wasSuccessful() else 1)

 # Process commits, produce patches files, check them, email them
 elif args.cmd == 'send':
--- a/tools/patman/test_util.py
+++ b/tools/patman/test_util.py
@@ -102,36 +102,12 @@ def capture_sys_output():
     sys.stdout, sys.stderr = old_out, old_err


-def report_result(toolname:str, test_name: str, result: unittest.TestResult):
-    """Report the results from a suite of tests
-
-    Args:
-        toolname: Name of the tool that ran the tests
-        test_name: Name of test that was run, or None for all
-        result: A unittest.TestResult object containing the results
-    """
-    print(result)
-    for test, err in result.errors:
-        print(test.id(), err)
-    for test, err in result.failures:
-        print(test.id(), err)
-    if result.skipped:
-        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
-              's' if len(result.skipped) > 1 else ''))
-        for skip_info in result.skipped:
-            print('%s: %s' % (skip_info[0], skip_info[1]))
-    if result.errors or result.failures:
-        print('%s tests FAILED' % toolname)
-        return 1
-    return 0
-
-
-def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
+def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
                     test_name, toolpath, class_and_module_list):
     """Run a series of test suites and collect the results

     Args:
-        result: A unittest.TestResult object to add the results to
+        toolname: Name of the tool that ran the tests
         debug: True to enable debugging, which shows a full stack trace on error
         verbosity: Verbosity level to use (0-4)
         test_preserve_dirs: True to preserve the input directory used by tests
@@ -145,11 +121,6 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
         class_and_module_list: List of test classes (type class) and module
             names (type str) to run
     """
-    for module in class_and_module_list:
-        if isinstance(module, str) and (not test_name or test_name == module):
-            suite = doctest.DocTestSuite(module)
-            suite.run(result)
-
     sys.argv = [sys.argv[0]]
     if debug:
         sys.argv.append('-D')
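The loop deleted here ran each module's doctests immediately against the caller-supplied result object. As the next hunk shows, the doctests are instead collected into the main suite with suite.addTests(doctest.DocTestSuite(module)), so the text runner reports them alongside the ordinary test cases. A self-contained sketch of that collection step, using a hypothetical add() function as the doctest carrier:

import doctest
import sys
import unittest

def add(a, b):
    """Add two numbers.

    >>> add(2, 3)
    5
    """
    return a + b

suite = unittest.TestSuite()
# Wrap this module's doctests as unittest cases and add them to the
# suite, mirroring suite.addTests(doctest.DocTestSuite(module)) above
suite.addTests(doctest.DocTestSuite(sys.modules[__name__]))
unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)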
@@ -161,6 +132,19 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,

     suite = unittest.TestSuite()
     loader = unittest.TestLoader()
+    runner = unittest.TextTestRunner(
+        stream=sys.stdout,
+        verbosity=(1 if verbosity is None else verbosity),
+    )
+
+    if use_concurrent and processes != 1:
+        suite = ConcurrentTestSuite(suite,
+                fork_for_tests(processes or multiprocessing.cpu_count()))
+
+    for module in class_and_module_list:
+        if isinstance(module, str) and (not test_name or test_name == module):
+            suite.addTests(doctest.DocTestSuite(module))
+
     for module in class_and_module_list:
         if isinstance(module, str):
             continue
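Two details of the added block are easy to miss. The suite is wrapped in ConcurrentTestSuite before any tests are added, so the doctests and test cases all end up inside the concurrent suite, and a verbosity of None is mapped to 1, unittest's default, where 0 prints only the summary, 1 a dot per test and 2 a line per test. A small sketch of what the three levels produce (the Demo case is hypothetical):

import io
import unittest

class Demo(unittest.TestCase):
    """Hypothetical two-test case, used only to show the output shapes."""
    def test_one(self):
        pass
    def test_two(self):
        pass

for verbosity in (0, 1, 2):
    suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
    stream = io.StringIO()
    unittest.TextTestRunner(stream=stream, verbosity=verbosity).run(suite)
    print(f'--- verbosity={verbosity} ---')
    print(stream.getvalue())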
@@ -179,9 +163,9 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
             suite.addTests(loader.loadTestsFromName(test_name, module))
         else:
             suite.addTests(loader.loadTestsFromTestCase(module))
-    if use_concurrent and processes != 1:
-        concurrent_suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count()))
-        concurrent_suite.run(result)
-    else:
-        suite.run(result)
+
+    print(f" Running {toolname} tests ".center(70, "="))
+    result = runner.run(suite)
+    print()
+
+    return result
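The commit message leaves printing skipped tests to a future custom test result subclass; TextTestRunner's resultclass argument is the hook for that. Purely as a hypothetical sketch of what such a subclass could look like, not something this commit adds:

import sys
import unittest

class SkipReportingResult(unittest.TextTestResult):
    """Hypothetical result class that lists skipped tests after the run."""
    def stopTestRun(self):
        super().stopTestRun()
        for test, reason in self.skipped:
            self.stream.writeln(f'SKIPPED {test.id()}: {reason}')

class Demo(unittest.TestCase):
    @unittest.skip('example skip reason')
    def test_skipped(self):
        pass

runner = unittest.TextTestRunner(stream=sys.stdout,
                                 resultclass=SkipReportingResult)
runner.run(unittest.TestLoader().loadTestsFromTestCase(Demo))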