blob: c27e0b39e5fa2a29802599d84677d690952a3656 [file] [log] [blame]
Simon Glassff1fd6c2018-07-06 10:27:23 -06001# SPDX-License-Identifier: GPL-2.0+
2#
3# Copyright (c) 2016 Google, Inc
4#
5
Simon Glassc3f94542018-07-06 10:27:34 -06006from contextlib import contextmanager
Simon Glass1d0f30e2022-01-22 05:07:28 -07007import doctest
Simon Glassff1fd6c2018-07-06 10:27:23 -06008import glob
Simon Glassce0dc2e2020-04-17 18:09:01 -06009import multiprocessing
Simon Glassff1fd6c2018-07-06 10:27:23 -060010import os
11import sys
Simon Glassce0dc2e2020-04-17 18:09:01 -060012import unittest
Simon Glassff1fd6c2018-07-06 10:27:23 -060013
Simon Glassbf776672020-04-17 18:09:04 -060014from patman import command
Simon Glassff1fd6c2018-07-06 10:27:23 -060015
Simon Glassc3a13cc2020-04-17 18:08:55 -060016from io import StringIO
Simon Glassc3f94542018-07-06 10:27:34 -060017
# Controls whether the test runner buffers each test's stdout/stderr until it
# finishes (failing tests can then have their captured output printed)
buffer_outputs = True

# True if the concurrencytest module is available, allowing tests to run in
# parallel across several processes
use_concurrent = True
try:
    from concurrencytest.concurrencytest import ConcurrentTestSuite
    from concurrencytest.concurrencytest import fork_for_tests
except ImportError:
    # concurrencytest is optional: fall back to running tests serially.
    # Catch only ImportError so SystemExit/KeyboardInterrupt and real bugs
    # in the module are not silently swallowed.
    use_concurrent = False
25
Simon Glassc3f94542018-07-06 10:27:34 -060026
def run_test_coverage(prog, filter_fname, exclude_list, build_dir, required=None,
                      extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of modules which must be in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the
            -t/test arg

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
                                         prog, extra_args or '', test_cmd))
    os.system(cmd)
    stdout = command.output('python3-coverage', 'report')
    lines = stdout.splitlines()
    # Start from success; set ok before the 'required' check so that a
    # missing-tests failure below is not clobbered (previously ok was
    # assigned True *after* this block, losing the failure)
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        # Work on a copy so the caller's 'required' set is not mutated
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    # The last line of the report holds the total coverage percentage
    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
Simon Glassc3f94542018-07-06 10:27:34 -060090
91
# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
#   ...do something...
@contextmanager
def capture_sys_output():
    """Temporarily redirect sys.stdout and sys.stderr into StringIO buffers

    Yields:
        tuple: (stdout buffer, stderr buffer), both io.StringIO, holding
            whatever was written while the context was active
    """
    saved_out, saved_err = sys.stdout, sys.stderr
    out_buf, err_buf = StringIO(), StringIO()
    try:
        sys.stdout, sys.stderr = out_buf, err_buf
        yield out_buf, err_buf
    finally:
        # Always restore the real streams, even if the body raised
        sys.stdout, sys.stderr = saved_out, saved_err
Simon Glassce0dc2e2020-04-17 18:09:01 -0600104
105
class FullTextTestResult(unittest.TextTestResult):
    """A test result class that can print extended text results to a stream

    Meant to be used by a TestRunner as its result class. Like
    TextTestResult, this prints the names of tests as they run and errors
    as they occur, with a summary at the end of the run. In addition it
    reports skipped tests, expected failures and unexpected successes.

    Args:
        stream: A file-like object to write results to
        descriptions (bool): True to print descriptions with test names
        verbosity (int): Detail of printed output per test as they run
            Test stdout and stderr always get printed when buffering
            them is disabled by the test runner. In addition to that,
            0: Print nothing
            1: Print a dot per test
            2: Print test names
            3: Print test names, and buffered outputs for failing tests
    """
    def __init__(self, stream, descriptions, verbosity):
        # Remember the requested verbosity so result hooks can decide
        # whether to mirror buffered output
        self.verbosity = verbosity
        super().__init__(stream, descriptions, verbosity)

    def _mirror_only_if_verbose(self):
        """Drop buffered stdout/stderr mirroring below verbosity 3"""
        self._mirrorOutput = self._mirrorOutput and self.verbosity >= 3

    def printErrors(self):
        "Called by TestRunner after test run to summarize the tests"
        # unexpectedSuccesses is a bare list of tests, unlike the other
        # result lists; reshape it into the (test, message) pairs that
        # printErrorList expects
        xpasses = [
            (test, 'Test was expected to fail, but succeeded.\n')
            for test in self.unexpectedSuccesses
        ]

        super().printErrors()  # FAIL and ERROR
        self.printErrorList('SKIP', self.skipped)
        self.printErrorList('XFAIL', self.expectedFailures)
        self.printErrorList('XPASS', xpasses)

    def addError(self, test, err):
        """Called when an error has occurred."""
        super().addError(test, err)
        self._mirror_only_if_verbose()

    def addFailure(self, test, err):
        """Called when a test has failed."""
        super().addFailure(test, err)
        self._mirror_only_if_verbose()

    def addSubTest(self, test, subtest, err):
        """Called at the end of a subtest."""
        super().addSubTest(test, subtest, err)
        self._mirror_only_if_verbose()

    def addSuccess(self, test):
        """Called when a test has completed successfully"""
        super().addSuccess(test)
        # Don't print stdout/stderr for successful tests
        self._mirrorOutput = False

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        # Add empty line to keep spacing consistent with other results
        if not reason.endswith('\n'):
            reason += '\n'
        super().addSkip(test, reason)
        self._mirror_only_if_verbose()

    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occurred."""
        super().addExpectedFailure(test, err)
        self._mirror_only_if_verbose()
Alper Nebi Yasakdd6b92b2022-04-02 20:06:07 +0300177
178
def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
                    test_name, toolpath, class_and_module_list):
    """Run a series of test suites and collect the results

    Args:
        toolname: Name of the tool that ran the tests
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        class_and_module_list: List of test classes (type class) and module
            names (type str) to run
    """
    # Rebuild sys.argv so the tools under test see our options
    argv = [sys.argv[0]]
    if debug:
        argv.append('-D')
    if verbosity:
        argv.append('-v%d' % verbosity)
    for path in toolpath or []:
        argv += ['--toolpath', path]
    sys.argv = argv

    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        verbosity=verbosity if verbosity is not None else 1,
        buffer=buffer_outputs,
        resultclass=FullTextTestResult,
    )

    if use_concurrent and processes != 1:
        # Fork one worker per CPU unless a process count was given
        suite = ConcurrentTestSuite(
            suite,
            fork_for_tests(processes or multiprocessing.cpu_count(),
                           buffer=buffer_outputs))

    # Module names (plain strings) contribute their doctests
    for item in class_and_module_list:
        if isinstance(item, str) and (not test_name or test_name == item):
            suite.addTests(doctest.DocTestSuite(item))

    for item in class_and_module_list:
        if isinstance(item, str):
            continue
        # Tell the test module about our arguments, if it is interested
        if hasattr(item, 'setup_test_args'):
            item.setup_test_args(
                preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            # Since Python v3.5 If an ImportError or AttributeError occurs
            # while traversing a name then a synthetic test that raises that
            # error when run will be returned. Check that the requested test
            # exists, otherwise these errors are included in the results.
            if test_name in loader.getTestCaseNames(item):
                suite.addTests(loader.loadTestsFromName(test_name, item))
        else:
            suite.addTests(loader.loadTestsFromTestCase(item))

    print(f" Running {toolname} tests ".center(70, "="))
    result = runner.run(suite)
    print()

    return result