blob: 2ba34479e035eab47c95316799871b6cd163001c [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001# SPDX-License-Identifier: GPL-2.0
Stephen Warrend2015062016-01-15 11:15:24 -07002# Copyright (c) 2015 Stephen Warren
3# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
Stephen Warrend2015062016-01-15 11:15:24 -07004
5# Implementation of pytest run-time hook functions. These are invoked by
6# pytest at certain points during operation, e.g. startup, for each executed
7# test, at shutdown etc. These hooks perform functions such as:
8# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
10# - Creating the U-Boot console test fixture.
11# - Creating the HTML log file.
12# - Monitoring each test's results.
13# - Implementing custom pytest markers.
14
import atexit
import configparser
import errno
import io
import os
import os.path
import re
import shutil
import sys

import pytest
from _pytest.runner import runtestprotocol
25
# Globals: The HTML log file, and the connection to the U-Boot console.
# Both are populated by pytest_configure() and closed in cleanup().
log = None
console = None
29
def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused due to already extant directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    # exist_ok=True ignores an already-existing directory but still raises
    # (FileExistsError, an OSError) when the final path exists as a
    # non-directory — the same behavior as the old manual EEXIST/isdir check.
    os.makedirs(path, exist_ok=True)
50
def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    # (flag strings, keyword arguments) for every custom option we register.
    option_specs = (
        (('--build-dir',),
         dict(default=None, help='U-Boot build directory (O=)')),
        (('--result-dir',),
         dict(default=None, help='U-Boot test result/tmp directory')),
        (('--persistent-data-dir',),
         dict(default=None,
              help='U-Boot test persistent generated data directory')),
        (('--board-type', '--bd', '-B'),
         dict(default='sandbox', help='U-Boot board type')),
        (('--board-identity', '--id'),
         dict(default='na', help='U-Boot board identity/instance')),
        (('--build',),
         dict(default=False, action='store_true',
              help='Compile U-Boot before running tests')),
        (('--buildman',),
         dict(default=False, action='store_true',
              help='Use buildman to build U-Boot (assuming --build is given)')),
        (('--gdbserver',),
         dict(default=None,
              help='Run sandbox under gdbserver. The argument is the channel '
                   'over which gdbserver should communicate, '
                   'e.g. localhost:1234')),
    )
    for flags, kwargs in option_specs:
        parser.addoption(*flags, **kwargs)
Stephen Warrend2015062016-01-15 11:15:24 -070078
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """
    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container

        Args:
            conf_file: Filename to load (within build_dir)

        Raises:
            Exception if the file does not exist
        """
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                            'try passing --build option?')

        # Kconfig/Makefile output is close enough to INI syntax that
        # configparser can read it once a dummy section header is prepended.
        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    global log
    global console
    global ubconfig

    # This file lives in test/py/, so the source tree root is two levels up.
    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    # '-' is not valid in Python module names; mangled names are used for the
    # board/boardenv module imports below.
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    # Each directory option falls back to a default derived from the
    # previous one, and is created if missing.
    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    # Deferred import: multiplexed_log is a sibling module of this file.
    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if config.getoption('buildman'):
            if build_dir != source_dir:
                dest_args = ['-o', build_dir, '-w']
            else:
                # In-tree build.
                dest_args = ['-i']
            cmds = (['buildman', '--board', board_type] + dest_args,)
            name = 'buildman'
        else:
            if build_dir != source_dir:
                o_opt = 'O=%s' % build_dir
            else:
                o_opt = ''
            cmds = (
                ['make', o_opt, '-s', board_type + '_defconfig'],
                ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())],
            )
            name = 'make'

        # Run the build command(s), teeing their output into the HTML log.
        with log.section(name):
            runner = log.get_runner(name, sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    # Optional per-board and per-identity configuration modules; later
    # entries can override earlier ones, and missing modules are fine.
    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build which leaves it in include/autoconf.mk
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    # Publish the configuration via the environment so processes the tests
    # spawn can see it too.
    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    # Sandbox targets run U-Boot as a local subprocess; anything else is
    # driven through external exec/attach scripts.
    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
228
# Matches symbol-table lines naming linker-list unit test entries, capturing
# the test suite name and the individual test name.
re_ut_test_list = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_test_2_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name, sym_path):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.
        sym_path: Relative path to the symbol file with preceding '/'
            (e.g. '/u-boot.sym')

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + sym_path
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except OSError:
        # A missing or unreadable symbol file simply means no subtests;
        # only catch I/O errors rather than the previous bare 'except',
        # which also swallowed things like KeyboardInterrupt.
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
263
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        # Prefer an explicit per-value ID when the value supplies one; fall
        # back to a generated name. Catch only the lookup failures a non-dict
        # or id-less value can raise — the previous bare 'except' also
        # swallowed KeyboardInterrupt/SystemExit.
        try:
            return val['fixture_id']
        except (TypeError, KeyError, IndexError):
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
307
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """
    for fixture in metafunc.fixturenames:
        if fixture == 'ut_subtest':
            # Main U-Boot binary unit tests.
            generate_ut_subtest(metafunc, fixture, '/u-boot.sym')
        else:
            xpl_match = re.match('ut_(.)pl_subtest', fixture)
            if xpl_match:
                # SPL/TPL/... unit tests; the phase letter selects the
                # matching symbol file.
                phase = xpl_match.group(1)
                generate_ut_subtest(
                    metafunc, fixture, f'/{phase}pl/u-boot-{phase}pl.sym')
            else:
                # Fall back to brd__/env__ configuration-driven fixtures.
                generate_config(metafunc, fixture)
Stephen Warrend2015062016-01-15 11:15:24 -0700332
@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value: the session-wide log object (console.log).
    """

    return console.log
345
@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value: the session-wide configuration (console.config).
    """

    return console.config
358
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    # Make sure the U-Boot process is running before the test touches the
    # console (function scope: checked for every test).
    console.ensure_spawned()
    return console
372
# Map from test name to the HTML log anchor of that test's section, so the
# final status report in cleanup() can link to each test's output.
anchors = {}
# Buckets recording each collected test's name by eventual outcome. Tests
# start in tests_not_run (see pytest_itemcollected) and are moved to exactly
# one outcome list by pytest_runtest_protocol().
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []
Stephen Warrend2015062016-01-15 11:15:24 -0700381
def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    # Every test starts out "not run"; pytest_runtest_protocol() moves it to
    # the appropriate outcome list once it has executed.
    tests_not_run.append(item.name)
Stephen Warrend2015062016-01-15 11:15:24 -0700396
def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            # For each non-empty result category (in fixed severity order),
            # emit one summary line plus one linked line per test. This
            # replaces six copy-pasted stanzas with a single table.
            categories = (
                (tests_warning, log.status_warning, '%d passed with warning'),
                (tests_skipped, log.status_skipped, '%d skipped'),
                (tests_xpassed, log.status_xpass, '%d xpass'),
                (tests_xfailed, log.status_xfail, '%d xfail'),
                (tests_failed, log.status_fail, '%d failed'),
                (tests_not_run, log.status_fail, '%d not run'),
            )
            for test_list, emit, summary_fmt in categories:
                if not test_list:
                    continue
                emit(summary_fmt % len(test_list))
                for test in test_list:
                    # anchors may lack an entry if the test never started.
                    emit('... ' + test, anchors.get(test, None))
        log.close()
# Emit the status report and close the log/console when the process exits.
atexit.register(cleanup)
448
def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    supported = []
    for marker in item.iter_markers('boardspec'):
        spec = marker.args[0]
        if not spec.startswith('!'):
            supported.append(spec)
            continue
        # A '!board' entry explicitly excludes one board type.
        if ubconfig.board_type == spec[1:]:
            pytest.skip('board "%s" not supported' % ubconfig.board_type)
            return
    # Positive entries form a whitelist; if any were given, the current
    # board must be on it.
    if supported and ubconfig.board_type not in supported:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
Stephen Warrend2015062016-01-15 11:15:24 -0700474
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on an U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    def enabled(feature):
        # A feature counts as enabled when its config_xxx key holds a
        # non-empty value in the parsed build configuration.
        return ubconfig.buildconfig.get('config_' + feature.lower(), None)

    for marker in item.iter_markers('buildconfigspec'):
        feature = marker.args[0]
        if not enabled(feature):
            pytest.skip('.config feature "%s" not enabled' % feature.lower())
    for marker in item.iter_markers('notbuildconfigspec'):
        feature = marker.args[0]
        if enabled(feature):
            pytest.skip('.config feature "%s" enabled' % feature.lower())
Stephen Warrend2015062016-01-15 11:15:24 -0700497
def tool_is_in_path(tool):
    """Check whether an executable tool is reachable via $PATH.

    Args:
        tool: Name of the executable to look for.

    Returns:
        True if an executable with that name exists in a $PATH directory,
        False otherwise.
    """

    # shutil.which() performs the same per-$PATH-entry existence and
    # execute-permission check the previous manual loop did.
    return shutil.which(tool) is not None
504
def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    for tool in (marker.args[0] for marker in item.iter_markers('requiredtool')):
        if tool_is_in_path(tool):
            continue
        pytest.skip('tool "%s" not in $PATH' % tool)
523
def start_test_section(item):
    # Open a per-test section in the HTML log and remember its anchor so the
    # final status report can link back to this test's output.
    anchors[item.name] = log.start_section(item.name)
526
Stephen Warrend2015062016-01-15 11:15:24 -0700527def pytest_runtest_setup(item):
Stephen Warrene8debf32016-01-26 13:41:30 -0700528 """pytest hook: Configure (set up) a test item.
Stephen Warrend2015062016-01-15 11:15:24 -0700529
530 Called once for each test to perform any custom configuration. This hook
531 is used to skip the test if certain conditions apply.
532
533 Args:
534 item: The pytest test item.
535
536 Returns:
537 Nothing.
Stephen Warrene8debf32016-01-26 13:41:30 -0700538 """
Stephen Warrend2015062016-01-15 11:15:24 -0700539
Stephen Warrenb0a928a2016-10-17 17:25:52 -0600540 start_test_section(item)
Stephen Warrend2015062016-01-15 11:15:24 -0700541 setup_boardspec(item)
542 setup_buildconfigspec(item)
Stephen Warren2d26bf62017-09-18 11:11:49 -0600543 setup_requiredtool(item)
Stephen Warrend2015062016-01-15 11:15:24 -0700544
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    # Discard any warning state left over from a previous test.
    log.get_and_reset_warning()
    ihook = item.ihook
    ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    reports = runtestprotocol(item, nextitem=nextitem)
    ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if not item.name in anchors:
        start_test_section(item)

    # Start from the optimistic outcome, then let the per-phase reports
    # below override it. failure_cleanup triggers console recovery actions.
    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    # Scan the setup/call/teardown reports; the first failed or skipped
    # phase decides the test's final category.
    for report in reports:
        if report.outcome == 'failed':
            # A "failed" xfail-marked test actually passed unexpectedly.
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            # A "skipped" xfail-marked test failed as expected.
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            # Plain skip: keep scanning in case a later phase failed.
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        # Capture any pending console output into the log after a failure.
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        # Restart the U-Boot process so the next test starts from a clean
        # console state.
        console.cleanup_spawn()

    return True