# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import errno
import os
import os.path
import pexpect
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by already-existing directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')

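# For illustration, a typical invocation combining these options might look
# like this (arguments hypothetical):
#   ./test.py --bd sandbox --build --build-dir /tmp/u-boot-sandbox
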
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
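        # With the default sandbox board and a build dir under the source
        # tree, these commands are effectively (paths illustrative):
        #   make O=<source>/build-sandbox -s sandbox_defconfig
        #   make O=<source>/build-sandbox -s -j8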
        runner = log.get_runner('make', sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

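    # Board config/environment files are plain Python modules; for example, a
    # hypothetical u_boot_boardenv_sandbox.py containing:
    #   env__example_value = 42
    # would make that value available as ubconfig.env['env__example_value'].
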
    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

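    # For example, a .config line such as "CONFIG_CMD_MEMORY=y" ends up as
    # ubconfig.buildconfig['config_cmd_memory'] == 'y', since RawConfigParser
    # lower-cases option names by default.
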
    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

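    # Exporting these (e.g. U_BOOT_BOARD_TYPE, U_BOOT_BUILD_DIR) lets any
    # external processes spawned during testing discover the test
    # configuration from their environment.
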
    if board_type == 'sandbox':
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

192def pytest_generate_tests(metafunc):
Stephen Warrene8debf32016-01-26 13:41:30 -0700193 """pytest hook: parameterize test functions based on custom rules.
Stephen Warrend2015062016-01-15 11:15:24 -0700194
195 If a test function takes parameter(s) (fixture names) of the form brd__xxx
196 or env__xxx, the brd and env configuration dictionaries are consulted to
197 find the list of values to use for those parameters, and the test is
198 parametrized so that it runs once for each combination of values.
199
200 Args:
201 metafunc: The pytest test function.
202
203 Returns:
204 Nothing.
Stephen Warrene8debf32016-01-26 13:41:30 -0700205 """
Stephen Warrend2015062016-01-15 11:15:24 -0700206
207 subconfigs = {
208 'brd': console.config.brd,
209 'env': console.config.env,
210 }
211 for fn in metafunc.fixturenames:
212 parts = fn.split('__')
213 if len(parts) < 2:
214 continue
215 if parts[0] not in subconfigs:
216 continue
217 subconfig = subconfigs[parts[0]]
218 vals = []
219 val = subconfig.get(fn, [])
220 # If that exact name is a key in the data source:
221 if val:
222 # ... use the dict value as a single parameter value.
223 vals = (val, )
224 else:
225 # ... otherwise, see if there's a key that contains a list of
226 # values to use instead.
227 vals = subconfig.get(fn + 's', [])
228 metafunc.parametrize(fn, vals)
229
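# For example (hypothetical key names): if a test declares a fixture parameter
# env__net_dev, the board environment may supply a single value:
#   env__net_dev = {'device': 'eth0'}
# or, via the plural key, a list of values, running the test once per entry:
#   env__net_devs = [{'device': 'eth0'}, {'device': 'eth1'}]
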
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console

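# A minimal example test using this fixture (test name hypothetical; the
# console object is assumed to provide a run_command() method):
#   def test_version(u_boot_console):
#       u_boot_console.run_command('version')
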
tests_not_run = set()
tests_failed = set()
tests_skipped = set()
tests_passed = set()

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.add(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        log.status_pass('%d passed' % len(tests_passed))
        if tests_skipped:
            log.status_skipped('%d skipped' % len(tests_skipped))
            for test in tests_skipped:
                log.status_skipped('... ' + test)
        if tests_failed:
            log.status_fail('%d failed' % len(tests_failed))
            for test in tests_failed:
                log.status_fail('... ' + test)
        if tests_not_run:
            log.status_fail('%d not run' % len(tests_not_run))
            for test in tests_not_run:
                log.status_fail('... ' + test)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')

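# For example, a test can be restricted to, or excluded from, specific board
# types (board names hypothetical):
#   @pytest.mark.boardspec('sandbox')    # run only on the sandbox board
#   @pytest.mark.boardspec('!seaboard')  # run on any board except seaboard
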
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')

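# For example, to require that CONFIG_CMD_MEMORY is enabled in the U-Boot
# .config:
#   @pytest.mark.buildconfigspec('cmd_memory')
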
def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)
    failed = None
    skipped = None
    for report in reports:
        if report.outcome == 'failed':
            failed = report
            break
        if report.outcome == 'skipped':
            if not skipped:
                skipped = report

    if failed:
        console.drain_console()
        tests_failed.add(item.name)
    elif skipped:
        tests_skipped.add(item.name)
    else:
        tests_passed.add(item.name)
    tests_not_run.remove(item.name)

    try:
        if failed:
            msg = 'FAILED:\n' + str(failed.longrepr)
            log.status_fail(msg)
        elif skipped:
            msg = 'SKIPPED:\n' + str(skipped.longrepr)
            log.status_skipped(msg)
        else:
            log.status_pass('OK')
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this message won't be seen. However, once that
        # issue is fixed, if this exception still occurs, it will then be
        # logged as part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failed:
        console.cleanup_spawn()

    return reports