xref: /openbmc/u-boot/test/py/conftest.py (revision dffceb4b)
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import errno
import os
import os.path
import pexpect
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by directories that already exist are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        # Only pass O= to make for out-of-tree builds; otherwise an empty
        # string would be passed to make as a bogus extra argument.
        if build_dir != source_dir:
            o_opt = ['O=%s' % build_dir]
        else:
            o_opt = []
        cmds = (
            ['make'] + o_opt + ['-s', board_type + '_defconfig'],
            ['make'] + o_opt + ['-s', '-j8'],
        )
        runner = log.get_runner('make', sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

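    # Pull in board configuration from optional Python modules on sys.path.
    # Each module's namespace is update()d into the target dict, so later
    # (more specific) modules override earlier ones.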
    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

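    # Both .config and autoconf.mk are plain key=value files. ConfigParser
    # refuses to parse files without a section header, so prepend a dummy
    # [root] section before handing the content to it.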
    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity

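    # Export the same values into the environment, so that any external
    # helper scripts invoked during testing can locate these directories.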
    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

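    # Sandbox runs U-Boot as a native userspace process; any other board is
    # driven via external scripts that execute/attach to the real hardware.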
    if board_type == 'sandbox':
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

def pytest_generate_tests(metafunc):
    """pytest hook: parametrize test functions based on custom rules.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

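    # For example, a test declared as 'def test_foo(u_boot_console,
    # env__foo_data):' is parametrized from the board environment module:
    # either a single key 'env__foo_data' supplies one value, or a key
    # 'env__foo_datas' supplies a list of values. (These names are purely
    # illustrative.)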
    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    for fn in metafunc.fixturenames:
        parts = fn.split('__')
        if len(parts) < 2:
            continue
        if parts[0] not in subconfigs:
            continue
        subconfig = subconfigs[parts[0]]
        vals = []
        val = subconfig.get(fn, [])
        # If that exact name is a key in the data source:
        if val:
            # ... use the dict value as a single parameter value.
            vals = (val, )
        else:
            # ... otherwise, see if there's a key that contains a list of
            # values to use instead.
            vals = subconfig.get(fn + 's', [])
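        # Name each parametrization after the value's 'fixture_id' entry if
        # it has one, otherwise after the fixture name plus its index.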
        def fixture_id(index, val):
            try:
                return val['fixture_id']
            except (TypeError, KeyError):
                return fn + str(index)
        ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
        metafunc.parametrize(fn, vals, ids=ids)

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

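    # Make sure U-Boot is actually running (spawning it if necessary) before
    # handing the console object to the test.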
    console.ensure_spawned()
    return console

tests_not_run = set()
tests_failed = set()
tests_xpassed = set()
tests_xfailed = set()
tests_skipped = set()
tests_passed = set()

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.add(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        log.status_pass('%d passed' % len(tests_passed))
        if tests_skipped:
            log.status_skipped('%d skipped' % len(tests_skipped))
            for test in tests_skipped:
                log.status_skipped('... ' + test)
        if tests_xpassed:
            log.status_xpass('%d xpass' % len(tests_xpassed))
            for test in tests_xpassed:
                log.status_xpass('... ' + test)
        if tests_xfailed:
            log.status_xfail('%d xfail' % len(tests_xfailed))
            for test in tests_xfailed:
                log.status_xfail('... ' + test)
        if tests_failed:
            log.status_fail('%d failed' % len(tests_failed))
            for test in tests_failed:
                log.status_fail('... ' + test)
        if tests_not_run:
            log.status_fail('%d not run' % len(tests_not_run))
            for test in tests_not_run:
                log.status_fail('... ' + test)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
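    # ConfigParser lower-cases option names, so CONFIG_FOO from .config ends
    # up in buildconfig under the key 'config_foo'.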
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)

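    # runtestprotocol() returns one report per test phase (setup, call,
    # teardown). Scan them to derive a single overall result for the test.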
    failure_cleanup = False
    test_list = tests_passed
    msg = 'OK'
    msg_log = log.status_pass
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

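    # On failure, pull any pending output from the U-Boot console into the
    # log, so it is recorded alongside the test that provoked it.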
    if failure_cleanup:
        console.drain_console()

    test_list.add(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except Exception:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

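    # After a failure the U-Boot instance may be in an unknown state; shut it
    # down so the next test starts with a fresh spawn.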
    if failure_cleanup:
        console.cleanup_spawn()

    return reports