# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. startup, for each executed
# test, at shutdown etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import errno
import os
import os.path
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import re
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by directories that already exist are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """
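
    # This helper mirrors "mkdir -p". (Python 3's os.makedirs(path,
    # exist_ok=True) would subsume it, but this file targets Python 2,
    # as the ConfigParser/StringIO imports above indicate.)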

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '
        'over which gdbserver should communicate, e.g. localhost:1234')

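# For illustration, a typical invocation exercising these options might look
# like the following (paths and values are examples, not requirements):
#
#     ./test/py/test.py --bd sandbox --build --build-dir /tmp/ub-sandbox
#
# test.py is the wrapper script in this directory that invokes py.test with
# this conftest.
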
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')
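
    # The above is roughly equivalent to running, by hand (illustrative):
    #     make O=<build_dir> -s <board>_defconfig && make O=<build_dir> -s -j8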

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)
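
    # For illustration: these configuration modules are plain Python files
    # found on sys.path. A board environment file such as
    # u_boot_boardenv_sandbox.py might just define module-level variables,
    # e.g. (example names, not a fixed schema):
    #
    #     env__net_dhcp_server = True
    #     env__net_uses_pci = True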

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
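
    # For example (illustrative), a .config line "CONFIG_CMD_MEMORY=y" ends
    # up as the entry 'config_cmd_memory' -> 'y', since RawConfigParser
    # lower-cases option names by default; setup_buildconfigspec() below
    # relies on that form when evaluating buildconfigspec markers.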

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type == 'sandbox':
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

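# This regex matches the linker-list symbols emitted for built-in unit tests,
# e.g. (illustrative) "_u_boot_list_2_dm_test_2_dm_test_autobind": group 1 is
# the suite ('dm' or 'env') and group 2 is the subtest name.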
re_ut_test_list = re.compile(r'_u_boot_list_2_(dm|env)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except IOError:
        # No symbol file; treat this as "no built-in unit tests".
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
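
# With symbols like the example above, a test declaring a "ut_subtest"
# parameter would run once per subtest, with values such as "dm autobind"
# and test IDs such as "ut_dm_autobind" (illustrative).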

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except (KeyError, TypeError):
            # Not a dict, or no explicit ID: fall back to a generated name.
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
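
# For illustration (hypothetical names): a board environment containing
#
#     env__flash_part = {'fixture_id': 'spi0', 'offset': 0x10000}
#
# makes a test declaring an "env__flash_part" parameter run once with that
# dict as its value; if only a plural key is present, e.g.
#
#     env__flash_parts = [{...}, {...}],
#
# the singular fixture is instead parametrized once per list entry.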

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console
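
# A test using the fixture might look like this (illustrative; run_command()
# is provided by the console implementations in this directory):
#
#     def test_version(u_boot_console):
#         response = u_boot_console.run_command('version')
#         assert 'U-Boot' in response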

anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')
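
# Example markers (illustrative):
#
#     @pytest.mark.boardspec('sandbox')      # run only on the sandbox board
#     @pytest.mark.boardspec('!seaboard')    # run on anything except seaboard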

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')
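
# Example marker (illustrative): require CONFIG_CMD_MEMORY in the build:
#
#     @pytest.mark.buildconfigspec('cmd_memory')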

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    anchors[item.name] = log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)

    failure_cleanup = False
    test_list = tests_passed
    msg = 'OK'
    msg_log = log.status_pass
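    # runtestprotocol() returned one report per test phase (setup, call,
    # teardown); the first failed/skipped report decides this test's status.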
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except Exception:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports