# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. startup, for each executed
# test, at shutdown etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.
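#
# As an illustrative sketch (the wrapper script and flags reflect the options
# parsed below, but the exact invocation is an assumption, not defined in this
# file), a typical run might look like:
#
#   ./test/py/test.py --bd sandbox --build
#
# which builds U-Boot for the sandbox board and then runs the test suite
# against it.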

import atexit
import errno
import os
import os.path
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import re
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by already-extant directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel ' +
        'over which gdbserver should communicate, e.g. localhost:1234')
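    # Illustrative use of --gdbserver (the port number and the gdb invocation
    # are assumptions, not mandated by this file): pass
    # '--gdbserver localhost:1234' here, then attach from another terminal
    # with something like:
    #   gdb <build-dir>/u-boot -ex 'target remote localhost:1234'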

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)
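    # For example (rpi-3 is an assumed example board type, not one treated
    # specially here): with --board-type rpi-3 and the default identity 'na',
    # the modules consulted above are u_boot_board_rpi_3.py,
    # u_boot_boardenv_rpi_3.py and u_boot_boardenv_rpi_3_na.py, since '-' is
    # mapped to '_' when forming the module names.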

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
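    # Illustrative mapping (CONFIG_CMD_MEMORY is an assumed example option):
    # a build configuration line such as 'CONFIG_CMD_MEMORY=y' ends up in
    # ubconfig.buildconfig under the lower-cased key 'config_cmd_memory' with
    # the value 'y', which is the form setup_buildconfigspec() looks up below.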

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

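    # Illustrative example (the address and symbol are assumptions, not taken
    # from a real map file): a u-boot.sym line such as
    #   0000000000406000 D _u_boot_list_2_dm_test_2_dm_test_autoprobe
    # matches re_ut_test_list with groups 'dm' and 'autoprobe', yielding the
    # parameter value 'dm autoprobe' and the test ID 'ut_dm_autoprobe'.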
    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except:
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

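    # Illustrative sketch (the variable names below are assumptions, not
    # defined by this file): a board environment module might provide either
    # a single value:
    #
    #   env__net_uses_pci = True
    #
    # or, for a test parameter named env__example_file, a plural key holding
    # a list of values:
    #
    #   env__example_files = [
    #       {'fixture_id': 'small', 'fn': 'small.bin'},
    #       {'fixture_id': 'large', 'fn': 'large.bin'},
    #   ]
    #
    # A test taking env__net_uses_pci then runs once with that value, while
    # one taking env__example_file runs once per list entry, with each
    # entry's optional 'fixture_id' used as the test ID.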
    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except:
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's u_boot_log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console
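# Illustrative fixture usage (the test function, command and expected output
# are assumptions; run_command() is assumed to be provided by the console
# implementation imported in pytest_configure()):
#
#   def test_version(u_boot_console):
#       response = u_boot_console.run_command('version')
#       assert 'U-Boot' in response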

anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_warning:
                log.status_warning('%d passed with warning' % len(tests_warning))
                for test in tests_warning:
                    anchor = anchors.get(test, None)
                    log.status_warning('... ' + test, anchor)
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

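    # Illustrative marker usage (the test functions are assumed examples):
    #
    #   @pytest.mark.boardspec('sandbox')       # runs only on sandbox boards
    #   def test_needs_sandbox(u_boot_console):
    #       ...
    #
    #   @pytest.mark.boardspec('!seaboard')     # skipped on seaboard
    #   def test_not_on_seaboard(u_boot_console):
    #       ...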
    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

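    # Illustrative marker usage (CMD_MEMORY is an assumed example option): a
    # test decorated with @pytest.mark.buildconfigspec('cmd_memory') is
    # skipped unless CONFIG_CMD_MEMORY is enabled in the build under test.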
    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())

def tool_is_in_path(tool):
    """Return True if an executable named 'tool' is present in $PATH."""

    for path in os.environ["PATH"].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

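    # Illustrative marker usage (dtc is an assumed example tool): a test
    # decorated with @pytest.mark.requiredtool('dtc') is skipped unless an
    # executable named 'dtc' is found in $PATH.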
    mark = item.get_marker('requiredtool')
    if not mark:
        return
    for tool in mark.args:
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)

def start_test_section(item):
    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports