# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. startup, for each executed
# test, at shutdown etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import errno
import os
import os.path
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import re
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by directories that already exist are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
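
# Note: on Python 3, os.makedirs(path, exist_ok=True) provides the same
# behaviour; this module is written for Python 2, as the ConfigParser and
# StringIO imports above indicate.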

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '+
        'over which gdbserver should communicate, e.g. localhost:1234')
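
# As an illustration (the paths and values are examples only), a typical
# invocation exercising these options might look like:
#
#   ./test/py/test.py --bd sandbox --build --build-dir /tmp/u-boot-sandbox
#
# which builds sandbox U-Boot into the given directory and runs the test
# suite against that binary.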

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)
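
    # As a sketch of what such a module can provide (the names below are
    # hypothetical): a module u_boot_boardenv_sandbox.py found on sys.path
    # containing plain module-level assignments, e.g.
    #
    #   env__net_dhcp_server = {'fixture_id': 'primary'}
    #
    # has all of its module-level names merged into the relevant dict here.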

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
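
    # As an example of the resulting keys: a .config line such as
    # CONFIG_CMD_MEMORY=y is stored in ubconfig.buildconfig as the key
    # 'config_cmd_memory' with value 'y', since RawConfigParser lower-cases
    # option names; setup_buildconfigspec() below relies on that naming.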

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
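# As an illustration (the exact symbol below is an example): a u-boot.sym
# line such as
#
#   000000000023a8c0 D _u_boot_list_2_dm_test_2_dm_test_autoprobe
#
# yields group(1) == 'dm' and group(2) == 'autoprobe', which becomes the
# parametrized value 'dm autoprobe' with test id 'ut_dm_autoprobe'.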
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except:
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except:
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
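
# As a sketch (the fixture and test names are hypothetical), a test declared
# as
#
#   def test_dhcp(u_boot_console, env__net_dhcp_server):
#       ...
#
# is parametrized from the board environment: a single entry named
# env__net_dhcp_server runs it once with that value, while a list named
# env__net_dhcp_servers runs it once per element, using each element's
# optional 'fixture_id' as the test id.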

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's u_boot_log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console
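
# As a usage sketch (the test body is illustrative), a test simply accepts
# the fixture as an argument:
#
#   def test_version(u_boot_console):
#       response = u_boot_console.run_command('version')
#       assert 'U-Boot' in response
#
# The console object is created once per session in pytest_configure() and
# re-used by each test via this fixture.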

anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_warning:
                log.status_warning('%d passed with warning' % len(tests_warning))
                for test in tests_warning:
                    anchor = anchors.get(test, None)
                    log.status_warning('... ' + test, anchor)
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
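
# Usage sketch (the board names are examples): a test marked with
#
#   @pytest.mark.boardspec('sandbox')      # run only on sandbox
#   @pytest.mark.boardspec('!sandbox_spl') # never run on sandbox_spl
#
# is skipped here whenever the configured --board-type does not satisfy the
# marker's positive/negative board list.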

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())
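
# Usage sketch: for example,
#
#   @pytest.mark.buildconfigspec('cmd_memory')
#
# skips the test unless CONFIG_CMD_MEMORY was found enabled in the .config /
# autoconf.mk files parsed in pytest_configure() above.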

def tool_is_in_path(tool):
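    """Determine whether a tool (executable) is available in $PATH.

    Args:
        tool: The name of the tool to search for.

    Returns:
        True if an executable with that name exists in some $PATH directory,
        False otherwise.
    """
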
    for path in os.environ['PATH'].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('requiredtool')
    if not mark:
        return
    for tool in mark.args:
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)
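
# Usage sketch: for example,
#
#   @pytest.mark.requiredtool('dfu-util')
#
# skips the test on hosts where no executable named dfu-util is present in
# $PATH.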

def start_test_section(item):
    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports