# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import errno
import os
import os.path
import pytest
from _pytest.runner import runtestprotocol
import re
import sys

# Python 2/3 compatibility imports.
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by already-extant directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '
        'over which gdbserver should communicate, e.g. localhost:1234')
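
# For illustration, two hypothetical invocations showing the options above in
# use (the board names, identity, and paths are examples, not requirements):
#
#   ./test/py/test.py --bd sandbox --build
#   ./test/py/test.py --bd seaboard --id left --build-dir /tmp/b/seaboard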

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver is only supported with sandbox targets')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
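
# For illustration: pytest_configure() above imports optional per-board
# configuration modules (u_boot_board_* and u_boot_boardenv_*) when they are
# importable. A minimal sketch of such a module, assuming a hypothetical file
# u_boot_boardenv_sandbox.py; the variable name below is an invented example,
# consumed through the env__* fixture mechanism in generate_config() below:
#
#   env__example_device = {
#       'fixture_id': 'primary',
#       'name': 'dev0',
#   }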

re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing
    the list of symbols generated by the build process. Provides this
    information to test functions by parameterizing their ut_subtest fixture
    parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except IOError:
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form
    brd__xxx or env__xxx, the brd and env configuration dictionaries are
    consulted to find the list of values to use for those parameters, and
    the test is parametrized so that it runs once for each combination of
    values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)
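
# For illustration, a hypothetical test parametrized by the machinery above:
# if a board environment module defines env__example_device (a single value)
# or env__example_devices (a list of values), the test below runs once per
# value; if neither key exists, the parameter list is empty and the test is
# effectively skipped:
#
#   def test_example(u_boot_console, env__example_device):
#       u_boot_console.run_command('echo ' + env__example_device['name'])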

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console

anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_warning:
                log.status_warning('%d passed with warning' %
                                   len(tests_warning))
                for test in tests_warning:
                    anchor = anchors.get(test, None)
                    log.status_warning('... ' + test, anchor)
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
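
# For illustration, hypothetical uses of the boardspec marker processed by
# setup_boardspec() above:
#
#   @pytest.mark.boardspec('sandbox')      # run only on the sandbox board
#   def test_sandbox_only(u_boot_console):
#       ...
#
#   @pytest.mark.boardspec('!seaboard')    # run on any board except seaboard
#   def test_not_seaboard(u_boot_console):
#       ...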

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())

def tool_is_in_path(tool):
    """Determine whether a tool is available in $PATH and executable.

    Args:
        tool: The name of the tool to search for.

    Returns:
        True if the tool is available, False otherwise.
    """

    for path in os.environ['PATH'].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('requiredtool')
    if not mark:
        return
    for tool in mark.args:
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)

def start_test_section(item):
    """Open a section for the given test item in the HTML log file."""

    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)
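
# For illustration, hypothetical uses of the markers processed by the setup
# functions above; 'cmd_memory' maps to CONFIG_CMD_MEMORY in .config, and
# 'dtc' must be an executable found in $PATH:
#
#   @pytest.mark.buildconfigspec('cmd_memory')
#   @pytest.mark.requiredtool('dtc')
#   def test_example(u_boot_console):
#       ...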

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() below requires that the
    # log contain a section for this test. Create a section for the test if
    # it doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except Exception:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports