#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

import os
import unittest
import pprint
import datetime

from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.runtime.decorator.package import OEHasPackage
from oeqa.utils.logparser import PtestParser

class PtestRunnerTest(OERuntimeTestCase):
    """Run ptest-runner on the target image and report per-test results.

    Two entry points select between "all ptests must pass" and "ptests are
    expected to fail" (driven by PTEST_EXPECT_FAILURE in the test data);
    both delegate to do_ptestrunner() for the actual execution and parsing.
    """

    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    @unittest.expectedFailure
    def test_ptestrunner_expectfail(self):
        """Run ptests when failures are expected (PTEST_EXPECT_FAILURE set)."""
        if not self.td.get('PTEST_EXPECT_FAILURE'):
            self.skipTest('Cannot run ptests with @expectedFailure as ptests are required to pass')
        self.do_ptestrunner()

    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    def test_ptestrunner_expectsuccess(self):
        """Run ptests when every ptest is expected to pass."""
        if self.td.get('PTEST_EXPECT_FAILURE'):
            self.skipTest('Cannot run ptests without @expectedFailure as ptests are expected to fail')
        self.do_ptestrunner()

    def do_ptestrunner(self):
        """Execute ptest-runner on the target, store and parse its log.

        Writes the raw ptest-runner output under a timestamped log directory
        (linked from 'ptest_log'), records per-test results and section data
        in self.tc.extraresults, and fails the test if any ptest failed,
        timed out, exited abnormally, produced no results, or if the OOM
        killer fired on the target.
        """
        status, output = self.target.run('which ptest-runner', 0)
        if status != 0:
            self.skipTest("No -ptest packages are installed in the image")

        test_log_dir = self.td.get('TEST_LOG_DIR', '')
        # TEST_LOG_DIR may be empty when testimage is added after
        # testdata.json is generated.
        if not test_log_dir:
            test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
        # Make the test output path absolute, otherwise the output content
        # will be created relative to the current directory.
        if not os.path.isabs(test_log_dir):
            test_log_dir = os.path.join(self.td.get('TOPDIR', ''), test_log_dir)
        # Don't use self.td.get('DATETIME'): it comes from testdata.json, is
        # not up-to-date, and may cause "File exists" errors on re-run.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
        ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
        ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')

        libdir = self.td.get('libdir', '')
        ptest_dirs = ['/usr/lib']
        if libdir not in ptest_dirs:
            ptest_dirs.append(libdir)
        status, output = self.target.run('ptest-runner -t 450 -d "{}"'.format(' '.join(ptest_dirs)), 0)
        os.makedirs(ptest_log_dir)
        with open(ptest_runner_log, 'w') as f:
            f.write(output)

        # status != 0 is OK since some ptest tests may fail, but 127 means
        # the shell could not execute ptest-runner at all.
        self.assertNotEqual(status, 127, msg="Cannot execute ptest-runner!")

        if not hasattr(self.tc, "extraresults"):
            self.tc.extraresults = {}
        extras = self.tc.extraresults
        extras['ptestresult.rawlogs'] = {'log': output}

        # Parse and save results
        parser = PtestParser()
        results, sections = parser.parse(ptest_runner_log)
        parser.results_as_files(ptest_log_dir)
        # Use lexists(), not exists(): if the old link is dangling (its log
        # directory was removed), exists() follows the link and returns
        # False, the stale link is kept, and os.symlink() below raises
        # FileExistsError.
        if os.path.lexists(ptest_log_dir_link):
            # Remove the old link to create a new one
            os.remove(ptest_log_dir_link)
        os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)

        extras['ptestresult.sections'] = sections

        zerolength = []
        # Parentheses in test names would break the result key format, so
        # map them to underscores.
        trans = str.maketrans("()", "__")
        for section in results:
            for test in results[section]:
                result = results[section][test]
                testname = "ptestresult." + (section or "No-section") + "." + "_".join(test.translate(trans).split())
                extras[testname] = {'status': result}
            if not results[section]:
                # Section produced no individual test results at all.
                zerolength.append(section)

        failed_tests = {}

        # A section that exited abnormally or timed out fails as a whole;
        # keep its log for the failure message.
        for section in sections:
            if 'exitcode' in sections[section] or 'timeout' in sections[section]:
                failed_tests[section] = sections[section]["log"]

        for section in results:
            failed_testcases = ["_".join(test.translate(trans).split()) for test in results[section] if results[section][test] == 'FAILED']
            if failed_testcases:
                failed_tests[section] = failed_testcases

        failmsg = ""
        status, output = self.target.run('dmesg | grep "Killed process"', 0)
        if output:
            failmsg = "ERROR: Processes were killed by the OOM Killer:\n%s\n" % output

        if failed_tests:
            failmsg = failmsg + "\nFailed ptests:\n%s\n" % pprint.pformat(failed_tests)

        if zerolength:
            failmsg = failmsg + "\nptests which had no test results:\n%s" % pprint.pformat(zerolength)

        if failmsg:
            self.logger.warning("There were failing ptests.")
            self.fail(failmsg)