#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

import os
import sys
basepath = os.path.abspath(os.path.dirname(__file__) + '/../../../../../')
lib_path = basepath + '/scripts/lib'
sys.path = sys.path + [lib_path]
from resulttool.report import ResultsTextReport
from resulttool import regression as regression
from resulttool import resultutils as resultutils
from oeqa.selftest.case import OESelftestTestCase

class ResultToolTests(OESelftestTestCase):
    base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "runtime",
                                                            "TESTSERIES": "series1",
                                                            "IMAGE_BASENAME": "image",
                                                            "IMAGE_PKGTYPE": "ipk",
                                                            "DISTRO": "mydistro",
                                                            "MACHINE": "qemux86"},
                                          'result': {}},
                         'base_result2': {'configuration': {"TEST_TYPE": "runtime",
                                                            "TESTSERIES": "series1",
                                                            "IMAGE_BASENAME": "image",
                                                            "IMAGE_PKGTYPE": "ipk",
                                                            "DISTRO": "mydistro",
                                                            "MACHINE": "qemux86-64"},
                                          'result': {}}}
    target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "runtime",
                                                                "TESTSERIES": "series1",
                                                                "IMAGE_BASENAME": "image",
                                                                "IMAGE_PKGTYPE": "ipk",
                                                                "DISTRO": "mydistro",
                                                                "MACHINE": "qemux86"},
                                              'result': {}},
                           'target_result2': {'configuration': {"TEST_TYPE": "runtime",
                                                                "TESTSERIES": "series1",
                                                                "IMAGE_BASENAME": "image",
                                                                "IMAGE_PKGTYPE": "ipk",
                                                                "DISTRO": "mydistro",
                                                                "MACHINE": "qemux86"},
                                              'result': {}},
                           'target_result3': {'configuration': {"TEST_TYPE": "runtime",
                                                                "TESTSERIES": "series1",
                                                                "IMAGE_BASENAME": "image",
                                                                "IMAGE_PKGTYPE": "ipk",
                                                                "DISTRO": "mydistro",
                                                                "MACHINE": "qemux86-64"},
                                              'result': {}}}

    def test_report_can_aggregate_test_result(self):
        result_data = {'result': {'test1': {'status': 'PASSED'},
                                  'test2': {'status': 'PASSED'},
                                  'test3': {'status': 'FAILED'},
                                  'test4': {'status': 'ERROR'},
                                  'test5': {'status': 'SKIPPED'}}}
        report = ResultsTextReport()
        result_report = report.get_aggregated_test_result(None, result_data, 'DummyMachine')
        self.assertTrue(result_report['passed'] == 2, msg="Passed count not correct:%s" % result_report['passed'])
        self.assertTrue(result_report['failed'] == 2, msg="Failed count not correct:%s" % result_report['failed'])
        self.assertTrue(result_report['skipped'] == 1, msg="Skipped count not correct:%s" % result_report['skipped'])

    def test_regression_can_get_regression_base_target_pair(self):
        results = {}
        resultutils.append_resultsdata(results, ResultToolTests.base_results_data)
        resultutils.append_resultsdata(results, ResultToolTests.target_results_data)
        self.assertTrue('target_result1' in results['runtime/mydistro/qemux86/image'], msg="Pair not correct:%s" % results)
        self.assertTrue('target_result3' in results['runtime/mydistro/qemux86-64/image'], msg="Pair not correct:%s" % results)

    def test_regression_can_get_regression_result(self):
        base_result_data = {'result': {'test1': {'status': 'PASSED'},
                                       'test2': {'status': 'PASSED'},
                                       'test3': {'status': 'FAILED'},
                                       'test4': {'status': 'ERROR'},
                                       'test5': {'status': 'SKIPPED'}}}
        target_result_data = {'result': {'test1': {'status': 'PASSED'},
                                         'test2': {'status': 'FAILED'},
                                         'test3': {'status': 'PASSED'},
                                         'test4': {'status': 'ERROR'},
                                         'test5': {'status': 'SKIPPED'}}}
        result, text = regression.compare_result(self.logger, "BaseTestRunName", "TargetTestRunName", base_result_data, target_result_data)
        self.assertTrue(result['test2']['base'] == 'PASSED',
                        msg="regression not correct:%s" % result['test2']['base'])
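        # test2 regressed (PASSED -> FAILED) while test3 changed in the opposite
        # direction (FAILED -> PASSED); the remaining checks verify that both
        # transitions are reported with their base/target status.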
        self.assertTrue(result['test2']['target'] == 'FAILED',
                        msg="regression not correct:%s" % result['test2']['target'])
        self.assertTrue(result['test3']['base'] == 'FAILED',
                        msg="regression not correct:%s" % result['test3']['base'])
        self.assertTrue(result['test3']['target'] == 'PASSED',
                        msg="regression not correct:%s" % result['test3']['target'])

    def test_merge_can_merged_results(self):
        results = {}
        resultutils.append_resultsdata(results, ResultToolTests.base_results_data, configmap=resultutils.flatten_map)
        resultutils.append_resultsdata(results, ResultToolTests.target_results_data, configmap=resultutils.flatten_map)
        self.assertEqual(len(results[''].keys()), 5, msg="Flattened results not correct %s" % str(results))

    def test_results_without_metadata_can_be_compared(self):
        base_configuration = {"configuration": {
            "TEST_TYPE": "oeselftest",
            "TESTSERIES": "series1",
            "IMAGE_BASENAME": "image",
            "IMAGE_PKGTYPE": "ipk",
            "DISTRO": "mydistro",
            "MACHINE": "qemux86",
            "STARTTIME": 1672527600
        }, "result": {}}
        target_configuration = {"configuration": {
            "TEST_TYPE": "oeselftest",
            "TESTSERIES": "series1",
            "IMAGE_BASENAME": "image",
            "IMAGE_PKGTYPE": "ipk",
            "DISTRO": "mydistro",
            "MACHINE": "qemux86",
            "STARTTIME": 1672527600
        }, "result": {}}
        self.assertTrue(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                        msg="incorrect metadata filtering, tests without metadata should be compared")

    def test_target_result_with_missing_metadata_can_not_be_compared(self):
        base_configuration = {"configuration": {
            "TEST_TYPE": "oeselftest",
            "TESTSERIES": "series1",
            "IMAGE_BASENAME": "image",
            "IMAGE_PKGTYPE": "ipk",
            "DISTRO": "mydistro",
            "MACHINE": "qemux86",
            "OESELFTEST_METADATA": {
                "run_all_tests": True,
                "run_tests": None,
                "skips": None,
                "machine": None,
                "select_tags": ["toolchain-user", "toolchain-system"],
                "exclude_tags": None
            }}, "result": {}}
        target_configuration = {"configuration": {"TEST_TYPE": "oeselftest",
                                                  "TESTSERIES": "series1",
                                                  "IMAGE_BASENAME": "image",
                                                  "IMAGE_PKGTYPE": "ipk",
                                                  "DISTRO": "mydistro",
                                                  "MACHINE": "qemux86",
                                                  "STARTTIME": 1672527600
                                                  }, "result": {}}
        self.assertFalse(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                         msg="incorrect metadata filtering, tests should not be compared")

    def test_results_with_matching_metadata_can_be_compared(self):
        base_configuration = {"configuration": {
            "TEST_TYPE": "oeselftest",
            "TESTSERIES": "series1",
            "IMAGE_BASENAME": "image",
            "IMAGE_PKGTYPE": "ipk",
            "DISTRO": "mydistro",
            "MACHINE": "qemux86",
            "STARTTIME": 1672527600,
            "OESELFTEST_METADATA": {"run_all_tests": True,
                                    "run_tests": None,
                                    "skips": None,
                                    "machine": None,
                                    "select_tags": ["toolchain-user", "toolchain-system"],
                                    "exclude_tags": None}
        }, "result": {}}
        target_configuration = {"configuration": {
            "TEST_TYPE": "oeselftest",
            "TESTSERIES": "series1",
            "IMAGE_BASENAME": "image",
            "IMAGE_PKGTYPE": "ipk",
            "DISTRO": "mydistro",
            "MACHINE": "qemux86",
            "STARTTIME": 1672527600,
            "OESELFTEST_METADATA": {"run_all_tests": True,
                                    "run_tests": None,
                                    "skips": None,
                                    "machine": None,
                                    "select_tags": ["toolchain-user", "toolchain-system"],
                                    "exclude_tags": None}
        }, "result": {}}
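        # Both runs carry identical OESELFTEST_METADATA, so they are expected
        # to be treated as comparable.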
        self.assertTrue(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                        msg="incorrect metadata filtering, tests with matching metadata should be compared")

    def test_results_with_mismatching_metadata_can_not_be_compared(self):
        base_configuration = {"configuration": {
            "TEST_TYPE": "oeselftest",
            "TESTSERIES": "series1",
            "IMAGE_BASENAME": "image",
            "IMAGE_PKGTYPE": "ipk",
            "DISTRO": "mydistro",
            "MACHINE": "qemux86",
            "STARTTIME": 1672527600,
            "OESELFTEST_METADATA": {"run_all_tests": True,
                                    "run_tests": None,
                                    "skips": None,
                                    "machine": None,
                                    "select_tags": ["toolchain-user", "toolchain-system"],
                                    "exclude_tags": None}
        }, "result": {}}
        target_configuration = {"configuration": {
            "TEST_TYPE": "oeselftest",
            "TESTSERIES": "series1",
            "IMAGE_BASENAME": "image",
            "IMAGE_PKGTYPE": "ipk",
            "DISTRO": "mydistro",
            "MACHINE": "qemux86",
            "STARTTIME": 1672527600,
            "OESELFTEST_METADATA": {"run_all_tests": True,
                                    "run_tests": None,
                                    "skips": None,
                                    "machine": None,
                                    "select_tags": ["machine"],
                                    "exclude_tags": None}
        }, "result": {}}
        self.assertFalse(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                         msg="incorrect metadata filtering, tests with mismatching metadata should not be compared")

    def test_metadata_matching_is_only_checked_for_relevant_test_type(self):
        base_configuration = {"configuration": {"TEST_TYPE": "runtime",
                                                "TESTSERIES": "series1",
                                                "IMAGE_BASENAME": "image",
                                                "IMAGE_PKGTYPE": "ipk",
                                                "DISTRO": "mydistro",
                                                "MACHINE": "qemux86",
                                                "STARTTIME": 1672527600,
                                                "OESELFTEST_METADATA": {"run_all_tests": True,
                                                                        "run_tests": None,
                                                                        "skips": None,
                                                                        "machine": None,
                                                                        "select_tags": ["toolchain-user", "toolchain-system"],
                                                                        "exclude_tags": None}}, "result": {}}
        target_configuration = {"configuration": {"TEST_TYPE": "runtime",
                                                  "TESTSERIES": "series1",
                                                  "IMAGE_BASENAME": "image",
                                                  "IMAGE_PKGTYPE": "ipk",
                                                  "DISTRO": "mydistro",
                                                  "MACHINE": "qemux86",
                                                  "STARTTIME": 1672527600,
                                                  "OESELFTEST_METADATA": {"run_all_tests": True,
                                                                          "run_tests": None,
                                                                          "skips": None,
                                                                          "machine": None,
                                                                          "select_tags": ["machine"],
                                                                          "exclude_tags": None}}, "result": {}}
        self.assertTrue(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                        msg="incorrect metadata filtering, %s tests should be compared" % base_configuration['configuration']['TEST_TYPE'])

    def test_machine_matches(self):
        base_configuration = {"configuration": {
            "TEST_TYPE": "runtime",
            "MACHINE": "qemux86"}, "result": {}}
        target_configuration = {"configuration": {
            "TEST_TYPE": "runtime",
            "MACHINE": "qemux86"
        }, "result": {}}
        self.assertTrue(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                        msg="incorrect machine filtering, identical machine tests should be compared")

    def test_machine_mismatches(self):
        base_configuration = {"configuration": {
            "TEST_TYPE": "runtime",
            "MACHINE": "qemux86"
        }, "result": {}}
        target_configuration = {"configuration": {
            "TEST_TYPE": "runtime",
            "MACHINE": "qemux86_64"
        }, "result": {}}
        self.assertFalse(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                         msg="incorrect machine filtering, mismatching machine tests should not be compared")

    def test_can_not_compare_non_ltp_tests(self):
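        # Only one of the two runs contains ltpresult_* entries, so the pair
        # should be rejected (mismatching ltpresult content is not comparable).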
        base_configuration = {"configuration": {
            "TEST_TYPE": "runtime",
            "MACHINE": "qemux86"
        }, "result": {
            "ltpresult_foo": {
                "status": "PASSED"
            }}}
        target_configuration = {"configuration": {
            "TEST_TYPE": "runtime",
            "MACHINE": "qemux86_64"
        }, "result": {
            "bar": {
                "status": "PASSED"
            }}}
        self.assertFalse(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                         msg="incorrect ltpresult filtering, mismatching ltpresult content should not be compared")

    def test_can_compare_ltp_tests(self):
        base_configuration = {"configuration": {
            "TEST_TYPE": "runtime",
            "MACHINE": "qemux86"
        }, "result": {
            "ltpresult_foo": {
                "status": "PASSED"
            }}}
        target_configuration = {"configuration": {
            "TEST_TYPE": "runtime",
            "MACHINE": "qemux86"
        }, "result": {
            "ltpresult_foo": {
                "status": "PASSED"
            }}}
        self.assertTrue(regression.can_be_compared(self.logger, base_configuration, target_configuration),
                        msg="incorrect ltpresult filtering, matching ltpresult content should be compared")

    def test_can_match_non_static_ptest_names(self):
        base_configuration = {"a": {
            "conf_X": {
                "configuration": {
                    "TEST_TYPE": "runtime",
                    "MACHINE": "qemux86"
                }, "result": {
                    "ptestresult.lttng-tools.foo_-_bar_-_moo": {
                        "status": "PASSED"
                    },
                    "ptestresult.babeltrace.bar_-_moo_-_foo": {
                        "status": "PASSED"
                    },
                    "ptestresult.babeltrace2.moo_-_foo_-_bar": {
                        "status": "PASSED"
                    },
                    "ptestresult.curl.test_0000__foo_out_of_bar": {
                        "status": "PASSED"
                    },
                    "ptestresult.dbus.test_0000__foo_out_of_bar,_remaining:_00:02,_took_0.032s,_duration:_03:32_": {
                        "status": "PASSED"
                    },
                    "ptestresult.binutils-ld.in testcase /foo/build-st-bar/moo/ctf.exp": {
                        "status": "PASSED"
                    },
                    "ptestresult.gcc-libstdc++-v3.Couldn't create remote directory /tmp/runtest.30975 on target": {
                        "status": "PASSED"
                    },
                    "ptestresult.gcc-libgomp.Couldn't create remote directory /tmp/runtest.3657621 on": {
                        "status": "PASSED"
                    }
                }}}}
        target_configuration = {"a": {
            "conf_Y": {
                "configuration": {
                    "TEST_TYPE": "runtime",
                    "MACHINE": "qemux86"
                }, "result": {
                    "ptestresult.lttng-tools.foo_-_yyy_-_zzz": {
                        "status": "PASSED"
                    },
                    "ptestresult.babeltrace.bar_-_zzz_-_xxx": {
                        "status": "PASSED"
                    },
                    "ptestresult.babeltrace2.moo_-_xxx_-_yyy": {
                        "status": "PASSED"
                    },
                    "ptestresult.curl.test_0000__xxx_out_of_yyy": {
                        "status": "PASSED"
                    },
                    "ptestresult.dbus.test_0000__yyy_out_of_zzz,_remaining:_00:03,_took_0.034s,_duration:_03:30_": {
                        "status": "PASSED"
                    },
                    "ptestresult.binutils-ld.in testcase /xxx/build-st-yyy/zzz/ctf.exp": {
                        "status": "PASSED"
                    },
                    "ptestresult.gcc-libstdc++-v3.Couldn't create remote directory /tmp/runtest.45678 on target": {
                        "status": "PASSED"
                    },
                    "ptestresult.gcc-libgomp.Couldn't create remote directory /tmp/runtest.3657621 on": {
                        "status": "PASSED"
                    }
                }}}}
        regression.fixup_ptest_names(base_configuration, self.logger)
        regression.fixup_ptest_names(target_configuration, self.logger)
        result, resultstring = regression.compare_result(
            self.logger, "A", "B", base_configuration["a"]["conf_X"], target_configuration["a"]["conf_Y"])
        self.assertDictEqual(
            result, {}, msg=f"ptests should be compared: {resultstring}")
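
# These tests are normally run through oe-selftest from an initialised build
# environment. Assuming this module lives under meta/lib/oeqa/selftest/cases/
# as resulttooltests.py, a typical invocation would be:
#
#   oe-selftest -r resulttooltests.ResultToolTests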