# resulttool - common library/utility functions
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#

import os
import base64
import zlib
import json
import scriptpath
import copy
import urllib.request
import urllib.parse  # used explicitly below; don't rely on urllib.request importing it
import posixpath
scriptpath.add_oe_lib_path()

# Per-TEST_TYPE lists of configuration keys. Each map controls how a result's
# configuration is turned into a "testpath" key (see append_resultsdata):
#   flatten_map    - no keys: all results of a type collapse into one path
#   regression_map - fine-grained keys for regression comparison
#   store_map      - keys used when storing results on disk/git
flatten_map = {
    "oeselftest": [],
    "runtime": [],
    "sdk": [],
    "sdkext": [],
    "manual": []
}
regression_map = {
    "oeselftest": ['TEST_TYPE', 'MACHINE'],
    "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
    "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
    "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
    "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
}
store_map = {
    "oeselftest": ['TEST_TYPE'],
    "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
    "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
    "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
    "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
}

def is_url(p):
    """
    Helper for determining if the given path is a URL
    """
    return p.startswith('http://') or p.startswith('https://')

# Extra configuration values injected into each result when missing.
extra_configvars = {'TESTSERIES': ''}

#
# Load the json file and append the results data into the provided results dict
#
def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
    """
    Merge test results into *results* (a dict keyed by testpath).

    f may be a local file path, an http(s) URL, or an already-loaded results
    dict. Each result entry must carry "configuration" and "result" sections,
    otherwise ValueError is raised. Missing configuration values listed in
    configvars are filled in; TESTSERIES defaults to the name of the directory
    containing the results file. The testpath key is built by joining the
    configuration values named by configmap[TEST_TYPE] with '/'.
    """
    if type(f) is str:
        if is_url(f):
            with urllib.request.urlopen(f) as response:
                data = json.loads(response.read().decode('utf-8'))
            url = urllib.parse.urlparse(f)
            testseries = posixpath.basename(posixpath.dirname(url.path))
        else:
            with open(f, "r") as filedata:
                data = json.load(filedata)
            testseries = os.path.basename(os.path.dirname(f))
    else:
        data = f
        # NOTE(review): when f is a dict and a result lacks TESTSERIES in its
        # configuration, the 'testseries' fallback below is unbound - callers
        # passing dicts (e.g. git_get_result) appear to rely on TESTSERIES
        # already being present. TODO confirm.
    for res in data:
        if "configuration" not in data[res] or "result" not in data[res]:
            raise ValueError("Test results data without configuration or result section?")
        for config in configvars:
            if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
                data[res]["configuration"]["TESTSERIES"] = testseries
                continue
            if config not in data[res]["configuration"]:
                data[res]["configuration"][config] = configvars[config]
        testtype = data[res]["configuration"].get("TEST_TYPE")
        if testtype not in configmap:
            raise ValueError("Unknown test type %s" % testtype)
        testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
        if testpath not in results:
            results[testpath] = {}
        results[testpath][res] = data[res]

#
# Walk a directory and find/load results data
# or load directly from a file
#
def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
    """
    Load results from a URL, a single file, or every "testresults.json"
    found under a directory tree. Returns the merged results dict.
    """
    results = {}
    if is_url(source) or os.path.isfile(source):
        append_resultsdata(results, source, configmap, configvars)
        return results
    for root, dirs, files in os.walk(source):
        for name in files:
            f = os.path.join(root, name)
            if name == "testresults.json":
                append_resultsdata(results, f, configmap, configvars)
    return results

def filter_resultsdata(results, resultid):
    """
    Return a new results dict containing only the test run named *resultid*.
    """
    newresults = {}
    for r in results:
        for i in results[r]:
            # Fixed: previously compared against undefined name 'resultsid',
            # which raised NameError whenever this function was called.
            if i == resultid:
                newresults[r] = {}
                newresults[r][i] = results[r][i]
    return newresults

def strip_ptestresults(results):
    """
    Return a deep copy of *results* with bulky ptest log data removed:
    the 'ptestresult.rawlogs' entry and the per-section 'log' entries.
    The input dict is left untouched.
    """
    newresults = copy.deepcopy(results)
    for res in newresults:
        if 'result' not in newresults[res]:
            continue
        if 'ptestresult.rawlogs' in newresults[res]['result']:
            del newresults[res]['result']['ptestresult.rawlogs']
        if 'ptestresult.sections' in newresults[res]['result']:
            for i in newresults[res]['result']['ptestresult.sections']:
                if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
                    del newresults[res]['result']['ptestresult.sections'][i]['log']
    return newresults

def decode_log(logdata):
    """
    Decode a log entry to text. Plain strings are returned unchanged;
    dicts with a 'compressed' key are base64-decoded and zlib-decompressed.
    Returns None for any other form.
    """
    if isinstance(logdata, str):
        return logdata
    elif isinstance(logdata, dict):
        if "compressed" in logdata:
            data = logdata.get("compressed")
            data = base64.b64decode(data.encode("utf-8"))
            data = zlib.decompress(data)
            return data.decode("utf-8", errors='ignore')
    return None

def generic_get_log(sectionname, results, section):
    """
    Fetch and decode results[sectionname][section]['log'], or None if any
    level is missing.
    """
    if sectionname not in results:
        return None
    if section not in results[sectionname]:
        return None

    ptest = results[sectionname][section]
    if 'log' not in ptest:
        return None
    return decode_log(ptest['log'])

def ptestresult_get_log(results, section):
    """
    Return the decoded log for one ptest section, or None if absent.
    """
    # Fixed: the section container key was misspelled 'ptestresuls.sections',
    # so per-section logs were never found. Must match the key used by
    # strip_ptestresults/save_resultsdata above.
    return generic_get_log('ptestresult.sections', results, section)

def generic_get_rawlogs(sectname, results):
    """
    Fetch and decode results[sectname]['log'], or None if absent.
    """
    if sectname not in results:
        return None
    if 'log' not in results[sectname]:
        return None
    return decode_log(results[sectname]['log'])

def ptestresult_get_rawlogs(results):
    """
    Return the decoded raw ptest log, or None if absent.
    """
    return generic_get_rawlogs('ptestresult.rawlogs', results)

def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
    """
    Write each testpath's results as JSON under destdir/<testpath>/<fn>.

    Unless ptestjson is True, bulky ptest logs are stripped from the JSON.
    If ptestlogs is True, raw and per-section ptest logs are additionally
    written out as ptest-raw.log / ptest-<section>.log next to the JSON.
    """
    for res in results:
        if res:
            dst = destdir + "/" + res + "/" + fn
        else:
            dst = destdir + "/" + fn
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        resultsout = results[res]
        if not ptestjson:
            resultsout = strip_ptestresults(results[res])
        with open(dst, 'w') as f:
            f.write(json.dumps(resultsout, sort_keys=True, indent=4))
        for res2 in results[res]:
            if ptestlogs and 'result' in results[res][res2]:
                seriesresults = results[res][res2]['result']
                rawlogs = ptestresult_get_rawlogs(seriesresults)
                if rawlogs is not None:
                    with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
                        f.write(rawlogs)
                if 'ptestresult.sections' in seriesresults:
                    for i in seriesresults['ptestresult.sections']:
                        sectionlog = ptestresult_get_log(seriesresults, i)
                        if sectionlog is not None:
                            with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
                                f.write(sectionlog)

def git_get_result(repo, tags, configmap=store_map):
    """
    Load all testresults.json files stored under the given git *tags* in
    *repo* (a git command wrapper with run_cmd) and merge them into one
    results dict.
    """
    git_objs = []
    for tag in tags:
        files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
        git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])

    def parse_json_stream(data):
        """Parse multiple concatenated JSON objects"""
        objs = []
        json_d = ""
        for line in data.splitlines():
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d))
        return objs

    # Optimize by reading all data with one git command
    results = {}
    for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
        append_resultsdata(results, obj, configmap=configmap)

    return results

def test_run_results(results):
    """
    Convenient generator function that iterates over all test runs that have a
    result section.

    Generates a tuple of:
    (result json file path, test run name, test run (dict), test run "results" (dict))
    for each test run that has a "result" section
    """
    for path in results:
        for run_name, test_run in results[path].items():
            if not 'result' in test_run:
                continue
            yield path, run_name, test_run, test_run['result']