1# resulttool - common library/utility functions
2#
3# Copyright (c) 2019, Intel Corporation.
4# Copyright (c) 2019, Linux Foundation
5#
6# SPDX-License-Identifier: GPL-2.0-only
7#
8
import base64
import copy
import json
import os
import posixpath
import urllib.parse
import urllib.request
import zlib

import scriptpath
17scriptpath.add_oe_lib_path()
18
# Per-test-type lists of configuration variable names used to build the
# "testpath" key under which results are stored (see append_resultsdata).

# Flatten everything: no configuration variables, so all results of a test
# type share a single (empty) testpath.
flatten_map = {
    "oeselftest": [],
    "runtime": [],
    "sdk": [],
    "sdkext": [],
    "manual": []
}
# Finer-grained keys used when comparing runs for regressions.
regression_map = {
    "oeselftest": ['TEST_TYPE', 'MACHINE'],
    "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
    "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
    "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
    "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
}
# Default keys used when storing results on disk/in git (directory layout).
store_map = {
    "oeselftest": ['TEST_TYPE'],
    "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
    "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
    "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
    "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
}
40
def is_url(p):
    """
    Helper for determining if the given path is a URL
    """
    return p.startswith(('http://', 'https://'))
46
# Extra configuration variables injected (with these defaults) into each test
# run's "configuration" section when absent; TESTSERIES is normally derived
# from the parent directory name of the loaded results file.
extra_configvars = {'TESTSERIES': ''}
48
49#
50# Load the json file and append the results data into the provided results dict
51#
52def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
53    if type(f) is str:
54        if is_url(f):
55            with urllib.request.urlopen(f) as response:
56                data = json.loads(response.read().decode('utf-8'))
57            url = urllib.parse.urlparse(f)
58            testseries = posixpath.basename(posixpath.dirname(url.path))
59        else:
60            with open(f, "r") as filedata:
61                try:
62                    data = json.load(filedata)
63                except json.decoder.JSONDecodeError:
64                    print("Cannot decode {}. Possible corruption. Skipping.".format(f))
65                    data = ""
66            testseries = os.path.basename(os.path.dirname(f))
67    else:
68        data = f
69    for res in data:
70        if "configuration" not in data[res] or "result" not in data[res]:
71            raise ValueError("Test results data without configuration or result section?")
72        for config in configvars:
73            if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
74                data[res]["configuration"]["TESTSERIES"] = testseries
75                continue
76            if config not in data[res]["configuration"]:
77                data[res]["configuration"][config] = configvars[config]
78        testtype = data[res]["configuration"].get("TEST_TYPE")
79        if testtype not in configmap:
80            raise ValueError("Unknown test type %s" % testtype)
81        testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
82        if testpath not in results:
83            results[testpath] = {}
84        results[testpath][res] = data[res]
85
86#
87# Walk a directory and find/load results data
88# or load directly from a file
89#
90def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
91    results = {}
92    if is_url(source) or os.path.isfile(source):
93        append_resultsdata(results, source, configmap, configvars)
94        return results
95    for root, dirs, files in os.walk(source):
96        for name in files:
97            f = os.path.join(root, name)
98            if name == "testresults.json":
99                append_resultsdata(results, f, configmap, configvars)
100    return results
101
def filter_resultsdata(results, resultid):
    """Return a filtered copy of *results* keeping only the test run whose
    name equals *resultid* (testpaths with no match are dropped).

    Fixes a NameError: the original compared against the misspelled name
    'resultsid', which is not defined anywhere.
    """
    newresults = {}
    for testpath in results:
        for runname in results[testpath]:
            if runname == resultid:
                newresults.setdefault(testpath, {})[runname] = results[testpath][runname]
    return newresults
110
def strip_ptestresults(results):
    """Return a deep copy of *results* with the bulky ptest log payloads
    (raw logs and per-section logs) removed; the input is left untouched."""
    stripped = copy.deepcopy(results)
    for run in stripped.values():
        if 'result' not in run:
            continue
        result = run['result']
        result.pop('ptestresult.rawlogs', None)
        for section in result.get('ptestresult.sections', {}).values():
            section.pop('log', None)
    return stripped
125
def decode_log(logdata):
    """Return log text from *logdata*, which is either a plain string or a
    dict holding zlib-compressed, base64-encoded text under 'compressed'.
    Returns None for anything else."""
    if isinstance(logdata, str):
        return logdata
    if isinstance(logdata, dict) and "compressed" in logdata:
        raw = base64.b64decode(logdata["compressed"].encode("utf-8"))
        # Tolerate bytes that don't decode cleanly as UTF-8
        return zlib.decompress(raw).decode("utf-8", errors='ignore')
    return None
136
def generic_get_log(sectionname, results, section):
    """Return the decoded 'log' entry of results[sectionname][section],
    or None when any level of that lookup is missing."""
    try:
        entry = results[sectionname][section]
    except KeyError:
        return None
    if 'log' not in entry:
        return None
    return decode_log(entry['log'])
147
def ptestresult_get_log(results, section):
    """Return the decoded log for one ptest section, or None if absent."""
    return generic_get_log('ptestresult.sections', results, section)
150
def generic_get_rawlogs(sectname, results):
    """Return the decoded 'log' entry stored directly under
    results[sectname], or None when either key is missing."""
    try:
        logdata = results[sectname]['log']
    except KeyError:
        return None
    return decode_log(logdata)
157
def ptestresult_get_rawlogs(results):
    """Return the decoded whole-run ptest raw log, or None if absent."""
    return generic_get_rawlogs('ptestresult.rawlogs', results)
160
def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
    """Write each results set to destdir/<testpath>/fn as sorted, indented JSON.

    results:   mapping of testpath -> {run name -> run data}, as produced by
               load_resultsdata()/append_resultsdata()
    destdir:   base output directory (subdirectories created as needed)
    fn:        filename for the JSON file written in each testpath directory
    ptestjson: when False (default), bulky ptest logs are stripped from the
               JSON output via strip_ptestresults()
    ptestlogs: when True, additionally write decoded ptest logs as separate
               text files (ptest-raw.log and ptest-<section>.log) beside fn
    """
    for res in results:
        # An empty testpath key means results go directly into destdir
        if res:
            dst = destdir + "/" + res + "/" + fn
        else:
            dst = destdir + "/" + fn
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        resultsout = results[res]
        if not ptestjson:
            # Keep the JSON small by dropping raw/section log payloads
            resultsout = strip_ptestresults(results[res])
        with open(dst, 'w') as f:
            f.write(json.dumps(resultsout, sort_keys=True, indent=4))
        for res2 in results[res]:
            if ptestlogs and 'result' in results[res][res2]:
                seriesresults = results[res][res2]['result']
                rawlogs = ptestresult_get_rawlogs(seriesresults)
                if rawlogs is not None:
                    # Log files are named by swapping fn out of the dst path
                    with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
                        f.write(rawlogs)
                if 'ptestresult.sections' in seriesresults:
                    for i in seriesresults['ptestresult.sections']:
                        sectionlog = ptestresult_get_log(seriesresults, i)
                        if sectionlog is not None:
                            with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
                                f.write(sectionlog)
186
def git_get_result(repo, tags, configmap=store_map):
    """Load all testresults.json data stored under the given *tags* of a git
    repository and return a merged results dict (same shape as
    load_resultsdata()).

    repo: an object exposing run_cmd(list) -> str for running git commands
          (e.g. the oeqa git helper) -- assumed, not visible from here.
    tags: iterable of git tag names to read results from.
    """
    git_objs = []
    for tag in tags:
        files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
        # Address each blob as "<tag>:<path>" for "git show"
        git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])

    def parse_json_stream(data):
        """Parse multiple concatenated JSON objects"""
        # NOTE(review): this splitter assumes the boundary between two
        # pretty-printed JSON documents in "git show" output appears as a
        # literal '}{' line; it is not a general JSON stream parser.
        objs = []
        json_d = ""
        for line in data.splitlines():
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d))
        return objs

    # Optimize by reading all data with one git command
    results = {}
    for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
        append_resultsdata(results, obj, configmap=configmap)

    return results
213
def test_run_results(results):
    """
    Convenient generator function that iterates over all test runs that have a
    result section.

    Generates a tuple of:
        (result json file path, test run name, test run (dict), test run "results" (dict))
    for each test run that has a "result" section
    """
    for path, runs in results.items():
        for run_name, test_run in runs.items():
            if 'result' in test_run:
                yield path, run_name, test_run, test_run['result']
228
229