# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#

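"""resulttool report plugin.

Aggregates stored test results (including ptest, LTP and LTP POSIX
sections) and prints a text summary rendered from a Jinja2 template.

Typical invocation (a sketch, assuming the standard resulttool frontend
script is on PATH):

    resulttool report <source_dir>
"""
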
import os
import glob
import json
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive


class ResultsTextReport(object):
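    """Aggregate test results per machine and render a plain-text report.

    ptest, LTP and LTP POSIX results are collected into per-machine,
    per-suite tables; overall pass/fail/skip counts are rendered through
    a Jinja2 template by print_test_report().
    """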
    def __init__(self):
        self.ptests = {}
        self.ltptests = {}
        self.ltpposixtests = {}
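        # Buckets used to classify raw status strings: note that XFAIL
        # (expected failure) counts as passed, while XPASS (unexpected
        # pass) and UNKNOWN count as failed, in line with the usual
        # DejaGnu/automake status conventions.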
        self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
                             'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
                             'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}

    def handle_ptest_result(self, k, status, result, machine):
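        """Record a single ptest result under its machine and suite.

        Keys have the form 'ptestresult.<suite>.<test>'; suites whose own
        name contains a dot (e.g. 'glib-2.0') are resolved against
        'ptestresult.sections'. Returns True when the caller should also
        count the entry in the aggregate totals, False for duplicates.
        """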
        if machine not in self.ptests:
            self.ptests[machine] = {}

        if k == 'ptestresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ptestresult.sections']:
                if suite not in self.ptests[machine]:
                    self.ptests[machine][suite] = {
                            'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-',
                            'failed_testcases': [], 'testcases': set(),
                            }
                if 'duration' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
                if 'timeout' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] += " T"
            return True

        # process test result
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return True

        # Handle suites whose name contains a dot, e.g. 'glib-2.0'
        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ptestresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass

        if suite not in self.ptests[machine]:
            self.ptests[machine][suite] = {
                    'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-',
                    'failed_testcases': [], 'testcases': set(),
                    }

        # do not process duplicate results
        if test in self.ptests[machine][suite]['testcases']:
            print("Warning: duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
            return False

        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ptests[machine][suite][tk] += 1
        self.ptests[machine][suite]['testcases'].add(test)
        return True

    def handle_ltptest_result(self, k, status, result, machine):
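        """Record a single LTP result under its machine and suite.

        Mirrors handle_ptest_result(), but duplicates are not tracked, so
        there is no return value for the caller to act on.
        """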
        if machine not in self.ltptests:
            self.ltptests[machine] = {}

        if k == 'ltpresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpresult.sections']:
                if suite not in self.ltptests[machine]:
                    self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
                if 'duration' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
                if 'timeout' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] += " T"
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle suites whose name contains a dot, e.g. 'glib-2.0'
        if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ltptests[machine]:
            self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltptests[machine][suite][tk] += 1

    def handle_ltpposixtest_result(self, k, status, result, machine):
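        """Record a single LTP POSIX result under its machine and suite.

        Same shape as handle_ltptest_result(); these sections carry no
        timeout marker, so only the duration is copied across.
        """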
        if machine not in self.ltpposixtests:
            self.ltpposixtests[machine] = {}

        if k == 'ltpposixresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpposixresult.sections']:
                if suite not in self.ltpposixtests[machine]:
                    self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
                if 'duration' in result['ltpposixresult.sections'][suite]:
                    self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle suites whose name contains a dot, e.g. 'glib-2.0'
        if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpposixresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ltpposixtests[machine]:
            self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltpposixtests[machine][suite][tk] += 1

    def get_aggregated_test_result(self, logger, testresult, machine):
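        """Total up the pass/fail/skip counts for one stored test result.

        Returns a dict shaped like the following (values are illustrative
        only):

            {'passed': 10, 'failed': 1, 'skipped': 2,
             'failed_testcases': ['ptestresult.glib-2.0.some-test']}
        """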
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', [])
        for k in result:
            test_status = result[k].get('status', [])
            if k.startswith("ptestresult."):
                if not self.handle_ptest_result(k, test_status, result, machine):
                    continue
            elif k.startswith("ltpresult."):
                self.handle_ltptest_result(k, test_status, result, machine)
            elif k.startswith("ltpposixresult."):
                self.handle_ltpposixtest_result(k, test_status, result, machine)

            # process result if it was not skipped by a handler
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
        return test_count_report

    def print_test_report(self, template_file_name, test_count_reports):
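        """Render the collected counts through the named Jinja2 template.

        Templates are looked up in the 'template' directory next to this
        file; maxlen precomputes column widths so the text tables line up.
        """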
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        reportvalues = []
        machines = []
        cols = ['passed', 'failed', 'skipped']
        maxlen = {'passed': 0, 'failed': 0, 'skipped': 0, 'result_id': 0, 'testseries': 0, 'ptest': 0, 'ltptest': 0, 'ltpposixtest': 0}
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                # guard against an empty result set to avoid ZeroDivisionError
                vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f') if total_tested else '0')
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
            if line['machine'] not in machines:
                machines.append(line['machine'])
        reporttotalvalues = {}
        for k in cols:
            reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
        reporttotalvalues['count'] = '%s' % len(test_count_reports)
        for machine in self.ptests:
            for ptest in self.ptests[machine]:
                if len(ptest) > maxlen['ptest']:
                    maxlen['ptest'] = len(ptest)
        for machine in self.ltptests:
            for ltptest in self.ltptests[machine]:
                if len(ltptest) > maxlen['ltptest']:
                    maxlen['ltptest'] = len(ltptest)
        for machine in self.ltpposixtests:
            for ltpposixtest in self.ltpposixtests[machine]:
                if len(ltpposixtest) > maxlen['ltpposixtest']:
                    maxlen['ltpposixtest'] = len(ltpposixtest)
        output = template.render(reportvalues=reportvalues,
                                 reporttotalvalues=reporttotalvalues,
                                 havefailed=havefailed,
                                 machines=machines,
                                 ptests=self.ptests,
                                 ltptests=self.ltptests,
                                 ltpposixtests=self.ltpposixtests,
                                 maxlen=maxlen)
        print(output)

    def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
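        """Load results from a directory, git tag or commit and report them.

        raw_test and selected_test_case_only short-circuit the normal
        report and instead dump the matching raw JSON or the single test
        case result.
        """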
        def print_selected_testcase_result(testresults, selected_test_case_only):
            for testsuite in testresults:
                for resultid in testresults[testsuite]:
                    result = testresults[testsuite][resultid]['result']
                    test_case_result = result.get(selected_test_case_only, {})
                    if test_case_result.get('status'):
                        print('Found selected test case result for %s from %s' % (selected_test_case_only, resultid))
                        print(test_case_result['status'])
                    else:
                        print('Could not find selected test case result for %s from %s' % (selected_test_case_only, resultid))
                    if test_case_result.get('log'):
                        print(test_case_result['log'])
        test_count_reports = []
        configmap = resultutils.store_map
        if use_regression_map:
            configmap = resultutils.regression_map
        if commit:
            if tag:
                logger.warning("Ignoring --tag as --commit was specified")
            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
            repo = GitRepo(source_dir)
            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
            rev_index = gitarchive.rev_find(revs, 'commit', commit)
            testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
        elif tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
        else:
            testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
        if raw_test:
            raw_results = {}
            for testsuite in testresults:
                result = testresults[testsuite].get(raw_test, {})
                if result:
                    raw_results[testsuite] = {raw_test: result}
            if raw_results:
                if selected_test_case_only:
                    print_selected_testcase_result(raw_results, selected_test_case_only)
                else:
                    print(json.dumps(raw_results, sort_keys=True, indent=4))
            else:
                print('Could not find raw test result for %s' % raw_test)
            return 0
        if selected_test_case_only:
            print_selected_testcase_result(testresults, selected_test_case_only)
            return 0
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                skip = False
                result = testresults[testsuite][resultid]
                machine = result['configuration']['MACHINE']

                # Check to see if there are already results for these kinds of tests for the machine
                for key in result['result'].keys():
                    testtype = str(key).split('.')[0]
                    if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                        print("Already have test results for %s on %s, skipping %s" % (testtype, machine, resultid))
                        skip = True
                        break
                if skip:
                    continue

                test_count_report = self.get_aggregated_test_result(logger, result, machine)
                test_count_report['machine'] = machine
                test_count_report['testseries'] = result['configuration']['TESTSERIES']
                test_count_report['result_id'] = resultid
                test_count_reports.append(test_count_report)
        self.print_test_report('test_report_full_text.txt', test_count_reports)

def report(args, logger):
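    """Entry point for the 'report' subcommand registered below."""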
    report = ResultsTextReport()
    report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
                            args.raw_test_only, args.selected_test_case_only)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('report', help='summarise test results',
                                         description='print a text-based summary of the test results',
                                         group='analysis')
    parser_build.set_defaults(func=report)
    parser_build.add_argument('source_dir',
                              help='source file/directory/URL containing the test result files to summarise')
    parser_build.add_argument('--branch', '-B', default='master', help='branch to find the commit in')
    parser_build.add_argument('--commit', help='revision to report on')
    parser_build.add_argument('-t', '--tag', default='',
                              help='treat source_dir as a git repository and report on the specified tag')
    parser_build.add_argument('-m', '--use_regression_map', action='store_true',
                              help='use the "regression_map" instead of the default "store_map" when generating the report')
    parser_build.add_argument('-r', '--raw_test_only', default='',
                              help='output only the raw test result for the given test result id')
    parser_build.add_argument('-s', '--selected_test_case_only', default='',
                              help='output the result of the given test case id; if both a test result id and a test '
                                   'case id are given, output the selected test case result from that test result id')