xref: /openbmc/openbmc/poky/meta/lib/oeqa/core/runner.py (revision 220dafdb)
1#
2# Copyright (C) 2016 Intel Corporation
3#
4# SPDX-License-Identifier: MIT
5#
6
7import os
8import time
9import unittest
10import logging
11import re
12import json
13import sys
14
15from unittest import TextTestResult as _TestResult
16from unittest import TextTestRunner as _TestRunner
17
class OEStreamLogger(object):
    """File-like stream that forwards unittest runner output to a logger.

    Chunks containing '...' (unittest's per-test progress output) are
    buffered and emitted together with the text that follows, so a test
    name and its verdict land in a single log record.
    """

    def __init__(self, logger):
        self.logger = logger
        self.buffer = ""

    def write(self, msg):
        # Ignore bare newlines and single characters.
        if len(msg) <= 1 or msg[0] == '\n':
            return
        if '...' in msg:
            # Start (or extend) a pending progress line.
            self.buffer += msg
        elif self.buffer:
            # Complete the pending progress line and emit it as one record.
            self.logger.log(logging.INFO, self.buffer + msg)
            self.buffer = ""
        else:
            self.logger.log(logging.INFO, msg)

    def flush(self):
        for handler in self.logger.handlers:
            handler.flush()
37
class OETestResult(_TestResult):
    """Result collector for an OEQA test run.

    Extends unittest's TextTestResult with per-test timing, captured
    stdout/stderr, 'extraresults' data supplied by tests, and helpers to
    log a run summary and dump results to JSON.
    """

    def __init__(self, tc, *args, **kwargs):
        """tc is the OEQA test context; remaining args go to TextTestResult."""
        super(OETestResult, self).__init__(*args, **kwargs)

        # (testcase, None) tuples, mirroring the shape of the inherited
        # failures/errors lists so all result lists can be handled alike
        self.successes = []
        # test id -> wall-clock start/end timestamps
        self.starttime = {}
        self.endtime = {}
        # test id -> extra progress string logged after the test stops
        # (populated externally, e.g. by concurrencytest)
        self.progressinfo = {}
        # aggregated extra result data collected from individual tests
        self.extraresults = {}
        # ids of tests whose failure message was already echoed in stopTest
        self.shownmsg = []

        # Inject into tc so that TestDepends decorator can see results
        tc.results = self

        self.tc = tc

        # stdout and stderr for each test case
        self.logged_output = {}

    def startTest(self, test):
        """Record the test's start time (unless already set) and delegate."""
        # May have been set by concurrencytest
        if test.id() not in self.starttime:
            self.starttime[test.id()] = time.time()
        super(OETestResult, self).startTest(test)

    def stopTest(self, test):
        """Record end time, save captured output, echo any failure message."""
        self.endtime[test.id()] = time.time()
        # With unittest output buffering enabled, sys.stdout/sys.stderr are
        # StringIO objects at this point, so their contents can be saved.
        if self.buffer:
            self.logged_output[test.id()] = (
                    sys.stdout.getvalue(), sys.stderr.getvalue())
        super(OETestResult, self).stopTest(test)
        if test.id() in self.progressinfo:
            self.tc.logger.info(self.progressinfo[test.id()])

        # Print the errors/failures early to aid/speed debugging, its a pain
        # to wait until selftest finishes to see them.
        for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
            for (scase, msg) in getattr(self, t):
                if test.id() == scase.id():
                    self.tc.logger.info(str(msg))
                    self.shownmsg.append(test.id())
                    break

    def logSummary(self, component, context_msg=''):
        """Log an overall pass/fail summary with counts and elapsed time.

        component and context_msg only affect the log text.
        """
        # _run_start_time/_run_end_time are expected on the test context —
        # presumably set around the run by the caller; not visible here.
        elapsed_time = self.tc._run_end_time - self.tc._run_start_time
        self.tc.logger.info("SUMMARY:")
        self.tc.logger.info("%s (%s) - Ran %d test%s in %.3fs" % (component,
            context_msg, self.testsRun, self.testsRun != 1 and "s" or "",
            elapsed_time))

        if self.wasSuccessful():
            msg = "%s - OK - All required tests passed" % component
        else:
            msg = "%s - FAIL - Required tests failed" % component
        msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
        self.tc.logger.info(msg)

    def _getTestResultDetails(self, case):
        """Return (status string, message) for the given test case.

        Searches the result lists in result_types order. Also matches
        module/class-level setup failures, where unittest reports the case
        as a 'setUpModule (...)' / 'setUpClass (...)' string instead of a
        test object. Returns ('UNKNOWN', None) if the case is in no list.
        """
        result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
                        'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED',
                        'unexpectedSuccesses' : 'PASSED'}

        for rtype in result_types:
            found = False
            for resultclass in getattr(self, rtype):
                # unexpectedSuccesses are just lists, not lists of tuples
                if isinstance(resultclass, tuple):
                    scase, msg = resultclass
                else:
                    scase, msg = resultclass, None
                if case.id() == scase.id():
                    found = True
                    break
                scase_str = str(scase.id())

                # When fails at module or class level the class name is passed as string
                # so figure out to see if match
                m = re.search(r"^setUpModule \((?P<module_name>.*)\).*$", scase_str)
                if m:
                    if case.__class__.__module__ == m.group('module_name'):
                        found = True
                        break

                m = re.search(r"^setUpClass \((?P<class_name>.*)\).*$", scase_str)
                if m:
                    class_name = "%s.%s" % (case.__class__.__module__,
                                            case.__class__.__name__)

                    if class_name == m.group('class_name'):
                        found = True
                        break

            if found:
                return result_types[rtype], msg

        return 'UNKNOWN', None

    def extractExtraResults(self, test, details = None):
        """Merge any 'extraresults' data from details or the test into self.

        details (if given) takes precedence over a test-level attribute.
        """
        extraresults = None
        if details is not None and "extraresults" in details:
            extraresults = details.get("extraresults", {})
        elif hasattr(test, "extraresults"):
            extraresults = test.extraresults

        if extraresults is not None:
            for k, v in extraresults.items():
                # handle updating already existing entries (e.g. ptestresults.sections)
                if k in self.extraresults:
                    self.extraresults[k].update(v)
                else:
                    self.extraresults[k] = v

    def addError(self, test, *args, details = None):
        """Capture extra results, then record the error as usual."""
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addError(test, *args)

    def addFailure(self, test, *args, details = None):
        """Capture extra results, then record the failure as usual."""
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addFailure(test, *args)

    def addSuccess(self, test, details = None):
        #Added so we can keep track of successes too
        self.successes.append((test, None))
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addSuccess(test)

    def addExpectedFailure(self, test, *args, details = None):
        """Capture extra results, then record the expected failure as usual."""
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addExpectedFailure(test, *args)

    def addUnexpectedSuccess(self, test, details = None):
        """Capture extra results, then record the unexpected success."""
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addUnexpectedSuccess(test)

    def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
            dump_streams=False):
        """Log a per-test RESULTS listing and optionally dump them as JSON.

        Builds one report dict per registered case (status, log, duration,
        tags, captured streams) and, when json_file_dir is set, writes them
        via OETestResultJSONHelper under the given result_id/configuration.
        """

        result = self.extraresults
        logs = {}
        if hasattr(self.tc, "extraresults"):
            result.update(self.tc.extraresults)

        for case_name in self.tc._registry['cases']:
            case = self.tc._registry['cases'][case_name]

            (status, log) = self._getTestResultDetails(case)

            t = ""
            duration = 0
            if case.id() in self.starttime and case.id() in self.endtime:
                duration = self.endtime[case.id()] - self.starttime[case.id()]
                t = " (" + "{0:.2f}".format(duration) + "s)"

            if status not in logs:
                logs[status] = []
            logs[status].append("RESULTS - %s: %s%s" % (case.id(), status, t))
            report = {'status': status}
            if log:
                report['log'] = log
                # Class setup failures wouldn't enter stopTest so would never display
                if case.id() not in self.shownmsg:
                    self.tc.logger.info("Failure (%s) for %s:\n" % (status, case.id()) + log)

            if duration:
                report['duration'] = duration

            alltags = []
            # pull tags from the case class
            if hasattr(case, "__oeqa_testtags"):
                alltags.extend(getattr(case, "__oeqa_testtags"))
            # pull tags from the method itself
            test_name = case._testMethodName
            if hasattr(case, test_name):
                method = getattr(case, test_name)
                if hasattr(method, "__oeqa_testtags"):
                    alltags.extend(getattr(method, "__oeqa_testtags"))
            if alltags:
                report['oetags'] = alltags

            if dump_streams and case.id() in self.logged_output:
                (stdout, stderr) = self.logged_output[case.id()]
                report['stdout'] = stdout
                report['stderr'] = stderr
            result[case.id()] = report

        self.tc.logger.info("RESULTS:")
        for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
            if i not in logs:
                continue
            for l in logs[i]:
                self.tc.logger.info(l)

        if json_file_dir:
            tresultjsonhelper = OETestResultJSONHelper()
            tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)

    def wasSuccessful(self):
        # Override as unexpected successes aren't failures for us
        return (len(self.failures) == len(self.errors) == 0)

    def hasAnyFailingTest(self):
        # Account for expected failures, which wasSuccessful() ignores
        return not self.wasSuccessful() or len(self.expectedFailures)
241
class OEListTestsResult(object):
    """Minimal result object returned when tests are only listed, not run."""

    def wasSuccessful(self):
        # Listing tests can never fail.
        return True
245
class OETestRunner(_TestRunner):
    """TextTestRunner variant that reports through the OEQA context logger."""

    streamLoggerClass = OEStreamLogger

    def __init__(self, tc, *args, **kwargs):
        # Route all runner output through the test context's logger.
        kwargs['stream'] = self.streamLoggerClass(tc.logger)
        super(OETestRunner, self).__init__(*args, **kwargs)
        self.tc = tc
        self.resultclass = OETestResult

    def _makeResult(self):
        # Hand the test context through to the result object.
        return self.resultclass(self.tc, self.stream, self.descriptions,
                self.verbosity)

    def _walk_suite(self, suite, func):
        # Depth-first traversal of the suite tree; func(logger, case) is
        # applied to every test case and self._walked_cases counts them.
        for entry in suite:
            if isinstance(entry, unittest.suite.TestSuite):
                if len(entry._tests):
                    self._walk_suite(entry, func)
            elif isinstance(entry, unittest.case.TestCase):
                func(self.tc.logger, entry)
                self._walked_cases += 1

    def _list_tests_name(self, suite):
        # One line per test id, with any oeqa tags in parentheses.
        def _show_case(logger, case):
            tags = getattr(case, '__oeqa_testtags', [])
            if tags:
                logger.info("%s (%s)" % (case.id(), ",".join(tags)))
            else:
                logger.info("%s" % (case.id()))

        self._walked_cases = 0
        self.tc.logger.info("Listing all available tests:")
        self.tc.logger.info("test (tags)")
        self.tc.logger.info("-" * 80)
        self._walk_suite(suite, _show_case)
        self.tc.logger.info("-" * 80)
        self.tc.logger.info("Total found:\t%s" % self._walked_cases)

    def _list_tests_class(self, suite):
        self._walked_cases = 0

        # Remember the last module/class printed so headers appear once.
        last = {}
        def _show_class(logger, case):
            if last.get('module') != case.__module__:
                last['module'] = case.__module__
                logger.info(last['module'])

            if last.get('class') != case.__class__.__name__:
                last['class'] = case.__class__.__name__
                logger.info(" -- %s" % last['class'])

            logger.info(" -- -- %s" % case._testMethodName)

        self.tc.logger.info("Listing all available test classes:")
        self._walk_suite(suite, _show_class)

    def _list_tests_module(self, suite):
        self._walked_cases = 0

        # Each module is reported once; underscore-prefixed ones are hidden.
        seen = []
        def _show_module(logger, case):
            module = case.__module__
            if module in seen:
                return
            if module.startswith('_'):
                logger.info("%s (hidden)" % module)
            else:
                logger.info(module)
            seen.append(module)

        self.tc.logger.info("Listing all available test modules:")
        self._walk_suite(suite, _show_module)

    def list_tests(self, suite, display_type):
        """List the suite's tests by 'name', 'class' or 'module'."""
        listers = {
            'name': self._list_tests_name,
            'class': self._list_tests_class,
            'module': self._list_tests_module,
        }
        if display_type in listers:
            listers[display_type](suite)

        return OEListTestsResult()
331
class OETestResultJSONHelper(object):
    """Serialise test results to a shared testresults.json file.

    Multiple result sets accumulate in the same file, keyed by result_id.
    When bitbake is importable its utilities create the directory and a
    lock file serialises concurrent writers; otherwise plain os.makedirs
    is used and no locking is done.
    """

    testresult_filename = 'testresults.json'

    def _get_existing_testresults_if_available(self, write_dir):
        """Return the parsed contents of an existing results file, or {}."""
        testresults = {}
        # Don't shadow the 'file' builtin.
        file_path = os.path.join(write_dir, self.testresult_filename)
        if os.path.exists(file_path):
            with open(file_path, "r") as f:
                testresults = json.load(f)
        return testresults

    def _write_file(self, write_dir, file_name, file_content):
        """Write file_content to file_name inside write_dir."""
        file_path = os.path.join(write_dir, file_name)
        with open(file_path, 'w') as the_file:
            the_file.write(file_content)

    def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
        """Merge test_result into write_dir's JSON results file.

        The entry is stored under result_id as
        {'configuration': configuration, 'result': test_result}; existing
        entries for other ids are preserved.
        """
        try:
            import bb
            has_bb = True
            bb.utils.mkdirhier(write_dir)
            lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
        except ImportError:
            has_bb = False
            os.makedirs(write_dir, exist_ok=True)
        try:
            test_results = self._get_existing_testresults_if_available(write_dir)
            test_results[result_id] = {'configuration': configuration, 'result': test_result}
            json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
            self._write_file(write_dir, self.testresult_filename, json_testresults)
        finally:
            # Previously the lock leaked if reading/merging/writing raised;
            # always release it.
            if has_bb:
                bb.utils.unlockfile(lf)
364