# Copyright (c) 2016, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance test base classes and functionality"""
import json
import logging
import os
import re
import resource
import socket
import shutil
import time
import unittest
import xml.etree.ElementTree as ET
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import partial
from multiprocessing import Process
from multiprocessing import SimpleQueue
from xml.dom import minidom

import oe.path
from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
from oeqa.utils.git import GitError, GitRepo

# Get logger for this module
log = logging.getLogger('build-perf')

# Our own version of runCmd which does not raise AssertionErrors, which would
# cause errors to be interpreted as failures
runCmd2 = partial(runCmd, assert_error=False, limit_exc_output=40)


class KernelDropCaches(object):
    """Container of the functions for dropping kernel caches"""
    sudo_passwd = None

    @classmethod
    def check(cls):
        """Check permissions for dropping kernel caches"""
        from getpass import getpass
        from locale import getdefaultlocale
        cmd = ['sudo', '-k', '-n', 'tee', '/proc/sys/vm/drop_caches']
        ret = runCmd2(cmd, ignore_status=True, data=b'0')
        if ret.output.startswith('sudo:'):
            pass_str = getpass(
                "\nThe script requires sudo access to drop caches between "
                "builds (echo 3 > /proc/sys/vm/drop_caches).\n"
                "Please enter your sudo password: ")
            # Fall back to UTF-8 if the locale encoding cannot be determined
            cls.sudo_passwd = bytes(pass_str, getdefaultlocale()[1] or 'utf-8')

    @classmethod
    def drop(cls):
        """Drop kernel caches"""
        cmd = ['sudo', '-k']
        if cls.sudo_passwd:
            cmd.append('-S')
            input_data = cls.sudo_passwd + b'\n'
        else:
            cmd.append('-n')
            input_data = b''
        cmd += ['tee', '/proc/sys/vm/drop_caches']
        input_data += b'3'
        runCmd2(cmd, data=input_data)
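
    # Illustrative usage (assumed calling convention, not from the original
    # docs): call check() once up front, then drop() between timed builds:
    #   KernelDropCaches.check()  # may prompt for the sudo password
    #   KernelDropCaches.drop()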


def str_to_fn(string):
    """Convert string to a sanitized filename"""
    # NOTE: re.LOCALE cannot be used with str patterns in Python 3
    return re.sub(r'(\W+)', '-', string)
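# Example (illustrative): str_to_fn('build foo/bar') -> 'build-foo-bar'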


class ResultsJsonEncoder(json.JSONEncoder):
    """Extended encoder for build perf test results"""
    unix_epoch = datetime.utcfromtimestamp(0)

    def default(self, obj):
        """Encoder for our types"""
        if isinstance(obj, datetime):
            # NOTE: we assume that all timestamps are in UTC time
            return (obj - self.unix_epoch).total_seconds()
        if isinstance(obj, timedelta):
            return obj.total_seconds()
        return json.JSONEncoder.default(self, obj)
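
    # Example (illustrative):
    #   json.dumps({'elapsed': timedelta(seconds=90)}, cls=ResultsJsonEncoder)
    #   -> '{"elapsed": 90.0}'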


class BuildPerfTestResult(unittest.TextTestResult):
    """Test result class for collecting results of the individual tests"""
    # List of test cases to run
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)

        self.out_dir = out_dir
        self.hostname = socket.gethostname()
        self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
        self.start_time = self.elapsed_time = None
        self.successes = []

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        """Record results from crashed test"""
        test.err = err
        super(BuildPerfTestResult, self).addError(test, err)

    def addFailure(self, test, err):
        """Record results from failed test"""
        test.err = err
        super(BuildPerfTestResult, self).addFailure(test, err)

    def addExpectedFailure(self, test, err):
        """Record results from expectedly failed test"""
        test.err = err
        super(BuildPerfTestResult, self).addExpectedFailure(test, err)

    def startTest(self, test):
        """Pre-test hook"""
        test.base_dir = self.out_dir
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook"""
        self.elapsed_time = datetime.utcnow() - self.start_time

    def all_results(self):
        """Return all test results as (status, test, message) tuples,
        sorted by test start time"""
        compound = [('SUCCESS', t, None) for t in self.successes] + \
                   [('FAILURE', t, m) for t, m in self.failures] + \
                   [('ERROR', t, m) for t, m in self.errors] + \
                   [('EXPECTED_FAILURE', t, m) for t, m in self.expectedFailures] + \
                   [('UNEXPECTED_SUCCESS', t, None) for t in self.unexpectedSuccesses] + \
                   [('SKIPPED', t, m) for t, m in self.skipped]
        return sorted(compound, key=lambda info: info[1].start_time)


    def write_buildstats_json(self):
        """Write buildstats file"""
        buildstats = OrderedDict()
        for _, test, _ in self.all_results():
            for key, val in test.buildstats.items():
                buildstats[test.name + '.' + key] = val
        with open(os.path.join(self.out_dir, 'buildstats.json'), 'w') as fobj:
            json.dump(buildstats, fobj, cls=ResultsJsonEncoder)


    def write_results_json(self):
        """Write test results into a json-formatted file"""
        results = OrderedDict([('tester_host', self.hostname),
                               ('start_time', self.start_time),
                               ('elapsed_time', self.elapsed_time),
                               ('tests', OrderedDict())])

        for status, test, reason in self.all_results():
            test_result = OrderedDict([('name', test.name),
                                       ('description', test.shortDescription()),
                                       ('status', status),
                                       ('start_time', test.start_time),
                                       ('elapsed_time', test.elapsed_time),
                                       ('measurements', test.measurements)])
            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
                test_result['message'] = str(test.err[1])
                test_result['err_type'] = test.err[0].__name__
                test_result['err_output'] = reason
            elif reason:
                test_result['message'] = reason

            results['tests'][test.name] = test_result

        with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
            json.dump(results, fobj, indent=4,
                      cls=ResultsJsonEncoder)
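
    # (Illustrative) overall shape of the generated results.json:
    #   {"tester_host": "...", "start_time": <seconds since epoch>,
    #    "elapsed_time": <seconds>,
    #    "tests": {"<name>": {"name": "...", "description": "...",
    #                         "status": "...", "start_time": ...,
    #                         "elapsed_time": ..., "measurements": {...}}}}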

    def write_results_xml(self):
        """Write test results into a JUnit XML file"""
        top = ET.Element('testsuites')
        suite = ET.SubElement(top, 'testsuite')
        suite.set('name', 'oeqa.buildperf')
        suite.set('timestamp', self.start_time.isoformat())
        suite.set('time', str(self.elapsed_time.total_seconds()))
        suite.set('hostname', self.hostname)
        suite.set('failures', str(len(self.failures) + len(self.expectedFailures)))
        suite.set('errors', str(len(self.errors)))
        suite.set('skipped', str(len(self.skipped)))

        test_cnt = 0
        for status, test, reason in self.all_results():
            test_cnt += 1
            testcase = ET.SubElement(suite, 'testcase')
            testcase.set('classname', test.__module__ + '.' + test.__class__.__name__)
            testcase.set('name', test.name)
            testcase.set('description', test.shortDescription())
            testcase.set('timestamp', test.start_time.isoformat())
            testcase.set('time', str(test.elapsed_time.total_seconds()))
            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
                if status in ('FAILURE', 'EXPECTED_FAILURE'):
                    result = ET.SubElement(testcase, 'failure')
                else:
                    result = ET.SubElement(testcase, 'error')
                result.set('message', str(test.err[1]))
                result.set('type', test.err[0].__name__)
                result.text = reason
            elif status == 'SKIPPED':
                result = ET.SubElement(testcase, 'skipped')
                result.text = reason
            elif status not in ('SUCCESS', 'UNEXPECTED_SUCCESS'):
                raise TypeError("BUG: invalid test status '%s'" % status)

            for data in test.measurements.values():
                measurement = ET.SubElement(testcase, data['type'])
                measurement.set('name', data['name'])
                measurement.set('legend', data['legend'])
                vals = data['values']
                if data['type'] == BuildPerfTestCase.SYSRES:
                    ET.SubElement(measurement, 'time',
                                  timestamp=vals['start_time'].isoformat()).text = \
                        str(vals['elapsed_time'].total_seconds())
                    attrib = dict((k, str(v)) for k, v in vals['iostat'].items())
                    ET.SubElement(measurement, 'iostat', attrib=attrib)
                    attrib = dict((k, str(v)) for k, v in vals['rusage'].items())
                    ET.SubElement(measurement, 'rusage', attrib=attrib)
                elif data['type'] == BuildPerfTestCase.DISKUSAGE:
                    ET.SubElement(measurement, 'size').text = str(vals['size'])
                else:
                    raise TypeError('BUG: unsupported measurement type')

        suite.set('tests', str(test_cnt))

        # Use minidom for pretty-printing
        dom_doc = minidom.parseString(ET.tostring(top, 'utf-8'))
        with open(os.path.join(self.out_dir, 'results.xml'), 'w') as fobj:
            dom_doc.writexml(fobj, addindent='  ', newl='\n', encoding='utf-8')
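
    # (Illustrative) fragment of the generated results.xml:
    #   <testsuites>
    #     <testsuite name="oeqa.buildperf" hostname="..." tests="..." ...>
    #       <testcase classname="..." name="test1" ...>
    #         <sysres name="..." legend="...">
    #           <time timestamp="...">123.45</time>
    #           <iostat rchar="..." .../>
    #           <rusage ru_utime="..." .../>
    #         </sysres>
    #       </testcase>
    #     </testsuite>
    #   </testsuites>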


class BuildPerfTestCase(unittest.TestCase):
    """Base class for build performance tests"""
    SYSRES = 'sysres'
    DISKUSAGE = 'diskusage'
    build_target = None

    def __init__(self, *args, **kwargs):
        super(BuildPerfTestCase, self).__init__(*args, **kwargs)
        self.name = self._testMethodName
        self.base_dir = None
        self.start_time = None
        self.elapsed_time = None
        self.measurements = OrderedDict()
        self.buildstats = OrderedDict()
        # self.err is supposed to be a tuple from sys.exc_info()
        self.err = None
        self.bb_vars = get_bb_vars()
        # TODO: remove 'times' and 'sizes' arrays when globalres support is
        # removed
        self.times = []
        self.sizes = []

    @property
    def tmp_dir(self):
        """Per-test temporary directory under the output directory"""
        return os.path.join(self.base_dir, self.name + '.tmp')

    def shortDescription(self):
        return super(BuildPerfTestCase, self).shortDescription() or ""

    def setUp(self):
        """Set-up fixture for each test"""
        if not os.path.isdir(self.tmp_dir):
            os.mkdir(self.tmp_dir)
        if self.build_target:
            self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])

    def tearDown(self):
        """Tear-down fixture for each test"""
        if os.path.isdir(self.tmp_dir):
            shutil.rmtree(self.tmp_dir)

    def run(self, *args, **kwargs):
        """Run test"""
        self.start_time = datetime.now()
        super(BuildPerfTestCase, self).run(*args, **kwargs)
        self.elapsed_time = datetime.now() - self.start_time

    def run_cmd(self, cmd):
        """Convenience method for running a command"""
        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Running command: %s", cmd_str)
        try:
            runCmd2(cmd)
        except CommandError as err:
            log.error("Command failed: %s", err.retcode)
            raise

    def _append_measurement(self, measurement):
        """Simple helper for adding measurement results"""
        if measurement['name'] in self.measurements:
            raise ValueError('BUG: two measurements with the same name in {}'.format(
                self.__class__.__name__))
        self.measurements[measurement['name']] = measurement

    def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
        """Measure system resource usage of a command"""
        def _worker(data_q, cmd, **kwargs):
            """Worker process for measuring resources"""
            try:
                start_time = datetime.now()
                ret = runCmd2(cmd, **kwargs)
                etime = datetime.now() - start_time
                rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
                iostat = OrderedDict()
                with open('/proc/{}/io'.format(os.getpid())) as fobj:
                    for line in fobj.readlines():
                        key, val = line.split(':')
                        iostat[key] = int(val)
                rusage = OrderedDict()
                # Skip unused fields (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
                # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
                for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                            'ru_majflt', 'ru_inblock', 'ru_oublock',
                            'ru_nvcsw', 'ru_nivcsw']:
                    rusage[key] = getattr(rusage_struct, key)
                data_q.put({'ret': ret,
                            'start_time': start_time,
                            'elapsed_time': etime,
                            'rusage': rusage,
                            'iostat': iostat})
            except Exception as err:
                data_q.put(err)

        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Timing command: %s", cmd_str)
        data_q = SimpleQueue()
        try:
            proc = Process(target=_worker, args=(data_q, cmd,))
            proc.start()
            data = data_q.get()
            proc.join()
            if isinstance(data, Exception):
                raise data
        except CommandError:
            log.error("Command '%s' failed", cmd_str)
            raise
        etime = data['elapsed_time']

        measurement = OrderedDict([('type', self.SYSRES),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('start_time', data['start_time']),
                                             ('elapsed_time', etime),
                                             ('rusage', data['rusage']),
                                             ('iostat', data['iostat'])])
        if save_bs:
            self.save_buildstats(name)

        self._append_measurement(measurement)

        # Append to 'times' array for globalres log
        e_sec = etime.total_seconds()
        self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
                                                        int((e_sec % 3600) / 60),
                                                        e_sec % 60))
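
    # Illustrative call from a subclass (hypothetical build target):
    #   self.measure_cmd_resources(['bitbake', 'core-image-sato'], 'build',
    #                              'bitbake core-image-sato', save_bs=True)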

    def measure_disk_usage(self, path, name, legend, apparent_size=False):
        """Estimate disk usage of a file or directory"""
        cmd = ['du', '-s', '--block-size', '1024']
        if apparent_size:
            cmd.append('--apparent-size')
        cmd.append(path)

        ret = runCmd2(cmd)
        size = int(ret.output.split()[0])
        log.debug("Size of path %s is %s KiB", path, size)
        measurement = OrderedDict([('type', self.DISKUSAGE),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('size', size)])
        self._append_measurement(measurement)
        # Append to 'sizes' array for globalres log
        self.sizes.append(str(size))
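
    # Illustrative call (hypothetical arguments):
    #   self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir size')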

    def save_buildstats(self, measurement_name):
        """Save buildstats"""
        def split_nevr(nevr):
            """Split name and version information from recipe "nevr" string"""
            n_e_v, revision = nevr.rsplit('-', 1)
            match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
                             n_e_v)
            if not match:
                # If we're not able to parse a version starting with a number, just
                # take the part after last dash
                match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
                                 n_e_v)
            name = match.group('name')
            version = match.group('version')
            epoch = match.group('epoch')
            return name, epoch, version, revision
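
        # Examples (illustrative):
        #   split_nevr('gcc-2_9.3.0-r0')    -> ('gcc', '2', '9.3.0', 'r0')
        #   split_nevr('busybox-1.36.1-r0') -> ('busybox', None, '1.36.1', 'r0')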

        def bs_to_json(filename):
            """Convert (task) buildstats file into json format"""
            bs_json = OrderedDict()
            iostat = OrderedDict()
            rusage = OrderedDict()
            with open(filename) as fobj:
                for line in fobj.readlines():
                    key, val = line.split(':', 1)
                    val = val.strip()
                    if key == 'Started':
                        start_time = datetime.utcfromtimestamp(float(val))
                        bs_json['start_time'] = start_time
                    elif key == 'Ended':
                        end_time = datetime.utcfromtimestamp(float(val))
                    elif key.startswith('IO '):
                        split = key.split()
                        iostat[split[1]] = int(val)
                    elif key.find('rusage') >= 0:
                        split = key.split()
                        ru_key = split[-1]
                        if ru_key in ('ru_stime', 'ru_utime'):
                            val = float(val)
                        else:
                            val = int(val)
                        rusage[ru_key] = rusage.get(ru_key, 0) + val
                    elif key == 'Status':
                        bs_json['status'] = val
            bs_json['elapsed_time'] = end_time - start_time
            bs_json['rusage'] = rusage
            bs_json['iostat'] = iostat
            return bs_json
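
        # (Illustrative) bs_to_json() parses a buildstats task file of
        # "key: value" lines, roughly of the form (values are made up):
        #   Started: 1500000000.00
        #   Ended: 1500000123.45
        #   IO rchar: 12345
        #   Child rusage ru_utime: 1.23
        #   Status: PASSED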

        log.info('Saving buildstats in JSON format')
        bs_dirs = sorted(os.listdir(self.bb_vars['BUILDSTATS_BASE']))
        if len(bs_dirs) > 1:
            log.warning("Multiple buildstats found for test %s, only "
                        "archiving the last one", self.name)
        bs_dir = os.path.join(self.bb_vars['BUILDSTATS_BASE'], bs_dirs[-1])

        buildstats = []
        for fname in os.listdir(bs_dir):
            recipe_dir = os.path.join(bs_dir, fname)
            if not os.path.isdir(recipe_dir) or fname == "reduced_proc_pressure":
                continue
            name, epoch, version, revision = split_nevr(fname)
            recipe_bs = OrderedDict((('name', name),
                                     ('epoch', epoch),
                                     ('version', version),
                                     ('revision', revision),
                                     ('tasks', OrderedDict())))
            for task in os.listdir(recipe_dir):
                recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
                                                                   task))
            buildstats.append(recipe_bs)

        self.buildstats[measurement_name] = buildstats

    def rm_tmp(self):
        """Cleanup temporary/intermediate files and directories"""
        log.debug("Removing temporary and cache files")
        for name in ['bitbake.lock', 'cache/sanity_info',
                     self.bb_vars['TMPDIR']]:
            oe.path.remove(name, recurse=True)

    def rm_sstate(self):
        """Remove sstate directory"""
        log.debug("Removing sstate-cache")
        oe.path.remove(self.bb_vars['SSTATE_DIR'], recurse=True)

    def rm_cache(self):
        """Drop bitbake caches"""
        oe.path.remove(self.bb_vars['PERSISTENT_DIR'], recurse=True)

    @staticmethod
    def sync():
        """Sync and drop kernel caches"""
        runCmd2('bitbake -m', ignore_status=True)
        log.debug("Syncing and dropping kernel caches")
        KernelDropCaches.drop()
        os.sync()
        # Wait a bit for all the dirty blocks to be written onto disk
        time.sleep(3)


class BuildPerfTestLoader(unittest.TestLoader):
    """Test loader for build performance tests"""
    # Disable automatic sorting of test methods
    sortTestMethodsUsing = None


class BuildPerfTestRunner(unittest.TextTestRunner):
    """Test runner for build performance tests"""

    def __init__(self, out_dir, *args, **kwargs):
        super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
        self.out_dir = out_dir

    def _makeResult(self):
        return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
                                   self.verbosity)
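
# Illustrative wiring (assumption; follows stock unittest conventions):
#   loader = BuildPerfTestLoader()
#   suite = loader.loadTestsFromModule(my_test_module)
#   runner = BuildPerfTestRunner('/path/to/results')
#   result = runner.run(suite)
#   result.write_results_json()
#   result.write_results_xml()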
505