# Yocto Project layer check tool
#
# Copyright (C) 2017 Intel Corporation
#
# SPDX-License-Identifier: MIT
#

import os
import re
import subprocess
from enum import Enum

import bb.tinfoil

class LayerType(Enum):
    """Kind of layer detected by _detect_layer(), plus error pseudo-types."""
    BSP = 0
    DISTRO = 1
    SOFTWARE = 2
    CORE = 3
    ERROR_NO_LAYER_CONF = 98
    ERROR_BSP_DISTRO = 99

def _get_configurations(path):
    """Return the names (without the '.conf' suffix) of every regular
    *.conf file directly inside *path*."""
    configs = []

    for f in os.listdir(path):
        file_path = os.path.join(path, f)
        if os.path.isfile(file_path) and f.endswith('.conf'):
            configs.append(f[:-5]) # strip .conf
    return configs

def _get_layer_collections(layer_path, lconf=None, data=None):
    """Parse a layer's conf/layer.conf and describe its collections.

    Returns a dict mapping each BBFILE_COLLECTIONS entry (falling back to
    the layer directory name when none are declared) to a dict with the
    keys 'priority', 'pattern', 'depends' (space-separated layer names)
    and 'compat'.

    Raises RuntimeError if layer.conf fails to parse.
    """
    import bb.parse
    import bb.data

    if lconf is None:
        lconf = os.path.join(layer_path, 'conf', 'layer.conf')

    if data is None:
        ldata = bb.data.init()
        bb.parse.init_parser(ldata)
    else:
        ldata = data.createCopy()

    ldata.setVar('LAYERDIR', layer_path)
    try:
        ldata = bb.parse.handle(lconf, ldata, include=True, baseconfig=True)
    except Exception as exc:
        # Chain the original parse error for diagnosis. A bare 'except:'
        # here would also swallow KeyboardInterrupt/SystemExit.
        raise RuntimeError("Parsing of layer.conf from layer: %s failed" % layer_path) from exc
    ldata.expandVarref('LAYERDIR')

    collections = (ldata.getVar('BBFILE_COLLECTIONS') or '').split()
    if not collections:
        # Layers are not required to declare a collection name; fall back
        # to the directory name.
        name = os.path.basename(layer_path)
        collections = [name]

    collections = {c: {} for c in collections}
    for name in collections:
        priority = ldata.getVar('BBFILE_PRIORITY_%s' % name)
        pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
        depends = ldata.getVar('LAYERDEPENDS_%s' % name)
        compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
        try:
            depDict = bb.utils.explode_dep_versions2(depends or "")
        except bb.utils.VersionStringException as vse:
            bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (name, str(vse)))

        collections[name]['priority'] = priority
        collections[name]['pattern'] = pattern
        collections[name]['depends'] = ' '.join(depDict.keys())
        collections[name]['compat'] = compat

    return collections

def _detect_layer(layer_path):
    """
    Scans layer directory to detect what type of layer
    is BSP, Distro or Software.

    Returns a dictionary with layer name, type and path.
    """

    layer = {}
    layer_name = os.path.basename(layer_path)

    layer['name'] = layer_name
    layer['path'] = layer_path
    layer['conf'] = {}

    if not os.path.isfile(os.path.join(layer_path, 'conf', 'layer.conf')):
        layer['type'] = LayerType.ERROR_NO_LAYER_CONF
        return layer

    machine_conf = os.path.join(layer_path, 'conf', 'machine')
    distro_conf = os.path.join(layer_path, 'conf', 'distro')

    is_bsp = False
    is_distro = False
    # Initialize both lists up front: the CORE branch below reads them
    # unconditionally, which raised NameError when the layer lacked a
    # conf/machine or conf/distro directory.
    machines = []
    distros = []

    if os.path.isdir(machine_conf):
        machines = _get_configurations(machine_conf)
        if machines:
            is_bsp = True

    if os.path.isdir(distro_conf):
        distros = _get_configurations(distro_conf)
        if distros:
            is_distro = True

    layer['collections'] = _get_layer_collections(layer['path'])

    if layer_name == "meta" and "core" in layer['collections']:
        layer['type'] = LayerType.CORE
        layer['conf']['machines'] = machines
        layer['conf']['distros'] = distros
    elif is_bsp and is_distro:
        # Shipping both machine and distro configurations is ambiguous.
        layer['type'] = LayerType.ERROR_BSP_DISTRO
    elif is_bsp:
        layer['type'] = LayerType.BSP
        layer['conf']['machines'] = machines
    elif is_distro:
        layer['type'] = LayerType.DISTRO
        layer['conf']['distros'] = distros
    else:
        layer['type'] = LayerType.SOFTWARE

    return layer

def detect_layers(layer_directories, no_auto):
    """Detect layers in each of *layer_directories*.

    With *no_auto* set, only the given directories themselves are
    considered. Otherwise each directory tree is walked and every
    subdirectory containing a 'conf' directory is treated as a candidate
    layer. Returns a list of layer dictionaries (see _detect_layer()).
    """
    layers = []

    for directory in layer_directories:
        directory = os.path.realpath(directory)
        if directory[-1] == '/':
            directory = directory[0:-1]

        if no_auto:
            conf_dir = os.path.join(directory, 'conf')
            if os.path.isdir(conf_dir):
                layer = _detect_layer(directory)
                if layer:
                    layers.append(layer)
        else:
            for root, dirs, files in os.walk(directory):
                conf_dir = os.path.join(root, 'conf')
                if os.path.isdir(conf_dir):
                    layer = _detect_layer(root)
                    if layer:
                        layers.append(layer)

    return layers

def _find_layer(depend, layers):
    """Return the layer in *layers* providing collection *depend*, or None."""
    for layer in layers:
        if 'collections' not in layer:
            continue

        for collection in layer['collections']:
            if depend == collection:
                return layer
    return None

def sanity_check_layers(layers, logger):
    """
    Check that we didn't find duplicate collection names, as the layer that will
    be used is non-deterministic. The precise check is duplicate collections
    with different patterns, as the same pattern being repeated won't cause
    problems.
    """
    import collections

    passed = True
    seen = collections.defaultdict(set)
    for layer in layers:
        for name, data in layer.get("collections", {}).items():
            seen[name].add(data["pattern"])

    for name, patterns in seen.items():
        if len(patterns) > 1:
            passed = False
            logger.error("Collection %s found multiple times: %s" % (name, ", ".join(patterns)))
    return passed

def get_layer_dependencies(layer, layers, logger):
    """Resolve the transitive dependencies of *layer* among *layers*.

    Returns a list of layer dictionaries ([] when there are none), or
    None when one or more dependencies could not be found; all missing
    dependencies are logged before returning.
    """
    # Sentinel for recurse_dependencies' 'ret' default. A mutable []
    # default would be shared between calls, and None cannot serve as the
    # sentinel because it is a meaningful (error-marker) value for ret.
    _unset = object()

    def recurse_dependencies(depends, layer, layers, logger, ret=_unset):
        if ret is _unset:
            ret = []
        logger.debug('Processing dependencies %s for layer %s.' % \
                (depends, layer['name']))

        for depend in depends.split():
            # core (oe-core) is suppose to be provided
            if depend == 'core':
                continue

            layer_depend = _find_layer(depend, layers)
            if not layer_depend:
                logger.error('Layer %s depends on %s and isn\'t found.' % \
                        (layer['name'], depend))
                ret = None
                continue

            # We keep processing, even if ret is None, this allows us to report
            # multiple errors at once
            if ret is not None and layer_depend not in ret:
                ret.append(layer_depend)
            else:
                # we might have processed this dependency already, in which case
                # we should not do it again (avoid recursive loop)
                continue

            # Recursively process...
            if 'collections' not in layer_depend:
                continue

            for collection in layer_depend['collections']:
                collect_deps = layer_depend['collections'][collection]['depends']
                if not collect_deps:
                    continue
                ret = recurse_dependencies(collect_deps, layer_depend, layers, logger, ret)

        return ret

    layer_depends = []
    for collection in layer['collections']:
        depends = layer['collections'][collection]['depends']
        if not depends:
            continue

        layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)

    # Note: [] (empty) is allowed, None is not!
    return layer_depends

def add_layer_dependencies(bblayersconf, layer, layers, logger):
    """Append *layer*'s dependencies to *bblayersconf*.

    Returns False when a dependency could not be resolved, True otherwise.
    """
    layer_depends = get_layer_dependencies(layer, layers, logger)
    if layer_depends is None:
        return False
    else:
        add_layers(bblayersconf, layer_depends, logger)

    return True

def add_layers(bblayersconf, layers, logger):
    """Append each of *layers* to BBLAYERS in *bblayersconf*, skipping
    layers whose path 'bitbake-layers show-layers' already reports."""
    # Don't add a layer that is already present.
    added = set()
    output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
    for layer, path, pri in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
        added.add(path)

    with open(bblayersconf, 'a+') as f:
        for layer in layers:
            logger.info('Adding layer %s' % layer['name'])
            name = layer['name']
            path = layer['path']
            if path in added:
                logger.info('%s is already in %s' % (name, bblayersconf))
            else:
                added.add(path)
                f.write("\nBBLAYERS += \"%s\"\n" % path)
    return True

def check_bblayers(bblayersconf, layer_path, logger):
    '''
    If layer_path found in BBLAYERS return True
    '''
    import bb.parse
    import bb.data

    ldata = bb.parse.handle(bblayersconf, bb.data.init(), include=True)
    for bblayer in (ldata.getVar('BBLAYERS') or '').split():
        if os.path.normpath(bblayer) == os.path.normpath(layer_path):
            return True

    return False

def check_command(error_msg, cmd, cwd=None):
    '''
    Run a command under a shell, capture stdout and stderr in a single stream,
    throw an error when command returns non-zero exit code. Returns the output.
    '''

    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
    output, _ = p.communicate()
    if p.returncode:
        msg = "%s\nCommand: %s\nOutput:\n%s" % (error_msg, cmd, output.decode('utf-8'))
        raise RuntimeError(msg)
    return output

def get_signatures(builddir, failsafe=False, machine=None, extravars=None):
    """Run a 'bitbake -S lockedsigs world' in *builddir* and parse the
    resulting locked-sigs.inc.

    Returns (sigs, tune2tasks): a dict mapping '<recipe>:<task>' to its
    signature hash, and a dict mapping tune name (or None) to the list of
    tasks locked under that tune. With *failsafe*, bitbake runs with -k
    and a failed build is tolerated as long as the signatures file was
    written. Raises RuntimeError when no signatures can be loaded.
    """
    # some recipes needs to be excluded like meta-world-pkgdata
    # because a layer can add recipes to a world build so signature
    # will be change
    exclude_recipes = ('meta-world-pkgdata',)

    sigs = {}
    tune2tasks = {}

    cmd = 'BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS BB_SIGNATURE_HANDLER" BB_SIGNATURE_HANDLER="OEBasicHash" '
    if extravars:
        cmd += extravars
        cmd += ' '
    if machine:
        cmd += 'MACHINE=%s ' % machine
    cmd += 'bitbake '
    if failsafe:
        cmd += '-k '
    cmd += '-S lockedsigs world'
    sigs_file = os.path.join(builddir, 'locked-sigs.inc')
    if os.path.exists(sigs_file):
        os.unlink(sigs_file)
    try:
        check_command('Generating signatures failed. This might be due to some parse error and/or general layer incompatibilities.',
                      cmd, builddir)
    except RuntimeError as ex:
        if failsafe and os.path.exists(sigs_file):
            # Ignore the error here. Most likely some recipes active
            # in a world build lack some dependencies. There is a
            # separate test_machine_world_build which exposes the
            # failure.
            pass
        else:
            raise

    sig_regex = re.compile(r"^(?P<task>.*:.*):(?P<hash>.*) .$")
    tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
    current_tune = None
    with open(sigs_file, 'r') as f:
        for line in f:
            line = line.strip()
            t = tune_regex.search(line)
            if t:
                current_tune = t.group('tune')
            s = sig_regex.match(line)
            if s:
                task = s.group('task')
                # task is '<recipe>:<taskname>'; skip excluded recipes.
                # (Unpacking split(':') into exactly two names raised
                # ValueError for task names containing extra colons.)
                if task.split(':')[0] in exclude_recipes:
                    continue

                sigs[task] = s.group('hash')
                tune2tasks.setdefault(current_tune, []).append(task)

    if not sigs:
        raise RuntimeError('Can\'t load signatures from %s' % sigs_file)

    return (sigs, tune2tasks)

def get_depgraph(targets=None, failsafe=False):
    '''
    Returns the dependency graph for the given target(s).
    The dependency graph is taken directly from DepTreeEvent.
    '''
    # Avoid a mutable default argument; None stands in for ['world'].
    if targets is None:
        targets = ['world']
    depgraph = None
    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=False)
        tinfoil.set_event_mask(['bb.event.NoProvider', 'bb.event.DepTreeGenerated', 'bb.command.CommandCompleted'])
        if not tinfoil.run_command('generateDepTreeEvent', targets, 'do_build'):
            raise RuntimeError('starting generateDepTreeEvent failed')
        while True:
            event = tinfoil.wait_event(timeout=1000)
            if event:
                if isinstance(event, bb.command.CommandFailed):
                    raise RuntimeError('Generating dependency information failed: %s' % event.error)
                elif isinstance(event, bb.command.CommandCompleted):
                    break
                elif isinstance(event, bb.event.NoProvider):
                    if failsafe:
                        # The event is informational, we will get information about the
                        # remaining dependencies eventually and thus can ignore this
                        # here like we do in get_signatures(), if desired.
                        continue
                    if event._reasons:
                        raise RuntimeError('Nothing provides %s: %s' % (event._item, event._reasons))
                    else:
                        raise RuntimeError('Nothing provides %s.' % (event._item))
                elif isinstance(event, bb.event.DepTreeGenerated):
                    depgraph = event._depgraph

    if depgraph is None:
        raise RuntimeError('Could not retrieve the depgraph.')
    return depgraph

def compare_signatures(old_sigs, curr_sigs):
    '''
    Compares the result of two get_signatures() calls. Returns None if no
    problems found, otherwise a string that can be used as additional
    explanation in self.fail().
    '''
    # task -> (old signature, new signature)
    sig_diff = {}
    for task in old_sigs:
        if task in curr_sigs and \
           old_sigs[task] != curr_sigs[task]:
            sig_diff[task] = (old_sigs[task], curr_sigs[task])

    if not sig_diff:
        return None

    # Beware, depgraph uses task=<pn>.<taskname> whereas get_signatures()
    # uses <pn>:<taskname>. Need to convert sometimes. The output follows
    # the convention from get_signatures() because that seems closer to
    # normal bitbake output.
    def sig2graph(task):
        pn, taskname = task.rsplit(':', 1)
        return pn + '.' + taskname
    def graph2sig(task):
        pn, taskname = task.rsplit('.', 1)
        return pn + ':' + taskname
    depgraph = get_depgraph(failsafe=True)
    depends = depgraph['tdepends']

    # If a task A has a changed signature, but none of its
    # dependencies, then we need to report it because it is
    # the one which introduces a change. Any task depending on
    # A (directly or indirectly) will also have a changed
    # signature, but we don't need to report it. It might have
    # its own changes, which will become apparent once the
    # issues that we do report are fixed and the test gets run
    # again.
    sig_diff_filtered = []
    for task, (old_sig, new_sig) in sig_diff.items():
        deps_tainted = False
        for dep in depends.get(sig2graph(task), ()):
            if graph2sig(dep) in sig_diff:
                deps_tainted = True
                break
        if not deps_tainted:
            sig_diff_filtered.append((task, old_sig, new_sig))

    msg = []
    msg.append('%d signatures changed, initial differences (first hash before, second after):' %
               len(sig_diff))
    for diff in sorted(sig_diff_filtered):
        recipe, taskname = diff[0].rsplit(':', 1)
        cmd = 'bitbake-diffsigs --task %s %s --signature %s %s' % \
              (recipe, taskname, diff[1], diff[2])
        msg.append('   %s: %s -> %s' % diff)
        msg.append('      %s' % cmd)
        try:
            output = check_command('Determining signature difference failed.',
                                   cmd).decode('utf-8')
        except RuntimeError as error:
            output = str(error)
        if output:
            msg.extend(['   ' + line for line in output.splitlines()])
            msg.append('')
    return '\n'.join(msg)