#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

SSTATE_VERSION = "14"

SSTATE_ZSTD_CLEVEL ??= "8"

SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"

def generate_sstatefn(spec, hash, taskname, siginfo, d):
    """
    Return the relative path of the sstate archive for a task.

    The result has the form:
        <hash[0:2]>/<hash[2:4]>/<spec><hash>_<taskname><extension>
    where extension is ".tar.zst", or ".tar.zst.siginfo" when siginfo is set.
    If the resulting filename would exceed the common 255-character filename
    limit, the informational fields of spec (components 2, 3, 4) are truncated
    to fit; a name that still does not fit is a fatal error.
    """
    if taskname is None:
        return ""
    extension = ".tar.zst"
    # 8 chars reserved for siginfo
    limit = 254 - 8
    if siginfo:
        limit = 254
        extension = ".tar.zst.siginfo"
    if not hash:
        hash = "INVALID"
    fn = spec + hash + "_" + taskname + extension
    # If the filename is too long, attempt to reduce it
    if len(fn) > limit:
        components = spec.split(":")
        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
        # 7 is for the separators
        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
        components[2] = components[2][:avail]
        components[3] = components[3][:avail]
        components[4] = components[4][:avail]
        spec = ":".join(components)
        fn = spec + hash + "_" + taskname + extension
        if len(fn) > limit:
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
    return hash[:2] + "/" + hash[2:4] + "/" + fn

SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"

# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"

# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""

# Avoid docbook/sgml catalog warnings for now
SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps that allarch is disabled when multilib is used
SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"

SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
SSTATE_HASHEQUIV_FILEMAP ?= " \
    populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
    populate_sysroot:*/postinst-useradd-*:${COREBASE} \
    populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_INCLUDE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
    populate_sysroot:*/crossscripts/*:${TMPDIR} \
    populate_sysroot:*/crossscripts/*:${COREBASE} \
    "

BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

SSTATE_ARCHS_TUNEPKG ??= "${TUNE_PKGARCH}"
SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX} \
    allarch \
    ${SSTATE_ARCHS_TUNEPKG} \
    ${PACKAGE_EXTRA_ARCHS} \
    ${MACHINE_ARCH}"
SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"

SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"

SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"

# Check whether sstate exists for tasks that support sstate and are in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'

# Check whether the task's computed hash matches the task's hash in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
# List of signatures to consider valid.
SSTATE_VALID_SIGS ??= ""
SSTATE_VALID_SIGS[vardepvalue] = ""

SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
    the output hash for a task, which in turn is used to determine equivalency. \
    "

SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
    hash equivalency server, such as PN, PV, taskname, etc. This information \
    is very useful for developers looking at task data, but may leak sensitive \
    data if the equivalence server is public. \
    "

# Anonymous setup: choose SSTATE_PKGARCH per recipe class, enable the
# NATIVELSBSTRING extra path for native/cross recipes, and wire the sstate
# pre/post functions and network flags onto every task in SSTATETASKS.
python () {
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")

    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        # Generally sstate should be last, except for buildhistory functions
        postfuncs = (d.getVarFlag(task, 'postfuncs') or "").split()
        newpostfuncs = [p for p in postfuncs if "buildhistory" not in p] + ["sstate_task_postfunc"] + [p for p in postfuncs if "buildhistory" in p]
        d.setVarFlag(task, 'postfuncs', " ".join(newpostfuncs))
        d.setVarFlag(task, 'network', '1')
        d.setVarFlag(task + "_setscene", 'network', '1')
}

def sstate_init(task, d):
    """Return a fresh bookkeeping dict for the given sstate task."""
    ss = {}
    ss['task'] = task
    ss['dirs'] = []
    ss['plaindirs'] = []
    ss['lockfiles'] = []
    ss['lockfiles-shared'] = []
    return ss

def sstate_state_fromvars(d, task = None):
    """
    Build the sstate bookkeeping dict for a task from its 'sstate-*' varflags
    (sstate-inputdirs/outputdirs/plaindirs/lockfile/lockfile-shared/fixmedir).
    With no task argument, BB_CURRENTTASK is used. populate_lic switches the
    package spec to the software-specific SSTATE_SWSPEC.
    """
    if task is None:
        task = d.getVar('BB_CURRENTTASK')
        if not task:
            bb.fatal("sstate code running without task context?!")
        task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['fixmedir'] = fixmedir
    return ss

def sstate_add(ss, source, dest, d):
    """Record one [basename, source, dest] directory mapping in ss['dirs']."""
    if not source.endswith("/"):
        source = source + "/"
    if not dest.endswith("/"):
        dest = dest + "/"
    source = os.path.normpath(source)
    dest = os.path.normpath(dest)
    srcbase = os.path.basename(source)
    ss['dirs'].append([srcbase, source, dest])
    return ss

def sstate_install(ss, d):
    """
    Install an unpacked sstate package from SSTATE_INSTDIR into its shared
    destination directories: stage the trees, check for file conflicts with
    already-installed content (honouring SSTATE_ALLOW_OVERLAP_FILES), write
    the per-task manifest, update the per-arch manifest index, and hardlink
    the files into place. Lockfiles from ss are held across the install.
    """
    import oe.path
    import oe.sstatesig
    import subprocess

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    sstateinst = d.getVar("SSTATE_INSTDIR")

    for state in ss['dirs']:
        prepdir(state[1])
        bb.utils.rename(sstateinst + state[0], state[1])

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")

    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if os.path.islink(srcdir):
                    sharedfiles.append(dstdir)
                    continue
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in overlap_allowed:
                w = os.path.normpath(w)
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                sstate_search_cmd = "grep -rlF '%s' %s --exclude=index-* | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output:
                    match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
                else:
                    match.append(" (not matched to any task)")
    if match:
        bb.fatal("Recipe %s is trying to install files into a shared " \
          "area when those files already exist. The files and the manifests listing " \
          "them are:\n %s\n"
          "Please adjust the recipes so only one recipe provides a given file. " % \
          (d.getVar('PN'), "\n ".join(match)))

    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
        sharedfiles.append(ss['fixmedir'] + "/fixmepath")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    # We append new entries, we don't remove older entries which may have the same
    # manifest name but different versions from stamp/workdir. See below.
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR')
        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
        src = sstateinst + "/" + plain.replace(workdir, '')
        if sharedworkdir in plain:
            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        bb.utils.rename(src, dest)

    for lock in locks:
        bb.utils.unlockfile(lock)

sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX STAMP"

def sstate_installpkg(ss, d):
    """
    Fetch (if mirrored), verify (optionally via GPG) and unpack the sstate
    archive for a task, then hand off to sstate_installpkgdir(). Returns
    False when no usable archive is available so the real task runs instead.
    """
    from oe.gpg_sign import get_signer

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME')
    sstatepkg = d.getVar('SSTATE_PKG')
    verify_sig = bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False)

    if not os.path.exists(sstatepkg) or (verify_sig and not os.path.exists(sstatepkg + '.sig')):
        pstaging_fetch(sstatefetch, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Sstate package %s does not exist" % sstatepkg)
        return False

    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)

    if verify_sig:
        if not os.path.isfile(sstatepkg + '.sig'):
            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
            return False
        signer = get_signer(d, 'local')
        if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..."
                    % sstatepkg)
            return False

    # Empty sstateinst directory, ensure it's clean
    if os.path.exists(sstateinst):
        oe.path.remove(sstateinst)
    bb.utils.mkdirhier(sstateinst)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    return sstate_installpkgdir(ss, d)

def sstate_installpkgdir(ss, d):
    """
    Run the post-unpack hooks (SSTATEPOSTUNPACKFUNCS) inside SSTATE_INSTDIR
    and then install the unpacked tree via sstate_install(). Always returns
    True; failures are raised by the called functions.
    """
    import oe.path
    import subprocess

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    sstate_install(ss, d)

    return True

python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Replaces the FIXMESTAGINGDIR placeholder tokens in the files listed in
    # the unpacked "fixmepath" file with the current sysroot locations.
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR')
    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
    fixmefn = sstateinst + "fixmepath"
    if os.path.isfile(fixmefn):
        staging_target = d.getVar('RECIPE_SYSROOT')
        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')

        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        # Defer do_populate_sysroot relocation command
        if sstatefixmedir:
            bb.utils.mkdirhier(sstatefixmedir)
            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
                f.write(sstate_hardcode_cmd)
            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
            return

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.check_call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}

def sstate_clean_cachefile(ss, d):
    """Delete the sstate archives matching SSTATE_PATHSPEC for one task."""
    import oe.path

    if d.getVarFlag('do_%s' % ss['task'], 'task'):
        d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
        sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
        bb.note("Removing %s" % sstatepkgfile)
        oe.path.remove(sstatepkgfile)

def sstate_clean_cachefiles(d):
    """Delete the sstate archives for every task listed in SSTATETASKS."""
    for task in (d.getVar('SSTATETASKS') or "").split():
        ld = d.createCopy()
        ss = sstate_state_fromvars(ld, task)
        sstate_clean_cachefile(ss, ld)

def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
    """
    Remove every file/directory listed in a manifest, run its optional
    ".postrm" script, then delete the manifest itself. Entries ending in "/"
    are directories; empty ones are only rmdir'd when canrace is False.
    prefix, when given, is prepended to relative entries.
    """
    import oe.path

    mfile = open(manifest)
    entries = mfile.readlines()
    mfile.close()

    for entry in entries:
        entry = entry.strip()
        if prefix and not entry.startswith("/"):
            entry = prefix + "/" + entry
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
                    # Removing directories whilst builds are in progress exposes a race. Only
                    # do it in contexts where it is safe to do so.
                    os.rmdir(entry[:-1])
            else:
                os.remove(entry)
        except OSError:
            pass

    postrm = manifest + ".postrm"
    if os.path.exists(manifest + ".postrm"):
        import subprocess
        os.chmod(postrm, 0o755)
        subprocess.check_call(postrm, shell=True)
        oe.path.remove(postrm)

    oe.path.remove(manifest)

def sstate_clean(ss, d):
    """
    Undo a previous sstate install for a task: remove the installed files via
    its manifest (under the task lockfiles) and delete the task's current and
    previous stamps, preserving sigdata/sigbasedata and taint files.
    """
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN")
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
    if extrainf:
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d, canrace=True)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile or ".sigbasedata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)

sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"

CLEANFUNCS += "sstate_cleanall"

# Remove the installed shared state for every sstate task of this recipe.
python sstate_cleanall() {
    bb.note("Removing shared state for package %s" % d.getVar('PN'))

    manifest_dir = d.getVar('SSTATE_MANIFESTS')
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS').split()
    for name in tasks:
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}

# Replace sysroot paths in the files being packaged with FIXMESTAGINGDIR*
# placeholders and record the affected files in "fixmepath" so that
# sstate_hardcode_path_unpack can restore them on install.
python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging_target = d.getVar('RECIPE_SYSROOT')
    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
    sstate_builddir = d.getVar('SSTATE_BUILDDIR')

    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
    else:
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
        sstate_grep_cmd += " -e '%s'" % (fixme_path)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" %
            (sstate_hardcode_cmd))
    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)

    # If the fixmefn is empty, remove it.
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}

def sstate_package(ss, d):
    """
    Assemble the sstate output of a task into ${WORKDIR}/sstate-build-<task>/
    and run the archive-creation hooks (SSTATECREATEFUNCS, report-unihash,
    create-and-sign, SSTATEPOSTCREATEFUNCS). Absolute symlinks into TMPDIR
    are a fatal error. Finally dumps or refreshes the .siginfo for the task.
    """
    import oe.path
    import time

    tmpdir = d.getVar('TMPDIR')

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    # NOTE(review): 'sde' appears unused in this function — confirm before removing.
    sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
    d.setVar("SSTATE_CURRTASK", ss['task'])
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    exit = False
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find and error for absolute symlinks. We could attempt to relocate but it's not
        # clear where the symlink is relative to in this context. We could add that markup
        # to sstate tasks but there aren't many of these so better just avoid them entirely.
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files + dirs:
                srcpath = os.path.join(walkroot, file)
                if not os.path.islink(srcpath):
                    continue
                link = os.readlink(srcpath)
                if not os.path.isabs(link):
                    continue
                if not link.startswith(tmpdir):
                    continue
                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
                exit = True
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        bb.utils.rename(state[1], sstatebuild + state[0])
    if exit:
        bb.fatal("Failing task due to absolute path symlinks")

    workdir = d.getVar('WORKDIR')
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
    for plain in ss['plaindirs']:
        pdir = plain.replace(workdir, sstatebuild)
        if sharedworkdir in plain:
            pdir = plain.replace(sharedworkdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        bb.utils.rename(plain, pdir)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_INSTDIR', sstatebuild)

    if d.getVar('SSTATE_SKIP_CREATION') == '1':
        return

    sstate_create_package = ['sstate_report_unihash', 'sstate_create_and_sign_package']

    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
             sstate_create_package + \
             (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    # SSTATE_PKG may have been changed by sstate_report_unihash
    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
    if not os.path.exists(siginfo):
        bb.siggen.dump_this_task(siginfo, d)
    else:
        try:
            os.utime(siginfo, None)
        except PermissionError:
            pass
        except OSError as e:
            # Handle read-only file systems gracefully
            import errno
            if e.errno != errno.EROFS:
                raise e

    return

sstate_package[vardepsexclude] += "SSTATE_SIG_KEY SSTATE_PKG"

def pstaging_fetch(sstatefetch, d):
    """
    Try to download an sstate archive (plus its .siginfo and, when signature
    verification is enabled, its .sig) from SSTATE_MIRRORS into SSTATE_DIR.
    Fetch failures are silently ignored — the task is simply built instead.
    """
    import bb.fetch2

    # Only try and fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS')
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror, if it fails just return and
    # we will build the package
    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        localdata.delVar('SRC_URI')
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.checkstatus()
            fetcher.download()

        except bb.fetch2.BBFetchException:
            pass

def sstate_setscene(d):
    """Setscene entry point: install from sstate or raise to run the task."""
    shared_state = sstate_state_fromvars(d)
    accelerate = sstate_installpkg(shared_state, d)
    if not accelerate:
        msg = "No sstate archive obtainable, will run full task instead."
        bb.warn(msg)
        raise bb.BBHandledException(msg)

# Runs before each sstate task: remove any previously installed output.
python sstate_task_prefunc () {
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
sstate_task_prefunc[dirs] = "${WORKDIR}"

# Runs after each sstate task: package the output (under umask 0o002),
# install it into the shared area, then remove the build staging directory.
python sstate_task_postfunc () {
    shared_state = sstate_state_fromvars(d)

    omask = os.umask(0o002)
    if omask != 0o002:
        bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])

    sstate_installpkgdir(shared_state, d)

    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"

# Create a sstate package
# If enabled, sign the package.
# Package and signature are created in a sub-directory
# and renamed in place once created.
python sstate_create_and_sign_package () {
    from pathlib import Path

    # Best effort touch
    def touch(file):
        try:
            file.touch()
        except:
            pass

    # Move/link src into place at dst; on failure just refresh dst's
    # timestamp. Returns True only when dst was actually updated from src.
    def update_file(src, dst, force=False):
        if dst.is_symlink() and not dst.exists():
            force=True
        try:
            # This relies on that src is a temporary file that can be renamed
            # or left as is.
            if force:
                src.rename(dst)
            else:
                os.link(src, dst)
            return True
        except:
            pass

        if dst.exists():
            touch(dst)

        return False

    sign_pkg = (
        bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG")) and
        bool(d.getVar("SSTATE_SIG_KEY"))
    )

    sstate_pkg = Path(d.getVar("SSTATE_PKG"))
    sstate_pkg_sig = Path(str(sstate_pkg) + ".sig")
    if sign_pkg:
        if sstate_pkg.exists() and sstate_pkg_sig.exists():
            touch(sstate_pkg)
            touch(sstate_pkg_sig)
            return
    else:
        if sstate_pkg.exists():
            touch(sstate_pkg)
            return

    # Create the required sstate directory if it is not present.
    if not sstate_pkg.parent.is_dir():
        with bb.utils.umask(0o002):
            bb.utils.mkdirhier(str(sstate_pkg.parent))

    if sign_pkg:
        from tempfile import TemporaryDirectory
        with TemporaryDirectory(dir=sstate_pkg.parent) as tmp_dir:
            tmp_pkg = Path(tmp_dir) / sstate_pkg.name
            sstate_archive_package(tmp_pkg, d)

            from oe.gpg_sign import get_signer
            signer = get_signer(d, 'local')
            signer.detach_sign(str(tmp_pkg), d.getVar('SSTATE_SIG_KEY'), None,
                               d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)

            tmp_pkg_sig = Path(tmp_dir) / sstate_pkg_sig.name
            if not update_file(tmp_pkg_sig, sstate_pkg_sig):
                # If the created signature file could not be copied into place,
                # then we should not use the sstate package either.
                return

            # If the .sig file was updated, then the sstate package must also
            # be updated.
866 update_file(tmp_pkg, sstate_pkg, force=True) 867 else: 868 from tempfile import NamedTemporaryFile 869 with NamedTemporaryFile(prefix=sstate_pkg.name, dir=sstate_pkg.parent) as tmp_pkg_fd: 870 tmp_pkg = tmp_pkg_fd.name 871 sstate_archive_package(tmp_pkg, d) 872 update_file(tmp_pkg, sstate_pkg) 873 # update_file() may have renamed tmp_pkg, which must exist when the 874 # NamedTemporaryFile() context handler ends. 875 touch(Path(tmp_pkg)) 876 877} 878 879# Function to generate a sstate package from the current directory. 880# The calling function handles moving the sstate package into the final 881# destination. 882def sstate_archive_package(sstate_pkg, d): 883 import subprocess 884 885 cmd = [ 886 "tar", 887 "-I", d.expand("pzstd -${SSTATE_ZSTD_CLEVEL} -p${ZSTD_THREADS}"), 888 "-cS", 889 "-f", sstate_pkg, 890 ] 891 892 # tar refuses to create an empty archive unless told explicitly 893 files = sorted(os.listdir(".")) 894 if not files: 895 files = ["--files-from=/dev/null"] 896 897 try: 898 subprocess.run(cmd + files, check=True) 899 except subprocess.CalledProcessError as e: 900 # Ignore error 1 as this is caused by files changing 901 # (link count increasing from hardlinks being created). 902 if e.returncode != 1: 903 raise 904 905 os.chmod(sstate_pkg, 0o664) 906 907 908python sstate_report_unihash() { 909 report_unihash = getattr(bb.parse.siggen, 'report_unihash', None) 910 911 if report_unihash: 912 ss = sstate_state_fromvars(d) 913 report_unihash(os.getcwd(), ss['task'], d) 914} 915 916# 917# Shell function to decompress and prepare a package for installation 918# Will be run from within SSTATE_INSTDIR. 919# 920sstate_unpack_package () { 921 ZSTD="zstd -T${ZSTD_THREADS}" 922 # Use pzstd if available 923 if [ -x "$(command -v pzstd)" ]; then 924 ZSTD="pzstd -p ${ZSTD_THREADS}" 925 fi 926 927 tar -I "$ZSTD" -xvpf ${SSTATE_PKG} 928 # update .siginfo atime on local/NFS mirror if it is a symbolic link 929 [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! 
-e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true 930 # update each symbolic link instead of any referenced file 931 touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true 932 [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true 933 [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true 934} 935 936BB_HASHCHECK_FUNCTION = "sstate_checkhashes" 937 938def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs): 939 import itertools 940 941 found = set() 942 missed = set() 943 944 def gethash(task): 945 return sq_data['unihash'][task] 946 947 def getpathcomponents(task, d): 948 # Magic data from BB_HASHFILENAME 949 splithashfn = sq_data['hashfn'][task].split(" ") 950 spec = splithashfn[1] 951 if splithashfn[0] == "True": 952 extrapath = d.getVar("NATIVELSBSTRING") + "/" 953 else: 954 extrapath = "" 955 956 tname = bb.runqueue.taskname_from_tid(task)[3:] 957 958 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]: 959 spec = splithashfn[2] 960 extrapath = "" 961 962 return spec, extrapath, tname 963 964 def getsstatefile(tid, siginfo, d): 965 spec, extrapath, tname = getpathcomponents(tid, d) 966 return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d) 967 968 for tid in sq_data['hash']: 969 970 sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d)) 971 972 if os.path.exists(sstatefile): 973 oe.utils.touch(sstatefile) 974 found.add(tid) 975 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile) 976 else: 977 missed.add(tid) 978 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile) 979 980 foundLocal = len(found) 981 mirrors = d.getVar("SSTATE_MIRRORS") 982 if mirrors: 983 # Copy the data object and override DL_DIR and SRC_URI 984 localdata = bb.data.createCopy(d) 985 986 dldir = localdata.expand("${SSTATE_DIR}") 987 
localdata.delVar('MIRRORS') 988 localdata.setVar('FILESPATH', dldir) 989 localdata.setVar('DL_DIR', dldir) 990 localdata.setVar('PREMIRRORS', mirrors) 991 992 bb.debug(2, "SState using premirror of: %s" % mirrors) 993 994 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK, 995 # we'll want to allow network access for the current set of fetches. 996 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \ 997 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')): 998 localdata.delVar('BB_NO_NETWORK') 999 1000 from bb.fetch2 import FetchConnectionCache 1001 def checkstatus_init(): 1002 while not connection_cache_pool.full(): 1003 connection_cache_pool.put(FetchConnectionCache()) 1004 1005 def checkstatus_end(): 1006 while not connection_cache_pool.empty(): 1007 connection_cache = connection_cache_pool.get() 1008 connection_cache.close_connections() 1009 1010 def checkstatus(arg): 1011 (tid, sstatefile) = arg 1012 1013 connection_cache = connection_cache_pool.get() 1014 localdata2 = bb.data.createCopy(localdata) 1015 srcuri = "file://" + sstatefile 1016 localdata2.setVar('SRC_URI', srcuri) 1017 bb.debug(2, "SState: Attempting to fetch %s" % srcuri) 1018 1019 import traceback 1020 1021 try: 1022 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2, 1023 connection_cache=connection_cache) 1024 fetcher.checkstatus() 1025 bb.debug(2, "SState: Successful fetch test for %s" % srcuri) 1026 found.add(tid) 1027 missed.remove(tid) 1028 except bb.fetch2.FetchError as e: 1029 bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc())) 1030 except Exception as e: 1031 bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc())) 1032 1033 connection_cache_pool.put(connection_cache) 1034 1035 if progress: 1036 bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d) 1037 bb.event.check_for_interrupts(d) 1038 1039 tasklist = [] 1040 for tid in missed: 1041 
sstatefile = d.expand(getsstatefile(tid, siginfo, d)) 1042 tasklist.append((tid, sstatefile)) 1043 1044 if tasklist: 1045 nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist)) 1046 1047 ## thread-safe counter 1048 cnt_tasks_done = itertools.count(start = 1) 1049 progress = len(tasklist) >= 100 1050 if progress: 1051 msg = "Checking sstate mirror object availability" 1052 bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d) 1053 1054 # Have to setup the fetcher environment here rather than in each thread as it would race 1055 fetcherenv = bb.fetch2.get_fetcher_environment(d) 1056 with bb.utils.environment(**fetcherenv): 1057 bb.event.enable_threadlock() 1058 import concurrent.futures 1059 from queue import Queue 1060 connection_cache_pool = Queue(nproc) 1061 checkstatus_init() 1062 with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor: 1063 executor.map(checkstatus, tasklist.copy()) 1064 checkstatus_end() 1065 bb.event.disable_threadlock() 1066 1067 if progress: 1068 bb.event.fire(bb.event.ProcessFinished(msg), d) 1069 1070 inheritlist = d.getVar("INHERIT") 1071 if "toaster" in inheritlist: 1072 evdata = {'missed': [], 'found': []}; 1073 for tid in missed: 1074 sstatefile = d.expand(getsstatefile(tid, False, d)) 1075 evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) ) 1076 for tid in found: 1077 sstatefile = d.expand(getsstatefile(tid, False, d)) 1078 evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) ) 1079 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d) 1080 1081 if summary: 1082 # Print some summary statistics about the current task completion and how much sstate 1083 # reuse there was. Avoid divide by zero errors. 
1084 total = len(sq_data['hash']) 1085 complete = 0 1086 if currentcount: 1087 complete = (len(found) + currentcount) / (total + currentcount) * 100 1088 match = 0 1089 if total: 1090 match = len(found) / total * 100 1091 bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" % 1092 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete)) 1093 1094 if hasattr(bb.parse.siggen, "checkhashes"): 1095 bb.parse.siggen.checkhashes(sq_data, missed, found, d) 1096 1097 return found 1098setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT _SSTATE_EXCLUDEDEPS_SYSROOT" 1099 1100BB_SETSCENE_DEPVALID = "setscene_depvalid" 1101 1102def setscene_depvalid(task, taskdependees, notneeded, d, log=None): 1103 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME] 1104 # task is included in taskdependees too 1105 # Return - False - We need this dependency 1106 # - True - We can skip this dependency 1107 import re 1108 1109 def logit(msg, log): 1110 if log is not None: 1111 log.append(msg) 1112 else: 1113 bb.debug(2, msg) 1114 1115 logit("Considering setscene task: %s" % (str(taskdependees[task])), log) 1116 1117 directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"] 1118 1119 def isNativeCross(x): 1120 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross") 1121 1122 # We only need to trigger deploy_source_date_epoch through direct dependencies 1123 if taskdependees[task][1] in directtasks: 1124 return True 1125 1126 # We only need to trigger packagedata through direct dependencies 1127 # but need to preserve packagedata on packagedata links 1128 if taskdependees[task][1] == "do_packagedata": 1129 for dep in taskdependees: 1130 if taskdependees[dep][1] == "do_packagedata": 1131 return False 
1132 return True 1133 1134 for dep in taskdependees: 1135 logit(" considering dependency: %s" % (str(taskdependees[dep])), log) 1136 if task == dep: 1137 continue 1138 if dep in notneeded: 1139 continue 1140 # do_package_write_* and do_package doesn't need do_package 1141 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']: 1142 continue 1143 # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies 1144 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']: 1145 return False 1146 # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot 1147 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']: 1148 continue 1149 # Native/Cross packages don't exist and are noexec anyway 1150 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']: 1151 continue 1152 1153 # Consider sysroot depending on sysroot tasks 1154 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot': 1155 # Allow excluding certain recursive dependencies. If a recipe needs it should add a 1156 # specific dependency itself, rather than relying on one of its dependees to pull 1157 # them in. 
1158 # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html 1159 not_needed = False 1160 excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT') 1161 if excludedeps is None: 1162 # Cache the regular expressions for speed 1163 excludedeps = [] 1164 for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split(): 1165 excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1]))) 1166 d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps) 1167 for excl in excludedeps: 1168 if excl[0].match(taskdependees[dep][0]): 1169 if excl[1].match(taskdependees[task][0]): 1170 not_needed = True 1171 break 1172 if not_needed: 1173 continue 1174 # For meta-extsdk-toolchain we want all sysroot dependencies 1175 if taskdependees[dep][0] == 'meta-extsdk-toolchain': 1176 return False 1177 # Native/Cross populate_sysroot need their dependencies 1178 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]): 1179 return False 1180 # Target populate_sysroot depended on by cross tools need to be installed 1181 if isNativeCross(taskdependees[dep][0]): 1182 return False 1183 # Native/cross tools depended upon by target sysroot are not needed 1184 # Add an exception for shadow-native as required by useradd.bbclass 1185 if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native': 1186 continue 1187 # Target populate_sysroot need their dependencies 1188 return False 1189 1190 if taskdependees[dep][1] in directtasks: 1191 continue 1192 1193 # Safe fallthrough default 1194 logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log) 1195 return False 1196 return True 1197 1198addhandler sstate_eventhandler 1199sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded" 1200python sstate_eventhandler() { 1201 d = e.data 1202 writtensstate = d.getVar('SSTATE_CURRTASK') 1203 if not writtensstate: 1204 taskname = d.getVar("BB_RUNTASK")[3:] 
1205 spec = d.getVar('SSTATE_PKGSPEC') 1206 swspec = d.getVar('SSTATE_SWSPEC') 1207 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec: 1208 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}") 1209 d.setVar("SSTATE_EXTRAPATH", "") 1210 d.setVar("SSTATE_CURRTASK", taskname) 1211 siginfo = d.getVar('SSTATE_PKG') + ".siginfo" 1212 if not os.path.exists(siginfo): 1213 bb.siggen.dump_this_task(siginfo, d) 1214 else: 1215 oe.utils.touch(siginfo) 1216} 1217 1218SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1" 1219 1220# 1221# Event handler which removes manifests and stamps file for recipes which are no 1222# longer 'reachable' in a build where they once were. 'Reachable' refers to 1223# whether a recipe is parsed so recipes in a layer which was removed would no 1224# longer be reachable. Switching between systemd and sysvinit where recipes 1225# became skipped would be another example. 1226# 1227# Also optionally removes the workdir of those tasks/recipes 1228# 1229addhandler sstate_eventhandler_reachablestamps 1230sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps" 1231python sstate_eventhandler_reachablestamps() { 1232 import glob 1233 d = e.data 1234 stamps = e.stamps.values() 1235 removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1") 1236 preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps') 1237 preservestamps = [] 1238 if os.path.exists(preservestampfile): 1239 with open(preservestampfile, 'r') as f: 1240 preservestamps = f.readlines() 1241 seen = [] 1242 1243 # The machine index contains all the stamps this machine has ever seen in this build directory. 1244 # We should only remove things which this machine once accessed but no longer does. 
1245 machineindex = set() 1246 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}")) 1247 mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}") 1248 if os.path.exists(mi): 1249 with open(mi, "r") as f: 1250 machineindex = set(line.strip() for line in f.readlines()) 1251 1252 for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))): 1253 toremove = [] 1254 i = d.expand("${SSTATE_MANIFESTS}/index-" + a) 1255 if not os.path.exists(i): 1256 continue 1257 manseen = set() 1258 ignore = [] 1259 with open(i, "r") as f: 1260 lines = f.readlines() 1261 for l in reversed(lines): 1262 try: 1263 (stamp, manifest, workdir) = l.split() 1264 # The index may have multiple entries for the same manifest as the code above only appends 1265 # new entries and there may be an entry with matching manifest but differing version in stamp/workdir. 1266 # The last entry in the list is the valid one, any earlier entries with matching manifests 1267 # should be ignored. 1268 if manifest in manseen: 1269 ignore.append(l) 1270 continue 1271 manseen.add(manifest) 1272 if stamp not in stamps and stamp not in preservestamps and stamp in machineindex: 1273 toremove.append(l) 1274 if stamp not in seen: 1275 bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp) 1276 seen.append(stamp) 1277 except ValueError: 1278 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i)) 1279 1280 if toremove: 1281 msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a) 1282 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d) 1283 1284 removed = 0 1285 for r in toremove: 1286 (stamp, manifest, workdir) = r.split() 1287 for m in glob.glob(manifest + ".*"): 1288 if m.endswith(".postrm"): 1289 continue 1290 sstate_clean_manifest(m, d) 1291 bb.utils.remove(stamp + "*") 1292 if removeworkdir: 1293 bb.utils.remove(workdir, recurse = True) 1294 lines.remove(r) 1295 removed = removed + 1 1296 bb.event.fire(bb.event.ProcessProgress(msg, removed), d) 1297 
bb.event.check_for_interrupts(d) 1298 1299 bb.event.fire(bb.event.ProcessFinished(msg), d) 1300 1301 with open(i, "w") as f: 1302 for l in lines: 1303 if l in ignore: 1304 continue 1305 f.write(l) 1306 machineindex |= set(stamps) 1307 with open(mi, "w") as f: 1308 for l in machineindex: 1309 f.write(l + "\n") 1310 1311 if preservestamps: 1312 os.remove(preservestampfile) 1313} 1314 1315 1316# 1317# Bitbake can generate an event showing which setscene tasks are 'stale', 1318# i.e. which ones will be rerun. These are ones where a stamp file is present but 1319# it is stable (e.g. taskhash doesn't match). With that list we can go through 1320# the manifests for matching tasks and "uninstall" those manifests now. We do 1321# this now rather than mid build since the distribution of files between sstate 1322# objects may have changed, new tasks may run first and if those new tasks overlap 1323# with the stale tasks, we'd see overlapping files messages and failures. Thankfully 1324# removing these files is fast. 1325# 1326addhandler sstate_eventhandler_stalesstate 1327sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks" 1328python sstate_eventhandler_stalesstate() { 1329 d = e.data 1330 tasks = e.tasks 1331 1332 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}")) 1333 1334 for a in list(set(d.getVar("SSTATE_ARCHS").split())): 1335 toremove = [] 1336 i = d.expand("${SSTATE_MANIFESTS}/index-" + a) 1337 if not os.path.exists(i): 1338 continue 1339 with open(i, "r") as f: 1340 lines = f.readlines() 1341 for l in lines: 1342 try: 1343 (stamp, manifest, workdir) = l.split() 1344 for tid in tasks: 1345 for s in tasks[tid]: 1346 if s.startswith(stamp): 1347 taskname = bb.runqueue.taskname_from_tid(tid)[3:] 1348 manname = manifest + "." 
+ taskname 1349 if os.path.exists(manname): 1350 bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname)) 1351 toremove.append((manname, tid, tasks[tid])) 1352 break 1353 except ValueError: 1354 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i)) 1355 1356 if toremove: 1357 msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a) 1358 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d) 1359 1360 removed = 0 1361 for (manname, tid, stamps) in toremove: 1362 sstate_clean_manifest(manname, d) 1363 for stamp in stamps: 1364 bb.utils.remove(stamp) 1365 removed = removed + 1 1366 bb.event.fire(bb.event.ProcessProgress(msg, removed), d) 1367 bb.event.check_for_interrupts(d) 1368 1369 bb.event.fire(bb.event.ProcessFinished(msg), d) 1370} 1371