#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

SSTATE_VERSION = "12"

SSTATE_ZSTD_CLEVEL ??= "8"

SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"

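# Build the relative path of an sstate object: two levels of hash prefix
# directories followed by "<spec><hash>_<taskname>.tar.zst" (plus ".siginfo"
# when requested). If the name would exceed the filename length limit, the
# informational fields of the spec (fields 2, 3 and 4) are truncated.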
def generate_sstatefn(spec, hash, taskname, siginfo, d):
    if taskname is None:
        return ""
    extension = ".tar.zst"
    # 8 chars reserved for siginfo
    limit = 254 - 8
    if siginfo:
        limit = 254
        extension = ".tar.zst.siginfo"
    if not hash:
        hash = "INVALID"
    fn = spec + hash + "_" + taskname + extension
    # If the filename is too long, attempt to reduce it
    if len(fn) > limit:
        components = spec.split(":")
        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
        # 7 is for the separators
        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
        components[2] = components[2][:avail]
        components[3] = components[3][:avail]
        components[4] = components[4][:avail]
        spec = ":".join(components)
        fn = spec + hash + "_" + taskname + extension
        if len(fn) > limit:
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
    return hash[:2] + "/" + hash[2:4] + "/" + fn

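# Variables controlling how sstate objects are named and located:
# SSTATE_PKGSPEC is the architecture-specific name prefix, SSTATE_SWSPEC the
# architecture-independent one (used e.g. for do_populate_lic), SSTATE_PKGNAME
# combines the spec with the task's unihash, and SSTATE_PATHSPEC is the glob
# used to find existing objects under SSTATE_DIR.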
SSTATE_PKGARCH    = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC    = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC     = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
SSTATE_PKGNAME    = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG        = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH   = ""
SSTATE_EXTRAPATHWILDCARD = ""
SSTATE_PATHSPEC   = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"

# Explicitly make PV depend on the evaluated value of the PV variable
PV[vardepvalue] = "${PV}"

# We don't want the sstate to depend on things like the distro string
# of the system; we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""

# Avoid docbook/sgml catalog warnings for now
SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"

SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
SSTATE_HASHEQUIV_FILEMAP ?= " \
    populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
    populate_sysroot:*/postinst-useradd-*:${COREBASE} \
    populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
    populate_sysroot:*/crossscripts/*:${TMPDIR} \
    populate_sysroot:*/crossscripts/*:${COREBASE} \
    "

BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

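# All package architectures this build may store sstate manifests under; the
# manifest-index cleanup event handlers below iterate over this list.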
SSTATE_ARCHS_TUNEPKG ??= "${TUNE_PKGARCH}"
SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX} \
    allarch \
    ${SSTATE_ARCHS_TUNEPKG} \
    ${PACKAGE_EXTRA_ARCHS} \
    ${MACHINE_ARCH}"
SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"

SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"

SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"

# Check whether sstate exists for tasks that support sstate and are in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'

# Check whether the task's computed hash matches the task's hash in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
# List of signatures to consider valid.
SSTATE_VALID_SIGS ??= ""
SSTATE_VALID_SIGS[vardepvalue] = ""
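# For example (illustrative values, typically set in local.conf):
#   SSTATE_SIG_KEY ?= "0xDEADBEEF"
#   SSTATE_SIG_PASSPHRASE ?= "my-passphrase"
#   SSTATE_VERIFY_SIG ?= "1"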

SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
    the output hash for a task, which in turn is used to determine equivalency. \
    "

SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
    hash equivalency server, such as PN, PV, taskname, etc. This information \
    is very useful for developers looking at task data, but may leak sensitive \
    data if the equivalence server is public. \
    "

python () {
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")

    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
        d.setVarFlag(task, 'network', '1')
        d.setVarFlag(task + "_setscene", 'network', '1')
}

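# Return the skeleton dictionary used to track a task's shared-state
# configuration (directories, plain directories and lock files).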
def sstate_init(task, d):
    ss = {}
    ss['task'] = task
    ss['dirs'] = []
    ss['plaindirs'] = []
    ss['lockfiles'] = []
    ss['lockfiles-shared'] = []
    return ss

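# Build the shared-state dictionary for a task from the task's sstate-*
# varflags (inputdirs, outputdirs, plaindirs, lockfiles, interceptfuncs and
# fixmedir). do_populate_lic is special-cased to use the architecture-agnostic
# SSTATE_SWSPEC naming.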
def sstate_state_fromvars(d, task = None):
    if task is None:
        task = d.getVar('BB_CURRENTTASK')
        if not task:
            bb.fatal("sstate code running without task context?!")
        task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['interceptfuncs'] = interceptfuncs
    ss['fixmedir'] = fixmedir
    return ss

def sstate_add(ss, source, dest, d):
    if not source.endswith("/"):
        source = source + "/"
    if not dest.endswith("/"):
        dest = dest + "/"
    source = os.path.normpath(source)
    dest = os.path.normpath(dest)
    srcbase = os.path.basename(source)
    ss['dirs'].append([srcbase, source, dest])
    return ss

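# Install the task output into the shared area: check for conflicts with
# files already staged by other recipes, write the task manifest, record it in
# the per-architecture manifest index and hardlink the files into place.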
def sstate_install(ss, d):
    import oe.path
    import oe.sstatesig
    import subprocess

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")

    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if os.path.islink(srcdir):
                    sharedfiles.append(dstdir)
                    continue
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in overlap_allowed:
                w = os.path.normpath(w)
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                sstate_search_cmd = "grep -rlF '%s' %s --exclude=index-* | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output:
                    match.append("  (matched in %s)" % search_output.decode('utf-8').rstrip())
                else:
                    match.append("  (not matched to any task)")
    if match:
        bb.fatal("Recipe %s is trying to install files into a shared " \
          "area when those files already exist. The files and the manifests listing " \
          "them are:\n  %s\n"
          "Please adjust the recipes so only one recipe provides a given file. " % \
          (d.getVar('PN'), "\n  ".join(match)))

    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
        sharedfiles.append(ss['fixmedir'] + "/fixmepath")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    # We append new entries; we don't remove older entries which may have the same
    # manifest name but different versions from stamp/workdir. See below.
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(postinst, d, (sstateinst,))

    for lock in locks:
        bb.utils.unlockfile(lock)

sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"

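# Install a task from an sstate archive: fetch the archive (and signature)
# from a mirror if necessary, optionally verify the GPG signature, run the
# pre-install hooks and unpack it into SSTATE_INSTDIR, then hand over to
# sstate_installpkgdir(). Returns False if no usable archive is available.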
def sstate_installpkg(ss, d):
    from oe.gpg_sign import get_signer

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME')
    sstatepkg = d.getVar('SSTATE_PKG')
    verify_sig = bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False)

    if not os.path.exists(sstatepkg) or (verify_sig and not os.path.exists(sstatepkg + '.sig')):
        pstaging_fetch(sstatefetch, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Sstate package %s does not exist" % sstatepkg)
        return False

    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)

    if verify_sig:
        if not os.path.isfile(sstatepkg + '.sig'):
            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
            return False
        signer = get_signer(d, 'local')
        if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
            return False

    # Empty the sstateinst directory to ensure it's clean
    if os.path.exists(sstateinst):
        oe.path.remove(sstateinst)
    bb.utils.mkdirhier(sstateinst)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    return sstate_installpkgdir(ss, d)

def sstate_installpkgdir(ss, d):
    import oe.path
    import subprocess

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    for state in ss['dirs']:
        prepdir(state[1])
        bb.utils.rename(sstateinst + state[0], state[1])
    sstate_install(ss, d)

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR')
        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
        src = sstateinst + "/" + plain.replace(workdir, '')
        if sharedworkdir in plain:
            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        bb.utils.rename(src, dest)

    return True

python sstate_hardcode_path_unpack () {
    # Fix up hardcoded paths
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR')
    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
    fixmefn = sstateinst + "fixmepath"
    if os.path.isfile(fixmefn):
        staging_target = d.getVar('RECIPE_SYSROOT')
        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')

        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        # Defer do_populate_sysroot relocation command
        if sstatefixmedir:
            bb.utils.mkdirhier(sstatefixmedir)
            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
                f.write(sstate_hardcode_cmd)
            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
            return

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.check_call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and it may
        # conflict with another writer
        os.remove(fixmefn)
}

def sstate_clean_cachefile(ss, d):
    import oe.path

    if d.getVarFlag('do_%s' % ss['task'], 'task'):
        d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
        sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
        bb.note("Removing %s" % sstatepkgfile)
        oe.path.remove(sstatepkgfile)

def sstate_clean_cachefiles(d):
    for task in (d.getVar('SSTATETASKS') or "").split():
        ld = d.createCopy()
        ss = sstate_state_fromvars(ld, task)
        sstate_clean_cachefile(ss, ld)

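# Remove every file listed in a task manifest, run the optional .postrm
# script and finally delete the manifest itself. Directory entries are only
# removed when empty and when it is safe to do so, and removal races with
# other tasks are tolerated.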
def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
    import oe.path

    mfile = open(manifest)
    entries = mfile.readlines()
    mfile.close()

    for entry in entries:
        entry = entry.strip()
        if prefix and not entry.startswith("/"):
            entry = prefix + "/" + entry
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
                    # Removing directories whilst builds are in progress exposes a race. Only
                    # do it in contexts where it is safe to do so.
                    os.rmdir(entry[:-1])
            else:
                os.remove(entry)
        except OSError:
            pass

    postrm = manifest + ".postrm"
    if os.path.exists(manifest + ".postrm"):
        import subprocess
        os.chmod(postrm, 0o755)
        subprocess.check_call(postrm, shell=True)
        oe.path.remove(postrm)

    oe.path.remove(manifest)

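# Clean up everything a previous run of the given task left behind: its
# manifest contents and the corresponding stamp files (sigdata and taint
# files are preserved).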
def sstate_clean(ss, d):
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN")
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
    if extrainf:
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d, canrace=True)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile or ".sigbasedata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)

sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"

CLEANFUNCS += "sstate_cleanall"

python sstate_cleanall() {
    bb.note("Removing shared state for package %s" % d.getVar('PN'))

    manifest_dir = d.getVar('SSTATE_MANIFESTS')
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS').split()
    for name in tasks:
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}

python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging_target = d.getVar('RECIPE_SYSROOT')
    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
    sstate_builddir = d.getVar('SSTATE_BUILDDIR')

    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
    else:
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
        sstate_grep_cmd += " -e '%s'" % (fixme_path)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)

    # If the fixmefn is empty, remove it.
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}

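# Package the task output into an sstate archive: move the output trees into
# SSTATE_BUILDDIR (normalising timestamps for do_package and rejecting
# absolute symlinks into TMPDIR), then run the SSTATECREATEFUNCS, the archive
# creation/signing function and any SSTATEPOSTCREATEFUNCS, and finally dump
# or refresh the task's .siginfo file.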
def sstate_package(ss, d):
    import oe.path
    import time

    tmpdir = d.getVar('TMPDIR')

    fixtime = False
    if ss['task'] == "package":
        fixtime = True

    def fixtimestamp(root, path):
        f = os.path.join(root, path)
        if os.lstat(f).st_mtime > sde:
            os.utime(f, (sde, sde), follow_symlinks=False)

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
    d.setVar("SSTATE_CURRTASK", ss['task'])
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find absolute symlinks and error out on them. We could attempt to relocate them,
        # but it's not clear what the symlink is relative to in this context. We could add
        # that markup to sstate tasks, but there aren't many of these, so it's better to
        # avoid them entirely.
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files + dirs:
                if fixtime:
                    fixtimestamp(walkroot, file)
                srcpath = os.path.join(walkroot, file)
                if not os.path.islink(srcpath):
                    continue
                link = os.readlink(srcpath)
                if not os.path.isabs(link):
                    continue
                if not link.startswith(tmpdir):
                    continue
                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        bb.utils.rename(state[1], sstatebuild + state[0])

    workdir = d.getVar('WORKDIR')
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
    for plain in ss['plaindirs']:
        pdir = plain.replace(workdir, sstatebuild)
        if sharedworkdir in plain:
            pdir = plain.replace(sharedworkdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        bb.utils.rename(plain, pdir)
        if fixtime:
            fixtimestamp(pdir, "")
            for walkroot, dirs, files in os.walk(pdir):
                for file in files + dirs:
                    fixtimestamp(walkroot, file)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_INSTDIR', sstatebuild)

    if d.getVar('SSTATE_SKIP_CREATION') == '1':
        return

    sstate_create_package = ['sstate_report_unihash', 'sstate_create_and_sign_package']

    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
             sstate_create_package + \
             (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    # SSTATE_PKG may have been changed by sstate_report_unihash
    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
    if not os.path.exists(siginfo):
        bb.siggen.dump_this_task(siginfo, d)
    else:
        try:
            os.utime(siginfo, None)
        except PermissionError:
            pass
        except OSError as e:
            # Handle read-only file systems gracefully
            import errno
            if e.errno != errno.EROFS:
                raise e

    return

sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"

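# Fetch an sstate object (plus its .siginfo and, when signature verification
# is enabled, its .sig) from the configured SSTATE_MIRRORS into SSTATE_DIR.
# A typical mirror configuration (illustrative URL) looks like:
#   SSTATE_MIRRORS ?= "file://.* https://sstate.example.com/PATH;downloadfilename=PATH"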
def pstaging_fetch(sstatefetch, d):
    import bb.fetch2

    # Only try to fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS')
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror; if it fails, just return and
    # we will build the package
    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        localdata.delVar('SRC_URI')
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.checkstatus()
            fetcher.download()

        except bb.fetch2.BBFetchException:
            pass

def sstate_setscene(d):
    shared_state = sstate_state_fromvars(d)
    accelerate = sstate_installpkg(shared_state, d)
    if not accelerate:
        msg = "No sstate archive obtainable, will run full task instead."
        bb.warn(msg)
        raise bb.BBHandledException(msg)

python sstate_task_prefunc () {
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
sstate_task_prefunc[dirs] = "${WORKDIR}"

python sstate_task_postfunc () {
    shared_state = sstate_state_fromvars(d)

    for intercept in shared_state['interceptfuncs']:
        bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))

    omask = os.umask(0o002)
    if omask != 0o002:
        bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])

    sstate_installpkgdir(shared_state, d)

    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"

# Create an sstate package.
# If enabled, sign the package.
# Package and signature are created in a sub-directory
# and renamed in place once created.
python sstate_create_and_sign_package () {
    from pathlib import Path

    # Best effort touch
    def touch(file):
        try:
            file.touch()
        except:
            pass

    def update_file(src, dst, force=False):
        if dst.is_symlink() and not dst.exists():
            force=True
        try:
            # This relies on src being a temporary file that can be renamed
            # or left as is.
            if force:
                src.rename(dst)
            else:
                os.link(src, dst)
            return True
        except:
            pass

        if dst.exists():
            touch(dst)

        return False

    sign_pkg = (
        bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG")) and
        bool(d.getVar("SSTATE_SIG_KEY"))
    )

    sstate_pkg = Path(d.getVar("SSTATE_PKG"))
    sstate_pkg_sig = Path(str(sstate_pkg) + ".sig")
    if sign_pkg:
        if sstate_pkg.exists() and sstate_pkg_sig.exists():
            touch(sstate_pkg)
            touch(sstate_pkg_sig)
            return
    else:
        if sstate_pkg.exists():
            touch(sstate_pkg)
            return

    # Create the required sstate directory if it is not present.
    if not sstate_pkg.parent.is_dir():
        with bb.utils.umask(0o002):
            bb.utils.mkdirhier(str(sstate_pkg.parent))

    if sign_pkg:
        from tempfile import TemporaryDirectory
        with TemporaryDirectory(dir=sstate_pkg.parent) as tmp_dir:
            tmp_pkg = Path(tmp_dir) / sstate_pkg.name
            d.setVar("TMP_SSTATE_PKG", str(tmp_pkg))
            bb.build.exec_func('sstate_archive_package', d)

            from oe.gpg_sign import get_signer
            signer = get_signer(d, 'local')
            signer.detach_sign(str(tmp_pkg), d.getVar('SSTATE_SIG_KEY'), None,
                                d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)

            tmp_pkg_sig = Path(tmp_dir) / sstate_pkg_sig.name
            if not update_file(tmp_pkg_sig, sstate_pkg_sig):
                # If the created signature file could not be copied into place,
                # then we should not use the sstate package either.
                return

            # If the .sig file was updated, then the sstate package must also
            # be updated.
            update_file(tmp_pkg, sstate_pkg, force=True)
    else:
        from tempfile import NamedTemporaryFile
        with NamedTemporaryFile(prefix=sstate_pkg.name, dir=sstate_pkg.parent) as tmp_pkg_fd:
            tmp_pkg = tmp_pkg_fd.name
            d.setVar("TMP_SSTATE_PKG", str(tmp_pkg))
            bb.build.exec_func('sstate_archive_package', d)
            update_file(tmp_pkg, sstate_pkg)
            # update_file() may have renamed tmp_pkg, which must exist when the
            # NamedTemporaryFile() context handler ends.
            touch(Path(tmp_pkg))

}

# Shell function to generate an sstate package from a directory
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
# The calling function handles moving the sstate package into the final
# destination.
sstate_archive_package () {
	OPT="-cS"
	ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
	# Use pzstd if available
	if [ -x "$(command -v pzstd)" ]; then
		ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
	fi

	# Need to handle empty directories
	if [ "$(ls -A)" ]; then
		set +e
		tar -I "$ZSTD" $OPT -f ${TMP_SSTATE_PKG} *
		ret=$?
		if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
			exit 1
		fi
		set -e
	else
		tar -I "$ZSTD" $OPT --file=${TMP_SSTATE_PKG} --files-from=/dev/null
	fi
	chmod 0664 ${TMP_SSTATE_PKG}
}


python sstate_report_unihash() {
    report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)

    if report_unihash:
        ss = sstate_state_fromvars(d)
        report_unihash(os.getcwd(), ss['task'], d)
}

#
# Shell function to decompress and prepare a package for installation
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
	ZSTD="zstd -T${ZSTD_THREADS}"
	# Use pzstd if available
	if [ -x "$(command -v pzstd)" ]; then
		ZSTD="pzstd -p ${ZSTD_THREADS}"
	fi

	tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
	# update .siginfo atime on local/NFS mirror if it is a symbolic link
	[ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
	# update each symbolic link instead of any referenced file
	touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
	[ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
	[ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}

BB_HASHCHECK_FUNCTION = "sstate_checkhashes"

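# Called via BB_HASHCHECK_FUNCTION to work out which setscene tasks have an
# sstate object available. Local files under SSTATE_DIR are checked first;
# the remaining misses are then probed in parallel against SSTATE_MIRRORS.
# Returns the set of task IDs for which an object was found.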
def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    import itertools

    found = set()
    missed = set()

    def gethash(task):
        return sq_data['unihash'][task]

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_data['hashfn'][task].split(" ")
        spec = splithashfn[1]
        if splithashfn[0] == "True":
            extrapath = d.getVar("NATIVELSBSTRING") + "/"
        else:
            extrapath = ""

        tname = bb.runqueue.taskname_from_tid(task)[3:]

        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname

    def getsstatefile(tid, siginfo, d):
        spec, extrapath, tname = getpathcomponents(tid, d)
        return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)

    for tid in sq_data['hash']:

        sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))

        if os.path.exists(sstatefile):
            oe.utils.touch(sstatefile)
            found.add(tid)
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
        else:
            missed.add(tid)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    foundLocal = len(found)
    mirrors = d.getVar("SSTATE_MIRRORS")
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
                bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
            localdata.delVar('BB_NO_NETWORK')

        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init():
            while not connection_cache_pool.full():
                connection_cache_pool.put(FetchConnectionCache())

        def checkstatus_end():
            while not connection_cache_pool.empty():
                connection_cache = connection_cache_pool.get()
                connection_cache.close_connections()

        def checkstatus(arg):
            (tid, sstatefile) = arg

            connection_cache = connection_cache_pool.get()
            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            import traceback

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                            connection_cache=connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                found.add(tid)
                missed.remove(tid)
            except bb.fetch2.FetchError as e:
                bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
            except Exception as e:
                bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))

            connection_cache_pool.put(connection_cache)

            if progress:
                bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d)
            bb.event.check_for_interrupts(d)

        tasklist = []
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, siginfo, d))
            tasklist.append((tid, sstatefile))

        if tasklist:
            nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))

            # thread-safe counter
            cnt_tasks_done = itertools.count(start = 1)
            progress = len(tasklist) >= 100
            if progress:
                msg = "Checking sstate mirror object availability"
                bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)

            # Have to set up the fetcher environment here rather than in each thread as it would race
            fetcherenv = bb.fetch2.get_fetcher_environment(d)
            with bb.utils.environment(**fetcherenv):
                bb.event.enable_threadlock()
                import concurrent.futures
                from queue import Queue
                connection_cache_pool = Queue(nproc)
                checkstatus_init()
                with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
                    executor.map(checkstatus, tasklist.copy())
                checkstatus_end()
                bb.event.disable_threadlock()

            if progress:
                bb.event.fire(bb.event.ProcessFinished(msg), d)

    inheritlist = d.getVar("INHERIT")
    if "toaster" in inheritlist:
        evdata = {'missed': [], 'found': []}
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        for tid in found:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if summary:
        # Print some summary statistics about the current task completion and how much sstate
        # reuse there was. Avoid divide by zero errors.
        total = len(sq_data['hash'])
        complete = 0
        if currentcount:
            complete = (len(found) + currentcount) / (total + currentcount) * 100
        match = 0
        if total:
            match = len(found) / total * 100
        bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
            (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(sq_data, missed, found, d)

    return found
setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"

BB_SETSCENE_DEPVALID = "setscene_depvalid"

def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
    # taskdependees is a dict of tasks which depend on task, each being a 3-item list of [PN, TASKNAME, FILENAME]
    # task is included in taskdependees too
    # Return - False - We need this dependency
    #        - True - We can skip this dependency
    import re

    def logit(msg, log):
        if log is not None:
            log.append(msg)
        else:
            bb.debug(2, msg)

    logit("Considering setscene task: %s" % (str(taskdependees[task])), log)

    directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]

    def isNativeCross(x):
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")

    # The tasks in directtasks only need to be triggered through direct dependencies
    if taskdependees[task][1] in directtasks:
        return True

    # We only need to trigger packagedata through direct dependencies
    # but need to preserve packagedata on packagedata links
    if taskdependees[task][1] == "do_packagedata":
        for dep in taskdependees:
            if taskdependees[dep][1] == "do_packagedata":
                return False
        return True

    for dep in taskdependees:
        logit("  considering dependency: %s" % (str(taskdependees[dep])), log)
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package don't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
            return False
        # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # Allow excluding certain recursive dependencies. If a recipe needs it, it should add a
            # specific dependency itself, rather than relying on one of its dependees to pull
            # them in.
            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
            not_needed = False
            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
            if excludedeps is None:
                # Cache the regular expressions for speed
                excludedeps = []
                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
            for excl in excludedeps:
                if excl[0].match(taskdependees[dep][0]):
                    if excl[1].match(taskdependees[task][0]):
                        not_needed = True
                        break
            if not_needed:
                continue
            # For meta-extsdk-toolchain we want all sysroot dependencies
            if taskdependees[dep][0] == 'meta-extsdk-toolchain':
                return False
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            # Add an exception for shadow-native as required by useradd.bbclass
            if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
                continue
            # Target populate_sysroot need their dependencies
            return False

        if taskdependees[dep][1] in directtasks:
            continue

        # Safe fallthrough default
        logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
        return False
    return True

addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    d = e.data
    writtensstate = d.getVar('SSTATE_CURRTASK')
    if not writtensstate:
        taskname = d.getVar("BB_RUNTASK")[3:]
        spec = d.getVar('SSTATE_PKGSPEC')
        swspec = d.getVar('SSTATE_SWSPEC')
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar("SSTATE_CURRTASK", taskname)
        siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
        if not os.path.exists(siginfo):
            bb.siggen.dump_this_task(siginfo, d)
        else:
            oe.utils.touch(siginfo)
}

SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"

#
# Event handler which removes manifests and stamp files for recipes which are no
# longer 'reachable' in a build where they once were. 'Reachable' refers to
# whether a recipe is parsed, so recipes in a layer which was removed would no
# longer be reachable. Switching between systemd and sysvinit, where recipes
# became skipped, would be another example.
#
# Also optionally removes the workdir of those tasks/recipes
#
addhandler sstate_eventhandler_reachablestamps
sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler_reachablestamps() {
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
    preservestamps = []
    if os.path.exists(preservestampfile):
        with open(preservestampfile, 'r') as f:
            preservestamps = f.readlines()
    seen = []

    # The machine index contains all the stamps this machine has ever seen in this build directory.
    # We should only remove things which this machine once accessed but no longer does.
    machineindex = set()
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
    if os.path.exists(mi):
        with open(mi, "r") as f:
            machineindex = set(line.strip() for line in f.readlines())

    for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        manseen = set()
        ignore = []
        with open(i, "r") as f:
            lines = f.readlines()
            for l in reversed(lines):
                try:
                    (stamp, manifest, workdir) = l.split()
                    # The index may have multiple entries for the same manifest as the code above only appends
                    # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
                    # The last entry in the list is the valid one, any earlier entries with matching manifests
                    # should be ignored.
                    if manifest in manseen:
                        ignore.append(l)
                        continue
                    manseen.add(manifest)
                    if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
                        toremove.append(l)
                        if stamp not in seen:
                            bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
                            seen.append(stamp)
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                for m in glob.glob(manifest + ".*"):
                    if m.endswith(".postrm"):
                        continue
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
                bb.event.check_for_interrupts(d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)

        with open(i, "w") as f:
            for l in lines:
                if l in ignore:
                    continue
                f.write(l)
    machineindex |= set(stamps)
    with open(mi, "w") as f:
        for l in machineindex:
            f.write(l + "\n")

    if preservestamps:
        os.remove(preservestampfile)
}


#
# Bitbake can generate an event showing which setscene tasks are 'stale',
# i.e. which ones will be rerun. These are ones where a stamp file is present but
# it is stale (e.g. the taskhash doesn't match). With that list we can go through
# the manifests for matching tasks and "uninstall" those manifests now. We do
# this now rather than mid-build since the distribution of files between sstate
# objects may have changed; new tasks may run first, and if those new tasks overlap
# with the stale tasks, we'd see overlapping-files messages and failures. Thankfully
# removing these files is fast.
#
addhandler sstate_eventhandler_stalesstate
sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
python sstate_eventhandler_stalesstate() {
    d = e.data
    tasks = e.tasks

    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    for a in list(set(d.getVar("SSTATE_ARCHS").split())):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        with open(i, "r") as f:
            lines = f.readlines()
            for l in lines:
                try:
                    (stamp, manifest, workdir) = l.split()
                    for tid in tasks:
                        for s in tasks[tid]:
                            if s.startswith(stamp):
                                taskname = bb.runqueue.taskname_from_tid(tid)[3:]
                                manname = manifest + "." + taskname
                                if os.path.exists(manname):
                                    bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
                                    toremove.append((manname, tid, tasks[tid]))
                                    break
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for (manname, tid, stamps) in toremove:
                sstate_clean_manifest(manname, d)
                for stamp in stamps:
                    bb.utils.remove(stamp)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
                bb.event.check_for_interrupts(d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)
}
