xref: /openbmc/openbmc/poky/bitbake/lib/bb/utils.py (revision c9537f57ab488bf5d90132917b0184e2527970a5)
1"""
2BitBake Utility Functions
3"""
4
5# Copyright (C) 2004 Michael Lauer
6#
7# SPDX-License-Identifier: GPL-2.0-only
8#
9
10import re, fcntl, os, string, stat, shutil, time
11import sys
12import errno
13import logging
14import locale
15import multiprocessing
16import importlib
17import importlib.machinery
18import importlib.util
19import itertools
20import subprocess
21import glob
22import fnmatch
23import traceback
24import signal
25import collections
26import copy
27import ctypes
28import random
29import socket
30import struct
31import tempfile
32from subprocess import getstatusoutput
33from contextlib import contextmanager
34from ctypes import cdll
35import bb
36import bb.msg
37
# Module-level logger for all bb.utils diagnostics.
logger = logging.getLogger("BitBake.Util")
# Filename suffixes that can hold Python code (source, bytecode and
# extension-module suffixes) as reported by the running interpreter.
python_extensions = importlib.machinery.all_suffixes()
40
41
def clean_context():
    """Return a fresh copy of the default global context used when
    executing python functions: just the os, bb and time modules."""
    return dict(os=os, bb=bb, time=time)
48
def get_context():
    """Return the shared global context dictionary used as the execution
    globals by better_exec()/better_eval()."""
    return _context
51
52
def set_context(ctx):
    """Replace the shared global context used by better_exec()/better_eval().

    Arguments:

    -  ``ctx``: the new context dictionary.

    No return value.
    """
    # Without the 'global' declaration the assignment only created a
    # function-local binding, silently leaving the module-level _context
    # untouched - i.e. set_context() previously had no effect.
    global _context
    _context = ctx
55
# Shared execution context used as the globals in better_exec()/better_eval();
# read via get_context() and replaced via set_context().
_context = clean_context()
58
class VersionStringException(Exception):
    """Raised when a version string, or a version comparison operator,
    is malformed or unsupported."""
61
def explode_version(s):
    """Break a version string into a list of comparable components.

    Each component is a (weight, value) tuple: (0, int) for a numeric run,
    (1, str) for an alphabetic run, (-1, '~') for a tilde (which sorts
    before anything else) and (2, char) for any other single character.
    """
    components = []
    numeric = re.compile(r'^(\d+)(.*)$')
    alpha = re.compile(r'^([a-zA-Z]+)(.*)$')
    while s:
        head = s[0]
        if head in string.digits:
            match = numeric.match(s)
            components.append((0, int(match.group(1))))
            s = match.group(2)
        elif head in string.ascii_letters:
            match = alpha.match(s)
            components.append((1, match.group(1)))
            s = match.group(2)
        else:
            components.append((-1, head) if head == '~' else (2, head))
            s = s[1:]
    return components
83
def split_version(s):
    """Split a version string into its constituent parts (PE, PV, PR).

    Arguments:

    -  ``s``: version string of the form ``${PE}:${PV}-${PR}``; the epoch
       and revision parts are optional. Surrounding spaces and comparison
       characters (``<``, ``>``, ``=``) are stripped first.

    Returns a tuple ``(pe, pv, pr)`` where ``pe`` is an int (0 when absent)
    and ``pr`` is "" when absent.
    """
    s = s.strip(" <>=")
    pe = 0
    parts = s.split(":")
    if len(parts) > 1:
        pe = int(parts[0])
        s = parts[1]
    pr = ""
    if '-' in s:
        s, _, pr = s.rpartition('-')
    return (pe, s, pr)
106
def vercmp_part(a, b):
    """Compare two version-string fragments using explode_version().

    Returns -1, 0 or 1 when ``a`` is respectively older than, equal to, or
    newer than ``b``.
    """
    va = explode_version(a)
    vb = explode_version(b)
    # Exhausted sides compare as a (0, None) component, which sorts below
    # any real numeric component and above a '~' component.
    sentinel = (0, None)
    while True:
        ca = va.pop(0) if va else sentinel
        cb = vb.pop(0) if vb else sentinel
        if ca == sentinel and cb == sentinel:
            return 0
        (oa, ia) = ca
        (ob, ib) = cb
        if oa != ob:
            return -1 if oa < ob else 1
        if ia is None:
            return -1
        if ib is None:
            return 1
        if ia != ib:
            return -1 if ia < ib else 1
133
def vercmp(ta, tb):
    """Compare two (epoch, version, revision) tuples as produced by
    split_version().

    Returns a value < 0 when ``ta`` is older, 0 when equal, > 0 when newer.
    """
    (ea, va, ra) = ta
    (eb, vb, rb) = tb

    # The epoch dominates; fall back to the version, then the revision.
    result = int(ea or 0) - int(eb or 0)
    if result == 0:
        result = vercmp_part(va, vb)
    if result == 0:
        result = vercmp_part(ra, rb)
    return result
144
def vercmp_string(a, b):
    """Compare two version strings.

    Arguments:

    -  ``a``: left version string operand.
    -  ``b``: right version string operand.

    Each operand is split with split_version() and the resulting tuples are
    compared with vercmp(); the result of vercmp() is returned.
    """
    return vercmp(split_version(a), split_version(b))
158
def vercmp_string_op(a, b, op):
    """
    Compare two version strings with the given comparison operator.

    Arguments:

    -  ``a``: left version string operand.
    -  ``b``: right version string operand.
    -  ``op``: operator string. Can be one of ``=``, ``==``, ``<=``, ``>=``,
       ``>``, ``>>``, ``<``, ``<<`` or ``!=``.

    Returns the boolean result of the comparison; raises
    VersionStringException for any other operator.
    """
    res = vercmp_string(a, b)
    # Map each supported operator onto a predicate over vercmp()'s result.
    comparisons = {
        '=': lambda r: r == 0,
        '==': lambda r: r == 0,
        '<=': lambda r: r <= 0,
        '>=': lambda r: r >= 0,
        '>': lambda r: r > 0,
        '>>': lambda r: r > 0,
        '<': lambda r: r < 0,
        '<<': lambda r: r < 0,
        '!=': lambda r: r != 0,
    }
    if op not in comparisons:
        raise VersionStringException('Unsupported comparison operator "%s"' % op)
    return comparisons[op](res)
186
def explode_deps(s):
    """
    Takes an RDEPENDS style string of format::

      DEPEND1 (optional version) DEPEND2 (optional version) ...

    Arguments:

    -  ``s``: input RDEPENDS style string

    Returns a list of dependency names; any parenthesised version
    constraints are skipped entirely.
    """
    deps = []
    in_version = False
    for token in s.split():
        if token.startswith('('):
            in_version = True
        if not in_version:
            deps.append(token)
        if in_version and token.endswith(')'):
            in_version = False
    return deps
217
def explode_dep_versions2(s, *, sort=True):
    """
    Takes an RDEPENDS style string of format::

       DEPEND1 (optional version) DEPEND2 (optional version) ...

    Arguments:

    -  ``s``: input RDEPENDS style string
    -  ``sort``: whether to sort the output dictionary by dependency name.

    Returns a dictionary mapping each dependency name to a (possibly empty)
    list of "<operator> <version>" constraint strings.

    Raises VersionStringException when a version constraint has a missing
    or invalid comparison operator.
    """
    r = collections.OrderedDict()
    l = s.replace(",", "").split()
    lastdep = None
    lastcmp = ""
    lastver = ""
    incmp = False       # expecting the comparison operator next
    inversion = False   # accumulating the version string
    for i in l:
        if i[0] == '(':
            incmp = True
            i = i[1:].strip()
            if not i:
                continue

        if incmp:
            incmp = False
            inversion = True
            # This list is based on behavior and supported comparisons from deb, opkg and rpm.
            #
            # Even though =<, <<, ==, !=, =>, and >> may not be supported,
            # we list each possibly valid item.
            # The build system is responsible for validation of what it supports.
            if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')):
                lastcmp = i[0:2]
                i = i[2:]
            elif i.startswith(('<', '>', '=')):
                lastcmp = i[0:1]
                i = i[1:]
            else:
                # This is an unsupported case!
                # (The statements that previously followed this raise were
                # unreachable and have been removed, along with a no-op
                # `i.strip()` whose result was discarded.)
                raise VersionStringException('Invalid version specification in "(%s" - invalid or missing operator' % i)
            if not i:
                # Operator and version were separate tokens; the version
                # arrives with the next token.
                continue

        if inversion:
            if i.endswith(')'):
                i = i[:-1] or ""
                inversion = False
                if lastver and i:
                    lastver += " "
            if i:
                lastver += i
                if lastdep not in r:
                    r[lastdep] = []
                r[lastdep].append(lastcmp + " " + lastver)
            continue

        # A plain dependency name; only reset its constraint list when no
        # constraints have been recorded for it yet.
        lastdep = i
        lastver = ""
        lastcmp = ""
        if not (i in r and r[i]):
            r[lastdep] = []

    if sort:
        r = collections.OrderedDict(sorted(r.items(), key=lambda x: x[0]))
    return r
292
def explode_dep_versions(s):
    """
    Take an RDEPENDS style string of format::

      DEPEND1 (optional version) DEPEND2 (optional version) ...

    Arguments:

    -  ``s``: input RDEPENDS style string

    Returns a dictionary mapping each dependency to a single version
    constraint string, or ``None`` when no version was given. When a
    dependency carries several constraints, only the first is kept and a
    warning is emitted.
    """
    deps = explode_dep_versions2(s)
    for name, versions in deps.items():
        if not versions:
            deps[name] = None
        else:
            if len(versions) > 1:
                bb.warn("explode_dep_versions(): Item %s appeared in dependency string '%s' multiple times with different values.  explode_dep_versions cannot cope with this." % (name, s))
            deps[name] = versions[0]
    return deps
316
def join_deps(deps, commasep=True):
    """
    Take a result from bb.utils.explode_dep_versions() and generate a
    dependency string.

    Arguments:

    -  ``deps``: dictionary mapping dependency names to a version string,
       a list of version strings, or a false value for "no version".
    -  ``commasep``: separate entries with ", " when ``True``, with a
       single space otherwise.

    Returns the formatted dependency string.
    """
    result = []
    for name, version in deps.items():
        if not version:
            result.append(name)
        elif isinstance(version, list):
            result.extend("%s (%s)" % (name, v) for v in version)
        else:
            result.append("%s (%s)" % (name, version))
    separator = ", " if commasep else " "
    return separator.join(result)
345
346def _print_trace(body, line):
347    """
348    Print the Environment of a Text Body
349    """
350    error = []
351    # print the environment of the method
352    min_line = max(1, line-4)
353    max_line = min(line + 4, len(body))
354    for i in range(min_line, max_line + 1):
355        if line == i:
356            error.append(' *** %.4d:%s' % (i, body[i-1].rstrip()))
357        else:
358            error.append('     %.4d:%s' % (i, body[i-1].rstrip()))
359    return error
360
def better_compile(text, file, realfile, mode = "exec", lineno = 0):
    """
    A better compile method. This method
    will print the offending lines when compilation fails.

    Arguments:

    -  ``text``: python source to compile.
    -  ``file``: unused, kept for interface compatibility.
    -  ``realfile``: filename recorded in the code object and used in
       error messages.
    -  ``mode``: compile() mode.
    -  ``lineno``: line offset of the code within ``realfile``.

    Returns the compiled code object; raises bb.BBHandledException after
    logging details when compilation fails.
    """
    try:
        cache = bb.methodpool.compile_cache(text)
        if cache:
            return cache
        # We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though
        text2 = "\n" * int(lineno) + text
        code = compile(text2, realfile, mode)
        bb.methodpool.compile_cache_add(text, code)
        return code
    except Exception as e:
        error = []
        # split the text into lines again
        body = text.split('\n')
        # Only SyntaxError carries a lineno attribute; use getattr so a
        # non-syntax failure does not raise AttributeError from inside
        # this handler and mask the real error.
        error.append("Error in compiling python function in %s, line %s:\n" % (realfile, getattr(e, "lineno", None)))
        if hasattr(e, "lineno"):
            error.append("The code lines resulting in this error were:")
            # e.lineno: line's position in realfile
            # lineno: function name's "position -1" in realfile
            # e.lineno - lineno: line's relative position in function
            error.extend(_print_trace(body, e.lineno - lineno))
        else:
            error.append("The function causing this error was:")
            for line in body:
                error.append(line)
        error.append("%s: %s" % (e.__class__.__name__, str(e)))

        logger.error("\n".join(error))

        e = bb.BBHandledException(e)
        raise e
396
def _print_exception(t, value, tb, realfile, text, context):
    """Log a detailed, source-annotated report for an exception raised while
    executing python code compiled from ``text`` (see better_exec()).

    Arguments:

    -  ``t``: exception type (from sys.exc_info()).
    -  ``value``: exception instance.
    -  ``tb``: traceback object.
    -  ``realfile``: filename the compiled code was attributed to.
    -  ``text``: the python source that was executed.
    -  ``context``: execution context dictionary (not read here).

    No return value; the assembled report is emitted via logger.error().
    """
    error = []
    try:
        exception = traceback.format_exception_only(t, value)
        error.append('Error executing a python function in %s:\n' % realfile)

        # Strip 'us' from the stack (better_exec call) unless that was where the
        # error came from
        if tb.tb_next is not None:
            tb = tb.tb_next

        textarray = text.split('\n')

        linefailed = tb.tb_lineno

        tbextract = traceback.extract_tb(tb)
        tbformat = traceback.format_list(tbextract)
        error.append("The stack trace of python calls that resulted in this exception/failure was:")
        error.append("File: '%s', lineno: %s, function: %s" % (tbextract[0][0], tbextract[0][1], tbextract[0][2]))
        error.extend(_print_trace(textarray, linefailed))

        # See if this is a function we constructed and has calls back into other functions in
        # "text". If so, try and improve the context of the error by diving down the trace
        level = 0
        nexttb = tb.tb_next
        while nexttb is not None and (level+1) < len(tbextract):
            error.append("File: '%s', lineno: %s, function: %s" % (tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2]))
            # NOTE(review): the second clause compares a function name
            # (index [2]) against a file name (index [0]) - looks suspicious
            # but is long-standing behavior; confirm intent before changing.
            if tbextract[level][0] == tbextract[level+1][0] and tbextract[level+1][2] == tbextract[level][0]:
                # The code was possibly in the string we compiled ourselves
                error.extend(_print_trace(textarray, tbextract[level+1][1]))
            elif tbextract[level+1][0].startswith("/"):
                # The code looks like it might be in a file, try and load it
                try:
                    with open(tbextract[level+1][0], "r") as f:
                        text = f.readlines()
                        error.extend(_print_trace(text, tbextract[level+1][1]))
                except:
                    error.append(tbformat[level+1])
            else:
                error.append(tbformat[level+1])
            # NOTE(review): nexttb is re-read from tb.tb_next, which never
            # changes inside this loop, so the loop is effectively bounded by
            # len(tbextract) rather than by walking the traceback - confirm.
            nexttb = tb.tb_next
            level = level + 1

        error.append("Exception: %s" % ''.join(exception))

        # If the exception is from spawning a task, let's be helpful and display
        # the output (which hopefully includes stderr).
        if isinstance(value, subprocess.CalledProcessError) and value.output:
            error.append("Subprocess output:")
            error.append(value.output.decode("utf-8", errors="ignore"))
    finally:
        # Always emit whatever was assembled, even if report generation
        # itself failed part-way through.
        logger.error("\n".join(error))
449
def better_exec(code, context, text = None, realfile = "<code>", pythonexception=False):
    """
    Similar to better_compile(): execute ``code`` and, on failure, log the
    source lines responsible for the error.

    Arguments:

    -  ``code``: code object, or python source string to compile and run.
    -  ``context``: dictionary used as the execution locals.
    -  ``text``: original source for error reporting (defaults to ``code``).
    -  ``realfile``: filename used when compiling and in error reports.
    -  ``pythonexception``: when ``True``, re-raise the original exception
       instead of wrapping it in bb.BBHandledException.
    """
    import bb.parse
    text = text or code
    if not hasattr(code, "co_filename"):
        code = better_compile(code, realfile, realfile)
    try:
        exec(code, get_context(), context)
    except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError, bb.process.ExecutionError):
        # Error already shown so passthrough, no need for traceback
        raise
    except Exception as exc:
        if pythonexception:
            raise
        (t, value, tb) = sys.exc_info()
        try:
            _print_exception(t, value, tb, realfile, text, context)
        except Exception as report_err:
            logger.error("Exception handler error: %s" % str(report_err))

        raise bb.BBHandledException(exc)
477
def simple_exec(code, context):
    """Execute ``code`` with the shared global context as globals and
    ``context`` as locals, without any error reporting (cf. better_exec())."""
    exec(code, get_context(), context)
480
def better_eval(source, locals, extraglobals = None):
    """Evaluate ``source`` against the shared global context, optionally
    augmented with the ``extraglobals`` mapping, using ``locals`` as the
    local namespace. Returns the evaluated value."""
    ctx = get_context()
    if extraglobals:
        # Copy first so the shared context is never polluted by the
        # caller's additions.
        ctx = copy.copy(ctx)
        ctx.update(extraglobals)
    return eval(source, ctx, locals)
488
@contextmanager
def fileslocked(files, *args, **kwargs):
    """Context manager that locks every path in ``files`` on entry (via
    bb.utils.lockfile(), forwarding any extra arguments) and unlocks them
    in reverse order on exit.

    No return value."""
    locks = []
    for name in files or []:
        lock = bb.utils.lockfile(name, *args, **kwargs)
        if lock is not None:
            locks.append(lock)

    try:
        yield
    finally:
        # Release in the opposite order to acquisition.
        for lock in reversed(locks):
            bb.utils.unlockfile(lock)
509
def lockfile(name, shared=False, retry=True, block=False):
    """
    Use the specified file (with filename ``name``) as a lock file, return when
    the lock has been acquired. Returns a variable to pass to unlockfile().

    Arguments:

    -  ``shared``: sets the lock as a shared lock instead of an
       exclusive lock.
    -  ``retry``: ``True`` to re-try locking if it fails, ``False``
       otherwise.
    -  ``block``: ``True`` to block until the lock succeeds,
       ``False`` otherwise.

    The retry and block parameters are kind of equivalent unless you
    consider the possibility of sending a signal to the process to break
    out - at which point you want block=True rather than retry=True.

    Returns the locked file descriptor in case of success, ``None`` otherwise.
    """
    basename = os.path.basename(name)
    if len(basename) > 255:
        # Trim to fit common filesystem name-length limits, preserving
        # the extension.
        root, ext = os.path.splitext(basename)
        basename = root[:255 - len(ext)] + ext

    dirname = os.path.dirname(name)
    mkdirhier(dirname)

    name = os.path.join(dirname, basename)

    if not os.access(dirname, os.W_OK):
        logger.error("Unable to acquire lock '%s', directory is not writable",
                     name)
        sys.exit(1)

    op = fcntl.LOCK_EX
    if shared:
        op = fcntl.LOCK_SH
    if not retry and not block:
        op = op | fcntl.LOCK_NB

    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk.
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.

        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.

        try:
            lf = open(name, 'a+')
            fileno = lf.fileno()
            fcntl.flock(fileno, op)
            statinfo = os.fstat(fileno)
            if os.path.exists(lf.name):
                statinfo2 = os.stat(lf.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lf
            # The lock file was replaced or removed underneath us; retry.
            lf.close()
        except OSError as e:
            if e.errno == errno.EACCES or e.errno == errno.ENAMETOOLONG:
                # Fixed argument order: previously strerror filled the quoted
                # '%s' and the path was appended, producing e.g.
                # "Unable to acquire lock 'Permission denied', /path".
                logger.error("Unable to acquire lock '%s', %s",
                             name, e.strerror)
                sys.exit(1)
            try:
                lf.close()
            except Exception:
                pass
            pass
        if not retry:
            return None
584
def unlockfile(lf):
    """
    Unlock a file locked using bb.utils.lockfile().

    Arguments:

    -  ``lf``: the locked file object.

    No return value.
    """
    try:
        # If we had a shared lock, we need to promote to exclusive before
        # removing the lockfile. Attempt this, ignore failures.
        fcntl.flock(lf.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        os.unlink(lf.name)
    except OSError:
        # IOError is an alias of OSError on python 3.
        pass
    fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
    lf.close()
604
605def _hasher(method, filename):
606    import mmap
607
608    with open(filename, "rb") as f:
609        try:
610            with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
611                for chunk in iter(lambda: mm.read(8192), b''):
612                    method.update(chunk)
613        except ValueError:
614            # You can't mmap() an empty file so silence this exception
615            pass
616    return method.hexdigest()
617
618
def md5_file(filename):
    """
    Arguments:

    -  ``filename``: path to the input file.

    Returns the hexadecimal string representation of the MD5 checksum of
    ``filename``.
    """
    import hashlib
    try:
        # Request non-security usage where supported so FIPS-enabled
        # configurations do not reject MD5.
        digest = hashlib.new('MD5', usedforsecurity=False)
    except TypeError:
        # Some configurations don't appear to support two arguments
        digest = hashlib.new('MD5')
    return _hasher(digest, filename)
634
def sha256_file(filename):
    """
    Returns the hexadecimal representation of the 256-bit SHA checksum of
    ``filename``.

    Arguments:

    -  ``filename``: path to the file.
    """
    import hashlib
    return _hasher(hashlib.new('sha256'), filename)
646
def sha1_file(filename):
    """
    Returns the hexadecimal representation of the SHA1 checksum of
    ``filename``.

    Arguments:

    -  ``filename``: path to the file.
    """
    import hashlib
    return _hasher(hashlib.new('sha1'), filename)
657
def sha384_file(filename):
    """
    Returns the hexadecimal representation of the SHA384 checksum of
    ``filename``.

    Arguments:

    -  ``filename``: path to the file.
    """
    import hashlib
    return _hasher(hashlib.new('sha384'), filename)
668
def sha512_file(filename):
    """
    Returns the hexadecimal representation of the SHA512 checksum of
    ``filename``.

    Arguments:

    -  ``filename``: path to the file.
    """
    import hashlib
    return _hasher(hashlib.new('sha512'), filename)
679
def goh1_file(filename):
    """
    Returns the hexadecimal string representation of the Go mod h1 checksum of
    the filename. The Go mod h1 checksum uses the Go dirhash package. The
    package defines hashes over directory trees and is used by go mod for mod
    files and zip archives.

    Arguments:

    -  ``filename``: path to the file.
    """
    import hashlib
    import zipfile

    lines = []
    if zipfile.is_zipfile(filename):
        # Hash every archive member individually, in sorted name order.
        with zipfile.ZipFile(filename) as archive:
            for member in sorted(archive.namelist()):
                digest = hashlib.sha256(archive.read(member)).hexdigest()
                lines.append("%s  %s\n" % (digest, member))
    else:
        lines.append("%s  go.mod\n" % _hasher(hashlib.sha256(), filename))
    # The final h1 hash is the SHA256 of the per-file digest listing.
    summary = hashlib.sha256("".join(lines).encode('utf-8'))
    return summary.hexdigest()
708
def preserved_envvars_exported():
    """Returns the list of variables which are taken from the environment and
    placed in and exported from the metadata."""
    return 'BB_TASKHASH HOME LOGNAME PATH PWD SHELL USER LC_ALL BBSERVER'.split()
723
def preserved_envvars():
    """Returns the list of variables which are taken from the environment and
    placed in the metadata (the exported set plus bitbake's own control
    variables)."""
    extra = ['BBPATH', 'BB_PRESERVE_ENV', 'BB_ENV_PASSTHROUGH_ADDITIONS']
    return extra + preserved_envvars_exported()
733
def check_system_locale():
    """Make sure the required system locale (en_US.UTF-8) is available and
    that Python is using a UTF-8 filesystem encoding; exits the process
    with an explanatory message otherwise.

    No return value."""
    default_locale = locale.getlocale(locale.LC_CTYPE)

    try:
        locale.setlocale(locale.LC_CTYPE, ("en_US", "UTF-8"))
    except locale.Error:
        # Narrowed from a bare 'except:': only an unavailable/invalid locale
        # should produce this advice; a bare except would also swallow
        # KeyboardInterrupt and unrelated failures.
        sys.exit("Please make sure locale 'en_US.UTF-8' is available on your system")
    else:
        # Restore whatever locale the process started with.
        locale.setlocale(locale.LC_CTYPE, default_locale)

    if sys.getfilesystemencoding() != "utf-8":
        sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\n"
                 "Python can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
750
def filter_environment(good_vars):
    """
    Create a pristine environment for bitbake. This will remove variables that
    are not known and may influence the build in a negative way.

    Arguments:

    -  ``good_vars``: list of variables to keep in the environment.

    Returns a dictionary of the removed variables and their values.
    """

    removed_vars = {}
    for key in list(os.environ):
        if key not in good_vars:
            removed_vars[key] = os.environ.pop(key)

    # If we spawn a python process, we need to have a UTF-8 locale, else python's file
    # access methods will use ascii. You can't change that mode once the interpreter is
    # started so we have to ensure a locale is set. Ideally we'd use C.UTF-8 but not all
    # distros support that and we need to set something.
    os.environ["LC_ALL"] = "en_US.UTF-8"

    if removed_vars:
        logger.debug("Removed the following variables from the environment: %s", ", ".join(removed_vars.keys()))

    return removed_vars
781
def approved_variables():
    """
    Determine and return the list of variables which are approved
    to remain in the environment.
    """
    env = os.environ
    if 'BB_PRESERVE_ENV' in env:
        # The user asked to preserve everything.
        return env.keys()
    if 'BB_ENV_PASSTHROUGH' in env:
        approved = env['BB_ENV_PASSTHROUGH'].split()
        approved.append('BB_ENV_PASSTHROUGH')
    else:
        approved = preserved_envvars()
    if 'BB_ENV_PASSTHROUGH_ADDITIONS' in env:
        approved.extend(env['BB_ENV_PASSTHROUGH_ADDITIONS'].split())
        if 'BB_ENV_PASSTHROUGH_ADDITIONS' not in approved:
            approved.append('BB_ENV_PASSTHROUGH_ADDITIONS')
    return approved
800
def clean_environment():
    """
    Clean up any spurious environment variables. This will remove any
    variables the user hasn't chosen to preserve.

    Returns the dictionary of removed variables (empty when
    BB_PRESERVE_ENV is set).
    """
    if 'BB_PRESERVE_ENV' in os.environ:
        return {}
    return filter_environment(approved_variables())
813
def empty_environment():
    """
    Remove all variables from the environment.

    No return value.
    """
    while os.environ:
        name, _ = os.environ.popitem()
        # popitem removed it from python's view; also clear the C-level
        # environment explicitly.
        os.unsetenv(name)
823
def build_environment(d):
    """
    Export into os.environ every variable from the data store that is
    flagged as exported.

    Arguments:

    -  ``d``: the data store.

    No return value.
    """
    import bb.data
    for var in bb.data.keys(d):
        if d.getVarFlag(var, "export", False):
            os.environ[var] = d.getVar(var) or ""
839
840def _check_unsafe_delete_path(path):
841    """
842    Basic safeguard against recursively deleting something we shouldn't. If it returns True,
843    the caller should raise an exception with an appropriate message.
844    NOTE: This is NOT meant to be a security mechanism - just a guard against silly mistakes
845    with potentially disastrous results.
846    """
847    extra = ''
848    # HOME might not be /home/something, so in case we can get it, check against it
849    homedir = os.environ.get('HOME', '')
850    if homedir:
851        extra = '|%s' % homedir
852    if re.match('(/|//|/home|/home/[^/]*%s)$' % extra, os.path.abspath(path)):
853        return True
854    return False
855
def remove(path, recurse=False, ionice=False):
    """Equivalent to rm -f or rm -rf.

    Arguments:

    -  ``path``: glob pattern of file(s)/directory(ies) to remove.
    -  ``recurse``: deletes recursively if ``True``.
    -  ``ionice``: prepends ``ionice -c 3`` to the ``rm`` command. See ``man
       ionice``.

    No return value.
    """
    if not path:
        return
    matches = glob.glob(path)
    if recurse:
        for name in matches:
            if _check_unsafe_delete_path(name):
                raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % name)
        # shutil.rmtree(name) would be ideal but its too slow
        prefix = ['ionice', '-c', '3'] if ionice else []
        subprocess.check_call(prefix + ['rm', '-rf'] + matches)
        return
    for name in matches:
        try:
            os.unlink(name)
        except FileNotFoundError:
            # Already gone - matches rm -f semantics.
            pass
886
def prunedir(topdir, ionice=False):
    """
    Delete everything reachable from the directory named in ``topdir``.

    Arguments:

    -  ``topdir``: directory path.
    -  ``ionice``: prepends ``ionice -c 3`` to the ``rm`` command. See ``man
       ionice``.

    No return value; raises an Exception when ``topdir`` looks like a path
    that must never be deleted (see _check_unsafe_delete_path()).
    """
    # CAUTION:  This is dangerous!
    if _check_unsafe_delete_path(topdir):
        raise Exception('bb.utils.prunedir: called with dangerous path "%s", refusing to delete!' % topdir)
    remove(topdir, recurse=True, ionice=ionice)
903
904#
905# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var)
906# but thats possibly insane and suffixes is probably going to be small
907#
def prune_suffix(var, suffixes, d):
    """
    Check if ``var`` ends with any of the suffixes listed in ``suffixes``
    and remove the first one found.

    Arguments:

    -  ``var``: string to check for suffixes.
    -  ``suffixes``: list of strings representing suffixes to check for.
    -  ``d``: unused, kept for interface compatibility.

    Returns ``var`` with the suffix removed, or unchanged when none match.
    """
    for candidate in suffixes:
        # Skip empty suffixes - var[:-0] would return an empty string.
        if candidate and var.endswith(candidate):
            return var[:-len(candidate)]
    return var
924
def mkdirhier(directory):
    """Create a directory like 'mkdir -p', but does not complain if
    directory already exists like ``os.makedirs()``.

    Arguments:

    -  ``directory``: path to the directory.

    No return value.

    Calls bb.fatal() when the path still contains an unexpanded bitbake
    variable reference, since creating such a path would pollute WORKDIR.
    """
    if '${' in str(directory):
        # Fixed typo in the user-facing message ("polution" -> "pollution").
        bb.fatal("Directory name {} contains unexpanded bitbake variable. This may cause build failures and WORKDIR pollution.".format(directory))
    try:
        os.makedirs(directory)
    except OSError as e:
        # Only swallow the error when the directory already exists.
        if e.errno != errno.EEXIST or not os.path.isdir(directory):
            raise e
942
def movefile(src, dest, newmtime = None, sstat = None):
    """Moves a file from ``src`` to ``dest``, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems. Move is atomic where the OS allows it.

    Arguments:

    -  ``src`` -- Source file.
    -  ``dest`` -- Destination file.
    -  ``newmtime`` -- new mtime to be passed as float seconds since the epoch.
    -  ``sstat`` -- os.stat_result to use for the destination file.

    Returns an ``os.stat_result`` of the destination file if the
    source file is a symbolic link or the ``sstat`` argument represents a
    symbolic link - in which case the destination file will also be created as
    a symbolic link.

    Otherwise, returns ``newmtime`` on success and ``None`` on failure.
    """

    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        # Use lazy %-formatting; passing bare extra positional arguments to
        # logger.warning() without placeholders raises a logging format error.
        logger.warning("movefile: Stating source file failed: %s", e)
        return None

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except OSError:
        # dest doesn't exist yet; stat its parent so the same-device check
        # below still has something to compare against.
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            # Remove an existing symlink at dest so it can be replaced;
            # best-effort, failure is handled by the paths below.
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        # Source is a symlink: recreate the link at dest rather than copying
        # the link target's contents.
        target = None  # pre-set so the warning below can't hit an unbound name
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            os.unlink(src)
            return os.lstat(dest)
        except Exception as e:
            logger.warning("movefile: failed to properly create symlink: %s -> %s: %s", dest, target, e)
            return None

    renamefailed = 1
    # os.rename needs to know the dest path ending with file name
    # so append the file name to a path only if it's a dir specified
    srcfname = os.path.basename(src)
    destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \
                else dest

    if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
        try:
            bb.utils.rename(src, destpath)
            renamefailed = 0
        except Exception as e:
            # getattr: the exception may not carry errno (e.g. wrapped errors)
            if getattr(e, 'errno', None) != errno.EXDEV:
                # Some random error.
                logger.warning("movefile: Failed to move %s to %s: %s", src, dest, e)
                return None
            # Invalid cross-device-link 'bind' mounted or actually Cross-Device

    if renamefailed:
        didcopy = 0
        if stat.S_ISREG(sstat[stat.ST_MODE]):
            try: # For safety copy then move it over.
                shutil.copyfile(src, destpath + "#new")
                bb.utils.rename(destpath + "#new", destpath)
                didcopy = 1
            except Exception as e:
                logger.warning("movefile: copy %s -> %s failed: %s", src, dest, e)
                return None
        else:
            #we don't yet handle special, so we need to fall back to /bin/mv
            a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
            if a[0] != 0:
                logger.warning("movefile: Failed to move special file: '%s' to '%s': %s", src, dest, a)
                return None # failure
        try:
            if didcopy:
                os.lchown(destpath, sstat[stat.ST_UID], sstat[stat.ST_GID])
                os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
                os.unlink(src)
        except Exception as e:
            logger.warning("movefile: Failed to chown/chmod/unlink %s: %s", dest, e)
            return None

    if newmtime:
        os.utime(destpath, (newmtime, newmtime))
    else:
        # Carry the source's atime/mtime over to the destination.
        os.utime(destpath, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
1049
def copyfile(src, dest, newmtime = None, sstat = None):
    """
    Copies a file from ``src`` to ``dest``, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems.

    Arguments:

    -  ``src``: Source file.
    -  ``dest``: Destination file.
    -  ``newmtime``: new mtime to be passed as float seconds since the epoch.
    -  ``sstat``: os.stat_result to use for the destination file.

    Returns an ``os.stat_result`` of the destination file if the
    source file is a symbolic link or the ``sstat`` argument represents a
    symbolic link - in which case the destination file will also be created as
    a symbolic link.

    Otherwise, returns ``newmtime`` on success and ``False`` on failure.

    """
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        logger.warning("copyfile: stat of %s failed (%s)" % (src, e))
        return False

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except OSError:
        # dest doesn't exist yet; only its parent can be stat'ed.
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            # Remove an existing symlink at dest; best-effort.
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        # Source is a symlink: recreate the link rather than copying content.
        target = None  # pre-set so the warning below can't hit an unbound name
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            return os.lstat(dest)
        except Exception as e:
            logger.warning("copyfile: failed to create symlink %s to %s (%s)" % (dest, target, e))
            return False

    if stat.S_ISREG(sstat[stat.ST_MODE]):
        try:
            srcchown = False
            if not os.access(src, os.R_OK):
                # Make sure we can read it
                srcchown = True
                os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR)

            # For safety copy then move it over.
            shutil.copyfile(src, dest + "#new")
            bb.utils.rename(dest + "#new", dest)
        except Exception as e:
            logger.warning("copyfile: copy %s to %s failed (%s)" % (src, dest, e))
            return False
        finally:
            # Restore the source's original mode/times if we loosened them.
            if srcchown:
                os.chmod(src, sstat[stat.ST_MODE])
                os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))

    else:
        #we don't yet handle special, so we need to fall back to /bin/mv
        a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
        if a[0] != 0:
            logger.warning("copyfile: failed to copy special file %s to %s (%s)" % (src, dest, a))
            return False # failure
    try:
        os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
        os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
    except Exception as e:
        logger.warning("copyfile: failed to chown/chmod %s (%s)" % (dest, e))
        return False

    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
1144
def break_hardlinks(src, sstat = None):
    """
    Make ``src`` the sole hardlink to its data by copying it over itself
    when more than one link exists. Other links keep the old data (only
    their st_nlink count changes).

    Arguments:

    -  ``src``: source file path.
    -  ``sstat``: os.stat_result to use when checking the link count.

    Returns ``True`` on success and ``False`` on failure.
    """
    try:
        sstat = sstat or os.lstat(src)
    except Exception as e:
        logger.warning("break_hardlinks: stat of %s failed (%s)" % (src, e))
        return False
    # A copy over itself severs the link when the file is multiply linked.
    if sstat[stat.ST_NLINK] != 1:
        return copyfile(src, src, sstat=sstat)
    return True
1167
def which(path, item, direction = 0, history = False, executable=False):
    """
    Locate ``item`` within the colon-separated search ``path`` (like
    ``$PATH``).

    Arguments:

    -  ``path``: colon-separated list of directories to search.
    -  ``item``: name to search for.
    -  ``direction``: if non-zero, search the directories in reverse order.
    -  ``history``: if ``True``, also return the list of candidate paths
       that were checked, as ``(result, history)``.
    -  ``executable``: if ``True``, a candidate must be an executable file;
       otherwise mere existence suffices.

    Returns the matching path (made absolute if needed), or an empty string
    when nothing matches. With ``history=True`` the return value is the
    tuple ``(result, checked_paths)``.
    """

    if executable:
        def is_candidate(p):
            return os.path.isfile(p) and os.access(p, os.X_OK)
    else:
        def is_candidate(p):
            return os.path.exists(p)

    checked = []
    entries = (path or "").split(':')
    if direction != 0:
        entries = entries[::-1]

    for entry in entries:
        candidate = os.path.join(entry, item)
        checked.append(candidate)
        if not is_candidate(candidate):
            continue
        if not os.path.isabs(candidate):
            candidate = os.path.abspath(candidate)
        return (candidate, checked) if history else candidate

    return ("", checked) if history else ""
1213
@contextmanager
def umask(new_mask):
    """
    Context manager that temporarily sets the process umask to ``new_mask``
    and restores the previous mask on exit, even if the body raises.

    No return value.
    """
    saved_mask = os.umask(new_mask)
    try:
        yield
    finally:
        os.umask(saved_mask)
1226
def to_boolean(string, default=None):
    """
    Interpret ``string`` as a boolean.

    Arguments:

    -  ``string``: the value to interpret.
    -  ``default``: returned when ``string`` is falsy (``None``, ``0``,
       ``False`` or an empty string).

    Returns ``True`` for "y", "yes", "1", "true" (case-insensitive) or a
    non-zero integer; ``False`` for "n", "no", "0", "false" or integer 0
    (note integer 0 is falsy and yields ``default``). Raises ``ValueError``
    for any other string.
    """
    if not string:
        return default

    # Integers (including bools) are judged by their truth value.
    if isinstance(string, int):
        return string != 0

    lowered = string.lower()
    if lowered in ("y", "yes", "1", "true"):
        return True
    if lowered in ("n", "no", "0", "false"):
        return False
    raise ValueError("Invalid value for to_boolean: %s" % string)
1255
def contains(variable, checkvalues, truevalue, falsevalue, d):
    """Check whether a variable contains all of the values specified.

    Arguments:

    -  ``variable``: the variable name. Fetched and expanded (via
       d.getVar(variable)) and then split on whitespace into a set().
    -  ``checkvalues``: a whitespace-separated string (split into a set())
       or any iterable (coerced into a set()).
    -  ``truevalue``: returned when every check value is present.
    -  ``falsevalue``: returned when the variable is empty/unset or some
       check value is missing.
    -  ``d``: the data store.

    Returns ``truevalue`` or ``falsevalue`` accordingly.
    """

    val = d.getVar(variable)
    if not val:
        return falsevalue
    present = set(val.split())
    if isinstance(checkvalues, str):
        wanted = set(checkvalues.split())
    else:
        wanted = set(checkvalues)
    # All requested values must be present.
    return truevalue if wanted.issubset(present) else falsevalue
1285
def contains_any(variable, checkvalues, truevalue, falsevalue, d):
    """Check whether a variable contains at least one of the values specified.

    Arguments:

    -  ``variable``: the variable name. Fetched and expanded (via
       d.getVar(variable)) and then split on whitespace into a set().
    -  ``checkvalues``: a whitespace-separated string (split into a set())
       or any iterable (coerced into a set()).
    -  ``truevalue``: returned when any check value is present.
    -  ``falsevalue``: returned when the variable is empty/unset or no
       check value is present.
    -  ``d``: the data store.

    Returns ``truevalue`` or ``falsevalue`` accordingly.
    """
    val = d.getVar(variable)
    if not val:
        return falsevalue
    present = set(val.split())
    if isinstance(checkvalues, str):
        wanted = set(checkvalues.split())
    else:
        wanted = set(checkvalues)
    # A non-empty intersection means at least one value matched.
    return truevalue if wanted & present else falsevalue
1314
def filter(variable, checkvalues, d):
    """Return the words of the variable that also appear in ``checkvalues``.

    Arguments:

    -  ``variable``: the variable name. Fetched and expanded (via
       d.getVar(variable)) and then split on whitespace into a set().
    -  ``checkvalues``: a whitespace-separated string (split into a set())
       or any iterable (coerced into a set()).
    -  ``d``: the data store.

    Returns the matching words as a sorted, space-separated string (empty
    string when the variable is empty/unset).
    """

    val = d.getVar(variable)
    if not val:
        return ''
    words = set(val.split())
    wanted = set(checkvalues.split()) if isinstance(checkvalues, str) else set(checkvalues)
    return ' '.join(sorted(wanted & words))
1338
1339
def get_referenced_vars(start_expr, d):
    """
    Get the names of the variables referenced in a given expression.

    Arguments:

      -  ``start_expr``: the expression to scan for variable references.

         For example::

            ${VAR_A} string ${VAR_B}

         Or::

            ${@d.getVar('VAR')}

         If a referenced variable itself references other variables, those
         are returned as well, recursively.

      -  ``d``: the data store.

    Returns the names of vars referenced in ``start_expr`` (recursively), in
    quasi-BFS order (variables within the same level are ordered arbitrarily).
    """

    empty_data = bb.data.init()

    def direct_refs(expression):
        # expandWithRefs against an empty datastore gives only the variables
        # referenced directly by the expression. It yields a set, which is
        # why the overall order is only "quasi"-BFS.
        return empty_data.expandWithRefs(expression, None).references

    ret = []
    # Seed the traversal with the start expression's own references; the
    # start expression itself needs no getVar() lookup.
    initial = set(direct_refs(start_expr))
    queue = collections.deque(initial)
    seen = set(initial)

    while queue:
        name = queue.popleft()
        ret.append(name)
        # Fetch the unexpanded value of this variable and enqueue any
        # references we have not visited yet.
        new_vars = direct_refs(d.getVar(name, False)) - seen
        queue.extend(new_vars)
        seen.update(new_vars)
    return ret
1393
1394
def cpu_count():
    """Return the number of CPUs usable by the current process.

    Prefers ``os.sched_getaffinity(0)``, which honours CPU affinity masks
    (e.g. taskset/cgroup restrictions); falls back to
    ``multiprocessing.cpu_count()`` when affinity is unsupported.
    """
    try:
        return len(os.sched_getaffinity(0))
    except (OSError, AttributeError):
        # AttributeError: sched_getaffinity doesn't exist on some platforms
        # (e.g. macOS); OSError: the call itself failed.
        return multiprocessing.cpu_count()
1400
def nonblockingfd(fd):
    """Put the file descriptor ``fd`` into non-blocking mode."""
    current_flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)
1403
def process_profilelog(fn, pout = None):
    # Accepts either a single profile filename (pout optional, defaults to
    # fn + '.processed') or a list of filenames, in which case pout must be
    # supplied by the caller.
    if not pout:
        pout = fn + '.processed'

    with open(pout, 'w') as stream:
        import pstats
        filenames = fn if isinstance(fn, list) else [fn]
        stats = pstats.Stats(*filenames, stream=stream)
        # Dump hottest-first by self time (with callers), then by
        # cumulative time.
        stats.sort_stats('time')
        stats.print_stats()
        stats.print_callers()
        stats.sort_stats('cumulative')
        stats.print_stats()

        stream.flush()
1422
1423#
1424# Was present to work around multiprocessing pool bugs in python < 2.7.3
1425#
1426def multiprocessingpool(*args, **kwargs):
1427
1428    import multiprocessing.pool
1429    #import multiprocessing.util
1430    #multiprocessing.util.log_to_stderr(10)
1431    # Deal with a multiprocessing bug where signals to the processes would be delayed until the work
1432    # completes. Putting in a timeout means the signals (like SIGINT/SIGTERM) get processed.
1433    def wrapper(func):
1434        def wrap(self, timeout=None):
1435            return func(self, timeout=timeout if timeout is not None else 1e100)
1436        return wrap
1437    multiprocessing.pool.IMapIterator.next = wrapper(multiprocessing.pool.IMapIterator.next)
1438
1439    return multiprocessing.Pool(*args, **kwargs)
1440
def exec_flat_python_func(func, *args, **kwargs):
    """Execute a flat python function (defined with ``def funcname(args): ...``)

    Returns the return value of the function."""
    # Build a tiny python snippet that calls the requested function. The
    # argument values are smuggled in through the execution context under
    # generated names, and the call expression references those names.
    context = {}
    call_args = []
    # Positional arguments become arg_1, arg_2, ...
    for idx, value in enumerate(args, start=1):
        name = 'arg_%s' % idx
        context[name] = value
        call_args.append(name)
    # Keyword arguments keep their own names.
    context.update(kwargs)
    call_args += ['%s=%s' % (kw, kw) for kw in kwargs]
    code = 'retval = %s(%s)' % (func, ', '.join(call_args))
    comp = bb.utils.better_compile(code, '<string>', '<string>')
    bb.utils.better_exec(comp, context, code, '<string>')
    return context['retval']
1465
def edit_metadata(meta_lines, variables, varfunc, match_overrides=False):
    """Edit lines from a recipe or config file and modify one or more
    specified variable values set in the file using a specified callback
    function. Lines are expected to have trailing newlines.

    Arguments:

    -  ``meta_lines``: lines from the file; can be a list or an iterable
       (e.g. file pointer)
    -  ``variables``: a list of variable names to look for. Functions
       may also be specified, but must be specified with ``()`` at
       the end of the name. Note that the function doesn't have
       any intrinsic understanding of ``:append``, ``:prepend``, ``:remove``,
       or overrides, so these are considered as part of the name.
       These values go into a regular expression, so regular
       expression syntax is allowed.
    -  ``varfunc``: callback function called for every variable matching
       one of the entries in the variables parameter.

       The function should take four arguments:

       -  ``varname``: name of variable matched
       -  ``origvalue``: current value in file
       -  ``op``: the operator (e.g. ``+=``)
       -  ``newlines``: list of lines up to this point. You can use
          this to prepend lines before this variable setting
          if you wish.

       And should return a four-element tuple:

       -  ``newvalue``: new value to substitute in, or ``None`` to drop
          the variable setting entirely. (If the removal
          results in two consecutive blank lines, one of the
          blank lines will also be dropped).
       -  ``newop``: the operator to use - if you specify ``None`` here,
          the original operation will be used.
       -  ``indent``: number of spaces to indent multi-line entries,
          or ``-1`` to indent up to the level of the assignment
          and opening quote, or a string to use as the indent.
       -  ``minbreak``: ``True`` to allow the first element of a
          multi-line value to continue on the same line as
          the assignment, ``False`` to indent before the first
          element.

       To clarify, if you wish not to change the value, then you
       would return like this::

          return origvalue, None, 0, True
    -  ``match_overrides``: True to match items with _overrides on the end,
       False otherwise

    Returns a tuple:

    -  ``updated``: ``True`` if changes were made, ``False`` otherwise.
    -  ``newlines``: Lines after processing.
    """

    # Pre-compile one regex per requested name: function definitions (names
    # ending in "()") match "name ( ) {", plain variables match any
    # assignment operator followed by an opening quote (captured as the
    # last group so we know which quote character terminates the value).
    var_res = {}
    if match_overrides:
        override_re = r'(_[a-zA-Z0-9-_$(){}]+)?'
    else:
        override_re = ''
    for var in variables:
        if var.endswith('()'):
            var_res[var] = re.compile(r'^(%s%s)[ \\t]*\([ \\t]*\)[ \\t]*{' % (var[:-2].rstrip(), override_re))
        else:
            var_res[var] = re.compile(r'^(%s%s)[ \\t]*[?+:.]*=[+.]*[ \\t]*(["\'])' % (var, override_re))

    # Parser state: while in_var is set we are accumulating the (possibly
    # multi-line) value of that variable/function into full_value, with the
    # raw source lines kept in varlines so they can be replayed unchanged.
    updated = False
    varset_start = ''
    varlines = []
    newlines = []
    in_var = None
    full_value = ''
    var_end = ''

    def handle_var_end():
        # Called when the complete value of in_var has been gathered; asks
        # varfunc what to do and emits the (possibly rewritten) setting.
        # Returns True if the output differs from the input lines.
        prerun_newlines = newlines[:]
        op = varset_start[len(in_var):].strip()
        (newvalue, newop, indent, minbreak) = varfunc(in_var, full_value, op, newlines)
        changed = (prerun_newlines != newlines)

        if newvalue is None:
            # Drop the value
            return True
        elif newvalue != full_value or (newop not in [None, op]):
            if newop not in [None, op]:
                # Callback changed the operator
                varset_new = "%s %s" % (in_var, newop)
            else:
                varset_new = varset_start

            # Work out the continuation-line indent: -1 means line up under
            # the opening quote, an int is a space count, a string is used
            # verbatim.
            if isinstance(indent, int):
                if indent == -1:
                    indentspc = ' ' * (len(varset_new) + 2)
                else:
                    indentspc = ' ' * indent
            else:
                indentspc = indent
            if in_var.endswith('()'):
                # A function definition
                if isinstance(newvalue, list):
                    newlines.append('%s {\n%s%s\n}\n' % (varset_new, indentspc, ('\n%s' % indentspc).join(newvalue)))
                else:
                    if not newvalue.startswith('\n'):
                        newvalue = '\n' + newvalue
                    if not newvalue.endswith('\n'):
                        newvalue = newvalue + '\n'
                    newlines.append('%s {%s}\n' % (varset_new, newvalue))
            else:
                # Normal variable
                if isinstance(newvalue, list):
                    if not newvalue:
                        # Empty list -> empty string
                        newlines.append('%s ""\n' % varset_new)
                    elif minbreak:
                        # First item on first line
                        if len(newvalue) == 1:
                            newlines.append('%s "%s"\n' % (varset_new, newvalue[0]))
                        else:
                            newlines.append('%s "%s \\\n' % (varset_new, newvalue[0]))
                            for item in newvalue[1:]:
                                newlines.append('%s%s \\\n' % (indentspc, item))
                            newlines.append('%s"\n' % indentspc)
                    else:
                        # No item on first line
                        newlines.append('%s " \\\n' % varset_new)
                        for item in newvalue:
                            newlines.append('%s%s \\\n' % (indentspc, item))
                        newlines.append('%s"\n' % indentspc)
                else:
                    newlines.append('%s "%s"\n' % (varset_new, newvalue))
            return True
        else:
            # Put the old lines back where they were
            newlines.extend(varlines)
            # If newlines was touched by the function, we'll need to return True
            return changed

    # checkspc is set after a value was dropped so that a resulting pair of
    # consecutive blank lines can be squashed to one.
    checkspc = False

    for line in meta_lines:
        if in_var:
            # Continuation of a multi-line value: accumulate until var_end
            # (closing quote, or '}' for functions) is seen.
            value = line.rstrip()
            varlines.append(line)
            if in_var.endswith('()'):
                full_value += '\n' + value
            else:
                # Strip the trailing backslash continuation character.
                full_value += value[:-1]
            if value.endswith(var_end):
                if in_var.endswith('()'):
                    # Only a '}' that balances all '{' ends the function body.
                    if full_value.count('{') - full_value.count('}') >= 0:
                        continue
                    full_value = full_value[:-1]
                if handle_var_end():
                    updated = True
                    checkspc = True
                in_var = None
        else:
            skip = False
            for (varname, var_re) in var_res.items():
                res = var_re.match(line)
                if res:
                    isfunc = varname.endswith('()')
                    if isfunc:
                        splitvalue = line.split('{', 1)
                        var_end = '}'
                    else:
                        # The quote character that opened the value also ends it.
                        var_end = res.groups()[-1]
                        splitvalue = line.split(var_end, 1)
                    varset_start = splitvalue[0].rstrip()
                    value = splitvalue[1].rstrip()
                    if not isfunc and value.endswith('\\'):
                        value = value[:-1]
                    full_value = value
                    varlines = [line]
                    in_var = res.group(1)
                    if isfunc:
                        in_var += '()'
                    # Single-line value: terminator already present, finish now.
                    if value.endswith(var_end):
                        full_value = full_value[:-1]
                        if handle_var_end():
                            updated = True
                            checkspc = True
                        in_var = None
                    skip = True
                    break
            if not skip:
                if checkspc:
                    checkspc = False
                    if newlines and newlines[-1] == '\n' and line == '\n':
                        # Squash blank line if there are two consecutive blanks after a removal
                        continue
                newlines.append(line)
    return (updated, newlines)
1661
1662
def edit_metadata_file(meta_file, variables, varfunc):
    """Edit a recipe or configuration file and modify one or more specified
    variable values set in the file using a specified callback function.
    The file is only written to if the value(s) actually change.
    This is basically the file version of ``bb.utils.edit_metadata()``, see that
    function's description for parameter/usage information.

    Returns ``True`` if the file was written to, ``False`` otherwise.
    """
    with open(meta_file, 'r') as f:
        updated, newlines = edit_metadata(f, variables, varfunc)
    # Only rewrite the file when something actually changed.
    if updated:
        with open(meta_file, 'w') as f:
            f.writelines(newlines)
    return updated
1678
1679
def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
    """Edit ``bblayers.conf``, adding and/or removing layers.

    Arguments:

    -  ``bblayers_conf``: path to ``bblayers.conf`` file to edit
    -  ``add``: layer path (or list of layer paths) to add; ``None`` or empty
       list to add nothing
    -  ``remove``: layer path (or list of layer paths) to remove; ``None`` or
       empty list to remove nothing
    -  ``edit_cb``: optional callback function that will be called
       after processing adds/removes once per existing entry.

    Returns a tuple:

    -  ``notadded``: list of layers specified to be added but weren't
       (because they were already in the list)
    -  ``notremoved``: list of layers that were specified to be removed
       but weren't (because they weren't in the list)
    """

    def remove_trailing_sep(pth):
        # Normalise "path/" -> "path" so comparisons aren't foiled by a
        # trailing separator.
        if pth and pth[-1] == os.sep:
            pth = pth[:-1]
        return pth

    approved = bb.utils.approved_variables()
    def canonicalise_path(pth):
        # Canonical form for comparison: no trailing separator, with ~
        # expanded only when HOME is an approved variable.
        pth = remove_trailing_sep(pth)
        if 'HOME' in approved and '~' in pth:
            pth = os.path.expanduser(pth)
        return pth

    def layerlist_param(value):
        # Accept None, a single path, or a list of paths; always return a list.
        if not value:
            return []
        elif isinstance(value, list):
            return [remove_trailing_sep(x) for x in value]
        else:
            return [remove_trailing_sep(value)]

    addlayers = layerlist_param(add)
    removelayers = layerlist_param(remove)

    # Need to use a list here because we can't set non-local variables from a callback in python 2.x
    bblayercalls = []
    removed = []
    plusequals = False
    orig_bblayers = []

    def handle_bblayers_firstpass(varname, origvalue, op, newlines):
        # First pass: record every BBLAYERS assignment's operator and gather
        # the full current layer list without modifying anything.
        bblayercalls.append(op)
        if op == '=':
            # A plain '=' resets the list, discarding earlier values.
            del orig_bblayers[:]
        orig_bblayers.extend([canonicalise_path(x) for x in origvalue.split()])
        return (origvalue, None, 2, False)

    def handle_bblayers(varname, origvalue, op, newlines):
        # Second pass: apply removals, additions and the edit callback to
        # each BBLAYERS assignment in turn.
        updated = False
        bblayers = [remove_trailing_sep(x) for x in origvalue.split()]
        if removelayers:
            for removelayer in removelayers:
                for layer in bblayers:
                    # fnmatch so remove patterns may contain wildcards.
                    if fnmatch.fnmatch(canonicalise_path(layer), canonicalise_path(removelayer)):
                        updated = True
                        bblayers.remove(layer)
                        removed.append(removelayer)
                        break
        if addlayers and not plusequals:
            for addlayer in addlayers:
                if addlayer not in bblayers:
                    updated = True
                    bblayers.append(addlayer)
            # Consume the pending additions so they're added only once.
            del addlayers[:]

        if edit_cb:
            newlist = []
            for layer in bblayers:
                res = edit_cb(layer, canonicalise_path(layer))
                if res != layer:
                    newlist.append(res)
                    updated = True
                else:
                    newlist.append(layer)
            bblayers = newlist

        if updated:
            if op == '+=' and not bblayers:
                # Dropping the last entry from a '+=' means dropping the line.
                bblayers = None
            return (bblayers, None, 2, False)
        else:
            return (origvalue, None, 2, False)

    with open(bblayers_conf, 'r') as f:
        (_, newlines) = edit_metadata(f, ['BBLAYERS'], handle_bblayers_firstpass)

    if not bblayercalls:
        raise Exception('Unable to find BBLAYERS in %s' % bblayers_conf)

    # Try to do the "smart" thing depending on how the user has laid out
    # their bblayers.conf file
    if bblayercalls.count('+=') > 1:
        plusequals = True

    # Layers already present (and not about to be removed) shouldn't be
    # added again; report them via notadded instead.
    removelayers_canon = [canonicalise_path(layer) for layer in removelayers]
    notadded = []
    for layer in addlayers:
        layer_canon = canonicalise_path(layer)
        if layer_canon in orig_bblayers and not layer_canon in removelayers_canon:
            notadded.append(layer)
    notadded_canon = [canonicalise_path(layer) for layer in notadded]
    addlayers[:] = [layer for layer in addlayers if canonicalise_path(layer) not in notadded_canon]

    (updated, newlines) = edit_metadata(newlines, ['BBLAYERS'], handle_bblayers)
    if addlayers:
        # Still need to add these
        for addlayer in addlayers:
            newlines.append('BBLAYERS += "%s"\n' % addlayer)
        updated = True

    if updated:
        with open(bblayers_conf, 'w') as f:
            f.writelines(newlines)

    notremoved = list(set(removelayers) - set(removed))

    return (notadded, notremoved)
1807
def get_collection_res(d):
    """
    Return a dictionary mapping each layer collection name listed in
    BBFILE_COLLECTIONS to its file pattern from BBFILE_PATTERN_<collection>
    (an empty string when the pattern variable is unset).
    """
    names = (d.getVar('BBFILE_COLLECTIONS') or '').split()
    return {name: d.getVar('BBFILE_PATTERN_%s' % name) or '' for name in names}
1815
1816
def get_file_layer(filename, d, collection_res=None):
    """Determine the collection (or layer name, as defined by a layer's
    ``layer.conf`` file) containing the specified file.

    Arguments:

    -  ``filename``: the filename to look for.
    -  ``d``: the data store.
    -  ``collection_res``: dictionary with the layer names as keys and file
       patterns to match as defined with the BBFILE_COLLECTIONS and
       BBFILE_PATTERN variables respectively. The return value of
       ``bb.utils.get_collection_res()`` is the default if this variable is
       not specified.

    Returns the layer name containing the file. If multiple layers contain the
    file, the last matching layer name from collection_res is returned.
    """
    # Default is None rather than a mutable {} default argument; an empty
    # mapping was already treated as "unset" by the original falsy check,
    # so this is backward compatible.
    if not collection_res:
        collection_res = get_collection_res(d)

    def path_to_layer(path):
        # Use longest path so we handle nested layers
        matchlen = 0
        match = None
        for collection, regex in collection_res.items():
            if len(regex) > matchlen and re.match(regex, path):
                matchlen = len(regex)
                match = collection
        return match

    result = None
    bbfiles = (d.getVar('BBFILES_PRIORITIZED') or '').split()
    bbfilesmatch = False
    for bbfilesentry in bbfiles:
        if fnmatch.fnmatchcase(filename, bbfilesentry):
            bbfilesmatch = True
            # Match the BBFILES glob entry (not the file itself) against the
            # layer patterns, since the glob reflects where recipes live.
            result = path_to_layer(bbfilesentry)
            break

    if not bbfilesmatch:
        # Probably a bbclass: no BBFILES glob matched, so match the filename
        # itself against the layer patterns.
        result = path_to_layer(filename)

    return result
1861
1862
# Constant taken from http://linux.die.net/include/linux/prctl.h
PR_SET_PDEATHSIG = 1

class PrCtlError(Exception):
    pass

def signal_on_parent_exit(signame):
    """
    Trigger ``signame`` to be sent when the parent process dies.

    Arguments:

    -  ``signame``: name of the signal. See ``man signal``.

    No return value.
    """
    # Resolve e.g. "SIGTERM" to its numeric value, then ask the kernel to
    # deliver that signal to us when our parent exits (prctl(2)).
    # http://linux.die.net/man/2/prctl
    rc = cdll['libc.so.6'].prctl(PR_SET_PDEATHSIG, getattr(signal, signame))
    if rc != 0:
        raise PrCtlError('prctl failed with error code %s' % rc)
1884
1885#
1886# Manually call the ioprio syscall. We could depend on other libs like psutil
1887# however this gets us enough of what we need to bitbake for now without the
1888# dependency
1889#
1890_unamearch = os.uname()[4]
1891IOPRIO_WHO_PROCESS = 1
1892IOPRIO_CLASS_SHIFT = 13
1893
1894def ioprio_set(who, cls, value):
1895    NR_ioprio_set = None
1896    if _unamearch == "x86_64":
1897      NR_ioprio_set = 251
1898    elif _unamearch[0] == "i" and _unamearch[2:3] == "86":
1899      NR_ioprio_set = 289
1900    elif _unamearch == "aarch64":
1901      NR_ioprio_set = 30
1902
1903    if NR_ioprio_set:
1904        ioprio = value | (cls << IOPRIO_CLASS_SHIFT)
1905        rc = cdll['libc.so.6'].syscall(NR_ioprio_set, IOPRIO_WHO_PROCESS, who, ioprio)
1906        if rc != 0:
1907            raise ValueError("Unable to set ioprio, syscall returned %s" % rc)
1908    else:
1909        bb.warn("Unable to set IO Prio for arch %s" % _unamearch)
1910
def set_process_name(name):
    from ctypes import byref, create_string_buffer
    # Best-effort: give the process a readable name (e.g. in ps/top output).
    # This is nice to have for debugging, not essential, so every failure is
    # deliberately swallowed.
    try:
        # PR_SET_NAME == 15, see prctl(2).
        libc = cdll.LoadLibrary('libc.so.6')
        namebuf = create_string_buffer(name.encode('utf-8'))
        libc.prctl(15, byref(namebuf), 0, 0, 0)
    except:
        pass
1920
def enable_loopback_networking():
    """
    Configure the ``lo`` loopback interface with address 127.0.0.1 and
    netmask 255.0.0.0 and bring it up, using raw ioctl() calls on an
    AF_INET datagram socket.

    Requires sufficient privileges to configure network interfaces in the
    current network namespace (e.g. after disable_network() has unshared
    a new one). No return value.
    """
    # From bits/ioctls.h
    SIOCGIFFLAGS = 0x8913
    SIOCSIFFLAGS = 0x8914
    SIOCSIFADDR = 0x8916
    SIOCSIFNETMASK = 0x891C

    # if.h
    IFF_UP = 0x1
    IFF_RUNNING = 0x40

    # bits/socket.h
    AF_INET = 2

    # char ifr_name[IFNAMSIZ=16]
    ifr_name = struct.pack("@16s", b"lo")
    def netdev_req(fd, req, data = b""):
        # Build a struct ifreq by hand: interface name followed by the
        # request payload, zero-padded to the 16-byte union size.
        # Pad and add interface name
        data = ifr_name + data + (b'\x00' * (16 - len(data)))
        # Return all data after interface name
        return fcntl.ioctl(fd, req, data)[16:]

    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) as sock:
        fd = sock.fileno()

        # Set the interface address to 127.0.0.1.
        # struct sockaddr_in ifr_addr { unsigned short family; uint16_t sin_port ; uint32_t in_addr; }
        req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 127, 0, 0, 1)
        netdev_req(fd, SIOCSIFADDR, req)

        # Read-modify-write the interface flags to mark it up and running.
        # short ifr_flags
        flags = struct.unpack_from('@h', netdev_req(fd, SIOCGIFFLAGS))[0]
        flags |= IFF_UP | IFF_RUNNING
        netdev_req(fd, SIOCSIFFLAGS, struct.pack('@h', flags))

        # Set the netmask to 255.0.0.0.
        # struct sockaddr_in ifr_netmask
        req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 255, 0, 0, 0)
        netdev_req(fd, SIOCSIFNETMASK, req)
1958
def disable_network(uid=None, gid=None):
    """
    Disable networking in the current process if the kernel supports it, else
    just return after logging to debug. To do this we need to create a new user
    namespace, then map back to the original uid/gid.

    Arguments:

    -  ``uid``: original user id.
    -  ``gid``: original user group id.

    No return value.
    """
    libc = ctypes.CDLL('libc.so.6')

    # From sched.h
    # New user namespace
    CLONE_NEWUSER = 0x10000000
    # New network namespace
    CLONE_NEWNET = 0x40000000

    if uid is None:
        uid = os.getuid()
    if gid is None:
        gid = os.getgid()

    # Unshare into fresh user + network namespaces in one call. The user
    # namespace is what allows this to work without admin privileges; the
    # network namespace is what actually removes network access.
    ret = libc.unshare(CLONE_NEWNET | CLONE_NEWUSER)
    if ret != 0:
        logger.debug("System doesn't support disabling network without admin privs")
        return
    # Map our original uid/gid back into the new user namespace so file
    # ownership keeps working. Writing "deny" to setgroups is required
    # before an unprivileged process may write gid_map (see
    # user_namespaces(7)), hence the ordering below.
    with open("/proc/self/uid_map", "w") as f:
        f.write("%s %s 1" % (uid, uid))
    with open("/proc/self/setgroups", "w") as f:
        f.write("deny")
    with open("/proc/self/gid_map", "w") as f:
        f.write("%s %s 1" % (gid, gid))
1995
def export_proxies(d):
    """Export common proxy variables from the datastore to the environment.

    The set of variables is whatever ``bb.fetch2.get_fetcher_environment()``
    returns for the datastore ``d``. No return value.
    """
    # The summary above was previously a bare string expression placed after
    # the import, so the function had no actual docstring.
    from bb.fetch2 import get_fetcher_environment
    newenv = get_fetcher_environment(d)
    for v in newenv:
        os.environ[v] = newenv[v]
2002
def load_plugins(logger, plugins, pluginpath):
    """
    Import every python module found in ``pluginpath`` and append it (or,
    when the module defines a ``plugin_init`` hook, the object that hook
    returns) to the ``plugins`` list.
    """
    def _import_plugin(modname):
        logger.debug('Loading plugin %s' % modname)
        spec = importlib.machinery.PathFinder.find_spec(modname, path=[pluginpath])
        if spec:
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return module

    logger.debug('Loading plugins from %s...' % pluginpath)

    # Gather candidate module names for every recognised python extension,
    # de-duplicated across extensions.
    modnames = set()
    for ext in python_extensions:
        for fn in glob.glob(os.path.join(pluginpath, '*' + ext)):
            modnames.add(os.path.splitext(os.path.basename(fn))[0])

    for modname in modnames:
        if modname == '__init__':
            continue
        plugin = _import_plugin(modname)
        if hasattr(plugin, 'plugin_init'):
            obj = plugin.plugin_init(plugins)
            plugins.append(obj or plugin)
        else:
            plugins.append(plugin)
2026
2027
class LogCatcher(logging.Handler):
    """Logging handler for collecting logged messages so you can check them later"""

    def __init__(self):
        # Collected, formatted messages in arrival order.
        self.messages = []
        # Only capture WARNING and above.
        logging.Handler.__init__(self, logging.WARNING)

    def emit(self, record):
        # Store the formatted text rather than the raw LogRecord.
        self.messages.append(bb.build.logformatter.format(record))

    def contains(self, message):
        return message in self.messages
2037
def is_semver(version):
    """
    Arguments:

    -  ``version``: the version string.

    Returns ``True`` if the version string follow semantic versioning, ``False``
    otherwise.

    See https://semver.org/spec/v2.0.0.html.
    """
    # Verbose pattern: MAJOR.MINOR.PATCH core, optional -prerelease
    # identifiers, optional +build metadata.
    pattern = re.compile(
    r"""
    ^
    (0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)
    (?:-(
        (?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)
        (?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*
    ))?
    (?:\+(
        [0-9a-zA-Z-]+
        (?:\.[0-9a-zA-Z-]+)*
    ))?
    $
    """, re.VERBOSE)

    return pattern.match(version) is not None
2068
def rename(src, dst):
    """
    Wrapper around os.rename() which can handle cross device problems,
    e.g. from container filesystems, by falling back to a copy-and-delete
    via shutil.move() when the kernel refuses the rename with EXDEV.
    """
    try:
        os.rename(src, dst)
    except OSError as err:
        if err.errno == errno.EXDEV:
            # Invalid cross-device link error: src and dst live on different
            # filesystems, so move the data instead.
            shutil.move(src, dst)
        else:
            # Bare raise preserves the original traceback.
            raise
2080
@contextmanager
def environment(**envvars):
    """
    Context manager to selectively update the environment with the specified mapping.

    No return value.
    """
    # Remember the prior value (or absence) of only the variables we touch.
    saved = {name: os.environ.get(name) for name in envvars}
    try:
        os.environ.update(envvars)
        yield
    finally:
        # Restore each touched variable: put back its old value, or remove
        # it if it did not exist before.
        for name, old in saved.items():
            if old is None:
                os.environ.pop(name, None)
            else:
                os.environ[name] = old
2098
def is_local_uid(uid=''):
    """
    Check whether uid is a local one or not.
    Can't use pwd module since it gets all UIDs, not local ones only.

    Arguments:

    -  ``uid``: user id. If not specified the user id is determined from
       ``os.getuid()``.

    Returns ``True`` if the user id is local, ``False`` otherwise.
    """
    # Compare against the '' default explicitly: a plain truthiness test
    # would wrongly treat uid 0 (root) as "not specified".
    if uid == '':
        uid = os.getuid()
    uid = str(uid)
    with open('/etc/passwd', 'r') as f:
        for line in f:
            fields = line.split(':')
            # passwd format: name:passwd:uid:gid:...; skip malformed lines.
            if len(fields) >= 3 and fields[2] == uid:
                return True
    return False
2121
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
    """
    Generates a unique temporary file, independent of time.

    mkstemp() in glibc (at least) generates unique file names based on the
    current system time. When combined with highly parallel builds, and
    operating over NFS (e.g. shared sstate/downloads) this can result in
    conflicts and race conditions.

    This function adds additional entropy to the file name so that a collision
    is independent of time and thus extremely unlikely.

    Arguments:

    -  ``suffix``: filename suffix.
    -  ``prefix``: filename prefix.
    -  ``dir``: directory where the file will be created.
    -  ``text``: if ``True``, the file is opened in text mode.

    Returns a tuple containing:

    -  the file descriptor for the created file
    -  the name of the file.
    """
    alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    # 20 random alphanumerics appended to the prefix decouple uniqueness
    # from the system clock.
    entropy = "".join(random.choices(alphabet, k=20))
    base = prefix if prefix else tempfile.gettempprefix()
    return tempfile.mkstemp(suffix=suffix, prefix=base + entropy, dir=dir, text=text)
2152
def path_is_descendant(descendant, ancestor):
    """
    Returns ``True`` if the path ``descendant`` is a descendant of ``ancestor``
    (including being equivalent to ``ancestor`` itself). Otherwise returns
    ``False``.

    Correctly accounts for symlinks, bind mounts, etc. by using
    ``os.path.samestat()`` to compare paths.

    May raise any exception that ``os.stat()`` raises.

    Arguments:

    -  ``descendant``: path to check for being an ancestor.
    -  ``ancestor``: path to the ancestor ``descendant`` will be checked
       against.
    """

    ancestor_stat = os.stat(ancestor)

    # Walk up from the descendant one path component at a time, comparing
    # each directory against the ancestor. The previous implementation
    # stripped trailing slashes into an empty string, so the filesystem
    # root "/" was never compared and an ancestor of "/" always failed.
    check_dir = os.path.abspath(descendant).rstrip("/") or "/"
    while True:
        check_stat = os.stat(check_dir)
        if os.path.samestat(check_stat, ancestor_stat):
            return True
        parent = os.path.dirname(check_dir)
        if parent == check_dir:
            # Reached the root without finding the ancestor.
            return False
        check_dir = parent
2183
# If we don't have a timeout of some kind and a process/thread exits badly (for example
# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better
# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked.
# This function can still deadlock python since it can't signal the other threads to exit
# (signals are handled in the main thread) and even os._exit() will wait on non-daemon threads
# to exit.
@contextmanager
def lock_timeout(lock):
    try:
        # Block every signal while the lock is held so a handler can't run
        # (and potentially deadlock) in this window.
        saved_mask = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
        acquired = lock.acquire(timeout=5 * 60)
        if not acquired:
            bb.server.process.serverlog("Couldn't get the lock for 5 mins, timed out, exiting.\n%s" % traceback.format_stack())
            os._exit(1)
        yield acquired
    finally:
        lock.release()
        signal.pthread_sigmask(signal.SIG_SETMASK, saved_mask)
2202
# A version of lock_timeout without the check that the lock was locked and a shorter timeout
@contextmanager
def lock_timeout_nocheck(lock):
    acquired = False
    try:
        # Mirror lock_timeout(): signals stay blocked while we may hold the
        # lock, but a timeout here is reported to the caller via the yielded
        # value instead of exiting.
        saved_mask = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
        acquired = lock.acquire(timeout=10)
        yield acquired
    finally:
        if acquired:
            lock.release()
        signal.pthread_sigmask(signal.SIG_SETMASK, saved_mask)
2215