xref: /openbmc/openbmc/poky/meta/lib/oe/package_manager/__init__.py (revision 8460358c3d24c71d9d38fd126c745854a6301564)
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6
7from abc import ABCMeta, abstractmethod
8import os
9import glob
10import subprocess
11import shutil
12import re
13import collections
14import bb
15import tempfile
16import oe.utils
17import oe.path
18import string
19from oe.gpg_sign import get_signer
20import hashlib
21import fnmatch
22
23# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
    """Run one index-creation command and log anything it prints.

    'arg' is a complete shell command line; used by all PM backends so
    index files can be generated in parallel worker processes.
    """
    bb.note("Executing '%s' ..." % arg)
    # Capture stdout+stderr together so failures surface in the log.
    output = subprocess.check_output(arg, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
    if output:
        bb.note(output)
31
def opkg_query(cmd_output):
    """
    Parse the output from the package manager and return a dictionary with
    the information of the packages. This is used when the packages are in
    deb or ipk format.

    Returns {pkg: {"arch", "ver", "filename", "deps", "pkgarch", "provs"}}.
    """
    # Strips version constraints such as " (>= 1.2)" from dependency names.
    verregex = re.compile(r' \([=<>]* [^ )]*\)')

    def _fresh():
        # One record per package stanza; reset after each blank line.
        return {"pkg": "", "arch": "", "ver": "", "filename": "",
                "pkgarch": "", "dep": [], "prov": []}

    def _value(line):
        return line.split(": ")[1]

    packages = {}
    cur = _fresh()
    # The trailing '' guarantees the final stanza is flushed even when the
    # output does not end with a blank line.
    for raw in cmd_output.splitlines() + ['']:
        line = raw.rstrip()
        if ':' in line:
            if line.startswith("Package: "):
                cur["pkg"] = _value(line)
            elif line.startswith("Architecture: "):
                cur["arch"] = _value(line)
            elif line.startswith("Version: "):
                cur["ver"] = _value(line)
            elif line.startswith("File: ") or line.startswith("Filename:"):
                fname = _value(line)
                cur["filename"] = os.path.basename(fname) if "/" in fname else fname
            elif line.startswith("Depends: "):
                cur["dep"].extend(verregex.sub('', _value(line)).split(", "))
            elif line.startswith("Recommends: "):
                cur["dep"].extend("%s [REC]" % r
                                  for r in verregex.sub('', _value(line)).split(", "))
            elif line.startswith("PackageArch: "):
                cur["pkgarch"] = _value(line)
            elif line.startswith("Provides: "):
                cur["prov"].extend(verregex.sub('', _value(line)).split(", "))
        elif not line:
            # Blank line terminates a stanza: record the package.
            if not cur["filename"]:
                # IPK doesn't include the filename, so synthesize it.
                cur["filename"] = "%s_%s_%s.ipk" % (cur["pkg"], cur["ver"], cur["arch"])
            if cur["pkg"]:
                packages[cur["pkg"]] = {"arch": cur["arch"], "ver": cur["ver"],
                        "filename": cur["filename"], "deps": cur["dep"],
                        "pkgarch": cur["pkgarch"], "provs": cur["prov"]}
            cur = _fresh()

    return packages
92
def failed_postinsts_abort(pkgs, log_path):
    """Abort the build because the postinstall scriptlets of 'pkgs' failed.

    'log_path' points at the log file containing the failure details.
    """
    message = """Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
then please place them into pkg_postinst_ontarget:${PN} ().
Deferring to first boot via 'exit 1' is no longer supported.
Details of the failure are in %s.""" % (pkgs, log_path)
    bb.fatal(message)
98
def generate_locale_archive(d, rootfs, target_arch, localedir):
    """Pack the per-locale directories under 'localedir' into a locale-archive
    using cross-localedef, selecting alignment/endianness by target_arch."""
    # Pretty sure we don't need this for locale archive generation but
    # keeping it to be safe... Every supported arch uses 4-byte uint32
    # alignment; only the byte order differs.
    endian_flag = {
        "arc": "--little-endian",
        "arceb": "--big-endian",
        "arm": "--little-endian",
        "armeb": "--big-endian",
        "aarch64": "--little-endian",
        "aarch64_be": "--big-endian",
        "sh4": "--big-endian",
        "powerpc": "--big-endian",
        "powerpc64": "--big-endian",
        "powerpc64le": "--little-endian",
        "mips": "--big-endian",
        "mipsisa32r6": "--big-endian",
        "mips64": "--big-endian",
        "mipsisa64r6": "--big-endian",
        "mipsel": "--little-endian",
        "mipsisa32r6el": "--little-endian",
        "mips64el": "--little-endian",
        "mipsisa64r6el": "--little-endian",
        "riscv64": "--little-endian",
        "riscv32": "--little-endian",
        "i586": "--little-endian",
        "i686": "--little-endian",
        "x86_64": "--little-endian",
        "loongarch64": "--little-endian"
    }
    if target_arch not in endian_flag:
        bb.error("locale_arch_options not found for target_arch=" + target_arch)
        bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
    arch_options = ["--uint32-align=4", endian_flag[target_arch]]

    # Need to set this so cross-localedef knows where the archive is
    env = dict(os.environ)
    env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")

    for entry in sorted(os.listdir(localedir)):
        subdir = os.path.join(localedir, entry)
        if not os.path.isdir(subdir):
            continue
        cmd = ["cross-localedef", "--verbose"] + arch_options + ["--add-to-archive", subdir]
        subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
145
class Indexer(object, metaclass=ABCMeta):
    """Abstract base class for backend-specific package index writers."""

    def __init__(self, d, deploy_dir):
        # d: bitbake datastore; deploy_dir: directory holding the packages
        # to be indexed.
        self.d = d
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        """Create the index files for deploy_dir."""
154
class PkgsList(object, metaclass=ABCMeta):
    """Abstract base class for listing the packages installed in a rootfs."""

    def __init__(self, d, rootfs_dir):
        # d: bitbake datastore; rootfs_dir: root of the target filesystem.
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list_pkgs(self):
        """Return the packages installed in rootfs_dir."""
163
class PackageManager(object, metaclass=ABCMeta):
    """
    This is an abstract class. Do not instantiate this directly.

    Common driver for the rpm/ipk/deb backends: collects and runs postinst
    intercept scripts, installs complementary packages, and manages the
    deploy directory lock.
    """

    def __init__(self, d, target_rootfs):
        # d: bitbake datastore; target_rootfs: filesystem root that packages
        # are installed into.
        self.d = d
        self.target_rootfs = target_rootfs
        # Subclasses set deploy_dir before using deploy_dir_lock();
        # deploy_lock holds the bb.utils.lockfile() handle while taken.
        self.deploy_dir = None
        self.deploy_lock = None
        self._initialize_intercepts()

    def _initialize_intercepts(self):
        """Copy the postinst intercept scripts into a per-rootfs scratch dir."""
        bb.note("Initializing intercept dir for %s" % self.target_rootfs)
        # As there might be more than one instance of PackageManager operating at the same time
        # we need to isolate the intercept_scripts directories from each other,
        # hence the ugly hash digest in dir name.
        self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
                                           (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))

        # POSTINST_INTERCEPTS is an explicit list of scripts; when unset, fall
        # back to globbing a directory (POSTINST_INTERCEPTS_PATH, then
        # POSTINST_INTERCEPTS_DIR, then the default under COREBASE).
        postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
        if not postinst_intercepts:
            postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
            if not postinst_intercepts_path:
                postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
            postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)

        bb.debug(1, 'Collected intercepts:\n%s' % ''.join('  %s\n' % i for i in postinst_intercepts))
        # Start from a clean directory so stale scripts never run.
        bb.utils.remove(self.intercepts_dir, True)
        bb.utils.mkdirhier(self.intercepts_dir)
        for intercept in postinst_intercepts:
            shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))

    @abstractmethod
    def _handle_intercept_failure(self, failed_script):
        """Backend hook: record that the postinsts listed in 'failed_script'
        (a space-separated package list) must be deferred to first boot."""
        pass

    def _postpone_to_first_boot(self, postinst_intercept_hook):
        """Extract the '##PKGS:' marker line from an intercept script and hand
        the listed packages to the backend for first-boot deferral."""
        with open(postinst_intercept_hook) as intercept:
            registered_pkgs = None
            for line in intercept.read().split("\n"):
                m = re.match(r"^##PKGS:(.*)", line)
                if m is not None:
                    registered_pkgs = m.group(1).strip()
                    break

            if registered_pkgs is not None:
                bb.note("If an image is being built, the postinstalls for the following packages "
                        "will be postponed for first boot: %s" %
                        registered_pkgs)

                # call the backend dependent handler
                self._handle_intercept_failure(registered_pkgs)


    def run_intercepts(self, populate_sdk=None):
        """Run every executable intercept script collected at init time.

        populate_sdk selects failure handling: None (image build) postpones
        qemu-usermode failures to first boot; 'host'/'target' (SDK builds)
        note or abort instead, since there is no first boot.
        """
        intercepts_dir = self.intercepts_dir

        bb.note("Running intercept scripts:")
        # Scripts locate the rootfs and native sysroot via the environment.
        os.environ['D'] = self.target_rootfs
        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
        for script in os.listdir(intercepts_dir):
            script_full = os.path.join(intercepts_dir, script)

            # Skip the helper itself and anything not executable.
            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
                continue

            # we do not want to run any multilib variant of this
            if script.startswith("delay_to_first_boot"):
                self._postpone_to_first_boot(script_full)
                continue

            if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
                bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                continue

            bb.note("> Executing %s intercept ..." % script)

            try:
                output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
                if output: bb.note(output.decode("utf-8"))
            except subprocess.CalledProcessError as e:
                bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
                if populate_sdk == 'host':
                    bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                elif populate_sdk == 'target':
                    # Missing qemu usermode support is tolerated for target
                    # SDKs; anything else is fatal.
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                    else:
                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                else:
                    # Image build: without qemu usermode, defer to first boot;
                    # any other failure aborts the build.
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                        self._postpone_to_first_boot(script_full)
                    else:
                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))

    @abstractmethod
    def update(self):
        """
        Update the package manager package database.
        """
        pass

    @abstractmethod
    def install(self, pkgs, attempt_only=False, hard_depends_only=False):
        """
        Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
        True, installation failures are ignored.
        """
        pass

    @abstractmethod
    def remove(self, pkgs, with_dependencies=True):
        """
        Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
        is False, then any dependencies are left in place.
        """
        pass

    @abstractmethod
    def write_index(self):
        """
        This function creates the index files
        """
        pass

    @abstractmethod
    def remove_packaging_data(self):
        """Remove the backend's package database/state from the rootfs."""
        pass

    @abstractmethod
    def list_installed(self):
        """Return a dict of installed packages (opkg_query()-style layout)."""
        pass

    @abstractmethod
    def extract(self, pkg):
        """
        Returns the path to a tmpdir where resides the contents of a package.
        Deleting the tmpdir is responsability of the caller.
        """
        pass

    @abstractmethod
    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
        """
        Add remote package feeds into repository manager configuration. The parameters
        for the feeds are set by feed_uris, feed_base_paths and feed_archs.
        See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
        for their description.
        """
        pass

    def install_glob(self, globs, sdk=False):
        """
        Install all packages that match a glob.
        """
        # TODO don't have sdk here but have a property on the superclass
        # (and respect in install_complementary)
        if sdk:
            pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK")
        else:
            pkgdatadir = self.d.getVar("PKGDATA_DIR")

        try:
            bb.note("Installing globbed packages...")
            cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
            bb.note('Running %s' % cmd)
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()
            if stderr: bb.note(stderr.decode("utf-8"))
            pkgs = stdout.decode("utf-8")
            self.install(pkgs.split(), attempt_only=True)
        except subprocess.CalledProcessError as e:
            # Return code 1 means no packages matched
            # NOTE(review): Popen/communicate never raises CalledProcessError,
            # so this handler looks unreachable as written — confirm intent.
            if e.returncode != 1:
                bb.fatal("Could not compute globbed packages list. Command "
                         "'%s' returned %d:\n%s" %
                         (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently installed
        packages e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to
        call this function explicitly after the normal package installation.
        """
        if globs is None:
            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
            split_linguas = set()

            # An IMAGE_LINGUAS entry like "en-us" also pulls in the bare "en".
            for translation in self.d.getVar('IMAGE_LINGUAS').split():
                split_linguas.add(translation)
                split_linguas.add(translation.split('-')[0])

            split_linguas = sorted(split_linguas)

            for lang in split_linguas:
                globs += " *-locale-%s" % lang
                for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
                    # Each complementary glob is a %-format pattern taking the
                    # language code.
                    globs += (" " + complementary_linguas) % lang

        if globs:
            # we need to write the list of installed packages to a file because the
            # oe-pkgdata-util reads it from a file
            with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
                pkgs = self.list_installed()

                provided_pkgs = set()
                for pkg in pkgs.values():
                    provided_pkgs |= set(pkg.get('provs', []))

                output = oe.utils.format_pkg_list(pkgs, "arch")
                installed_pkgs.write(output)
                installed_pkgs.flush()

                cmd = ["oe-pkgdata-util",
                    "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
                    globs]
                exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
                if exclude:
                    cmd.extend(['--exclude=' + '|'.join(exclude.split())])
                try:
                    bb.note('Running %s' % cmd)
                    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    stdout, stderr = proc.communicate()
                    if stderr: bb.note(stderr.decode("utf-8"))
                    complementary_pkgs = stdout.decode("utf-8")
                    complementary_pkgs = set(complementary_pkgs.split())
                    # Skip glob matches that are already provided by an
                    # installed package.
                    skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
                    install_pkgs = sorted(complementary_pkgs - provided_pkgs)
                    bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
                        ' '.join(install_pkgs),
                        ' '.join(skip_pkgs)))
                    self.install(install_pkgs, hard_depends_only=True)
                except subprocess.CalledProcessError as e:
                    # NOTE(review): Popen/communicate never raises
                    # CalledProcessError; this handler looks unreachable — confirm.
                    bb.fatal("Could not compute complementary packages list. Command "
                            "'%s' returned %d:\n%s" %
                            (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

        if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
            target_arch = self.d.getVar('TARGET_ARCH')
            localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
            if os.path.exists(localedir) and os.listdir(localedir):
                generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
                # And now delete the binary locales
                self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)

    def deploy_dir_lock(self):
        """Take an exclusive lock (deploy.lock) on the deploy directory."""
        if self.deploy_dir is None:
            raise RuntimeError("deploy_dir is not set!")

        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")

        self.deploy_lock = bb.utils.lockfile(lock_file_name)

    def deploy_dir_unlock(self):
        """Release the deploy directory lock, if held."""
        if self.deploy_lock is None:
            return

        bb.utils.unlockfile(self.deploy_lock)

        self.deploy_lock = None

    def construct_uris(self, uris, base_paths):
        """
        Construct URIs based on the following pattern: uri/base_path where 'uri'
        and 'base_path' correspond to each element of the corresponding array
        argument leading to len(uris) x len(base_paths) elements on the returned
        array
        """
        def _append(arr1, arr2, sep='/'):
            # Join each element of arr1 with each of arr2, normalizing
            # redundant separators at the boundary; when arr2 is empty the
            # arr1 entries are returned (separator-stripped) as-is.
            res = []
            narr1 = [a.rstrip(sep) for a in arr1]
            narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
            for a1 in narr1:
                if arr2:
                    for a2 in narr2:
                        res.append("%s%s%s" % (a1, sep, a2))
                else:
                    res.append(a1)
            return res
        return _append(uris, base_paths)
449
def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies, include_self=False):
    """
    Go through our do_package_write_X dependencies and hardlink the packages we depend
    upon into the repo directory. This prevents us seeing other packages that may
    have been built that we don't depend upon and also packages for architectures we don't
    support.

    subrepo_dir is recreated from scratch; deploydir is the backend's global
    deploy directory; taskname is e.g. 'package_write_rpm'. When
    filterbydependencies is False, the whole deploydir is exposed via
    symlinks instead of filtering by task dependencies.
    """
    import errno

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    pn = d.getVar("PN")
    seendirs = set()
    multilibs = {}

    bb.utils.remove(subrepo_dir, recurse=True)
    bb.utils.mkdirhier(subrepo_dir)

    # Detect bitbake -b usage
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps or not filterbydependencies:
        # No dependency data to filter by: symlink every per-arch deploy
        # directory (both the '-' and '_' spellings of each arch name).
        for arch in d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").split() + d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").replace("-", "_").split():
            target = os.path.join(deploydir + "/" + arch)
            if os.path.exists(target):
                oe.path.symlink(target, subrepo_dir + "/" + arch, True)
        return

    # Locate our own task entry in BB_TASKDEPDATA; taskdepdata values are
    # tuples of (pn, taskname, ..., deps, ...) indexed by position below.
    start = None
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
            break
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
    pkgdeps = set()
    start = [start]
    if include_self:
        seen = set()
    else:
        seen = set(start)
    # Support direct dependencies (do_rootfs -> do_package_write_X)
    # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
    # Breadth-first walk collecting every do_<taskname> dependency.
    while start:
        next = []
        for dep2 in start:
            for dep in taskdepdata[dep2][3]:
                if include_self or taskdepdata[dep][0] != pn:
                    if "do_" + taskname in dep:
                        pkgdeps.add(dep)
                elif dep not in seen:
                    next.append(dep)
                    seen.add(dep)
        start = next

    for dep in pkgdeps:
        c = taskdepdata[dep][0]
        # NOTE(review): oe.sstatesig is not imported at the top of this file;
        # presumably it is pulled in via another oe import — verify.
        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
        if not manifest:
            bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
        if not os.path.exists(manifest):
            continue
        with open(manifest, "r") as f:
            for l in f:
                l = l.strip()
                deploydir = os.path.normpath(deploydir)
                # Map the deployed path into the subrepo by stripping the
                # deploydir prefix (or its '-prediff' variant).
                if bb.data.inherits_class('packagefeed-stability', d):
                    dest = l.replace(deploydir + "-prediff", "")
                else:
                    dest = l.replace(deploydir, "")
                dest = subrepo_dir + dest
                if l.endswith("/"):
                    # Directory entry in the manifest: just ensure it exists.
                    if dest not in seendirs:
                        bb.utils.mkdirhier(dest)
                        seendirs.add(dest)
                    continue
                # Try to hardlink the file, copy if that fails
                destdir = os.path.dirname(dest)
                if destdir not in seendirs:
                    bb.utils.mkdirhier(destdir)
                    seendirs.add(destdir)
                try:
                    os.link(l, dest)
                except OSError as err:
                    if err.errno == errno.EXDEV:
                        # Cross-device link: fall back to a copy.
                        bb.utils.copyfile(l, dest)
                    else:
                        raise
538
539
def generate_index_files(d):
    """Write package index files for every enabled packaging backend whose
    deploy directory exists; aborts the build if an indexer reports an error."""
    from oe.package_manager.rpm import RpmSubdirIndexer
    from oe.package_manager.ipk import OpkgIndexer
    from oe.package_manager.deb import DpkgIndexer

    # Backend name -> (indexer class, deploy directory).
    indexer_map = {
        "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
    }

    # PACKAGE_CLASSES entries look like "package_rpm"; strip the prefix.
    for pkg_class in d.getVar('PACKAGE_CLASSES').replace("package_", "").split():
        entry = indexer_map.get(pkg_class)
        if entry is None:
            continue
        indexer_cls, deploy_dir = entry
        if not os.path.exists(deploy_dir):
            continue
        result = indexer_cls(d, deploy_dir).write_index()
        if result is not None:
            bb.fatal(result)
564