1#!/usr/bin/env python3
2#
3# Build the required docker image to run package unit tests
4#
5# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
8#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-empty value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
11#   BUILD_URL:        <optional, used to detect running under CI context
12#                     (ex. Jenkins)>
13#   BRANCH:           <optional, branch to build from each of the openbmc/
14#                     repositories>
15#                     default is master, which will be used if input branch not
16#                     provided or not found
17#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
18#                     default ones in /etc/apt/sources.list>
19#                     default is empty, and no mirror is used.
20#   http_proxy        The HTTP address of the proxy server to connect to.
21#                     Default: "", proxy is not setup if this is not set
22
23import os
24import sys
25import threading
26from datetime import date
27from hashlib import sha256
28
29# typing.Dict is used for type-hints.
30from typing import Any, Callable, Dict, Iterable, Optional  # noqa: F401
31
32from sh import docker, git, nproc, uname  # type: ignore
33
try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except Exception:

    class TypedDict(dict):  # type: ignore
        # Runtime-only stand-in: behaves as a plain dict and performs no
        # key/type checking.
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs: Any) -> None:
            super().__init_subclass__()
43
44
# Declare some variables used in package definitions.
# Installation prefix shared by every package build and the final COPYs.
prefix = "/usr/local"
# CPU count reported by `nproc`, used for parallel make/ninja invocations.
proc_count = nproc().strip()
48
49
class PackageDef(TypedDict, total=False):
    """Package Definition for packages dictionary.

    With total=False every key is optional at the dict level; the per-field
    comments below note which are logically required.  Fields prefixed with
    '__' are filled in at runtime by the Package class, not by the static
    definitions.
    """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.
78
79
# Packages to include in image.
#   Keys are either a bare name ("boost") or a GitHub "org/repo" path; unless
#   a 'url' lambda is supplied, the source is fetched from the GitHub archive
#   URL for that repo (see Package._url).
packages = {
    "boost": PackageDef(
        rev="1.81.0",
        url=(
            lambda pkg, rev: f"https://boostorg.jfrog.io/artifactory/main/release/{rev}/source/{pkg}_{rev.replace('.', '_')}.tar.gz"  # noqa: E501
        ),
        build_type="custom",
        build_steps=[
            (
                "./bootstrap.sh"
                f" --prefix={prefix} --with-libraries=context,coroutine"
            ),
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            (
                f"ln -s {prefix}/include/nlohmann/json.hpp"
                f" {prefix}/include/json.hpp"
            ),
        ],
    ),
    "json-c/json-c": PackageDef(
        rev="json-c-0.16-20220414",
        build_type="cmake",
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-6.0 2022-11-28
    "openbmc/linux": PackageDef(
        rev="1b16243b004ce4d977a9f3b9d9e715cf5028f867",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    # Entries below omit 'rev'; Package._update_rev() resolves it at build
    # time from Gerrit or the tip of {branch} (falling back to master/main).
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/libpldm": PackageDef(
        build_type="meson",
        config_flags=[
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]
269
# Define common flags used for builds.
#   These strings are interpolated into the per-build-type shell commands.
configure_flags = f"--prefix={prefix}"

cmake_flags = (
    "-DBUILD_SHARED_LIBS=ON"
    " -DCMAKE_BUILD_TYPE=RelWithDebInfo"
    f" -DCMAKE_INSTALL_PREFIX:PATH={prefix}"
    " -GNinja"
    " -DCMAKE_MAKE_PROGRAM=ninja"
)

meson_flags = f"--wrap-mode=nodownload -Dprefix={prefix}"
291
292
class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Each Package is a thread that generates one Docker stage (download,
    build, install of a single package) after waiting for its dependency
    stages to finish.  Generally, this class should not be instantiated
    directly but through Package.generate_all().
    """

    # Copy the packages dictionary so runtime mutations (resolved revs,
    # generated tags, back-references) don't alter the module definition.
    packages = packages.copy()

    # Lock used for thread-safety of the shared PackageDef entries.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar )"""
        super().__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef entry.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which could be being modified.
        #   'with' (rather than bare acquire/release) guarantees the lock
        #   is released even if a lookup raises, which would otherwise
        #   deadlock every other build thread.
        with Package.lock:
            deps: Iterable[Package] = [
                Package.packages[deppkg]["__package"]
                for deppkg in self.pkg_def.get("depends", [])
            ]

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        with Package.lock:
            tag = Docker.tagname(self._stagename(), dockerfile)
            self.pkg_def["__tag"] = tag

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Holding the lock here allows all the
        #   threads to start before they 'join' their dependencies.
        with Package.lock:
            for t in pkg_threads:
                t.start()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # Sorted for consistency so the generated file (and therefore the
        # final image hash) is stable across runs.
        return "".join(
            f"{pkg}:{cls.packages[pkg]['rev']},"
            for pkg in sorted(cls.packages.keys())
        )

    def _update_rev(self) -> None:
        """Look up the HEAD for missing a static rev."""

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git(
            "ls-remote", "--heads", f"https://github.com/{self.package}"
        )

        # Find the branch matching {branch} (or fallback to master).
        #   This section is locked because we are modifying the PackageDef.
        with Package.lock:
            for line in lookup.split("\n"):
                if f"refs/heads/{branch}" in line:
                    self.pkg_def["rev"] = line.split()[0]
                elif (
                    "refs/heads/master" in line or "refs/heads/main" in line
                ) and "rev" not in self.pkg_def:
                    self.pkg_def["rev"] = line.split()[0]

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """Get the URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack to source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        # Pick the tar decompression flag from the archive suffix.
        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """Formulate the dockerfile snippet necessary to COPY all depends."""

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        """Autoconf flow: bootstrap, configure, make, make install."""
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        """CMake flow: out-of-tree configure in 'builddir', build, install."""
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        """Custom flow: run the package's 'build_steps' verbatim."""
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        """Make flow: parallel make followed by make install."""
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        """Meson flow: configure 'builddir', then ninja build and install."""
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result
572
573
class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """Generate a timestamp for today using the ISO week."""
        year, week, _ = date.today().isocalendar()
        return f"{year}-W{week:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile."""
        # Hash the Dockerfile so any content change yields a new tag.
        content_hash = sha256(dockerfile.encode()).hexdigest()[0:16]

        name = docker_image_name
        if pkgname:
            name = f"{name}-{pkgname}"

        return f"{name}:{Docker.timestamp()}-{content_hash}"

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        already_built = not force_build and docker.image.ls(
            tag, "--format", '"{{.Repository}}:{{.Tag}}"'
        )
        if already_built:
            print(
                f"Image {tag} already exists.  Skipping.", file=sys.stderr
            )
            return

        # Relay each build-output line to stderr, prefixed with the package
        # name, so progress is visible without polluting stdout (which is
        # reserved for the final docker tag).
        def _relay(line: str) -> None:
            print(pkg + ":", line, end="", file=sys.stderr, flush=True)

        # Build it.
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=_relay,
        )
631
632
# Read a bunch of environment variables.
docker_image_name = os.environ.get(
    "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test"
)
# NOTE(review): the raw string's truthiness is what later code checks, so
# any non-empty value (including "0") forces a rebuild.
force_build = os.environ.get("FORCE_DOCKER_BUILD")
# Presence of BUILD_URL marks an automated CI context (ex. Jenkins).
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

# Gerrit-provided project/revision; consumed by Package._update_rev to pin
# the package under review to its patchset.
gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Ensure appropriate docker build output to see progress and identify
# any issues
os.environ["BUILDKIT_PROGRESS"] = "plain"

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0
662
# Determine the architecture for Docker.
#   Map the host machine type to the matching Docker Hub repository prefix.
arch = uname("-m").strip()
docker_base_map = {
    "ppc64le": "ppc64le/",
    "x86_64": "",
    "aarch64": "arm64v8/",
}
if arch not in docker_base_map:
    print(
        f"Unsupported system architecture({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)
docker_base = docker_base_map[arch]
677
# Special flags if setting up a deb mirror.
#   Rewrites /etc/apt/sources.list so the release, -updates, -security,
#   -proposed, and -backports suites all point at UBUNTU_MIRROR.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME) \
        main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-updates \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-security \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-proposed \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-backports \
            main restricted universe multiverse" >> /etc/apt/sources.list
"""
698
# Special flags for proxying.
# proxy_cmd: Dockerfile snippet configuring git's http proxy.
proxy_cmd = ""
# proxy_keyserver: apt-key option for reaching the keyserver via the proxy.
proxy_keyserver = ""
# proxy_args: 'docker build' arguments propagating the proxy into builds.
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    # The same proxy is used for both http and https traffic.
    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )
718
# Create base Dockerfile.
#   NOTE: the base image tag is derived from a sha256 of this exact string
#   (see Docker.tagname), so any change here forces a rebuild of everything.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' \
        /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    autoconf \
    autoconf-archive \
    bison \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    cmake \
    curl \
    dbus \
    device-tree-compiler \
    flex \
    g++-12 \
    gcc-12 \
    git \
    iproute2 \
    iputils-ping \
    libc6-dbg \
    libc6-dev \
    libconfig++-dev \
    libcryptsetup-dev \
    libdbus-1-dev \
    libevdev-dev \
    libgpiod-dev \
    libi2c-dev \
    libjpeg-dev \
    libjson-perl \
    libldap2-dev \
    libmimetic-dev \
    libnl-3-dev \
    libnl-genl-3-dev \
    libpam0g-dev \
    libpciaccess-dev \
    libperlio-gzip-perl \
    libpng-dev \
    libprotobuf-dev \
    libsnmp-dev \
    libssl-dev \
    libsystemd-dev \
    libtool \
    liburing2-dbgsym \
    liburing-dev \
    libxml2-utils \
    libxml-simple-perl \
    ninja-build \
    npm \
    pkg-config \
    protobuf-compiler \
    python3 \
    python3-dev\
    python3-git \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-socks \
    python3-yaml \
    rsync \
    shellcheck \
    sudo \
    systemd \
    valgrind \
    valgrind-dbg \
    vim \
    wget \
    xxd

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py \
        /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""

# In CI, append a cache-busting echo so the apt-get commands re-run.
# NOTE(review): Docker.timestamp() changes weekly (ISO week), so the
# embedded comment's claim of "daily" refresh is optimistic — confirm.
if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to polute the docker cache regularly force us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

# Python lint/format tooling (pip) and JS tooling (npm) for CI checks.
dockerfile_base += """
RUN pip3 install beautysh
RUN pip3 install black
RUN pip3 install codespell
RUN pip3 install flake8
RUN pip3 install gitlint
RUN pip3 install inflection
RUN pip3 install isort
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install protobuf
RUN pip3 install requests

RUN npm install -g \
        eslint@latest eslint-plugin-json@latest \
        markdownlint-cli@latest \
        prettier@latest
"""
867
# Build the base and stage docker images.
#   Each package stage (Package.generate_all) is built FROM the base image,
#   then the final image COPYs the installed artifacts back together.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || \
        useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
# This is the script's only stdout output; callers capture it as the tag.
print(docker_final_img_name)
907