#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch not
#                     provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy        The HTTP address of the proxy server to connect to.
#                     Default: "", proxy is not set up if this is not set.
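#
# Example invocation (illustrative; the script name may differ in your tree):
#   DISTRO=ubuntu:jammy BRANCH=master ./build-unit-test-docker
# On success, the tag of the final image is printed on stdout.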

import os
import sys
import threading
from datetime import date
from hashlib import sha256
from sh import docker, git, nproc, uname  # type: ignore
from typing import Any, Callable, Dict, Iterable, Optional

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """ Package Definition for packages dictionary. """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.79.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="3e4d1b84cab4891368d2179a61a7ba06a5693e7f",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="27578e9c4c1f90c62b6938867735a054082e178e",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="8.1.1",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.1.0",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    # Release v3.10.4
    "nlohmann/json": PackageDef(
        rev="v3.10.4",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
        ],
    ),
    # Release v1.15
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-5.8 2021-01-11
    "openbmc/linux": PackageDef(
        rev="3cc95ae40716e56f81b69615781f54c78079042d",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    # Snapshot from 2020-06-13
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    # version from meta-openembedded/meta-oe/recipes-support/libtinyxml2/libtinyxml2_8.0.0.bb
    "leethomason/tinyxml2": PackageDef(
        rev="8.0.0",
        build_type="cmake",
    ),
    # version from meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
    "CPPAlliance/url": PackageDef(
        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
        build_type="custom",
        build_steps=[f"cp -a include/** {prefix}/include/"],
    ),
    # version from meta-openembedded/meta-oe/dynamic-layers/networking-layer/recipes-devtools/valijson/valijson_0.6.bb
    "tristanpenman/valijson": PackageDef(
        rev="v0.6",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "boost",
            "CLIUtils/CLI11",
            "leethomason/tinyxml2",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/pldm": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "nlohmann/json",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibpldm-only=enabled",
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=["Naios/function2", "openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=["fmtlib/fmt", "Naios/function2", "google/googletest"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds.
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)
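
# For reference, with prefix="/usr/local" the joined flag strings are:
#   configure_flags: "--prefix=/usr/local"
#   cmake_flags: "-DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo
#       -DCMAKE_INSTALL_PREFIX:PATH=/usr/local -GNinja -DCMAKE_MAKE_PROGRAM=ninja"
#   meson_flags: "--wrap-mode=nodownload -Dprefix=/usr/local"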


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """
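
    # Typical flow (mirrors the bottom of this script):
    #   Docker.build("base", docker_base_img_name, dockerfile_base)
    #   Package.generate_all()       # build one Docker stage per package
    #   Package.df_all_copycmds()    # COPY each package stage into final image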

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """ pkg - The name of this package (ex. foo/bar). """
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's definition in the packages dictionary.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """ Thread 'run' function.  Builds the Docker stage. """

        # In case this package has no rev, fetch it from GitHub.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which may be modified concurrently.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """
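
        # Example result (illustrative; revs are resolved at runtime):
        #   "CLIUtils/CLI11:v1.9.1,CPPAlliance/url:d740a92...,boost:1.79.0,..."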

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """ Look up the HEAD revision when a static rev is not provided. """

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask GitHub for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")

        # Find the branch matching {branch} (or fall back to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif "refs/heads/master" in line and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """ Create a name for the Docker stage associated with this pkg. """
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """ Get the URL for this package. """
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the GitHub archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack to source."""
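
        # For the default GitHub archive URL this yields, e.g.:
        #   curl -L https://github.com/<org>/<repo>/archive/<rev>.tar.gz | tar -xz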

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """ Formulate the command necessary to 'cd' into the source dir. """
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """ Formulate the Dockerfile snippet necessary to COPY all depends. """

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """
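
        # Each package contributes a pair of lines like (tag is illustrative):
        #   COPY --from=<stage-tag> /usr/local /usr/local
        #   RUN true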

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """
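
        # The assembled snippet is a single Dockerfile RUN line, roughly:
        #   RUN curl -L <url> | tar -xz && cd <pkg>* && <build / install steps>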

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """ Generate a timestamp for today using the ISO week. """
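        # e.g. date(2021, 4, 21) falls in ISO week 16 of 2021 -> "2021-W16"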
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """ Generate a tag name for a package using a hash of the Dockerfile. """
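        # e.g. "openbmc/ubuntu-unit-test-base:2021-W16-<16 hex digits>" when
        # using the default image name with pkgname="base".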
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image from the Dockerfile and tag it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(f"Image {tag} already exists.  Skipping.", file=sys.stderr)
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test")
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:jammy")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root.
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture ({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-12 \
    g++-12 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev \
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-14 \
    clang-format-14 \
    clang-tidy-14 \
    clang-tools-14 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

# Apply autoconf-archive-v2022.02.11 file ax_cxx_compile_stdcxx for C++20.
RUN curl "http://git.savannah.gnu.org/gitweb/?p=autoconf-archive.git;a=blob_plain;f=m4/ax_cxx_compile_stdcxx.m4;hb=3311b6bdeff883c6a13952594a9dcb60bce6ba80" \
  > /usr/share/aclocal/ax_cxx_compile_stdcxx.m4

RUN npm install -g eslint@latest eslint-plugin-json@latest

RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-14 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-14 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-14 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-14 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-14 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-14

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += """
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""

# Note, we use sha1s here because the newest gitlint release doesn't include
# some features we need.  Next time they release, we can rely on a direct
# release tag.
dockerfile_base += """
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68#subdirectory=gitlint-core
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build.
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)