1#!/usr/bin/env python3
2#
3# Build the required docker image to run package unit tests
4#
5# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
7#                     default is openbmc/ubuntu-unit-test
8#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
11#   BUILD_URL:        <optional, used to detect running under CI context
12#                     (ex. Jenkins)>
13#   BRANCH:           <optional, branch to build from each of the openbmc/
14#                     repositories>
15#                     default is master, which will be used if input branch not
16#                     provided or not found
17#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
18#                     default ones in /etc/apt/sources.list>
19#                     default is empty, and no mirror is used.
20#   http_proxy        The HTTP address of the proxy server to connect to.
21#                     Default: "", proxy is not setup if this is not set
22
23import os
24import sys
25import threading
26from datetime import date
27from hashlib import sha256
28from sh import docker, git, nproc, uname  # type: ignore
29from typing import Any, Callable, Dict, Iterable, Optional
30
try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:
    # Minimal stand-in: behaves like a plain dict but accepts the
    # 'total=...' class keyword that real TypedDict subclasses pass.

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()
41
# Declare some variables used in package definitions.
prefix = "/usr/local"  # install prefix shared by every package build stage
proc_count = nproc().strip()  # parallel job count for make (output of `nproc`)
45
46
class PackageDef(TypedDict, total=False):
    """Package definition for the 'packages' dictionary.

    Each entry describes how to fetch, configure, build, and install one
    dependency into its own Docker stage.  All keys are optional
    (total=False) except where marked [required] below.
    """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.
75
76
# Packages to include in image.
#   Keys are either a bare tarball name (with a custom 'url' lambda) or a
#   Github "org/repo" path (downloaded via the archive URL by default).
packages = {
    "boost": PackageDef(
        rev="1.79.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="3e4d1b84cab4891368d2179a61a7ba06a5693e7f",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="27578e9c4c1f90c62b6938867735a054082e178e",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="8.1.1",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.1.0",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    # NOTE(review): rev v3.10.4 is newer than the original "Release
    # 2020-08-06" note here suggested; confirm the intended rev.
    "nlohmann/json": PackageDef(
        rev="v3.10.4",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
        ],
    ),
    # NOTE(review): rev v1.15 postdates the original "Snapshot from
    # 2019-05-24" note here; confirm the intended rev.
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-5.8 2021-01-11
    "openbmc/linux": PackageDef(
        rev="3cc95ae40716e56f81b69615781f54c78079042d",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    # Snapshot from 2020-06-13
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    # version from meta-openembedded/meta-oe/recipes-support/libtinyxml2/libtinyxml2_8.0.0.bb
    "leethomason/tinyxml2": PackageDef(
        rev="8.0.0",
        build_type="cmake",
    ),
    # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
    "CPPAlliance/url": PackageDef(
        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
        build_type="custom",
        build_steps=[f"cp -a include/** {prefix}/include/"],
    ),
    # version from meta-openembedded/meta-oe/dynamic-layers/networking-layer/recipes-devtools/valijson/valijson_0.6.bb
    "tristanpenman/valijson": PackageDef(
        rev="v0.6",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    # No static rev: _update_rev() resolves the branch HEAD at build time.
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/pldm": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "nlohmann/json",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibpldm-only=enabled",
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=["Naios/function2", "openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=["fmtlib/fmt", "google/googletest"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
}  # type: Dict[str, PackageDef]
264
# Define common flags used for builds.
#   configure_flags: shared args for autoconf './configure' invocations.
configure_flags = f"--prefix={prefix}"

#   cmake_flags: shared args for cmake configuration (Ninja generator).
cmake_flags = (
    "-DBUILD_SHARED_LIBS=ON"
    " -DCMAKE_BUILD_TYPE=RelWithDebInfo"
    f" -DCMAKE_INSTALL_PREFIX:PATH={prefix}"
    " -GNinja"
    " -DCMAKE_MAKE_PROGRAM=ninja"
)

#   meson_flags: shared args for meson setup.
meson_flags = f"--wrap-mode=nodownload -Dprefix={prefix}"
286
287
class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Each Package is a thread that generates a Dockerfile stage for one
    package, waits for the stages it depends on to finish, and then runs
    the Docker build.  Generally, this class should not be instantiated
    directly but through Package.generate_all().
    """

    # Copy the packages dictionary so private bookkeeping entries
    # (__tag / __package) stay out of the module-level definition.
    packages = packages.copy()

    # Lock used for thread-safety when reading/writing PackageDef entries
    # that other package threads may touch concurrently.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """ pkg - The name of this package (ex. foo/bar ) """
        super().__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef, plus a back-reference
        # so dependent packages can find (and join) this thread.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """ Thread 'run' function.  Builds the Docker stage. """

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which could be being modified.
        #   (Using 'with' guarantees release even if a lookup raises.)
        with Package.lock:
            deps: Iterable[Package] = [
                Package.packages[deppkg]["__package"]
                for deppkg in self.pkg_def.get("depends", [])
            ]

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        with Package.lock:
            tag = Docker.tagname(self._stagename(), dockerfile)
            self.pkg_def["__tag"] = tag

        # Do the build / save any exceptions.  The exception is re-raised
        # from the main thread in generate_all().
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        with Package.lock:
            for t in pkg_threads:
                t.start()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """ Look up the HEAD for missing a static rev. """

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")

        # Find the branch matching {branch} (or fallback to master).
        #   This section is locked because we are modifying the PackageDef.
        with Package.lock:
            for line in lookup.split("\n"):
                if f"refs/heads/{branch}" in line:
                    self.pkg_def["rev"] = line.split()[0]
                elif "refs/heads/master" in line and "rev" not in self.pkg_def:
                    self.pkg_def["rev"] = line.split()[0]

    def _stagename(self) -> str:
        """ Create a name for the Docker stage associated with this pkg. """
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """ Get the URL for this package. """
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack to source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        # Pick the tar decompression flag from the archive suffix.
        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """ Formulate the command necessary to 'cd' into the source dir. """
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """ Formulate the dockerfile snippet necessary to COPY all depends. """

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        """ Shell snippet for an autoconf-style bootstrap/configure/make. """
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        """ Shell snippet for a cmake out-of-tree configure/build/install. """
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        """ Shell snippet chaining the package's explicit build_steps. """
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        """ Shell snippet for a plain make build and install. """
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        """ Shell snippet for a meson setup plus ninja build/install. """
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result
563
564
class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """ Generate a timestamp for today using the ISO week (YYYY-Wnn). """
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile.

        pkgname - Stage name suffix for the image name, or None for the
                  final (unsuffixed) image.
        dockerfile - Dockerfile contents; its sha256 is folded into the tag
                  so any content change yields a new tag.
        """
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        # ISO-week timestamp plus 16 hex digits of the Dockerfile hash:
        # images naturally rebuild weekly or whenever the contents change.
        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(f"Image {tag} already exists.  Skipping.", file=sys.stderr)
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not polute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )
620
621
# Read a bunch of environment variables (documented in the file header).
_env = os.environ.get
docker_image_name = _env("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test")
force_build = _env("FORCE_DOCKER_BUILD")
is_automated_ci_build = _env("BUILD_URL", False)
distro = _env("DISTRO", "ubuntu:jammy")
branch = _env("BRANCH", "master")
ubuntu_mirror = _env("UBUNTU_MIRROR")
http_proxy = _env("http_proxy")

gerrit_project = _env("GERRIT_PROJECT")
gerrit_rev = _env("GERRIT_PATCHSET_REVISION")

# Set up some common variables describing the invoking user.
username = _env("USER", "root")
homedir = _env("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root.
if username == "root":
    homedir, gid, uid = "/root", 0, 0
645
# Determine the architecture for Docker.
#   Map the host machine type to the matching Docker Hub namespace prefix.
arch = uname("-m").strip()
_docker_bases = {
    "ppc64le": "ppc64le/",
    "x86_64": "",
    "aarch64": "arm64v8/",
}
if arch not in _docker_bases:
    print(
        f"Unsupported system architecture({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)
docker_base = _docker_bases[arch]
660
# Special flags if setting up a deb mirror.
#   'mirror' becomes a Dockerfile RUN snippet that rewrites sources.list to
#   point every suite at UBUNTU_MIRROR; empty string means no rewrite.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
#   proxy_cmd: Dockerfile snippet configuring git's HTTP proxy.
#   proxy_keyserver: extra apt-key option to fetch keys via the proxy.
#   proxy_args: 'docker build' args propagating the proxy env variables.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )
691
# Create base Dockerfile.
#   This is the common stage every package stage (and the final image)
#   builds FROM: distro base + toolchains + all distro-level dependencies.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-11 \
    g++-11 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev\
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-14 \
    clang-format-14 \
    clang-tidy-14 \
    clang-tools-14 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

# Apply autoconf-archive-v2022.02.11 file ax_cxx_compile_stdcxx for C++20.
RUN curl "http://git.savannah.gnu.org/gitweb/?p=autoconf-archive.git;a=blob_plain;f=m4/ax_cxx_compile_stdcxx.m4;hb=3311b6bdeff883c6a13952594a9dcb60bce6ba80" \
  > /usr/share/aclocal/ax_cxx_compile_stdcxx.m4

RUN npm install -g eslint@latest eslint-plugin-json@latest

RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 11 \
  --slave /usr/bin/g++ g++ /usr/bin/g++-11 \
  --slave /usr/bin/gcov gcov /usr/bin/gcov-11 \
  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-11 \
  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-11

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-14 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-14 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-14 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-14 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-14 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-14

"""
813
# Under CI, append a cache-busting echo so 'apt-get update' re-runs weekly.
if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to polute the docker cache regularly force us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

# Python tools installed via pip (no placeholders here, so plain strings).
dockerfile_base += """
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""

# Note, we use sha1s here because the newest gitlint release doesn't include
# some features we need.  Next time they release, we can rely on a direct
# release tag
dockerfile_base += """
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68\#subdirectory=gitlint-core
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68
"""
840
# Build the base and stage docker images.
#   The base image tag feeds into every package stage's FROM line, so it is
#   computed (and built) before Package.generate_all() runs.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
#   This is the only stdout output; everything else goes to stderr so CI
#   can capture the tag cleanly.
print(docker_final_img_name)