#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch not
#                     provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy:       <optional, the HTTP address of the proxy server to
#                     connect to>
#                     default is empty, and no proxy is set up.
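#
#   Example invocation (illustrative; behavior is driven entirely by the
#   environment variables above):
#     DISTRO=ubuntu:kinetic FORCE_DOCKER_BUILD=1 ./build-unit-test-docker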

import os
import sys
import threading
from datetime import date
from hashlib import sha256
from typing import Any, Callable, Dict, Iterable, Optional

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """ Package Definition for packages dictionary. """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.80.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="27578e9c4c1f90c62b6938867735a054082e178e",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="8.1.1",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
        ],
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-5.15 2022-09-27
    "openbmc/linux": PackageDef(
        rev="c9fb275212dac5b300311f6f6b1dcc5ed18a3493",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
    "CPPAlliance/url": PackageDef(
        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
        build_type="custom",
        build_steps=[f"cp -a include/** {prefix}/include/"],
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/pldm": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "nlohmann/json",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibpldm-only=enabled",
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "Naios/function2",
            "fmtlib/fmt",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
}  # type: Dict[str, PackageDef]
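
# Adding a new package is just another entry above; a minimal hypothetical
# example:
#   "org/repo": PackageDef(rev="v1.0.0", build_type="meson"),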

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)
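
# For reference, with the default prefix these resolve to (illustrative):
#   configure_flags == "--prefix=/usr/local"
#   meson_flags == "--wrap-mode=nodownload -Dprefix=/usr/local"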


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """ pkg - The name of this package (ex. foo/bar ) """
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's definition in the packages dictionary.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """ Thread 'run' function.  Builds the Docker stage. """

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which another thread may be modifying.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e
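
    # Illustrative shape of a generated per-package Dockerfile (not literal
    # output; tags and commands vary per package):
    #
    #   FROM openbmc/ubuntu-unit-test-base:<timestamp>-<hash>
    #   COPY --from=openbmc/ubuntu-unit-test-<dep>:<timestamp>-<hash> /usr/local /usr/local
    #   RUN true
    #   RUN curl -L <archive-url> | tar -xz && cd <srcdir> && <build-and-install>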

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache
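
    # Example depcache contents (illustrative, abbreviated):
    #   "CLIUtils/CLI11:v1.9.1,CPPAlliance/url:d740a92...,boost:1.80.0,..."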

    def _update_rev(self) -> None:
        """ Look up the HEAD revision if a static rev is missing. """

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")

        # Find the branch matching {branch} (or fallback to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif "refs/heads/master" in line and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """ Create a name for the Docker stage associated with this pkg. """
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """ Get the URL for this package. """
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"
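
    # Most packages fall through to the Github archive form, e.g.
    # "https://github.com/openbmc/sdbusplus/archive/<rev>.tar.gz".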

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd
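
    # Example download command (illustrative):
    #   curl -L https://github.com/fmtlib/fmt/archive/8.1.1.tar.gz | tar -xz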

    def _cmd_cd_srcdir(self) -> str:
        """ Formulate the command necessary to 'cd' into the source dir. """
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """ Formulate the dockerfile snippet necessary to COPY all depends. """

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds
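
    # Each package contributes a pair of lines, e.g. (illustrative):
    #   COPY --from=openbmc/ubuntu-unit-test-fmtlib-fmt:<timestamp>-<hash> /usr/local /usr/local
    #   RUN true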

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result
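
    # Example meson build command (illustrative, for a package with
    # config_flags=["-Dtests=disabled"] and no config_env):
    #   meson builddir --wrap-mode=nodownload -Dprefix=/usr/local \
    #       -Dtests=disabled && ninja -C builddir && ninja -C builddir install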


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """ Generate a timestamp for today using the ISO week. """
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """ Generate a tag name for a package using a hash of the Dockerfile. """
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result
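
    # Example tag (illustrative): the image name plus ISO-week timestamp and
    # a 16-character Dockerfile digest, e.g.
    #   openbmc/ubuntu-unit-test-base:2022-W39-0123456789abcdef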

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(f"Image {tag} already exists.  Skipping.", file=sys.stderr)
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test")
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture ({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-12 \
    g++-12 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev \
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

RUN npm install -g eslint@latest eslint-plugin-json@latest

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly, forcing us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += f"""
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install packaging
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""

# Note, we use sha1s here because the newest gitlint release doesn't include
# some features we need.  Next time they release, we can rely on a direct
# release tag.
dockerfile_base += f"""
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68\#subdirectory=gitlint-core
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)
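
# Note: downstream tooling is expected to capture this tag from stdout; build
# progress above was routed to stderr, keeping stdout limited to this tag.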