#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch not
#                     provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy:       The HTTP address of the proxy server to connect to.
#                     Default: "", proxy is not set up if this is not set.

import os
import sys
import threading
from datetime import date
from hashlib import sha256
from typing import Any, Callable, Dict, Iterable, Optional

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """ Package Definition for packages dictionary. """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.
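
# A hypothetical entry, for illustration only: a header-only library pinned to
# a release tag and installed with a single copy step would be declared as:
#
#   "example/header-only": PackageDef(
#       rev="v1.0.0",
#       build_type="custom",
#       build_steps=[f"cp -a include/ {prefix}/include/"],
#   ),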


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.80.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="27578e9c4c1f90c62b6938867735a054082e178e",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="8.1.1",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
        ],
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-5.15 2022-09-27
    "openbmc/linux": PackageDef(
        rev="c9fb275212dac5b300311f6f6b1dcc5ed18a3493",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
    "CPPAlliance/url": PackageDef(
        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
        build_type="custom",
        build_steps=[f"cp -a include/** {prefix}/include/"],
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "boost",
            "CLIUtils/CLI11",
            "leethomason/tinyxml2",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/pldm": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "nlohmann/json",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibpldm-only=enabled",
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=["Naios/function2", "openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=["fmtlib/fmt", "Naios/function2", "google/googletest"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)
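
# As an illustration of how these combine: a meson-based package with
# config_flags=["-Dtests=disabled"] is configured and built inside its Docker
# stage with a command roughly like:
#   meson builddir --wrap-mode=nodownload -Dprefix=/usr/local -Dtests=disabled \
#       && ninja -C builddir && ninja -C builddir install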


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """ pkg - The name of this package (ex. foo/bar ) """
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's definition in the packages dictionary.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """ Thread 'run' function.  Builds the Docker stage. """

        # In case this package has no rev, fetch it from GitHub.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which could be being modified.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """
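        # Example result (revisions illustrative):
        #   "CLIUtils/CLI11:v1.9.1,CPPAlliance/url:d740a92...,boost:1.80.0,..."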

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """ Look up the HEAD revision when a static rev is missing. """

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask GitHub for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")
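
        # Each output line has the form "<sha>\trefs/heads/<branch>".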

        # Find the branch matching {branch} (or fallback to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif "refs/heads/master" in line and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """ Create a name for the Docker stage associated with this pkg. """
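        # e.g. "openbmc/sdbusplus" -> "openbmc-sdbusplus"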
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """ Get the URL for this package. """
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""
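        # e.g. "curl -L https://github.com/<pkg>/archive/<rev>.tar.gz | tar -xz"
        # (illustrative; the tar flag is chosen from the URL suffix below).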

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """ Formulate the command necessary to 'cd' into the source dir. """
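        # e.g. "openbmc/sdbusplus" extracts to "sdbusplus-<rev>/", so "cd sdbusplus*".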
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """ Formulate the dockerfile snippet necessary to COPY all depends. """

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """
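        # Each package contributes two Dockerfile lines, e.g. (tag illustrative):
        #   COPY --from=openbmc/ubuntu-unit-test-boost:<tag> /usr/local /usr/local
        #   RUN true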

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """ Generate a timestamp for today using the ISO week. """
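        # e.g. date(2022, 10, 3) falls in ISO week 40 of 2022, giving "2022-W40".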
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """ Generate a tag name for a package using a hash of the Dockerfile. """
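        # e.g. tagname("boost", df) -> "openbmc/ubuntu-unit-test-boost:2022-W40-<hash>"
        # where <hash> is the first 16 hex digits of sha256(df) (illustrative).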
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(f"Image {tag} already exists.  Skipping.", file=sys.stderr)
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test")
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:jammy")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture ({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""
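
# For example, with a hypothetical UBUNTU_MIRROR=http://mirror.example.com/ubuntu
# and the default ubuntu:jammy distro, the first generated line would be:
#   deb http://mirror.example.com/ubuntu jammy main restricted universe multiverse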

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-12 \
    g++-12 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev \
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-14 \
    clang-format-14 \
    clang-tidy-14 \
    clang-tools-14 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

# Apply autoconf-archive-v2022.02.11 file ax_cxx_compile_stdcxx for C++20.
RUN curl "http://git.savannah.gnu.org/gitweb/?p=autoconf-archive.git;a=blob_plain;f=m4/ax_cxx_compile_stdcxx.m4;hb=3311b6bdeff883c6a13952594a9dcb60bce6ba80" \
  > /usr/share/aclocal/ax_cxx_compile_stdcxx.m4

RUN npm install -g eslint@latest eslint-plugin-json@latest

RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-14 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-14 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-14 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-14 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-14 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-14

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += f"""
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install packaging
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""

# Note, we use sha1s here because the newest gitlint release doesn't include
# some features we need.  Next time they release, we can rely on a direct
# release tag
dockerfile_base += f"""
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68\#subdirectory=gitlint-core
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)