1#!/usr/bin/env python3
2#
3# Build the required docker image to run package unit tests
4#
5# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
8#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-empty value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
11#   BUILD_URL:        <optional, used to detect running under CI context
12#                     (ex. Jenkins)>
13#   BRANCH:           <optional, branch to build from each of the openbmc/
14#                     repositories>
15#                     default is master, which will be used if input branch not
16#                     provided or not found
17#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
18#                     default ones in /etc/apt/sources.list>
19#                     default is empty, and no mirror is used.
20#   http_proxy        The HTTP address of the proxy server to connect to.
21#                     Default: "", proxy is not setup if this is not set
22
23import os
24import sys
25import threading
26from datetime import date
27from hashlib import sha256
28from sh import docker, git, nproc, uname  # type: ignore
29from typing import Any, Callable, Dict, Iterable, Optional
30
try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:
    # Only an ImportError means TypedDict is unavailable; a bare 'except'
    # would also swallow KeyboardInterrupt/SystemExit.

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()
40
41
# Declare some variables used in package definitions.
prefix = "/usr/local"  # install prefix shared by every package build
proc_count = nproc().strip()  # parallel job count for make, from `nproc`
45
46
class PackageDef(TypedDict, total=False):
    """ Package Definition for packages dictionary.

    total=False makes every key optional; required-ness is documented
    per-field below and enforced only by convention.
    """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    #   Written at runtime by Package.run() via the plain dict key "__tag".
    __tag: str
    # __package [private]: Package object associated with this package.
    #   Written at runtime by Package.__init__() via the key "__package".
    __package: Any  # Type is Package, but not defined yet.
75
76
# Packages to include in image.
#   Keyed by "org/repo" on Github (or a bare name, e.g. "boost", when a
#   custom 'url' lambda is supplied).  Each value describes how to download
#   and build one package; see PackageDef for field meanings.
packages = {
    "boost": PackageDef(
        rev="1.80.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
        ],
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-5.15 2022-09-27
    "openbmc/linux": PackageDef(
        rev="c9fb275212dac5b300311f6f6b1dcc5ed18a3493",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
    "CPPAlliance/url": PackageDef(
        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
        build_type="custom",
        build_steps=[f"cp -a include/** {prefix}/include/"],
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    # No 'rev' here: the branch HEAD is looked up at runtime (_update_rev).
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/pldm": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "nlohmann/json",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibpldm-only=enabled",
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]
270
# Define common flags used for builds.
#   Each is a single space-separated option string, interpolated into the
#   per-build-type command lines below.
configure_flags = f"--prefix={prefix}"

cmake_flags = (
    "-DBUILD_SHARED_LIBS=ON"
    " -DCMAKE_BUILD_TYPE=RelWithDebInfo"
    f" -DCMAKE_INSTALL_PREFIX:PATH={prefix}"
    " -GNinja"
    " -DCMAKE_MAKE_PROGRAM=ninja"
)

meson_flags = f"--wrap-mode=nodownload -Dprefix={prefix}"
292
293
class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Each Package is a thread; run() generates the package's Dockerfile
    stage and builds it, after joining on its dependency threads.
    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.  Entries gain the private '__tag' and
    # '__package' keys as the builds progress.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """ pkg - The name of this package (ex. foo/bar ) """
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's definition dict.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """ Thread 'run' function.  Builds the Docker stage. """

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which could be being modified.
        #   Using 'with' guarantees the lock is released even if the
        #   lookup raises, so other package threads cannot deadlock.
        with Package.lock:
            deps: Iterable[Package] = [
                Package.packages[deppkg]["__package"]
                for deppkg in self.pkg_def.get("depends", [])
            ]

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        with Package.lock:
            tag = Docker.tagname(self._stagename(), dockerfile)
            self.pkg_def["__tag"] = tag

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.

        Raises the first exception recorded by any package thread.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        with Package.lock:
            for t in pkg_threads:
                t.start()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """ Look up the HEAD commit for any package missing a static rev. """

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")

        # Find the branch matching {branch} (or fallback to master).
        #   This section is locked because we are modifying the PackageDef.
        with Package.lock:
            for line in lookup.split("\n"):
                if f"refs/heads/{branch}" in line:
                    self.pkg_def["rev"] = line.split()[0]
                elif "refs/heads/master" in line and "rev" not in self.pkg_def:
                    self.pkg_def["rev"] = line.split()[0]

    def _stagename(self) -> str:
        """ Create a name for the Docker stage associated with this pkg. """
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """ Get the URL for this package. """
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack to source.

        Raises NotImplementedError for URLs that are not .tar.bz2/.tar.gz.
        """

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """ Formulate the command necessary to 'cd' into the source dir. """
        # The tarball extracts to '<repo>-<rev>'; the shell glob covers it.
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """ Formulate the dockerfile snippet necessary to COPY all depends. """

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        """ Shell command chain for an 'autoconf' build_type package. """
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        """ Shell command chain for a 'cmake' build_type package. """
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        """ Shell command chain for a 'custom' build_type package. """
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        """ Shell command chain for a 'make' build_type package. """
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        """ Shell command chain for a 'meson' build_type package. """
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result
569
570
class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """ Generate a timestamp for today using the ISO week (YYYY-Www). """
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile.

        pkgname - Stage name appended to the image name, or None for the
                  final (unsuffixed) image.
        dockerfile - Dockerfile contents; its sha256 makes the tag unique.
        """
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        # Week-based timestamp + content hash: the tag changes when either
        # the Dockerfile changes or a new ISO week starts.
        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(f"Image {tag} already exists.  Skipping.", file=sys.stderr)
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not polute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )
626
627
# Read a bunch of environment variables.
_env = os.environ.get
docker_image_name = _env("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test")
# NOTE(review): truthiness check downstream means any non-empty value
# (including "0") forces a rebuild — confirm that is intended.
force_build = _env("FORCE_DOCKER_BUILD")
is_automated_ci_build = _env("BUILD_URL", False)
distro = _env("DISTRO", "ubuntu:kinetic")
branch = _env("BRANCH", "master")
ubuntu_mirror = _env("UBUNTU_MIRROR")
http_proxy = _env("http_proxy")

gerrit_project = _env("GERRIT_PROJECT")
gerrit_rev = _env("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = _env("USER", "root")
homedir = _env("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir, gid, uid = "/root", 0, 0
651
# Determine the architecture for Docker.
#   Map `uname -m` output to the Docker Hub image-name prefix.
arch = uname("-m").strip()
_docker_bases = {
    "ppc64le": "ppc64le/",
    "x86_64": "",
    "aarch64": "arm64v8/",
}
if arch not in _docker_bases:
    print(
        f"Unsupported system architecture({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)
docker_base = _docker_bases[arch]
666
# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    # Rewrite /etc/apt/sources.list so every standard pocket (release,
    # -updates, -security, -proposed, -backports) points at the mirror.
    # The first `echo` truncates the file (">"); the rest append (">>").
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""
677
# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    # Route git traffic through the proxy via ~/.gitconfig.
    # NOTE: the trailing single backslashes below are *Python* line
    # continuations inside the string literal, so both echo commands land
    # on one line of the generated Dockerfile.
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    # Route apt-key's keyserver traffic through the same proxy.
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    # Pass the proxy into every `docker build` invocation as build args.
    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )
697
# Create base Dockerfile.
#   This stage installs the full apt/npm toolchain shared by all package
#   stages: keyring setup, dbgsym debug repos, compilers, and
#   clang update-alternatives.  The {mirror} and {proxy_keyserver}
#   placeholders come from the optional mirror/proxy setup above.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-12 \
    g++-12 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev\
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

RUN npm install -g eslint@latest eslint-plugin-json@latest

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""
817
if is_automated_ci_build:
    # Under CI (BUILD_URL set), embed the ISO-week timestamp so the Docker
    # layer cache is invalidated weekly and apt-get update re-runs.
    dockerfile_base += f"""
# Run an arbitrary command to polute the docker cache regularly force us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""
826
# Python tooling installed via pip (not packaged, or too old, in apt).
dockerfile_base += f"""
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install packaging
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""

# Note, we use sha1s here because the newest gitlint release doesn't include
# some features we need.  Next time they release, we can rely on a direct
# release tag
# (The `\#` below is a *shell* escape emitted into the Dockerfile RUN line;
# without it the shell would treat `#subdirectory=...` as a comment.)
dockerfile_base += f"""
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68\#subdirectory=gitlint-core
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68
"""
845
# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
# Build every package stage (in parallel) on top of the base image.
Package.generate_all()

# Create the final Dockerfile.
#   Copies all built packages out of their stages and configures the
#   non-root build user to match the invoking host user.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
#   This is the only stdout output; everything else goes to stderr so
#   callers can capture the tag cleanly.
print(docker_final_img_name)
884