#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch not
#                     provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   DOCKER_REG:       <optional, the URL of a docker registry to utilize
#                     instead of our default (public.ecr.aws/ubuntu)
#                     (ex. docker.io)>
#   http_proxy:       The HTTP address of the proxy server to connect to.
#                     Default: "", proxy is not set up if this is not set
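#
# Example invocation (values shown are illustrative; all variables are
# optional):
#   DISTRO=ubuntu:noble FORCE_DOCKER_BUILD=1 ./scripts/build-unit-test-docker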

import json
import os
import re
import sys
import threading
import urllib.request
from datetime import date
from hashlib import sha256

# typing.Dict is used for type-hints.
from typing import Any, Callable, Dict, Iterable, Optional  # noqa: F401

from sh import git, nproc  # type: ignore

try:
    # System may have docker or it may have podman, try docker first
    from sh import docker

    container = docker
except ImportError:
    try:
        from sh import podman

        container = podman
    except Exception:
        print("No docker or podman found on system")
        exit(1)

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except Exception:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs: Any) -> None:
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """Package Definition for packages dictionary."""

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.86.0",
        url=(
            lambda pkg, rev: f"https://github.com/boostorg/{pkg}/releases/download/{pkg}-{rev}/{pkg}-{rev}-cmake.tar.gz"
        ),
        build_type="custom",
        build_steps=[
            (
                "./bootstrap.sh"
                f" --prefix={prefix} --with-libraries=atomic,context,coroutine,filesystem,process,url"
            ),
            "./b2",
            f"./b2 install --prefix={prefix} valgrind=on",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.12.1",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v2.3.2",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="10.1.1",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.4",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    "google/googletest": PackageDef(
        rev="v1.15.2",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nghttp2/nghttp2": PackageDef(
        rev="v1.61.0",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=[
            "-DENABLE_LIB_ONLY=ON",
            "-DENABLE_STATIC_LIB=ON",
        ],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            (
                f"ln -s {prefix}/include/nlohmann/json.hpp"
                f" {prefix}/include/json.hpp"
            ),
        ],
    ),
    "json-c/json-c": PackageDef(
        rev="json-c-0.17-20230812",
        build_type="cmake",
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.14",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v1.0.1",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/libpeci": PackageDef(
        build_type="meson",
        config_flags=[
            "-Draw-peci=disabled",
        ],
    ),
    "openbmc/libpldm": PackageDef(
        build_type="meson",
        config_flags=[
            "-Dabi=deprecated,stable",
            "-Dtests=false",
            "-Dabi-compliance-check=false",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        depends=[
            "nlohmann/json",
        ],
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)
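
# As an illustration (flags shown are drawn from the definitions above), a
# meson-based package's build command expands to roughly:
#   meson setup builddir --wrap-mode=nodownload -Dprefix=/usr/local \
#       -Dexamples=false -Dtests=disabled && \
#       ninja -C builddir && ninja -C builddir install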


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar )"""
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no rev, fetch it from GitHub.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which may be modified concurrently.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """
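
        # For illustration only (revisions shown are examples; actual values
        # vary by build):
        #   "CLIUtils/CLI11:v2.3.2,Naios/function2:4.2.4,boost:1.86.0,..."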

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _check_gerrit_topic(self) -> bool:
        if not gerrit_topic:
            return False
        if not self.package.startswith("openbmc/"):
            return False
        if gerrit_project == self.package and gerrit_rev:
            return False

        try:
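            # Gerrit prefixes its JSON responses with a ")]}'" line as an
            # XSSI guard, so parse only the last line of the body.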
            commits = json.loads(
                urllib.request.urlopen(
                    f"https://gerrit.openbmc.org/changes/?q=status:open+project:{self.package}+topic:{gerrit_topic}"
                )
                .read()
                .splitlines()[-1]
            )

            if len(commits) == 0:
                return False
            if len(commits) > 1:
                print(
                    f"{self.package} has more than 1 commit ({len(commits)}) under {gerrit_topic}; using latest upstream",
                    file=sys.stderr,
                )
                return False

            change_id = commits[0]["id"]

            commit = json.loads(
                urllib.request.urlopen(
                    f"https://gerrit.openbmc.org/changes/{change_id}/revisions/current/commit"
                )
                .read()
                .splitlines()[-1]
            )["commit"]

            print(
                f"Using {commit} from {gerrit_topic} for {self.package}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = commit
            return True

        except urllib.error.HTTPError as e:
            print(
                f"Error loading topic {gerrit_topic} for {self.package}: ",
                e,
                file=sys.stderr,
            )
            return False

    def _update_rev(self) -> None:
        """Look up the branch HEAD when a static rev is missing."""

        if "rev" in self.pkg_def:
            return

        if self._check_gerrit_topic():
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask GitHub for all the branches.
        lookup = git(
            "ls-remote", "--heads", f"https://github.com/{self.package}"
        )

        # Find the branch matching {branch} (or fall back to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if re.fullmatch(f".*{branch}$", line.strip()):
                self.pkg_def["rev"] = line.split()[0]
                break
            elif (
                "refs/heads/master" in line or "refs/heads/main" in line
            ) and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
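        # e.g. "openbmc/sdbusplus" -> "openbmc-sdbusplus"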
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """Get the URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )
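
        # For a GitHub .tar.gz archive the result resembles:
        #   curl -L https://github.com/<org>/<pkg>/archive/<rev>.tar.gz | tar -xz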

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
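        # e.g. "cd sdbusplus*" -- the glob absorbs the rev suffix that
        # extracted archives carry in their directory name.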
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """Formulate the Dockerfile snippet necessary to COPY all depends."""

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """
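
        # Each package contributes a pair of lines resembling (tag shown is
        # illustrative):
        #   COPY --from=openbmc/ubuntu-unit-test-boost:<tag> /usr/local /usr/local
        #   RUN true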

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson setup builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """Generate a timestamp for today using the ISO week."""
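        # e.g. "2024-W35" for the 35th ISO week of 2024.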
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile."""
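        # e.g. "openbmc/ubuntu-unit-test-boost:2024-W35-0123456789abcdef"
        # (image name, optional stage name, ISO-week stamp, sha256 prefix).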
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if container.image.ls(
                tag, "--format", '"{{.Repository}}:{{.Tag}}"'
            ):
                print(
                    f"Image {tag} already exists.  Skipping.", file=sys.stderr
                )
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        container.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
            _err_to_out=True,
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get(
    "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test"
)
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:oracular")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
docker_reg = os.environ.get("DOCKER_REG", "public.ecr.aws/ubuntu")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")
gerrit_topic = os.environ.get("GERRIT_TOPIC")

# Ensure appropriate docker build output to see progress and identify
# any issues
os.environ["BUILDKIT_PROGRESS"] = "plain"

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME) \
        main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-updates \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-security \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-proposed \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-backports \
            main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
COPY <<EOF_WGETRC {homedir}/.wgetrc
https_proxy = {http_proxy}
http_proxy = {http_proxy}
use_proxy = on
EOF_WGETRC
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_reg}/{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
# Known issue with gpg to get keys via proxy -
# https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1788190, hence using
# curl to get keys.
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install -yy gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr curl && \
          curl -sSL \
          'https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622' \
          | apt-key add - ))

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' \
        /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    abi-compliance-checker \
    abi-dumper \
    autoconf \
    autoconf-archive \
    bison \
    cmake \
    curl \
    dbus \
    device-tree-compiler \
    flex \
    g++-14 \
    gcc-14 \
    git \
    glib-2.0 \
    gnupg \
    iproute2 \
    iputils-ping \
    libaudit-dev \
    libc6-dbg \
    libc6-dev \
    libcjson-dev \
    libconfig++-dev \
    libcryptsetup-dev \
    libcurl4-openssl-dev \
    libdbus-1-dev \
    libevdev-dev \
    libgpiod-dev \
    libi2c-dev \
    libjpeg-dev \
    libjson-perl \
    libldap2-dev \
    libmimetic-dev \
    libnl-3-dev \
    libnl-genl-3-dev \
    libpam0g-dev \
    libpciaccess-dev \
    libperlio-gzip-perl \
    libpng-dev \
    libprotobuf-dev \
    libsnmp-dev \
    libssl-dev \
    libsystemd-dev \
    libtool \
    liburing-dev \
    libxml2-utils \
    libxml-simple-perl \
    lsb-release \
    ninja-build \
    npm \
    pkg-config \
    protobuf-compiler \
    python3 \
    python3-dev \
    python3-git \
    python3-mako \
    python3-pip \
    python3-protobuf \
    python3-setuptools \
    python3-socks \
    python3-yaml \
    rsync \
    shellcheck \
    socat \
    software-properties-common \
    sudo \
    systemd \
    systemd-dev \
    valgrind \
    vim \
    wget \
    xxd

RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14 \
  --slave /usr/bin/g++ g++ /usr/bin/g++-14 \
  --slave /usr/bin/gcov gcov /usr/bin/gcov-14 \
  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-14 \
  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-14
RUN update-alternatives --remove cpp /usr/bin/cpp && \
    update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-14 14

# Set up LLVM apt repository.
RUN bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" -- 19

# Install extra clang tools
RUN apt-get install -y \
        clang-19 \
        clang-format-19 \
        clang-tidy-19

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-19 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-19 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-19 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-19 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py \
        /usr/bin/run-clang-tidy-19 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-19

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force
# us to re-run `apt-get update` at least once per ISO week.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += """
RUN pip3 install --break-system-packages \
        beautysh \
        black \
        codespell \
        flake8 \
        gcovr \
        gitlint \
        inflection \
        isoduration \
        isort \
        jsonschema \
        meson==1.5.1 \
        requests

RUN npm install -g \
        eslint@v8.56.0 eslint-plugin-json@v3.1.0 \
        markdownlint-cli@latest \
        prettier@latest
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Ensure the group, user, and home directory are created (or rename them if
# they already exist).
RUN if grep -q ":{gid}:" /etc/group ; then \
        groupmod -n {username} $(awk -F : '{{ if ($3 == {gid}) {{ print $1 }} }}' /etc/group) ; \
    else \
        groupadd -f -g {gid} {username} ; \
    fi
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN if grep -q ":{uid}:" /etc/passwd ; then \
        usermod -l {username} -d {homedir} -m $(awk -F : '{{ if ($3 == {uid}) {{ print $1 }} }}' /etc/passwd) ; \
    else \
        useradd -d {homedir} -m -u {uid} -g {gid} {username} ; \
    fi
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

# Update library cache
RUN ldconfig

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)