#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch not
#                     provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   DOCKER_REG:       <optional, the URL of a docker registry to utilize
#                     instead of our default (public.ecr.aws/ubuntu)
#                     (ex. docker.io)>
#   http_proxy:       The HTTP address of the proxy server to connect to.
#                     Default: "", proxy is not set up if this is not set
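#
# Example invocation (illustrative; the script name and variable values here
# are assumptions, not fixed by this file):
#   DISTRO=ubuntu:noble FORCE_DOCKER_BUILD=1 ./build-unit-test-docker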

import json
import os
import re
import sys
import threading
import urllib.request
from datetime import date
from hashlib import sha256

# typing.Dict is used for type-hints.
from typing import Any, Callable, Dict, Iterable, Optional  # noqa: F401

from sh import docker, git, nproc  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except Exception:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs: Any) -> None:
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """Package Definition for packages dictionary."""

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.84.0",
        url=(
            lambda pkg, rev: f"https://github.com/boostorg/{pkg}/releases/download/{pkg}-{rev}/{pkg}-{rev}.tar.gz"
        ),
        build_type="custom",
        build_steps=[
            (
                "./bootstrap.sh"
                f" --prefix={prefix} --with-libraries=context,coroutine,url"
            ),
            "./b2",
            f"./b2 install --prefix={prefix} valgrind=on",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.12.1",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v2.3.2",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="10.1.1",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.4",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    "google/googletest": PackageDef(
        rev="v1.15.2",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nghttp2/nghttp2": PackageDef(
        rev="v1.61.0",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=[
            "-DENABLE_LIB_ONLY=ON",
            "-DENABLE_STATIC_LIB=ON",
        ],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            (
                f"ln -s {prefix}/include/nlohmann/json.hpp"
                f" {prefix}/include/json.hpp"
            ),
        ],
    ),
    "json-c/json-c": PackageDef(
        rev="json-c-0.17-20230812",
        build_type="cmake",
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.14",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v1.0.1",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/libpeci": PackageDef(
        build_type="meson",
        config_flags=[
            "-Draw-peci=disabled",
        ],
    ),
    "openbmc/libpldm": PackageDef(
        build_type="meson",
        config_flags=["-Dabi=deprecated,stable", "--wrap-mode=default"],
    ),
    "openbmc/sdbusplus": PackageDef(
        depends=[
            "nlohmann/json",
        ],
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar)"""
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which could be being modified.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """
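        # Example contents (illustrative, abbreviated):
        #   CLIUtils/CLI11:v2.3.2,LibVNC/libvncserver:LibVNCServer-0.9.14,...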

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _check_gerrit_topic(self) -> bool:
        if not gerrit_topic:
            return False
        if not self.package.startswith("openbmc/"):
            return False
        if gerrit_project == self.package and gerrit_rev:
            return False

        try:
            commits = json.loads(
                urllib.request.urlopen(
                    f"https://gerrit.openbmc.org/changes/?q=status:open+project:{self.package}+topic:{gerrit_topic}"
                )
                .read()
                .splitlines()[-1]
            )

            if len(commits) == 0:
                return False
            if len(commits) > 1:
                print(
                    f"{self.package} has more than 1 commit ({len(commits)}) under {gerrit_topic}; using latest upstream",
                    file=sys.stderr,
                )
                return False

            change_id = commits[0]["id"]

            commit = json.loads(
                urllib.request.urlopen(
                    f"https://gerrit.openbmc.org/changes/{change_id}/revisions/current/commit"
                )
                .read()
                .splitlines()[-1]
            )["commit"]

            print(
                f"Using {commit} from {gerrit_topic} for {self.package}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = commit
            return True

        except urllib.error.HTTPError as e:
            print(
                f"Error loading topic {gerrit_topic} for {self.package}: ",
                e,
                file=sys.stderr,
            )
            return False

    def _update_rev(self) -> None:
        """Look up the HEAD revision for packages missing a static rev."""

        if "rev" in self.pkg_def:
            return

        if self._check_gerrit_topic():
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git(
            "ls-remote", "--heads", f"https://github.com/{self.package}"
        )

        # Find the branch matching {branch} (or fall back to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if re.fullmatch(f".*{branch}$", line.strip()):
                self.pkg_def["rev"] = line.split()[0]
                break
            elif (
                "refs/heads/master" in line or "refs/heads/main" in line
            ) and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
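        # e.g. "openbmc/sdbusplus" becomes stage "openbmc-sdbusplus"
        # (illustrative).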
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """Get the URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
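        #   e.g. https://github.com/openbmc/sdbusplus/archive/<rev>.tar.gz
        #   (illustrative)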
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""
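        # Shape of the resulting command (illustrative):
        #   curl -L https://github.com/<org>/<pkg>/archive/<rev>.tar.gz | tar -xz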

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """Formulate the dockerfile snippet necessary to COPY all depends."""

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """
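        # Each package contributes a pair of lines (illustrative):
        #   COPY --from=<package tag> /usr/local /usr/local
        #   RUN true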

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
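        # Resulting command shape (illustrative, for a package configured
        # with "-Dtests=disabled"):
        #   meson setup builddir --wrap-mode=nodownload -Dprefix=/usr/local \
        #       -Dtests=disabled && ninja -C builddir && ninja -C builddir install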
        result = f"{env} meson setup builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """Generate a timestamp for today using the ISO week."""
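        # Example (illustrative): 2024-01-15 falls in ISO week 3 of 2024,
        # producing "2024-W03".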
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile."""
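        # Example shape (illustrative):
        #   openbmc/ubuntu-unit-test-base:2024-W03-<first 16 hex digest chars>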
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(
                    f"Image {tag} already exists.  Skipping.", file=sys.stderr
                )
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
            _err_to_out=True,
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get(
    "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test"
)
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:noble")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
docker_reg = os.environ.get("DOCKER_REG", "public.ecr.aws/ubuntu")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")
gerrit_topic = os.environ.get("GERRIT_TOPIC")

# Ensure appropriate docker build output to see progress and identify
# any issues
os.environ["BUILDKIT_PROGRESS"] = "plain"

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME) \
        main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-updates \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-security \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-proposed \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-backports \
            main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_reg}/{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
# Known issue with gpg to get keys via proxy -
# https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1788190, hence using
# curl to get keys.
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install -yy gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr curl && \
          curl -sSL \
          'https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622' \
          | apt-key add - ))

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' \
        /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    abi-compliance-checker \
    abi-dumper \
    autoconf \
    autoconf-archive \
    bison \
    cmake \
    curl \
    dbus \
    device-tree-compiler \
    flex \
    g++-13 \
    gcc-13 \
    git \
    glib-2.0 \
    gnupg \
    iproute2 \
    iputils-ping \
    libaudit-dev \
    libc6-dbg \
    libc6-dev \
    libcjson-dev \
    libconfig++-dev \
    libcryptsetup-dev \
    libdbus-1-dev \
    libevdev-dev \
    libgpiod-dev \
    libi2c-dev \
    libjpeg-dev \
    libjson-perl \
    libldap2-dev \
    libmimetic-dev \
    libnl-3-dev \
    libnl-genl-3-dev \
    libpam0g-dev \
    libpciaccess-dev \
    libperlio-gzip-perl \
    libpng-dev \
    libprotobuf-dev \
    libsnmp-dev \
    libssl-dev \
    libsystemd-dev \
    libtool \
    liburing-dev \
    libxml2-utils \
    libxml-simple-perl \
    lsb-release \
    ninja-build \
    npm \
    pkg-config \
    protobuf-compiler \
    python3 \
    python3-dev \
    python3-git \
    python3-mako \
    python3-pip \
    python3-protobuf \
    python3-setuptools \
    python3-socks \
    python3-yaml \
    rsync \
    shellcheck \
    socat \
    software-properties-common \
    sudo \
    systemd \
    valgrind \
    vim \
    wget \
    xxd

RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 13 \
  --slave /usr/bin/g++ g++ /usr/bin/g++-13 \
  --slave /usr/bin/gcov gcov /usr/bin/gcov-13 \
  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-13 \
  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-13
RUN update-alternatives --remove cpp /usr/bin/cpp && \
    update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-13 13

# Set up LLVM apt repository.
RUN bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" 18

# Install extra clang tools
RUN apt-get install \
        clang-18 \
        clang-format-18 \
        clang-tidy-18

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-18 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-18 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-18 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-18 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py \
        /usr/bin/run-clang-tidy-18 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-18

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force
# us to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += """
RUN pip3 install --break-system-packages \
        beautysh \
        black \
        codespell \
        flake8 \
        gcovr \
        gitlint \
        inflection \
        isort \
        jsonschema \
        meson==1.3.0 \
        requests

RUN npm install -g \
        eslint@v8.56.0 eslint-plugin-json@v3.1.0 \
        markdownlint-cli@latest \
        prettier@latest
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file,
# even though it is no longer needed to rebuild the docker environment.
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Ensure the group, user, and home directory are created (or rename them if
# they already exist).
RUN if grep -q ":{gid}:" /etc/group ; then \
        groupmod -n {username} $(awk -F : '{{ if ($3 == {gid}) {{ print $1 }} }}' /etc/group) ; \
    else \
        groupadd -f -g {gid} {username} ; \
    fi
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN if grep -q ":{uid}:" /etc/passwd ; then \
        usermod -l {username} -d {homedir} -m $(awk -F : '{{ if ($3 == {uid}) {{ print $1 }} }}' /etc/passwd) ; \
    else \
        useradd -d {homedir} -m -u {uid} -g {gid} {username} ; \
    fi
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

# Update library cache
RUN ldconfig

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)
