#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-empty value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if the input
#                     branch is not provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy:       The HTTP address of the proxy server to connect to.
#                     Default: "", proxy is not set up if this is not set.
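#
# Example invocation (hypothetical values; all variables are optional):
#   DISTRO=ubuntu:kinetic FORCE_DOCKER_BUILD=1 python3 <this-script>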

import os
import sys
import threading
from datetime import date
from hashlib import sha256

# typing.Dict is used for type-hints.
from typing import Any, Callable, Dict, Iterable, Optional  # noqa: F401

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except Exception:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs: Any) -> None:
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """Package Definition for packages dictionary."""

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.81.0",
        url=(
            lambda pkg, rev: f"https://boostorg.jfrog.io/artifactory/main/release/{rev}/source/{pkg}_{rev.replace('.', '_')}.tar.gz"  # noqa: E501
        ),
        build_type="custom",
        build_steps=[
            (
                "./bootstrap.sh"
                f" --prefix={prefix} --with-libraries=context,coroutine"
            ),
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            (
                f"ln -s {prefix}/include/nlohmann/json.hpp"
                f" {prefix}/include/json.hpp"
            ),
        ],
    ),
    "json-c/json-c": PackageDef(
        rev="json-c-0.16-20220414",
        build_type="cmake",
    ),
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/libpldm": PackageDef(
        build_type="meson",
        config_flags=[
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]
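
# A new package is added by appending an entry keyed by its GitHub org/repo
# name, e.g. (a hypothetical package, not part of this image):
#   "openbmc/foo": PackageDef(depends=["openbmc/sdbusplus"], build_type="meson")
# Entries without an explicit 'rev' are resolved to a branch HEAD at runtime
# (see Package._update_rev below).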

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)
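
# For reference, a 'meson' package with no extra config_flags/config_env ends
# up configuring and building with commands of this shape (see
# Package._cmd_build_meson below):
#   meson builddir --wrap-mode=nodownload -Dprefix=/usr/local
#   ninja -C builddir && ninja -C builddir install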


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar)"""
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no rev, fetch it from GitHub.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which may be modified concurrently.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache
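
    # For illustration, depcache() returns a string shaped like the following
    # (packages without a pinned rev resolve to commit SHAs at runtime):
    #   "CLIUtils/CLI11:v1.9.1,LibVNC/libvncserver:LibVNCServer-0.9.13,..."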

    def _update_rev(self) -> None:
        """Look up the branch HEAD for packages missing a static rev."""

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask GitHub for all the branches.
        lookup = git(
            "ls-remote", "--heads", f"https://github.com/{self.package}"
        )

        # Find the branch matching {branch} (or fall back to master/main).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif (
                "refs/heads/master" in line or "refs/heads/main" in line
            ) and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
        return self.package.replace("/", "-").lower()
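
    # e.g. _stagename() maps "openbmc/sdbusplus" to "openbmc-sdbusplus".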

    def _url(self) -> str:
        """Get the URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the GitHub archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd
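
    # For the default GitHub archive URL this produces a command like:
    #   curl -L https://github.com/<pkg>/archive/<rev>.tar.gz | tar -xz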

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """Formulate the Dockerfile snippet necessary to COPY all depends."""

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds
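
    # Each dependency contributes a pair of lines such as:
    #   COPY --from=<dep-stage-tag> /usr/local /usr/local
    #   RUN true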

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result
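
    # The net result is a single Dockerfile RUN instruction of the shape:
    #   RUN <download> && <cd srcdir> && [<post_dl> &&] <build> [&& <post_install>]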

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """Generate a timestamp for today using the ISO week."""
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"
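
    # e.g. any date in ISO week 51 of 2022 yields "2022-W51"; because this
    # timestamp is embedded in every tag, cached images roll over weekly.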

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile."""
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result
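
    # e.g. (with a hypothetical Dockerfile hash):
    #   openbmc/ubuntu-unit-test-boost:2022-W51-0123456789abcdef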

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tag it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(
                    f"Image {tag} already exists.  Skipping.", file=sys.stderr
                )
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get(
    "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test"
)
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Ensure appropriate docker build output to see progress and identify
# any issues
os.environ["BUILDKIT_PROGRESS"] = "plain"

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture ({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME) \
        main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-updates \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-security \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-proposed \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-backports \
            main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' \
        /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    autoconf \
    autoconf-archive \
    bison \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    cmake \
    curl \
    dbus \
    device-tree-compiler \
    flex \
    g++-12 \
    gcc-12 \
    git \
    iproute2 \
    iputils-ping \
    libaudit-dev \
    libc6-dbg \
    libc6-dev \
    libconfig++-dev \
    libcryptsetup-dev \
    libdbus-1-dev \
    libevdev-dev \
    libgpiod-dev \
    libi2c-dev \
    libjpeg-dev \
    libjson-perl \
    libldap2-dev \
    libmimetic-dev \
    libnl-3-dev \
    libnl-genl-3-dev \
    libpam0g-dev \
    libpciaccess-dev \
    libperlio-gzip-perl \
    libpng-dev \
    libprotobuf-dev \
    libsnmp-dev \
    libssl-dev \
    libsystemd-dev \
    libtool \
    liburing2-dbgsym \
    liburing-dev \
    libxml2-utils \
    libxml-simple-perl \
    ninja-build \
    npm \
    pkg-config \
    protobuf-compiler \
    python3 \
    python3-dev \
    python3-git \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-socks \
    python3-yaml \
    rsync \
    shellcheck \
    sudo \
    systemd \
    valgrind \
    valgrind-dbg \
    vim \
    wget \
    xxd

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py \
        /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force
# us to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += """
RUN pip3 install beautysh
RUN pip3 install black
RUN pip3 install codespell
RUN pip3 install flake8
RUN pip3 install gitlint
RUN pip3 install inflection
RUN pip3 install isort
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install protobuf
RUN pip3 install requests

RUN npm install -g \
        eslint@latest eslint-plugin-json@latest \
        markdownlint-cli@latest \
        prettier@latest
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || \
        useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)