#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which is used if the input branch is
#                     not provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy:       <optional, the HTTP address of the proxy server to
#                     connect to; no proxy is set up if this is not set>

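# Example invocation (illustrative values; the script prints the tag of the
# final image on stdout):
#   DISTRO=ubuntu:kinetic BRANCH=master ./scripts/build-unit-test-docker
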
import os
import sys
import threading
from datetime import date
from hashlib import sha256

# typing.Dict is used for type-hints.
from typing import Any, Callable, Dict, Iterable, Optional  # noqa: F401

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except Exception:

    class TypedDict(dict):  # type: ignore
        # Override __init_subclass__ to swallow the 'total' keyword argument.
        def __init_subclass__(cls, **kwargs: Any) -> None:
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """Package Definition for packages dictionary."""

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass to the configuration
    #   tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.81.0",
        url=(
            lambda pkg, rev: f"https://boostorg.jfrog.io/artifactory/main/release/{rev}/source/{pkg}_{rev.replace('.', '_')}.tar.gz"  # noqa: E501
        ),
        build_type="custom",
        build_steps=[
            (
                "./bootstrap.sh"
                f" --prefix={prefix} --with-libraries=context,coroutine"
            ),
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            (
                f"ln -s {prefix}/include/nlohmann/json.hpp"
                f" {prefix}/include/json.hpp"
            ),
        ],
    ),
    "json-c/json-c": PackageDef(
        rev="json-c-0.16-20220414",
        build_type="cmake",
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-6.0 2022-11-28
    "openbmc/linux": PackageDef(
        rev="1b16243b004ce4d977a9f3b9d9e715cf5028f867",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/libpldm": PackageDef(
        build_type="meson",
        config_flags=[
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar)"""
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which may be modified concurrently.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """Look up the HEAD revision when a static rev is missing."""

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git(
            "ls-remote", "--heads", f"https://github.com/{self.package}"
        )

        # Find the branch matching {branch} (or fall back to master/main).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif (
                "refs/heads/master" in line or "refs/heads/main" in line
            ) and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """Get the URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
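        #   e.g. https://github.com/openbmc/sdbusplus/archive/<rev>.tar.gz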
452        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"
453
454    def _cmd_download(self) -> str:
455        """Formulate the command necessary to download and unpack to source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """Formulate the dockerfile snippet necessary to COPY all depends."""

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for an upstream Docker bug with multiple COPY
            # commands: https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
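        # Renders a shell fragment like this (illustrative, for a package
        # configured with -Dtests=disabled):
        #   meson builddir --wrap-mode=nodownload -Dprefix=/usr/local \
        #       -Dtests=disabled && ninja -C builddir && ninja -C builddir install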
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """Generate a timestamp for today using the ISO week."""
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile."""
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image from the Dockerfile and tag it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(
                    f"Image {tag} already exists.  Skipping.", file=sys.stderr
                )
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get(
    "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test"
)
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture ({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME) \
        main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-updates \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-security \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-proposed \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-backports \
            main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )
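# e.g. with http_proxy=http://proxy.example.com:3128 (illustrative), every
#   'docker build' gets "--build-arg http_proxy=..." and "--build-arg
#   https_proxy=..." appended via proxy_args.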

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' \
        /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    autoconf \
    autoconf-archive \
    bison \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    cmake \
    curl \
    dbus \
    device-tree-compiler \
    flex \
    g++-12 \
    gcc-12 \
    git \
    iproute2 \
    iputils-ping \
    libc6-dbg \
    libc6-dev \
    libconfig++-dev \
    libcryptsetup-dev \
    libdbus-1-dev \
    libevdev-dev \
    libgpiod-dev \
    libi2c-dev \
    libjpeg-dev \
    libjson-perl \
    libldap2-dev \
    libmimetic-dev \
    libnl-3-dev \
    libnl-genl-3-dev \
    libpam0g-dev \
    libpciaccess-dev \
    libperlio-gzip-perl \
    libpng-dev \
    libprotobuf-dev \
    libsnmp-dev \
    libssl-dev \
    libsystemd-dev \
    libtool \
    liburing2-dbgsym \
    liburing-dev \
    libxml2-utils \
    libxml-simple-perl \
    ninja-build \
    npm \
    pkg-config \
    protobuf-compiler \
    python3 \
    python3-dev \
    python3-git \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-socks \
    python3-yaml \
    rsync \
    shellcheck \
    sudo \
    systemd \
    valgrind \
    valgrind-dbg \
    vim \
    wget \
    xxd

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py \
        /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force
# us to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += """
RUN pip3 install beautysh
RUN pip3 install black
RUN pip3 install codespell
RUN pip3 install flake8
RUN pip3 install gitlint
RUN pip3 install inflection
RUN pip3 install isort
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install protobuf
RUN pip3 install requests

RUN npm install -g \
        eslint@latest eslint-plugin-json@latest \
        markdownlint-cli@latest \
        prettier@latest
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || \
        useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)
