1#!/usr/bin/env python3
2#
3# Build the required docker image to run package unit tests
4#
5# Script Variables:
6#   DOCKER_IMG_NAME:  <optional, the name of the docker image to generate>
7#                     default is openbmc/ubuntu-unit-test
8#   DISTRO:           <optional, the distro to build a docker image against>
9#   FORCE_DOCKER_BUILD: <optional, a non-zero value with force all Docker
10#                     images to be rebuilt rather than reusing caches.>
11#   BUILD_URL:        <optional, used to detect running under CI context
12#                     (ex. Jenkins)>
13#   BRANCH:           <optional, branch to build from each of the openbmc/
14#                     repositories>
15#                     default is master, which will be used if input branch not
16#                     provided or not found
17#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
18#                     default ones in /etc/apt/sources.list>
19#                     default is empty, and no mirror is used.
20#   http_proxy        The HTTP address of the proxy server to connect to.
21#                     Default: "", proxy is not setup if this is not set
22
23import os
24import sys
25import threading
26from datetime import date
27from hashlib import sha256
28
29# typing.Dict is used for type-hints.
30from typing import Any, Callable, Dict, Iterable, Optional  # noqa: F401
31
32from sh import docker, git, nproc, uname  # type: ignore
33
34try:
35    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
36    from typing import TypedDict
37except Exception:
38
39    class TypedDict(dict):  # type: ignore
40        # We need to do this to eat the 'total' argument.
41        def __init_subclass__(cls, **kwargs: Any) -> None:
42            super().__init_subclass__()
43
44
45# Declare some variables used in package definitions.
46prefix = "/usr/local"
47proc_count = nproc().strip()
48
49
50class PackageDef(TypedDict, total=False):
51    """Package Definition for packages dictionary."""
52
53    # rev [optional]: Revision of package to use.
54    rev: str
55    # url [optional]: lambda function to create URL: (package, rev) -> url.
56    url: Callable[[str, str], str]
57    # depends [optional]: List of package dependencies.
58    depends: Iterable[str]
59    # build_type [required]: Build type used for package.
60    #   Currently supported: autoconf, cmake, custom, make, meson
61    build_type: str
62    # build_steps [optional]: Steps to run for 'custom' build_type.
63    build_steps: Iterable[str]
64    # config_flags [optional]: List of options to pass configuration tool.
65    config_flags: Iterable[str]
66    # config_env [optional]: List of environment variables to set for config.
67    config_env: Iterable[str]
68    # custom_post_dl [optional]: List of steps to run after download, but
69    #   before config / build / install.
70    custom_post_dl: Iterable[str]
71    # custom_post_install [optional]: List of steps to run after install.
72    custom_post_install: Iterable[str]
73
74    # __tag [private]: Generated Docker tag name for package stage.
75    __tag: str
76    # __package [private]: Package object associated with this package.
77    __package: Any  # Type is Package, but not defined yet.
78
79
80# Packages to include in image.
81packages = {
82    "boost": PackageDef(
83        rev="1.81.0",
84        url=(
85            lambda pkg, rev: f"https://boostorg.jfrog.io/artifactory/main/release/{rev}/source/{pkg}_{rev.replace('.', '_')}.tar.gz"  # noqa: E501
86        ),
87        build_type="custom",
88        build_steps=[
89            (
90                "./bootstrap.sh"
91                f" --prefix={prefix} --with-libraries=context,coroutine"
92            ),
93            "./b2",
94            f"./b2 install --prefix={prefix}",
95        ],
96    ),
97    "USCiLab/cereal": PackageDef(
98        rev="v1.3.2",
99        build_type="custom",
100        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
101    ),
102    "danmar/cppcheck": PackageDef(
103        rev="2.9",
104        build_type="cmake",
105    ),
106    "CLIUtils/CLI11": PackageDef(
107        rev="v1.9.1",
108        build_type="cmake",
109        config_flags=[
110            "-DBUILD_TESTING=OFF",
111            "-DCLI11_BUILD_DOCS=OFF",
112            "-DCLI11_BUILD_EXAMPLES=OFF",
113        ],
114    ),
115    "fmtlib/fmt": PackageDef(
116        rev="9.1.0",
117        build_type="cmake",
118        config_flags=[
119            "-DFMT_DOC=OFF",
120            "-DFMT_TEST=OFF",
121        ],
122    ),
123    "Naios/function2": PackageDef(
124        rev="4.2.1",
125        build_type="custom",
126        build_steps=[
127            f"mkdir {prefix}/include/function2",
128            f"cp include/function2/function2.hpp {prefix}/include/function2/",
129        ],
130    ),
131    # release-1.12.1
132    "google/googletest": PackageDef(
133        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
134        build_type="cmake",
135        config_env=["CXXFLAGS=-std=c++20"],
136        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
137    ),
138    "nlohmann/json": PackageDef(
139        rev="v3.11.2",
140        build_type="cmake",
141        config_flags=["-DJSON_BuildTests=OFF"],
142        custom_post_install=[
143            (
144                f"ln -s {prefix}/include/nlohmann/json.hpp"
145                f" {prefix}/include/json.hpp"
146            ),
147        ],
148    ),
149    "json-c/json-c": PackageDef(
150        rev="json-c-0.16-20220414",
151        build_type="cmake",
152    ),
153    # Snapshot from 2019-05-24
154    "linux-test-project/lcov": PackageDef(
155        rev="v1.15",
156        build_type="make",
157    ),
158    # dev-6.0 2022-11-28
159    "openbmc/linux": PackageDef(
160        rev="1b16243b004ce4d977a9f3b9d9e715cf5028f867",
161        build_type="custom",
162        build_steps=[
163            f"make -j{proc_count} defconfig",
164            f"make INSTALL_HDR_PATH={prefix} headers_install",
165        ],
166    ),
167    "LibVNC/libvncserver": PackageDef(
168        rev="LibVNCServer-0.9.13",
169        build_type="cmake",
170    ),
171    "leethomason/tinyxml2": PackageDef(
172        rev="9.0.0",
173        build_type="cmake",
174    ),
175    "tristanpenman/valijson": PackageDef(
176        rev="v0.7",
177        build_type="cmake",
178        config_flags=[
179            "-Dvalijson_BUILD_TESTS=0",
180            "-Dvalijson_INSTALL_HEADERS=1",
181        ],
182    ),
183    "open-power/pdbg": PackageDef(build_type="autoconf"),
184    "openbmc/gpioplus": PackageDef(
185        depends=["openbmc/stdplus"],
186        build_type="meson",
187        config_flags=[
188            "-Dexamples=false",
189            "-Dtests=disabled",
190        ],
191    ),
192    "openbmc/phosphor-dbus-interfaces": PackageDef(
193        depends=["openbmc/sdbusplus"],
194        build_type="meson",
195        config_flags=["-Dgenerate_md=false"],
196    ),
197    "openbmc/phosphor-logging": PackageDef(
198        depends=[
199            "USCiLab/cereal",
200            "openbmc/phosphor-dbus-interfaces",
201            "openbmc/sdbusplus",
202            "openbmc/sdeventplus",
203        ],
204        build_type="meson",
205        config_flags=[
206            "-Dlibonly=true",
207            "-Dtests=disabled",
208            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
209        ],
210    ),
211    "openbmc/phosphor-objmgr": PackageDef(
212        depends=[
213            "CLIUtils/CLI11",
214            "boost",
215            "leethomason/tinyxml2",
216            "openbmc/phosphor-dbus-interfaces",
217            "openbmc/phosphor-logging",
218            "openbmc/sdbusplus",
219        ],
220        build_type="meson",
221        config_flags=[
222            "-Dtests=disabled",
223        ],
224    ),
225    "openbmc/libpldm": PackageDef(
226        build_type="meson",
227        config_flags=[
228            "-Doem-ibm=enabled",
229            "-Dtests=disabled",
230        ],
231    ),
232    "openbmc/sdbusplus": PackageDef(
233        build_type="meson",
234        custom_post_dl=[
235            "cd tools",
236            f"./setup.py install --root=/ --prefix={prefix}",
237            "cd ..",
238        ],
239        config_flags=[
240            "-Dexamples=disabled",
241            "-Dtests=disabled",
242        ],
243    ),
244    "openbmc/sdeventplus": PackageDef(
245        depends=[
246            "Naios/function2",
247            "openbmc/stdplus",
248        ],
249        build_type="meson",
250        config_flags=[
251            "-Dexamples=false",
252            "-Dtests=disabled",
253        ],
254    ),
255    "openbmc/stdplus": PackageDef(
256        depends=[
257            "fmtlib/fmt",
258            "google/googletest",
259            "Naios/function2",
260        ],
261        build_type="meson",
262        config_flags=[
263            "-Dexamples=false",
264            "-Dtests=disabled",
265            "-Dgtest=enabled",
266        ],
267    ),
268}  # type: Dict[str, PackageDef]
269
270# Define common flags used for builds
271configure_flags = " ".join(
272    [
273        f"--prefix={prefix}",
274    ]
275)
276cmake_flags = " ".join(
277    [
278        "-DBUILD_SHARED_LIBS=ON",
279        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
280        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
281        "-GNinja",
282        "-DCMAKE_MAKE_PROGRAM=ninja",
283    ]
284)
285meson_flags = " ".join(
286    [
287        "--wrap-mode=nodownload",
288        f"-Dprefix={prefix}",
289    ]
290)
291
292
293class Package(threading.Thread):
294    """Class used to build the Docker stages for each package.
295
296    Generally, this class should not be instantiated directly but through
297    Package.generate_all().
298    """
299
300    # Copy the packages dictionary.
301    packages = packages.copy()
302
303    # Lock used for thread-safety.
304    lock = threading.Lock()
305
306    def __init__(self, pkg: str):
307        """pkg - The name of this package (ex. foo/bar )"""
308        super(Package, self).__init__()
309
310        self.package = pkg
311        self.exception = None  # type: Optional[Exception]
312
313        # Reference to this package's
314        self.pkg_def = Package.packages[pkg]
315        self.pkg_def["__package"] = self
316
317    def run(self) -> None:
318        """Thread 'run' function.  Builds the Docker stage."""
319
320        # In case this package has no rev, fetch it from Github.
321        self._update_rev()
322
323        # Find all the Package objects that this package depends on.
324        #   This section is locked because we are looking into another
325        #   package's PackageDef dict, which could be being modified.
326        Package.lock.acquire()
327        deps: Iterable[Package] = [
328            Package.packages[deppkg]["__package"]
329            for deppkg in self.pkg_def.get("depends", [])
330        ]
331        Package.lock.release()
332
333        # Wait until all the depends finish building.  We need them complete
334        # for the "COPY" commands.
335        for deppkg in deps:
336            deppkg.join()
337
338        # Generate this package's Dockerfile.
339        dockerfile = f"""
340FROM {docker_base_img_name}
341{self._df_copycmds()}
342{self._df_build()}
343"""
344
345        # Generate the resulting tag name and save it to the PackageDef.
346        #   This section is locked because we are modifying the PackageDef,
347        #   which can be accessed by other threads.
348        Package.lock.acquire()
349        tag = Docker.tagname(self._stagename(), dockerfile)
350        self.pkg_def["__tag"] = tag
351        Package.lock.release()
352
353        # Do the build / save any exceptions.
354        try:
355            Docker.build(self.package, tag, dockerfile)
356        except Exception as e:
357            self.exception = e
358
359    @classmethod
360    def generate_all(cls) -> None:
361        """Ensure a Docker stage is created for all defined packages.
362
363        These are done in parallel but with appropriate blocking per
364        package 'depends' specifications.
365        """
366
367        # Create a Package for each defined package.
368        pkg_threads = [Package(p) for p in cls.packages.keys()]
369
370        # Start building them all.
371        #   This section is locked because threads depend on each other,
372        #   based on the packages, and they cannot 'join' on a thread
373        #   which is not yet started.  Adding a lock here allows all the
374        #   threads to start before they 'join' their dependencies.
375        Package.lock.acquire()
376        for t in pkg_threads:
377            t.start()
378        Package.lock.release()
379
380        # Wait for completion.
381        for t in pkg_threads:
382            t.join()
383            # Check if the thread saved off its own exception.
384            if t.exception:
385                print(f"Package {t.package} failed!", file=sys.stderr)
386                raise t.exception
387
388    @staticmethod
389    def df_all_copycmds() -> str:
390        """Formulate the Dockerfile snippet necessary to copy all packages
391        into the final image.
392        """
393        return Package.df_copycmds_set(Package.packages.keys())
394
395    @classmethod
396    def depcache(cls) -> str:
397        """Create the contents of the '/tmp/depcache'.
398        This file is a comma-separated list of "<pkg>:<rev>".
399        """
400
401        # This needs to be sorted for consistency.
402        depcache = ""
403        for pkg in sorted(cls.packages.keys()):
404            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
405        return depcache
406
407    def _update_rev(self) -> None:
408        """Look up the HEAD for missing a static rev."""
409
410        if "rev" in self.pkg_def:
411            return
412
413        # Check if Jenkins/Gerrit gave us a revision and use it.
414        if gerrit_project == self.package and gerrit_rev:
415            print(
416                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
417                file=sys.stderr,
418            )
419            self.pkg_def["rev"] = gerrit_rev
420            return
421
422        # Ask Github for all the branches.
423        lookup = git(
424            "ls-remote", "--heads", f"https://github.com/{self.package}"
425        )
426
427        # Find the branch matching {branch} (or fallback to master).
428        #   This section is locked because we are modifying the PackageDef.
429        Package.lock.acquire()
430        for line in lookup.split("\n"):
431            if f"refs/heads/{branch}" in line:
432                self.pkg_def["rev"] = line.split()[0]
433            elif (
434                "refs/heads/master" in line or "refs/heads/main" in line
435            ) and "rev" not in self.pkg_def:
436                self.pkg_def["rev"] = line.split()[0]
437        Package.lock.release()
438
439    def _stagename(self) -> str:
440        """Create a name for the Docker stage associated with this pkg."""
441        return self.package.replace("/", "-").lower()
442
443    def _url(self) -> str:
444        """Get the URL for this package."""
445        rev = self.pkg_def["rev"]
446
447        # If the lambda exists, call it.
448        if "url" in self.pkg_def:
449            return self.pkg_def["url"](self.package, rev)
450
451        # Default to the github archive URL.
452        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"
453
454    def _cmd_download(self) -> str:
455        """Formulate the command necessary to download and unpack to source."""
456
457        url = self._url()
458        if ".tar." not in url:
459            raise NotImplementedError(
460                f"Unhandled download type for {self.package}: {url}"
461            )
462
463        cmd = f"curl -L {url} | tar -x"
464
465        if url.endswith(".bz2"):
466            cmd += "j"
467        elif url.endswith(".gz"):
468            cmd += "z"
469        else:
470            raise NotImplementedError(
471                f"Unknown tar flags needed for {self.package}: {url}"
472            )
473
474        return cmd
475
476    def _cmd_cd_srcdir(self) -> str:
477        """Formulate the command necessary to 'cd' into the source dir."""
478        return f"cd {self.package.split('/')[-1]}*"
479
480    def _df_copycmds(self) -> str:
481        """Formulate the dockerfile snippet necessary to COPY all depends."""
482
483        if "depends" not in self.pkg_def:
484            return ""
485        return Package.df_copycmds_set(self.pkg_def["depends"])
486
487    @staticmethod
488    def df_copycmds_set(pkgs: Iterable[str]) -> str:
489        """Formulate the Dockerfile snippet necessary to COPY a set of
490        packages into a Docker stage.
491        """
492
493        copy_cmds = ""
494
495        # Sort the packages for consistency.
496        for p in sorted(pkgs):
497            tag = Package.packages[p]["__tag"]
498            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
499            # Workaround for upstream docker bug and multiple COPY cmds
500            # https://github.com/moby/moby/issues/37965
501            copy_cmds += "RUN true\n"
502
503        return copy_cmds
504
505    def _df_build(self) -> str:
506        """Formulate the Dockerfile snippet necessary to download, build, and
507        install a package into a Docker stage.
508        """
509
510        # Download and extract source.
511        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "
512
513        # Handle 'custom_post_dl' commands.
514        custom_post_dl = self.pkg_def.get("custom_post_dl")
515        if custom_post_dl:
516            result += " && ".join(custom_post_dl) + " && "
517
518        # Build and install package based on 'build_type'.
519        build_type = self.pkg_def["build_type"]
520        if build_type == "autoconf":
521            result += self._cmd_build_autoconf()
522        elif build_type == "cmake":
523            result += self._cmd_build_cmake()
524        elif build_type == "custom":
525            result += self._cmd_build_custom()
526        elif build_type == "make":
527            result += self._cmd_build_make()
528        elif build_type == "meson":
529            result += self._cmd_build_meson()
530        else:
531            raise NotImplementedError(
532                f"Unhandled build type for {self.package}: {build_type}"
533            )
534
535        # Handle 'custom_post_install' commands.
536        custom_post_install = self.pkg_def.get("custom_post_install")
537        if custom_post_install:
538            result += " && " + " && ".join(custom_post_install)
539
540        return result
541
542    def _cmd_build_autoconf(self) -> str:
543        options = " ".join(self.pkg_def.get("config_flags", []))
544        env = " ".join(self.pkg_def.get("config_env", []))
545        result = "./bootstrap.sh && "
546        result += f"{env} ./configure {configure_flags} {options} && "
547        result += f"make -j{proc_count} && make install"
548        return result
549
550    def _cmd_build_cmake(self) -> str:
551        options = " ".join(self.pkg_def.get("config_flags", []))
552        env = " ".join(self.pkg_def.get("config_env", []))
553        result = "mkdir builddir && cd builddir && "
554        result += f"{env} cmake {cmake_flags} {options} .. && "
555        result += "cmake --build . --target all && "
556        result += "cmake --build . --target install && "
557        result += "cd .."
558        return result
559
560    def _cmd_build_custom(self) -> str:
561        return " && ".join(self.pkg_def.get("build_steps", []))
562
563    def _cmd_build_make(self) -> str:
564        return f"make -j{proc_count} && make install"
565
566    def _cmd_build_meson(self) -> str:
567        options = " ".join(self.pkg_def.get("config_flags", []))
568        env = " ".join(self.pkg_def.get("config_env", []))
569        result = f"{env} meson builddir {meson_flags} {options} && "
570        result += "ninja -C builddir && ninja -C builddir install"
571        return result
572
573
574class Docker:
575    """Class to assist with Docker interactions.  All methods are static."""
576
577    @staticmethod
578    def timestamp() -> str:
579        """Generate a timestamp for today using the ISO week."""
580        today = date.today().isocalendar()
581        return f"{today[0]}-W{today[1]:02}"
582
583    @staticmethod
584    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
585        """Generate a tag name for a package using a hash of the Dockerfile."""
586        result = docker_image_name
587        if pkgname:
588            result += "-" + pkgname
589
590        result += ":" + Docker.timestamp()
591        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]
592
593        return result
594
595    @staticmethod
596    def build(pkg: str, tag: str, dockerfile: str) -> None:
597        """Build a docker image using the Dockerfile and tagging it with 'tag'."""
598
599        # If we're not forcing builds, check if it already exists and skip.
600        if not force_build:
601            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
602                print(
603                    f"Image {tag} already exists.  Skipping.", file=sys.stderr
604                )
605                return
606
607        # Build it.
608        #   Capture the output of the 'docker build' command and send it to
609        #   stderr (prefixed with the package name).  This allows us to see
610        #   progress but not polute stdout.  Later on we output the final
611        #   docker tag to stdout and we want to keep that pristine.
612        #
613        #   Other unusual flags:
614        #       --no-cache: Bypass the Docker cache if 'force_build'.
615        #       --force-rm: Clean up Docker processes if they fail.
616        docker.build(
617            proxy_args,
618            "--network=host",
619            "--force-rm",
620            "--no-cache=true" if force_build else "--no-cache=false",
621            "-t",
622            tag,
623            "-",
624            _in=dockerfile,
625            _out=(
626                lambda line: print(
627                    pkg + ":", line, end="", file=sys.stderr, flush=True
628                )
629            ),
630        )
631
632
633# Read a bunch of environment variables.
634docker_image_name = os.environ.get(
635    "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test"
636)
637force_build = os.environ.get("FORCE_DOCKER_BUILD")
638is_automated_ci_build = os.environ.get("BUILD_URL", False)
639distro = os.environ.get("DISTRO", "ubuntu:kinetic")
640branch = os.environ.get("BRANCH", "master")
641ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
642http_proxy = os.environ.get("http_proxy")
643
644gerrit_project = os.environ.get("GERRIT_PROJECT")
645gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")
646
647# Set up some common variables.
648username = os.environ.get("USER", "root")
649homedir = os.environ.get("HOME", "/root")
650gid = os.getgid()
651uid = os.getuid()
652
653# Use well-known constants if user is root
654if username == "root":
655    homedir = "/root"
656    gid = 0
657    uid = 0
658
659# Determine the architecture for Docker.
660arch = uname("-m").strip()
661if arch == "ppc64le":
662    docker_base = "ppc64le/"
663elif arch == "x86_64":
664    docker_base = ""
665elif arch == "aarch64":
666    docker_base = "arm64v8/"
667else:
668    print(
669        f"Unsupported system architecture({arch}) found for docker image",
670        file=sys.stderr,
671    )
672    sys.exit(1)
673
674# Special flags if setting up a deb mirror.
675mirror = ""
676if "ubuntu" in distro and ubuntu_mirror:
677    mirror = f"""
678RUN echo "deb {ubuntu_mirror} \
679        $(. /etc/os-release && echo $VERSION_CODENAME) \
680        main restricted universe multiverse" > /etc/apt/sources.list && \\
681    echo "deb {ubuntu_mirror} \
682        $(. /etc/os-release && echo $VERSION_CODENAME)-updates \
683            main restricted universe multiverse" >> /etc/apt/sources.list && \\
684    echo "deb {ubuntu_mirror} \
685        $(. /etc/os-release && echo $VERSION_CODENAME)-security \
686            main restricted universe multiverse" >> /etc/apt/sources.list && \\
687    echo "deb {ubuntu_mirror} \
688        $(. /etc/os-release && echo $VERSION_CODENAME)-proposed \
689            main restricted universe multiverse" >> /etc/apt/sources.list && \\
690    echo "deb {ubuntu_mirror} \
691        $(. /etc/os-release && echo $VERSION_CODENAME)-backports \
692            main restricted universe multiverse" >> /etc/apt/sources.list
693"""
694
695# Special flags for proxying.
696proxy_cmd = ""
697proxy_keyserver = ""
698proxy_args = []
699if http_proxy:
700    proxy_cmd = f"""
701RUN echo "[http]" >> {homedir}/.gitconfig && \
702    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
703"""
704    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"
705
706    proxy_args.extend(
707        [
708            "--build-arg",
709            f"http_proxy={http_proxy}",
710            "--build-arg",
711            f"https_proxy={http_proxy}",
712        ]
713    )
714
715# Create base Dockerfile.
716dockerfile_base = f"""
717FROM {docker_base}{distro}
718
719{mirror}
720
721ENV DEBIAN_FRONTEND noninteractive
722
723ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"
724
725# Sometimes the ubuntu key expires and we need a way to force an execution
726# of the apt-get commands for the dbgsym-keyring.  When this happens we see
727# an error like: "Release: The following signatures were invalid:"
728# Insert a bogus echo that we can change here when we get this error to force
729# the update.
730RUN echo "ubuntu keyserver rev as of 2021-04-21"
731
732# We need the keys to be imported for dbgsym repos
733# New releases have a package, older ones fall back to manual fetching
734# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
735RUN apt-get update && apt-get dist-upgrade -yy && \
736    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
737        ( apt-get install -yy dirmngr && \
738          apt-key adv --keyserver keyserver.ubuntu.com \
739                      {proxy_keyserver} \
740                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )
741
742# Parse the current repo list into a debug repo list
743RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' \
744        /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list
745
746# Remove non-existent debug repos
747RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list
748
749RUN cat /etc/apt/sources.list.d/debug.list
750
751RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
752    autoconf \
753    autoconf-archive \
754    bison \
755    clang-15 \
756    clang-format-15 \
757    clang-tidy-15 \
758    clang-tools-15 \
759    cmake \
760    curl \
761    dbus \
762    device-tree-compiler \
763    flex \
764    g++-12 \
765    gcc-12 \
766    git \
767    iproute2 \
768    iputils-ping \
769    libc6-dbg \
770    libc6-dev \
771    libconfig++-dev \
772    libcryptsetup-dev \
773    libdbus-1-dev \
774    libevdev-dev \
775    libgpiod-dev \
776    libi2c-dev \
777    libjpeg-dev \
778    libjson-perl \
779    libldap2-dev \
780    libmimetic-dev \
781    libnl-3-dev \
782    libnl-genl-3-dev \
783    libpam0g-dev \
784    libpciaccess-dev \
785    libperlio-gzip-perl \
786    libpng-dev \
787    libprotobuf-dev \
788    libsnmp-dev \
789    libssl-dev \
790    libsystemd-dev \
791    libtool \
792    liburing2-dbgsym \
793    liburing-dev \
794    libxml2-utils \
795    libxml-simple-perl \
796    ninja-build \
797    npm \
798    pkg-config \
799    protobuf-compiler \
800    python3 \
801    python3-dev\
802    python3-git \
803    python3-mako \
804    python3-pip \
805    python3-setuptools \
806    python3-socks \
807    python3-yaml \
808    rsync \
809    shellcheck \
810    sudo \
811    systemd \
812    valgrind \
813    valgrind-dbg \
814    vim \
815    wget \
816    xxd
817
818# Kinetic comes with GCC-12, so skip this.
819#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
820#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
821#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
822#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
823#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
824#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12
825
826RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
827  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
828  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
829  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
830  --slave /usr/bin/run-clang-tidy run-clang-tidy.py \
831        /usr/bin/run-clang-tidy-15 \
832  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15
833
834"""
835
836if is_automated_ci_build:
837    dockerfile_base += f"""
838# Run an arbitrary command to polute the docker cache regularly force us
839# to re-run `apt-get update` daily.
840RUN echo {Docker.timestamp()}
841RUN apt-get update && apt-get dist-upgrade -yy
842
843"""
844
845dockerfile_base += """
846RUN pip3 install beautysh
847RUN pip3 install black
848RUN pip3 install codespell
849RUN pip3 install flake8
850RUN pip3 install gitlint
851RUN pip3 install inflection
852RUN pip3 install isort
853RUN pip3 install jsonschema
854RUN pip3 install meson==0.63.0
855RUN pip3 install protobuf
856RUN pip3 install requests
857
858RUN npm install -g \
859        eslint@latest eslint-plugin-json@latest \
860        markdownlint-cli@latest \
861        prettier@latest
862"""
863
864# Build the base and stage docker images.
865docker_base_img_name = Docker.tagname("base", dockerfile_base)
866Docker.build("base", docker_base_img_name, dockerfile_base)
867Package.generate_all()
868
869# Create the final Dockerfile.
870dockerfile = f"""
871# Build the final output image
872FROM {docker_base_img_name}
873{Package.df_all_copycmds()}
874
875# Some of our infrastructure still relies on the presence of this file
876# even though it is no longer needed to rebuild the docker environment
877# NOTE: The file is sorted to ensure the ordering is stable.
878RUN echo '{Package.depcache()}' > /tmp/depcache
879
880# Final configuration for the workspace
881RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
882RUN mkdir -p "{os.path.dirname(homedir)}"
883RUN grep -q {uid} /etc/passwd || \
884        useradd -d {homedir} -m -u {uid} -g {gid} {username}
885RUN sed -i '1iDefaults umask=000' /etc/sudoers
886RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
887
888# Ensure user has ability to write to /usr/local for different tool
889# and data installs
890RUN chown -R {username}:{username} /usr/local/share
891
892{proxy_cmd}
893
894RUN /bin/bash
895"""
896
897# Do the final docker build
898docker_final_img_name = Docker.tagname(None, dockerfile)
899Docker.build("final", docker_final_img_name, dockerfile)
900
901# Print the tag of the final image.
902print(docker_final_img_name)
903