1#!/usr/bin/env python3
2#
3# Build the required docker image to run package unit tests
4#
5# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
8#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
11#   BUILD_URL:        <optional, used to detect running under CI context
12#                     (ex. Jenkins)>
13#   BRANCH:           <optional, branch to build from each of the openbmc/
14#                     repositories>
15#                     default is master, which will be used if input branch not
16#                     provided or not found
17#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
18#                     default ones in /etc/apt/sources.list>
19#                     default is empty, and no mirror is used.
20#   http_proxy        The HTTP address of the proxy server to connect to.
21#                     Default: "", proxy is not setup if this is not set
22
23import os
24import sys
25import threading
26from datetime import date
27from hashlib import sha256
28from sh import docker, git, nproc, uname  # type: ignore
29from typing import Any, Callable, Dict, Iterable, Optional
30
try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:
    # Only the import failure should trigger the fallback; a bare 'except:'
    # would also swallow SystemExit / KeyboardInterrupt.

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()
41
# Declare some variables used in package definitions.
# Installation prefix used by every package build below.
prefix = "/usr/local"
# CPU count for parallel 'make -j' invocations; nproc comes from the 'sh'
# module, so strip() removes the trailing newline of the command output.
proc_count = nproc().strip()
45
46
class PackageDef(TypedDict, total=False):
    """Package definition for the 'packages' dictionary.

    total=False makes every key optional at the type level; the per-field
    comments below note which are required in practice.
    """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.
75
76
# Packages to include in image.
#   Keys are "<github-org>/<repo>" (or a bare name like "boost" when a
#   custom 'url' lambda supplies the download location).  See PackageDef
#   for the meaning of each field.
packages = {
    "boost": PackageDef(
        rev="1.80.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
        ],
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-5.15 2022-09-27
    "openbmc/linux": PackageDef(
        rev="c9fb275212dac5b300311f6f6b1dcc5ed18a3493",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
    "CPPAlliance/url": PackageDef(
        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
        build_type="custom",
        build_steps=[f"cp -a include/** {prefix}/include/"],
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/pldm": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "nlohmann/json",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibpldm-only=enabled",
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]
272
# Define common flags used for builds.
# Flags passed to './configure' for autoconf-based packages.
configure_flags = f"--prefix={prefix}"

# Flags passed to 'cmake' for cmake-based packages.
cmake_flags = (
    "-DBUILD_SHARED_LIBS=ON "
    "-DCMAKE_BUILD_TYPE=RelWithDebInfo "
    f"-DCMAKE_INSTALL_PREFIX:PATH={prefix} "
    "-GNinja "
    "-DCMAKE_MAKE_PROGRAM=ninja"
)

# Flags passed to 'meson' for meson-based packages.
meson_flags = f"--wrap-mode=nodownload -Dprefix={prefix}"
294
295
class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Each instance is a thread that builds one package's Docker stage,
    'join'ing on the threads of its dependencies first.  Generally, this
    class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary so that revisions/tags recorded during
    # the build stay local to this class.
    packages = packages.copy()

    # Lock used for thread-safety of the shared 'packages' dict.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar )"""
        super().__init__()

        self.package = pkg
        # Any exception raised during the build is saved here so
        # generate_all() can re-raise it on the main thread.
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef, plus a back-pointer so
        # dependent packages can find our thread object.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which could be being modified.
        #   (Using 'with' guarantees the lock is released even on error.)
        with Package.lock:
            deps: Iterable[Package] = [
                Package.packages[deppkg]["__package"]
                for deppkg in self.pkg_def.get("depends", [])
            ]

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        with Package.lock:
            tag = Docker.tagname(self._stagename(), dockerfile)
            self.pkg_def["__tag"] = tag

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.

        Raises the first exception recorded by any package thread.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        with Package.lock:
            for t in pkg_threads:
                t.start()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """Look up the HEAD for a package missing a static rev."""

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")

        # Find the branch matching {branch} (or fallback to master).
        #   This section is locked because we are modifying the PackageDef.
        with Package.lock:
            for line in lookup.split("\n"):
                if f"refs/heads/{branch}" in line:
                    self.pkg_def["rev"] = line.split()[0]
                elif (
                    "refs/heads/master" in line or "refs/heads/main" in line
                ) and "rev" not in self.pkg_def:
                    self.pkg_def["rev"] = line.split()[0]

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """Get the download URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack to source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        # Pick the tar decompression flag from the archive suffix.
        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """Formulate the dockerfile snippet necessary to COPY all depends."""

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        """Build/install commands for an autoconf-based package."""
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        """Build/install commands for a cmake-based package."""
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        """Build/install commands for a 'custom' package ('build_steps')."""
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        """Build/install commands for a plain-Makefile package."""
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        """Build/install commands for a meson-based package."""
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result
574
class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """Generate a timestamp for today using the ISO week (YYYY-Www)."""
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile.

        pkgname may be None (used for the final, unnamed image); the tag
        always embeds the ISO week (forcing weekly rebuilds) and a hash of
        the Dockerfile contents so any content change yields a new tag.
        """
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(f"Image {tag} already exists.  Skipping.", file=sys.stderr)
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )
630
631
# Read a bunch of environment variables.
docker_image_name = os.environ.get("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test")
# NOTE(review): truthiness-based, so any non-empty value (even "0") forces a
# rebuild — confirm that matches the documented "non-zero value" contract.
force_build = os.environ.get("FORCE_DOCKER_BUILD")
# BUILD_URL is set by Jenkins-style CI; only its truthiness is used below.
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

# Gerrit/Jenkins supply these when building against a specific patchset.
gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0
655
# Determine the architecture for Docker.
#   Map the machine architecture to the Docker Hub image-name prefix.
arch = uname("-m").strip()
arch_to_base = {
    "ppc64le": "ppc64le/",
    "x86_64": "",
    "aarch64": "arm64v8/",
}
if arch in arch_to_base:
    docker_base = arch_to_base[arch]
else:
    print(
        f"Unsupported system architecture({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)
670
# Special flags if setting up a deb mirror.
#   When UBUNTU_MIRROR is set, rewrite sources.list to point the release,
#   -updates, -security, -proposed and -backports pockets at the mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""
681
# Special flags for proxying.
#   When http_proxy is set, route git, apt-key and docker-build traffic
#   through the proxy.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    # Dockerfile snippet configuring git's HTTP proxy inside the image.
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    # Extra option for 'apt-key adv' so keyserver lookups use the proxy.
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    # Build-args passed to every 'docker build' invocation.
    proxy_args = [
        "--build-arg",
        f"http_proxy={http_proxy}",
        "--build-arg",
        f"https_proxy={http_proxy}",
    ]
701
# Create base Dockerfile.
#   This stage installs the distro toolchain and -dev packages that every
#   per-package build stage (and the final image) is layered on top of.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-12 \
    g++-12 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev\
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

RUN npm install -g eslint@latest eslint-plugin-json@latest

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""
821
# In CI, embed a weekly-changing echo so the Docker cache is busted and
# 'apt-get update' actually re-runs at least weekly.
if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to polute the docker cache regularly force us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

# Python tooling used by the various repositories' CI scripts.
dockerfile_base += f"""
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install packaging
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""

# Note, we use sha1s here because the newest gitlint release doesn't include
# some features we need.  Next time they release, we can rely on a direct
# release tag
dockerfile_base += f"""
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68\#subdirectory=gitlint-core
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68
"""
849
# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
#   The final image copies every package stage's install tree on top of the
#   base image and sets up the CI user account.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)
888