#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch not
#                     provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy        The HTTP address of the proxy server to connect to.
#                     Default: "", proxy is not set up if this is not set
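#
# Example invocation (illustrative; script name is a placeholder):
#   DISTRO=ubuntu:kinetic BRANCH=master ./<this-script>
# On success, the tag of the final image is printed to stdout.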

import os
import sys
import threading
from datetime import date
from hashlib import sha256
from typing import Any, Callable, Dict, Iterable, Optional

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """ Package Definition for packages dictionary. """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
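# Each entry maps a GitHub "org/repo" (or a bare name with a custom URL) to a
# PackageDef; a minimal entry looks like this (illustrative, hypothetical repo):
#   "org/repo": PackageDef(rev="v1.0", build_type="cmake"),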
packages = {
    "boost": PackageDef(
        rev="1.80.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
132    "nlohmann/json": PackageDef(
133        rev="v3.11.2",
134        build_type="cmake",
135        config_flags=["-DJSON_BuildTests=OFF"],
136        custom_post_install=[
137            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
138        ],
139    ),
140    # Snapshot from 2019-05-24
141    "linux-test-project/lcov": PackageDef(
142        rev="v1.15",
143        build_type="make",
144    ),
145    # dev-5.15 2022-09-27
146    "openbmc/linux": PackageDef(
147        rev="c9fb275212dac5b300311f6f6b1dcc5ed18a3493",
148        build_type="custom",
149        build_steps=[
150            f"make -j{proc_count} defconfig",
151            f"make INSTALL_HDR_PATH={prefix} headers_install",
152        ],
153    ),
154    "LibVNC/libvncserver": PackageDef(
155        rev="LibVNCServer-0.9.13",
156        build_type="cmake",
157    ),
158    "leethomason/tinyxml2": PackageDef(
159        rev="9.0.0",
160        build_type="cmake",
161    ),
162    # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
163    "CPPAlliance/url": PackageDef(
164        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
165        build_type="custom",
166        build_steps=[f"cp -a include/** {prefix}/include/"],
167    ),
168    "tristanpenman/valijson": PackageDef(
169        rev="v0.7",
170        build_type="cmake",
171        config_flags=[
172            "-Dvalijson_BUILD_TESTS=0",
173            "-Dvalijson_INSTALL_HEADERS=1",
174        ],
175    ),
176    "open-power/pdbg": PackageDef(build_type="autoconf"),
177    "openbmc/gpioplus": PackageDef(
178        depends=["openbmc/stdplus"],
179        build_type="meson",
180        config_flags=[
181            "-Dexamples=false",
182            "-Dtests=disabled",
183        ],
184    ),
185    "openbmc/phosphor-dbus-interfaces": PackageDef(
186        depends=["openbmc/sdbusplus"],
187        build_type="meson",
188        config_flags=["-Dgenerate_md=false"],
189    ),
190    "openbmc/phosphor-logging": PackageDef(
191        depends=[
192            "USCiLab/cereal",
193            "openbmc/phosphor-dbus-interfaces",
194            "openbmc/sdbusplus",
195            "openbmc/sdeventplus",
196        ],
197        build_type="meson",
198        config_flags=[
199            "-Dlibonly=true",
200            "-Dtests=disabled",
201            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
202        ],
203    ),
204    "openbmc/phosphor-objmgr": PackageDef(
205        depends=[
206            "CLIUtils/CLI11",
207            "boost",
208            "leethomason/tinyxml2",
209            "openbmc/phosphor-dbus-interfaces",
210            "openbmc/phosphor-logging",
211            "openbmc/sdbusplus",
212        ],
213        build_type="meson",
214        config_flags=[
215            "-Dtests=disabled",
216        ],
217    ),
218    "openbmc/pldm": PackageDef(
219        depends=[
220            "CLIUtils/CLI11",
221            "boost",
222            "nlohmann/json",
223            "openbmc/phosphor-dbus-interfaces",
224            "openbmc/phosphor-logging",
225            "openbmc/sdbusplus",
226            "openbmc/sdeventplus",
227        ],
228        build_type="meson",
229        config_flags=[
230            "-Dlibpldm-only=enabled",
231            "-Doem-ibm=enabled",
232            "-Dtests=disabled",
233        ],
234    ),
235    "openbmc/sdbusplus": PackageDef(
236        build_type="meson",
237        custom_post_dl=[
238            "cd tools",
239            f"./setup.py install --root=/ --prefix={prefix}",
240            "cd ..",
241        ],
242        config_flags=[
243            "-Dexamples=disabled",
244            "-Dtests=disabled",
245        ],
246    ),
247    "openbmc/sdeventplus": PackageDef(
248        depends=[
249            "Naios/function2",
250            "openbmc/stdplus",
251        ],
252        build_type="meson",
253        config_flags=[
254            "-Dexamples=false",
255            "-Dtests=disabled",
256        ],
257    ),
258    "openbmc/stdplus": PackageDef(
259        depends=[
260            "fmtlib/fmt",
261            "google/googletest",
262            "Naios/function2",
263        ],
264        build_type="meson",
265        config_flags=[
266            "-Dexamples=false",
267            "-Dtests=disabled",
268            "-Dgtest=enabled",
269        ],
270    ),
271}  # type: Dict[str, PackageDef]

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """ pkg - The name of this package (ex. foo/bar) """
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """ Thread 'run' function.  Builds the Docker stage. """

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which may be modified concurrently.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
411        """ Look up the HEAD for missing a static rev. """

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")

        # Find the branch matching {branch} (or fallback to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif "refs/heads/master" in line and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """ Create a name for the Docker stage associated with this pkg. """
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """ Get the URL for this package. """
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """ Formulate the command necessary to 'cd' into the source dir. """
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """ Formulate the dockerfile snippet necessary to COPY all depends. """

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """ Generate a timestamp for today using the ISO week. """
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
584        """ Generate a tag name for a package using a hash of the Dockerfile. """
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
596        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(f"Image {tag} already exists.  Skipping.", file=sys.stderr)
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test")
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture ({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)
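
# e.g. on aarch64 with the default distro, the base image below resolves to
# "arm64v8/ubuntu:kinetic" (illustrative).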

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )
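
# With http_proxy="http://proxy.example.com:3128" (illustrative), the docker
# build command receives:
#   --build-arg http_proxy=http://proxy.example.com:3128
#   --build-arg https_proxy=http://proxy.example.com:3128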

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-12 \
    g++-12 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev \
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

RUN npm install -g eslint@latest eslint-plugin-json@latest

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += f"""
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install packaging
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""

# Note, we use sha1s here because the newest gitlint release doesn't include
# some features we need.  Next time they release, we can rely on a direct
# release tag
dockerfile_base += f"""
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68\#subdirectory=gitlint-core
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)