#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which is used if the input branch is
#                     not provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy        The HTTP address of the proxy server to connect to.
#                     Default: "", no proxy is set up if this is not set.
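#
# Example invocation (illustrative; "build-unit-test-docker" is a
# hypothetical name for this script):
#   FORCE_DOCKER_BUILD=1 DISTRO=ubuntu:kinetic ./build-unit-test-docker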

import os
import sys
import threading
from datetime import date
from hashlib import sha256
from typing import Any, Callable, Dict, Iterable, Optional

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except Exception:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs: Any) -> None:
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """Package Definition for packages dictionary."""

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.80.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
        ],
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-6.0 2022-11-28
    "openbmc/linux": PackageDef(
        rev="1b16243b004ce4d977a9f3b9d9e715cf5028f867",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
    "CPPAlliance/url": PackageDef(
        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
        build_type="custom",
        build_steps=[f"cp -a include/** {prefix}/include/"],
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/libpldm": PackageDef(
        build_type="meson",
        config_flags=[
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar)"""
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's definition in the 'packages' dictionary.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no static rev, fetch it from GitHub.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which may be modified concurrently.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""
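        # For illustration, a generated stage Dockerfile looks roughly like:
        #   FROM <base image tag>
        #   COPY --from=<dependency stage tag> /usr/local /usr/local
        #   RUN true
        #   RUN curl -L <url> | tar -xz && cd <srcdir>* && <build commands>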

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """
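        # For illustration, the result looks something like:
        #   "CLIUtils/CLI11:v1.9.1,CPPAlliance/url:<rev>,...,boost:1.80.0,..."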

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """Look up the HEAD revision if a static rev is missing."""

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask GitHub for all the branches.
        lookup = git(
            "ls-remote", "--heads", f"https://github.com/{self.package}"
        )
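        # Each line of 'ls-remote' output has the form "<sha>\trefs/heads/<name>",
        # so the first field of a matching line is the revision we want.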

        # Find the branch matching {branch} (or fall back to master/main).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif (
                "refs/heads/master" in line or "refs/heads/main" in line
            ) and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
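        # e.g. "openbmc/sdbusplus" becomes the stage name "openbmc-sdbusplus".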
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """Get the URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the GitHub archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""
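        # The generated command looks like (illustrative):
        #   curl -L https://github.com/<pkg>/archive/<rev>.tar.gz | tar -xz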

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
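        # GitHub archives extract to "<repo>-<rev>", so e.g. for
        # "openbmc/sdbusplus" this generates "cd sdbusplus*".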
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """Formulate the Dockerfile snippet necessary to COPY all depends."""

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """
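        # The result is a single RUN instruction of the form (illustrative):
        #   RUN <download> && <cd srcdir> [&& <post_dl>] && <build/install>
        #       [&& <post_install>]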

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """Generate a timestamp for today using the ISO week."""
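        # e.g. "2022-W47" for a date in ISO week 47 of 2022.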
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile."""
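        # Resulting tags look like (illustrative):
        #   openbmc/ubuntu-unit-test-boost:2022-W47-<16 hex chars of sha256>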
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image from the Dockerfile and tag it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(
                    f"Image {tag} already exists.  Skipping.", file=sys.stderr
                )
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get(
    "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test"
)
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture ({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-12 \
    g++-12 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev \
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

RUN npm install -g eslint@latest eslint-plugin-json@latest

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force
# us to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += """
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install packaging
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
RUN pip3 install gitlint
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)