#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch not
#                     provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy:       The HTTP address of the proxy server to connect to.
#                     Default: "", proxy is not set up if this is not set
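#
# Example invocation (illustrative):
#   DISTRO=ubuntu:kinetic FORCE_DOCKER_BUILD=1 python3 <this script>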

import os
import sys
import threading
from datetime import date
from hashlib import sha256

# typing.Dict is used for type-hints.
from typing import Any, Callable, Dict, Iterable, Optional  # noqa: F401

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except Exception:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs: Any) -> None:
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """Package Definition for packages dictionary."""

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.81.0",
        url=(
            lambda pkg, rev: f"https://boostorg.jfrog.io/artifactory/main/release/{rev}/source/{pkg}_{rev.replace('.', '_')}.tar.gz"  # noqa: E501
        ),
        build_type="custom",
        build_steps=[
            (
                "./bootstrap.sh"
                f" --prefix={prefix} --with-libraries=context,coroutine"
            ),
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            (
                f"ln -s {prefix}/include/nlohmann/json.hpp"
                f" {prefix}/include/json.hpp"
            ),
        ],
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-6.0 2022-11-28
    "openbmc/linux": PackageDef(
        rev="1b16243b004ce4d977a9f3b9d9e715cf5028f867",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/libpldm": PackageDef(
        build_type="meson",
        config_flags=[
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar)"""
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's definition in the packages dictionary.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no rev, fetch it from GitHub.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which another thread may be modifying.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """
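        # Illustrative output (sorted, trailing comma included):
        #   "CLIUtils/CLI11:v1.9.1,USCiLab/cereal:v1.3.2,boost:1.81.0,..."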

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """Look up the HEAD revision if a static rev is missing."""

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask GitHub for all the branches.
        lookup = git(
            "ls-remote", "--heads", f"https://github.com/{self.package}"
        )

        # Find the branch matching {branch} (or fall back to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif (
                "refs/heads/master" in line or "refs/heads/main" in line
            ) and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
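        # Illustrative mapping: "CLIUtils/CLI11" -> "cliutils-cli11".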
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """Get the URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""
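        # Illustrative result for a default GitHub ".tar.gz" archive URL:
        #   curl -L https://github.com/<org>/<repo>/archive/<rev>.tar.gz | tar -xz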

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
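        # GitHub archives unpack to "<repo>-<rev>/", so globbing on the repo
        # name (e.g. "cd sdbusplus*") lands in the source directory.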
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """Formulate the dockerfile snippet necessary to COPY all depends."""

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """
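        # Illustrative snippet emitted per package:
        #   COPY --from=<package tag> /usr/local /usr/local
        #   RUN true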

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """
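        # Illustrative result (one chained RUN command; bracketed parts are
        # optional):
        #   RUN <download> && <cd srcdir> && [post-dl steps &&]
        #       <build/install> [&& post-install steps]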

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """Generate a timestamp for today using the ISO week."""
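        # Illustrative result: "2022-W48" for a date in ISO week 48 of 2022.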
        today = date.today().isocalendar()
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """Generate a tag name for a package using a hash of the Dockerfile."""
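        # Illustrative result (the hex suffix is a placeholder):
        #   openbmc/ubuntu-unit-test-base:2022-W48-0123456789abcdef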
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'.
        """

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(
                    f"Image {tag} already exists.  Skipping.", file=sys.stderr
                )
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get(
    "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test"
)
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture ({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)
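
# Illustrative result with the default DISTRO: "ubuntu:kinetic" on x86_64,
# "arm64v8/ubuntu:kinetic" on aarch64.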

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME) \
        main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-updates \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-security \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-proposed \
            main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} \
        $(. /etc/os-release && echo $VERSION_CODENAME)-backports \
            main restricted universe multiverse" >> /etc/apt/sources.list
"""
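# Illustrative entry written to /etc/apt/sources.list (placeholder mirror URL,
# assuming codename "kinetic"):
#   deb http://mirror.example.com/ubuntu kinetic main restricted universe multiverse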

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' \
        /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    autoconf \
    autoconf-archive \
    bison \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    cmake \
    curl \
    dbus \
    device-tree-compiler \
    flex \
    g++-12 \
    gcc-12 \
    git \
    iproute2 \
    iputils-ping \
    libc6-dbg \
    libc6-dev \
    libconfig++-dev \
    libcryptsetup-dev \
    libdbus-1-dev \
    libevdev-dev \
    libgpiod-dev \
    libi2c-dev \
    libjpeg-dev \
    libjson-perl \
    libldap2-dev \
    libmimetic-dev \
    libnl-3-dev \
    libnl-genl-3-dev \
    libpam0g-dev \
    libpciaccess-dev \
    libperlio-gzip-perl \
    libpng-dev \
    libprotobuf-dev \
    libsnmp-dev \
    libssl-dev \
    libsystemd-dev \
    libtool \
    liburing2-dbgsym \
    liburing-dev \
    libxml2-utils \
    libxml-simple-perl \
    ninja-build \
    npm \
    pkg-config \
    protobuf-compiler \
    python3 \
    python3-dev \
    python3-git \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-socks \
    python3-yaml \
    rsync \
    shellcheck \
    sudo \
    systemd \
    valgrind \
    valgrind-dbg \
    vim \
    wget \
    xxd

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py \
        /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly and force us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += """
RUN pip3 install beautysh
RUN pip3 install black
RUN pip3 install codespell
RUN pip3 install flake8
RUN pip3 install gitlint
RUN pip3 install inflection
RUN pip3 install isort
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install protobuf
RUN pip3 install requests

RUN npm install -g \
        eslint@latest eslint-plugin-json@latest \
        markdownlint-cli@latest \
        prettier@latest
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || \
        useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)