#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME: <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-zero value will force all Docker
#                     images to be rebuilt rather than reusing caches.>
#   BUILD_URL:        <optional, used to detect running under CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch not
#                     provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of a mirror of Ubuntu to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used.
#   http_proxy:       <optional, the HTTP address of the proxy server to
#                     connect to>
#                     default is "", and no proxy is set up.

import os
import sys
import threading
from datetime import date
from hashlib import sha256
from typing import Any, Callable, Dict, Iterable, Optional

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """ Package Definition for packages dictionary. """

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.


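# As an illustration only (not one of the real entries below), a hypothetical
# definition such as:
#
#   "example/pkg": PackageDef(
#       rev="v1.0",
#       build_type="cmake",
#       config_flags=["-DBUILD_TESTING=OFF"],
#   ),
#
# would be fetched from the GitHub archive URL for example/pkg at v1.0 and
# built and installed under 'prefix' using the common cmake_flags defined
# below.
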
# Packages to include in image.
packages = {
    "boost": PackageDef(
        rev="1.80.0",
        url=(
            lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2"
        ),
        build_type="custom",
        build_steps=[
            f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine",
            "./b2",
            f"./b2 install --prefix={prefix}",
        ],
    ),
    "USCiLab/cereal": PackageDef(
        rev="v1.3.2",
        build_type="custom",
        build_steps=[f"cp -a include/cereal/ {prefix}/include/"],
    ),
    "danmar/cppcheck": PackageDef(
        rev="2.9",
        build_type="cmake",
    ),
    "CLIUtils/CLI11": PackageDef(
        rev="v1.9.1",
        build_type="cmake",
        config_flags=[
            "-DBUILD_TESTING=OFF",
            "-DCLI11_BUILD_DOCS=OFF",
            "-DCLI11_BUILD_EXAMPLES=OFF",
        ],
    ),
    "fmtlib/fmt": PackageDef(
        rev="9.1.0",
        build_type="cmake",
        config_flags=[
            "-DFMT_DOC=OFF",
            "-DFMT_TEST=OFF",
        ],
    ),
    "Naios/function2": PackageDef(
        rev="4.2.1",
        build_type="custom",
        build_steps=[
            f"mkdir {prefix}/include/function2",
            f"cp include/function2/function2.hpp {prefix}/include/function2/",
        ],
    ),
    # release-1.12.1
    "google/googletest": PackageDef(
        rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850",
        build_type="cmake",
        config_env=["CXXFLAGS=-std=c++20"],
        config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"],
    ),
    "nlohmann/json": PackageDef(
        rev="v3.11.2",
        build_type="cmake",
        config_flags=["-DJSON_BuildTests=OFF"],
        custom_post_install=[
            f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp",
        ],
    ),
    # Snapshot from 2019-05-24
    "linux-test-project/lcov": PackageDef(
        rev="v1.15",
        build_type="make",
    ),
    # dev-5.15 2022-09-27
    "openbmc/linux": PackageDef(
        rev="c9fb275212dac5b300311f6f6b1dcc5ed18a3493",
        build_type="custom",
        build_steps=[
            f"make -j{proc_count} defconfig",
            f"make INSTALL_HDR_PATH={prefix} headers_install",
        ],
    ),
    "LibVNC/libvncserver": PackageDef(
        rev="LibVNCServer-0.9.13",
        build_type="cmake",
    ),
    "leethomason/tinyxml2": PackageDef(
        rev="9.0.0",
        build_type="cmake",
    ),
    # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb
    "CPPAlliance/url": PackageDef(
        rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab",
        build_type="custom",
        build_steps=[f"cp -a include/** {prefix}/include/"],
    ),
    "tristanpenman/valijson": PackageDef(
        rev="v0.7",
        build_type="cmake",
        config_flags=[
            "-Dvalijson_BUILD_TESTS=0",
            "-Dvalijson_INSTALL_HEADERS=1",
        ],
    ),
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
        config_flags=["-Dgenerate_md=false"],
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibonly=true",
            "-Dtests=disabled",
            f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/libpldm": PackageDef(
        build_type="meson",
        config_flags=[
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=[
            "Naios/function2",
            "openbmc/stdplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=[
            "fmtlib/fmt",
            "google/googletest",
            "Naios/function2",
        ],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
            "-Dgtest=enabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)
cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)
meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)


class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """ pkg - The name of this package (ex. foo/bar) """
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's PackageDef.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """ Thread 'run' function.  Builds the Docker stage. """

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which could be being modified.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
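        # e.g. "CLIUtils/CLI11:v1.9.1,CPPAlliance/url:d740a92...," and so on,
        # with whatever revs were resolved at build time.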
        return depcache

    def _update_rev(self) -> None:
        """ Look up the HEAD rev for packages missing a static rev. """

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")

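        # Each line of 'ls-remote' output has the form
        # "<sha>\trefs/heads/<branch>", so split()[0] below is the commit sha.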
        # Find the branch matching {branch} (or fallback to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif (
                "refs/heads/master" in line or "refs/heads/main" in line
            ) and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """ Create a name for the Docker stage associated with this pkg. """
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """ Get the URL for this package. """
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack to source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

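        # e.g. for the default GitHub archive URL this yields:
        #   curl -L https://github.com/<org>/<repo>/archive/<rev>.tar.gz | tar -xz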
        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """ Formulate the command necessary to 'cd' into the source dir. """
        return f"cd {self.package.split('/')[-1]}*"

    def _df_copycmds(self) -> str:
        """ Formulate the dockerfile snippet necessary to COPY all depends. """

        if "depends" not in self.pkg_def:
            return ""
        return Package.df_copycmds_set(self.pkg_def["depends"])

    @staticmethod
    def df_copycmds_set(pkgs: Iterable[str]) -> str:
        """Formulate the Dockerfile snippet necessary to COPY a set of
        packages into a Docker stage.
        """

        copy_cmds = ""

        # Sort the packages for consistency.
        for p in sorted(pkgs):
            tag = Package.packages[p]["__tag"]
            copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n"
            # Workaround for upstream docker bug and multiple COPY cmds
            # https://github.com/moby/moby/issues/37965
            copy_cmds += "RUN true\n"

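        # The snippet emitted for each dependency is therefore of the form:
        #   COPY --from=<docker_image_name>-<stage>:<tag> /usr/local /usr/local
        #   RUN true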
        return copy_cmds

    def _df_build(self) -> str:
        """Formulate the Dockerfile snippet necessary to download, build, and
        install a package into a Docker stage.
        """

        # Download and extract source.
        result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && "

        # Handle 'custom_post_dl' commands.
        custom_post_dl = self.pkg_def.get("custom_post_dl")
        if custom_post_dl:
            result += " && ".join(custom_post_dl) + " && "

        # Build and install package based on 'build_type'.
        build_type = self.pkg_def["build_type"]
        if build_type == "autoconf":
            result += self._cmd_build_autoconf()
        elif build_type == "cmake":
            result += self._cmd_build_cmake()
        elif build_type == "custom":
            result += self._cmd_build_custom()
        elif build_type == "make":
            result += self._cmd_build_make()
        elif build_type == "meson":
            result += self._cmd_build_meson()
        else:
            raise NotImplementedError(
                f"Unhandled build type for {self.package}: {build_type}"
            )

        # Handle 'custom_post_install' commands.
        custom_post_install = self.pkg_def.get("custom_post_install")
        if custom_post_install:
            result += " && " + " && ".join(custom_post_install)

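        # A fully rendered example for a hypothetical meson package is a
        # single RUN line of the form (wrapped here for readability):
        #   RUN curl -L <url> | tar -xz && cd <pkg>* &&
        #       meson builddir --wrap-mode=nodownload -Dprefix=/usr/local &&
        #       ninja -C builddir && ninja -C builddir install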
        return result

    def _cmd_build_autoconf(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "./bootstrap.sh && "
        result += f"{env} ./configure {configure_flags} {options} && "
        result += f"make -j{proc_count} && make install"
        return result

    def _cmd_build_cmake(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = "mkdir builddir && cd builddir && "
        result += f"{env} cmake {cmake_flags} {options} .. && "
        result += "cmake --build . --target all && "
        result += "cmake --build . --target install && "
        result += "cd .."
        return result

    def _cmd_build_custom(self) -> str:
        return " && ".join(self.pkg_def.get("build_steps", []))

    def _cmd_build_make(self) -> str:
        return f"make -j{proc_count} && make install"

    def _cmd_build_meson(self) -> str:
        options = " ".join(self.pkg_def.get("config_flags", []))
        env = " ".join(self.pkg_def.get("config_env", []))
        result = f"{env} meson builddir {meson_flags} {options} && "
        result += "ninja -C builddir && ninja -C builddir install"
        return result


class Docker:
    """Class to assist with Docker interactions.  All methods are static."""

    @staticmethod
    def timestamp() -> str:
        """ Generate a timestamp for today using the ISO week. """
        today = date.today().isocalendar()
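        # e.g. date(2022, 9, 28).isocalendar() == (2022, 39, 3),
        # which renders as "2022-W39".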
        return f"{today[0]}-W{today[1]:02}"

    @staticmethod
    def tagname(pkgname: Optional[str], dockerfile: str) -> str:
        """ Generate a tag name for a package using a hash of the Dockerfile. """
        result = docker_image_name
        if pkgname:
            result += "-" + pkgname

        result += ":" + Docker.timestamp()
        result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16]

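        # e.g. "openbmc/ubuntu-unit-test-boost:2022-W39-0123456789abcdef"
        # (hash value illustrative only).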
        return result

    @staticmethod
    def build(pkg: str, tag: str, dockerfile: str) -> None:
        """Build a docker image using the Dockerfile and tagging it with 'tag'."""

        # If we're not forcing builds, check if it already exists and skip.
        if not force_build:
            if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'):
                print(f"Image {tag} already exists.  Skipping.", file=sys.stderr)
                return

        # Build it.
        #   Capture the output of the 'docker build' command and send it to
        #   stderr (prefixed with the package name).  This allows us to see
        #   progress but not pollute stdout.  Later on we output the final
        #   docker tag to stdout and we want to keep that pristine.
        #
        #   Other unusual flags:
        #       --no-cache: Bypass the Docker cache if 'force_build'.
        #       --force-rm: Clean up Docker processes if they fail.
        docker.build(
            proxy_args,
            "--network=host",
            "--force-rm",
            "--no-cache=true" if force_build else "--no-cache=false",
            "-t",
            tag,
            "-",
            _in=dockerfile,
            _out=(
                lambda line: print(
                    pkg + ":", line, end="", file=sys.stderr, flush=True
                )
            ),
        )


# Read a bunch of environment variables.
docker_image_name = os.environ.get("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test")
force_build = os.environ.get("FORCE_DOCKER_BUILD")
is_automated_ci_build = os.environ.get("BUILD_URL", False)
distro = os.environ.get("DISTRO", "ubuntu:kinetic")
branch = os.environ.get("BRANCH", "master")
ubuntu_mirror = os.environ.get("UBUNTU_MIRROR")
http_proxy = os.environ.get("http_proxy")

gerrit_project = os.environ.get("GERRIT_PROJECT")
gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION")

# Set up some common variables.
username = os.environ.get("USER", "root")
homedir = os.environ.get("HOME", "/root")
gid = os.getgid()
uid = os.getuid()

# Use well-known constants if user is root
if username == "root":
    homedir = "/root"
    gid = 0
    uid = 0

# Determine the architecture for Docker.
arch = uname("-m").strip()
if arch == "ppc64le":
    docker_base = "ppc64le/"
elif arch == "x86_64":
    docker_base = ""
elif arch == "aarch64":
    docker_base = "arm64v8/"
else:
    print(
        f"Unsupported system architecture({arch}) found for docker image",
        file=sys.stderr,
    )
    sys.exit(1)

# Special flags if setting up a deb mirror.
mirror = ""
if "ubuntu" in distro and ubuntu_mirror:
    mirror = f"""
RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\
    echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list
"""

# Special flags for proxying.
proxy_cmd = ""
proxy_keyserver = ""
proxy_args = []
if http_proxy:
    proxy_cmd = f"""
RUN echo "[http]" >> {homedir}/.gitconfig && \
    echo "proxy = {http_proxy}" >> {homedir}/.gitconfig
"""
    proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}"

    proxy_args.extend(
        [
            "--build-arg",
            f"http_proxy={http_proxy}",
            "--build-arg",
            f"https_proxy={http_proxy}",
        ]
    )

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring.  When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-12 \
    g++-12 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev \
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-15 \
    clang-format-15 \
    clang-tidy-15 \
    clang-tools-15 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing2-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

RUN npm install -g eslint@latest eslint-plugin-json@latest

# Kinetic comes with GCC-12, so skip this.
#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \
#  --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
#  --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \
#  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \
#  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12
#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-15 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15

"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly, forcing us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += """
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.63.0
RUN pip3 install packaging
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""

# Note, we use sha1s here because the newest gitlint release doesn't include
# some features we need.  Next time they release, we can rely on a direct
# release tag
dockerfile_base += """
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68\#subdirectory=gitlint-core
RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68
"""

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)