#!/usr/bin/env python3
#
# Build the required docker image to run package unit tests
#
# Script Variables:
#   DOCKER_IMAGE_NAME:  <optional, the name of the docker image to generate>
#                     default is openbmc/ubuntu-unit-test
#   DISTRO:           <optional, the distro to build a docker image against>
#   FORCE_DOCKER_BUILD: <optional, a non-empty value forces all Docker images
#                     to be rebuilt rather than reusing caches>
#   BUILD_URL:        <optional, used to detect running under a CI context
#                     (ex. Jenkins)>
#   BRANCH:           <optional, branch to build from each of the openbmc/
#                     repositories>
#                     default is master, which will be used if input branch
#                     is not provided or not found
#   UBUNTU_MIRROR:    <optional, the URL of an Ubuntu mirror to override the
#                     default ones in /etc/apt/sources.list>
#                     default is empty, and no mirror is used
#   http_proxy        The HTTP address of the proxy server to connect to.
#                     Default: "", proxy is not set up if this is not set.

import os
import sys
import threading
from datetime import date
from hashlib import sha256
from typing import Any, Callable, Dict, Iterable, Optional

from sh import docker, git, nproc, uname  # type: ignore

try:
    # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'.
    from typing import TypedDict
except ImportError:

    class TypedDict(dict):  # type: ignore
        # We need to do this to eat the 'total' argument.
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__()


# Declare some variables used in package definitions.
prefix = "/usr/local"
proc_count = nproc().strip()


class PackageDef(TypedDict, total=False):
    """Package Definition for packages dictionary."""

    # rev [optional]: Revision of package to use.
    rev: str
    # url [optional]: lambda function to create URL: (package, rev) -> url.
    url: Callable[[str, str], str]
    # depends [optional]: List of package dependencies.
    depends: Iterable[str]
    # build_type [required]: Build type used for package.
    #   Currently supported: autoconf, cmake, custom, make, meson
    build_type: str
    # build_steps [optional]: Steps to run for 'custom' build_type.
    build_steps: Iterable[str]
    # config_flags [optional]: List of options to pass configuration tool.
    config_flags: Iterable[str]
    # config_env [optional]: List of environment variables to set for config.
    config_env: Iterable[str]
    # custom_post_dl [optional]: List of steps to run after download, but
    #   before config / build / install.
    custom_post_dl: Iterable[str]
    # custom_post_install [optional]: List of steps to run after install.
    custom_post_install: Iterable[str]

    # __tag [private]: Generated Docker tag name for package stage.
    __tag: str
    # __package [private]: Package object associated with this package.
    __package: Any  # Type is Package, but not defined yet.
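
# For reference, a minimal entry (hypothetical; not part of the image) would
# look like the following.  It produces one Docker stage that downloads the
# default GitHub archive for the pinned rev and runs a meson build:
#
#   "example/project": PackageDef(
#       rev="v1.0.0",
#       build_type="meson",
#   ),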
"openssl/openssl": PackageDef( rev="openssl-3.0.1", build_type="custom", build_steps=[ f"./Configure --prefix={prefix} --libdir=lib", f"make -j{proc_count}", f"make -j{proc_count} install" ], ), "boost": PackageDef( rev="1.78.0", url=( lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2" ), build_type="custom", build_steps=[ f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine", "./b2", f"./b2 install --prefix={prefix}", ], ), "USCiLab/cereal": PackageDef( rev="3e4d1b84cab4891368d2179a61a7ba06a5693e7f", build_type="custom", build_steps=[f"cp -a include/cereal/ {prefix}/include/"], ), "catchorg/Catch2": PackageDef( rev="v2.13.6", build_type="cmake", config_flags=["-DBUILD_TESTING=OFF", "-DCATCH_INSTALL_DOCS=OFF"], ), "CLIUtils/CLI11": PackageDef( rev="v1.9.1", build_type="cmake", config_flags=[ "-DBUILD_TESTING=OFF", "-DCLI11_BUILD_DOCS=OFF", "-DCLI11_BUILD_EXAMPLES=OFF", ], ), "fmtlib/fmt": PackageDef( rev="8.1.1", build_type="cmake", config_flags=[ "-DFMT_DOC=OFF", "-DFMT_TEST=OFF", ], ), "Naios/function2": PackageDef( rev="4.1.0", build_type="custom", build_steps=[ f"mkdir {prefix}/include/function2", f"cp include/function2/function2.hpp {prefix}/include/function2/", ], ), # Release 2021-06-12 "google/googletest": PackageDef( rev="9e712372214d75bb30ec2847a44bf124d48096f3", build_type="cmake", config_env=["CXXFLAGS=-std=c++20"], config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"], ), # Release 2020-08-06 "nlohmann/json": PackageDef( rev="v3.10.4", build_type="cmake", config_flags=["-DJSON_BuildTests=OFF"], custom_post_install=[ f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp", ], ), # Snapshot from 2019-05-24 "linux-test-project/lcov": PackageDef( rev="v1.15", build_type="make", ), # dev-5.8 2021-01-11 "openbmc/linux": PackageDef( rev="3cc95ae40716e56f81b69615781f54c78079042d", build_type="custom", build_steps=[ f"make -j{proc_count} defconfig", f"make INSTALL_HDR_PATH={prefix} headers_install", ], ), # Snapshot from 2020-06-13 "LibVNC/libvncserver": PackageDef( rev="LibVNCServer-0.9.13", build_type="cmake", ), "martinmoene/span-lite": PackageDef( rev="v0.9.2", build_type="cmake", config_flags=[ "-DSPAN_LITE_OPT_BUILD_TESTS=OFF", ], ), # version from meta-openembedded/meta-oe/recipes-support/libtinyxml2/libtinyxml2_8.0.0.bb "leethomason/tinyxml2": PackageDef( rev="8.0.0", build_type="cmake", ), # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb "CPPAlliance/url": PackageDef( rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab", build_type="custom", build_steps=[f"cp -a include/** {prefix}/include/"], ), # version from meta-openembedded/meta-oe/dynamic-layers/networking-layer/recipes-devools/valijson/valijson_0.6.bb "tristanpenman/valijson": PackageDef( rev="v0.6", build_type="cmake", config_flags=[ "-Dvalijson_BUILD_TESTS=0", "-Dvalijson_INSTALL_HEADERS=1", ], ), # version from meta-openembedded/meta-oe/recipes-devtools/nlohmann-fifo/nlohmann-fifo_git.bb "nlohmann/fifo_map": PackageDef( rev="0dfbf5dacbb15a32c43f912a7e66a54aae39d0f9", build_type="custom", build_steps=[f"cp src/fifo_map.hpp {prefix}/include/"], ), # version from meta-openembedded/meta-oe/recipes-devtools/unifex/unifex_git.bb "facebookexperimental/libunifex": PackageDef( rev="9df21c58d34ce8a1cd3b15c3a7347495e29417a0", build_type="cmake", config_flags=[ "-DBUILD_SHARED_LIBS=ON", "-DBUILD_TESTING=OFF", "-DCMAKE_CXX_STANDARD=20", "-DUNIFEX_BUILD_EXAMPLES=OFF", ], ), "open-power/pdbg": 
    "open-power/pdbg": PackageDef(build_type="autoconf"),
    "openbmc/gpioplus": PackageDef(
        depends=["openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/phosphor-dbus-interfaces": PackageDef(
        depends=["openbmc/sdbusplus"],
        build_type="meson",
    ),
    "openbmc/phosphor-logging": PackageDef(
        depends=[
            "USCiLab/cereal",
            "nlohmann/fifo_map",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            f"-Dyaml_dir={prefix}/share/phosphor-dbus-yaml/yaml",
        ],
    ),
    "openbmc/phosphor-objmgr": PackageDef(
        depends=[
            "boost",
            "leethomason/tinyxml2",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dtests=disabled",
        ],
    ),
    "openbmc/pldm": PackageDef(
        depends=[
            "CLIUtils/CLI11",
            "boost",
            "nlohmann/json",
            "openbmc/phosphor-dbus-interfaces",
            "openbmc/phosphor-logging",
            "openbmc/sdbusplus",
            "openbmc/sdeventplus",
        ],
        build_type="meson",
        config_flags=[
            "-Dlibpldm-only=enabled",
            "-Doem-ibm=enabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdbusplus": PackageDef(
        depends=[
            "facebookexperimental/libunifex",
        ],
        build_type="meson",
        custom_post_dl=[
            "cd tools",
            f"./setup.py install --root=/ --prefix={prefix}",
            "cd ..",
        ],
        config_flags=[
            "-Dexamples=disabled",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/sdeventplus": PackageDef(
        depends=["Naios/function2", "openbmc/stdplus"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
    "openbmc/stdplus": PackageDef(
        depends=["fmtlib/fmt", "martinmoene/span-lite"],
        build_type="meson",
        config_flags=[
            "-Dexamples=false",
            "-Dtests=disabled",
        ],
    ),
}  # type: Dict[str, PackageDef]

# Define common flags used for builds.
configure_flags = " ".join(
    [
        f"--prefix={prefix}",
    ]
)

cmake_flags = " ".join(
    [
        "-DBUILD_SHARED_LIBS=ON",
        "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
        f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}",
        "-GNinja",
        "-DCMAKE_MAKE_PROGRAM=ninja",
    ]
)

meson_flags = " ".join(
    [
        "--wrap-mode=nodownload",
        f"-Dprefix={prefix}",
    ]
)
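
# As an illustration, Package._cmd_build_meson() below combines these common
# flags with a package's own config_flags; for openbmc/stdplus it produces
# roughly:
#   meson builddir --wrap-mode=nodownload -Dprefix=/usr/local \
#       -Dexamples=false -Dtests=disabled \
#       && ninja -C builddir && ninja -C builddir install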

class Package(threading.Thread):
    """Class used to build the Docker stages for each package.

    Generally, this class should not be instantiated directly but through
    Package.generate_all().
    """

    # Copy the packages dictionary.
    packages = packages.copy()

    # Lock used for thread-safety.
    lock = threading.Lock()

    def __init__(self, pkg: str):
        """pkg - The name of this package (ex. foo/bar )"""
        super(Package, self).__init__()

        self.package = pkg
        self.exception = None  # type: Optional[Exception]

        # Reference to this package's entry in the packages dictionary.
        self.pkg_def = Package.packages[pkg]
        self.pkg_def["__package"] = self

    def run(self) -> None:
        """Thread 'run' function.  Builds the Docker stage."""

        # In case this package has no rev, fetch it from Github.
        self._update_rev()

        # Find all the Package objects that this package depends on.
        #   This section is locked because we are looking into another
        #   package's PackageDef dict, which could be being modified.
        Package.lock.acquire()
        deps: Iterable[Package] = [
            Package.packages[deppkg]["__package"]
            for deppkg in self.pkg_def.get("depends", [])
        ]
        Package.lock.release()

        # Wait until all the depends finish building.  We need them complete
        # for the "COPY" commands.
        for deppkg in deps:
            deppkg.join()

        # Generate this package's Dockerfile.
        dockerfile = f"""
FROM {docker_base_img_name}
{self._df_copycmds()}
{self._df_build()}
"""

        # Generate the resulting tag name and save it to the PackageDef.
        #   This section is locked because we are modifying the PackageDef,
        #   which can be accessed by other threads.
        Package.lock.acquire()
        tag = Docker.tagname(self._stagename(), dockerfile)
        self.pkg_def["__tag"] = tag
        Package.lock.release()

        # Do the build / save any exceptions.
        try:
            Docker.build(self.package, tag, dockerfile)
        except Exception as e:
            self.exception = e

    @classmethod
    def generate_all(cls) -> None:
        """Ensure a Docker stage is created for all defined packages.

        These are done in parallel but with appropriate blocking per
        package 'depends' specifications.
        """

        # Create a Package for each defined package.
        pkg_threads = [Package(p) for p in cls.packages.keys()]

        # Start building them all.
        #   This section is locked because threads depend on each other,
        #   based on the packages, and they cannot 'join' on a thread
        #   which is not yet started.  Adding a lock here allows all the
        #   threads to start before they 'join' their dependencies.
        Package.lock.acquire()
        for t in pkg_threads:
            t.start()
        Package.lock.release()

        # Wait for completion.
        for t in pkg_threads:
            t.join()
            # Check if the thread saved off its own exception.
            if t.exception:
                print(f"Package {t.package} failed!", file=sys.stderr)
                raise t.exception

    @staticmethod
    def df_all_copycmds() -> str:
        """Formulate the Dockerfile snippet necessary to copy all packages
        into the final image.
        """
        return Package.df_copycmds_set(Package.packages.keys())

    @classmethod
    def depcache(cls) -> str:
        """Create the contents of the '/tmp/depcache'.
        This file is a comma-separated list of "<pkg>:<rev>".
        """

        # This needs to be sorted for consistency.
        depcache = ""
        for pkg in sorted(cls.packages.keys()):
            depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"])
        return depcache

    def _update_rev(self) -> None:
        """Look up the HEAD revision for any package missing a static rev."""

        if "rev" in self.pkg_def:
            return

        # Check if Jenkins/Gerrit gave us a revision and use it.
        if gerrit_project == self.package and gerrit_rev:
            print(
                f"Found Gerrit revision for {self.package}: {gerrit_rev}",
                file=sys.stderr,
            )
            self.pkg_def["rev"] = gerrit_rev
            return

        # Ask Github for all the branches.
        lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}")

        # Find the branch matching {branch} (or fall back to master).
        #   This section is locked because we are modifying the PackageDef.
        Package.lock.acquire()
        for line in lookup.split("\n"):
            if f"refs/heads/{branch}" in line:
                self.pkg_def["rev"] = line.split()[0]
            elif "refs/heads/master" in line and "rev" not in self.pkg_def:
                self.pkg_def["rev"] = line.split()[0]
        Package.lock.release()

    def _stagename(self) -> str:
        """Create a name for the Docker stage associated with this pkg."""
        return self.package.replace("/", "-").lower()

    def _url(self) -> str:
        """Get the URL for this package."""
        rev = self.pkg_def["rev"]

        # If the lambda exists, call it.
        if "url" in self.pkg_def:
            return self.pkg_def["url"](self.package, rev)

        # Default to the github archive URL.
        return f"https://github.com/{self.package}/archive/{rev}.tar.gz"

    def _cmd_download(self) -> str:
        """Formulate the command necessary to download and unpack the source."""

        url = self._url()
        if ".tar." not in url:
            raise NotImplementedError(
                f"Unhandled download type for {self.package}: {url}"
            )

        cmd = f"curl -L {url} | tar -x"

        if url.endswith(".bz2"):
            cmd += "j"
        elif url.endswith(".gz"):
            cmd += "z"
        else:
            raise NotImplementedError(
                f"Unknown tar flags needed for {self.package}: {url}"
            )

        return cmd

    def _cmd_cd_srcdir(self) -> str:
        """Formulate the command necessary to 'cd' into the source dir."""
        return f"cd {self.package.split('/')[-1]}*"
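
    # For example, _cmd_download() for "catchorg/Catch2" at its pinned rev
    # "v2.13.6" yields:
    #   curl -L https://github.com/catchorg/Catch2/archive/v2.13.6.tar.gz | tar -xz
    # and _cmd_cd_srcdir() then runs "cd Catch2*" inside the stage.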
""" if "depends" not in self.pkg_def: return "" return Package.df_copycmds_set(self.pkg_def["depends"]) @staticmethod def df_copycmds_set(pkgs: Iterable[str]) -> str: """Formulate the Dockerfile snippet necessary to COPY a set of packages into a Docker stage. """ copy_cmds = "" # Sort the packages for consistency. for p in sorted(pkgs): tag = Package.packages[p]["__tag"] copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n" # Workaround for upstream docker bug and multiple COPY cmds # https://github.com/moby/moby/issues/37965 copy_cmds += "RUN true\n" return copy_cmds def _df_build(self) -> str: """Formulate the Dockerfile snippet necessary to download, build, and install a package into a Docker stage. """ # Download and extract source. result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && " # Handle 'custom_post_dl' commands. custom_post_dl = self.pkg_def.get("custom_post_dl") if custom_post_dl: result += " && ".join(custom_post_dl) + " && " # Build and install package based on 'build_type'. build_type = self.pkg_def["build_type"] if build_type == "autoconf": result += self._cmd_build_autoconf() elif build_type == "cmake": result += self._cmd_build_cmake() elif build_type == "custom": result += self._cmd_build_custom() elif build_type == "make": result += self._cmd_build_make() elif build_type == "meson": result += self._cmd_build_meson() else: raise NotImplementedError( f"Unhandled build type for {self.package}: {build_type}" ) # Handle 'custom_post_install' commands. custom_post_install = self.pkg_def.get("custom_post_install") if custom_post_install: result += " && " + " && ".join(custom_post_install) return result def _cmd_build_autoconf(self) -> str: options = " ".join(self.pkg_def.get("config_flags", [])) env = " ".join(self.pkg_def.get("config_env", [])) result = "./bootstrap.sh && " result += f"{env} ./configure {configure_flags} {options} && " result += f"make -j{proc_count} && make install" return result def _cmd_build_cmake(self) -> str: options = " ".join(self.pkg_def.get("config_flags", [])) env = " ".join(self.pkg_def.get("config_env", [])) result = "mkdir builddir && cd builddir && " result += f"{env} cmake {cmake_flags} {options} .. && " result += "cmake --build . --target all && " result += "cmake --build . --target install && " result += "cd .." return result def _cmd_build_custom(self) -> str: return " && ".join(self.pkg_def.get("build_steps", [])) def _cmd_build_make(self) -> str: return f"make -j{proc_count} && make install" def _cmd_build_meson(self) -> str: options = " ".join(self.pkg_def.get("config_flags", [])) env = " ".join(self.pkg_def.get("config_env", [])) result = f"{env} meson builddir {meson_flags} {options} && " result += "ninja -C builddir && ninja -C builddir install" return result class Docker: """Class to assist with Docker interactions. All methods are static.""" @staticmethod def timestamp() -> str: """ Generate a timestamp for today using the ISO week. """ today = date.today().isocalendar() return f"{today[0]}-W{today[1]:02}" @staticmethod def tagname(pkgname: str, dockerfile: str) -> str: """ Generate a tag name for a package using a hash of the Dockerfile. 
""" result = docker_image_name if pkgname: result += "-" + pkgname result += ":" + Docker.timestamp() result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16] return result @staticmethod def build(pkg: str, tag: str, dockerfile: str) -> None: """Build a docker image using the Dockerfile and tagging it with 'tag'.""" # If we're not forcing builds, check if it already exists and skip. if not force_build: if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'): print(f"Image {tag} already exists. Skipping.", file=sys.stderr) return # Build it. # Capture the output of the 'docker build' command and send it to # stderr (prefixed with the package name). This allows us to see # progress but not polute stdout. Later on we output the final # docker tag to stdout and we want to keep that pristine. # # Other unusual flags: # --no-cache: Bypass the Docker cache if 'force_build'. # --force-rm: Clean up Docker processes if they fail. docker.build( proxy_args, "--network=host", "--force-rm", "--no-cache=true" if force_build else "--no-cache=false", "-t", tag, "-", _in=dockerfile, _out=( lambda line: print( pkg + ":", line, end="", file=sys.stderr, flush=True ) ), ) # Read a bunch of environment variables. docker_image_name = os.environ.get("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test") force_build = os.environ.get("FORCE_DOCKER_BUILD") is_automated_ci_build = os.environ.get("BUILD_URL", False) distro = os.environ.get("DISTRO", "ubuntu:impish") branch = os.environ.get("BRANCH", "master") ubuntu_mirror = os.environ.get("UBUNTU_MIRROR") http_proxy = os.environ.get("http_proxy") gerrit_project = os.environ.get("GERRIT_PROJECT") gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION") # Set up some common variables. username = os.environ.get("USER", "root") homedir = os.environ.get("HOME", "/root") gid = os.getgid() uid = os.getuid() # Use well-known constants if user is root if username == "root": homedir = "/root" gid = 0 uid = 0 # Determine the architecture for Docker. arch = uname("-m").strip() if arch == "ppc64le": docker_base = "ppc64le/" elif arch == "x86_64": docker_base = "" elif arch == "aarch64": docker_base = "arm64v8/" else: print( f"Unsupported system architecture({arch}) found for docker image", file=sys.stderr, ) sys.exit(1) # Special flags if setting up a deb mirror. mirror = "" if "ubuntu" in distro and ubuntu_mirror: mirror = f""" RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\ echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\ echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\ echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\ echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list """ # Special flags for proxying. proxy_cmd = "" proxy_keyserver = "" proxy_args = [] if http_proxy: proxy_cmd = f""" RUN echo "[http]" >> {homedir}/.gitconfig && \ echo "proxy = {http_proxy}" >> {homedir}/.gitconfig """ proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}" proxy_args.extend( [ "--build-arg", f"http_proxy={http_proxy}", "--build-arg", f"https_proxy={http_proxy}", ] ) # Create base Dockerfile. 

# Create base Dockerfile.
dockerfile_base = f"""
FROM {docker_base}{distro}

{mirror}

ENV DEBIAN_FRONTEND noninteractive

ENV PYTHONPATH "/usr/local/lib/python3.9/site-packages/"

# Sometimes the ubuntu key expires and we need a way to force an execution
# of the apt-get commands for the dbgsym-keyring. When this happens we see
# an error like: "Release: The following signatures were invalid:"
# Insert a bogus echo that we can change here when we get this error to force
# the update.
RUN echo "ubuntu keyserver rev as of 2021-04-21"

# We need the keys to be imported for dbgsym repos
# New releases have a package, older ones fall back to manual fetching
# https://wiki.ubuntu.com/Debug%20Symbol%20Packages
RUN apt-get update && apt-get dist-upgrade -yy && \
    ( apt-get install gpgv ubuntu-dbgsym-keyring || \
        ( apt-get install -yy dirmngr && \
          apt-key adv --keyserver keyserver.ubuntu.com \
                      {proxy_keyserver} \
                      --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) )

# Parse the current repo list into a debug repo list
RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list

# Remove non-existent debug repos
RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list

RUN cat /etc/apt/sources.list.d/debug.list

RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \
    gcc-11 \
    g++-11 \
    libc6-dbg \
    libc6-dev \
    libtool \
    bison \
    libdbus-1-dev \
    flex \
    cmake \
    python3 \
    python3-dev \
    python3-yaml \
    python3-mako \
    python3-pip \
    python3-setuptools \
    python3-git \
    python3-socks \
    pkg-config \
    autoconf \
    autoconf-archive \
    libsystemd-dev \
    systemd \
    libssl-dev \
    libevdev-dev \
    libjpeg-dev \
    libpng-dev \
    ninja-build \
    sudo \
    curl \
    git \
    dbus \
    iputils-ping \
    clang-13 \
    clang-format-13 \
    clang-tidy-13 \
    clang-tools-13 \
    shellcheck \
    npm \
    iproute2 \
    libnl-3-dev \
    libnl-genl-3-dev \
    libconfig++-dev \
    libsnmp-dev \
    valgrind \
    valgrind-dbg \
    libpam0g-dev \
    xxd \
    libi2c-dev \
    wget \
    libldap2-dev \
    libprotobuf-dev \
    liburing-dev \
    liburing1-dbgsym \
    libperlio-gzip-perl \
    libjson-perl \
    protobuf-compiler \
    libgpiod-dev \
    device-tree-compiler \
    cppcheck \
    libpciaccess-dev \
    libmimetic-dev \
    libxml2-utils \
    libxml-simple-perl \
    rsync \
    libcryptsetup-dev

RUN npm install -g eslint@latest eslint-plugin-json@latest

RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 11 \
  --slave /usr/bin/g++ g++ /usr/bin/g++-11 \
  --slave /usr/bin/gcov gcov /usr/bin/gcov-11 \
  --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-11 \
  --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-11

RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-13 1000 \
  --slave /usr/bin/clang++ clang++ /usr/bin/clang++-13 \
  --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-13 \
  --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-13 \
  --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-13 \
  --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-13
"""

if is_automated_ci_build:
    dockerfile_base += f"""
# Run an arbitrary command to pollute the docker cache regularly, forcing us
# to re-run `apt-get update` daily.
RUN echo {Docker.timestamp()}
RUN apt-get update && apt-get dist-upgrade -yy

"""

dockerfile_base += f"""
RUN pip3 install inflection
RUN pip3 install pycodestyle
RUN pip3 install jsonschema
RUN pip3 install meson==0.58.1
RUN pip3 install protobuf
RUN pip3 install codespell
RUN pip3 install requests
"""
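
# Image build flow: the base image below is built first; Package.generate_all()
# then builds one Docker stage per package in parallel (ordered by 'depends'),
# and a final image COPYs every package's /usr/local on top of the base.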

# Build the base and stage docker images.
docker_base_img_name = Docker.tagname("base", dockerfile_base)
Docker.build("base", docker_base_img_name, dockerfile_base)
Package.generate_all()

# Create the final Dockerfile.
dockerfile = f"""
# Build the final output image
FROM {docker_base_img_name}
{Package.df_all_copycmds()}

# Some of our infrastructure still relies on the presence of this file
# even though it is no longer needed to rebuild the docker environment
# NOTE: The file is sorted to ensure the ordering is stable.
RUN echo '{Package.depcache()}' > /tmp/depcache

# Final configuration for the workspace
RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username}
RUN mkdir -p "{os.path.dirname(homedir)}"
RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username}
RUN sed -i '1iDefaults umask=000' /etc/sudoers
RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers

# Ensure user has ability to write to /usr/local for different tool
# and data installs
RUN chown -R {username}:{username} /usr/local/share

{proxy_cmd}

RUN /bin/bash
"""

# Do the final docker build.
docker_final_img_name = Docker.tagname(None, dockerfile)
Docker.build("final", docker_final_img_name, dockerfile)

# Print the tag of the final image.
print(docker_final_img_name)
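
# Example invocation (script name and environment values are illustrative):
#   DISTRO=ubuntu:impish BRANCH=master ./build-unit-test-docker
# The final image tag, e.g. "openbmc/ubuntu-unit-test:2022-W05-<16-hex-hash>",
# is printed on stdout; all build progress goes to stderr.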