1#!/usr/bin/env python3 2# 3# Build the required docker image to run package unit tests 4# 5# Script Variables: 6# DOCKER_IMG_NAME: <optional, the name of the docker image to generate> 7# default is openbmc/ubuntu-unit-test 8# DISTRO: <optional, the distro to build a docker image against> 9# FORCE_DOCKER_BUILD: <optional, a non-zero value with force all Docker 10# images to be rebuilt rather than reusing caches.> 11# BUILD_URL: <optional, used to detect running under CI context 12# (ex. Jenkins)> 13# BRANCH: <optional, branch to build from each of the openbmc/ 14# repositories> 15# default is master, which will be used if input branch not 16# provided or not found 17# UBUNTU_MIRROR: <optional, the URL of a mirror of Ubuntu to override the 18# default ones in /etc/apt/sources.list> 19# default is empty, and no mirror is used. 20# http_proxy The HTTP address of the proxy server to connect to. 21# Default: "", proxy is not setup if this is not set 22 23import os 24import re 25import sys 26import threading 27from datetime import date 28from hashlib import sha256 29 30# typing.Dict is used for type-hints. 31from typing import Any, Callable, Dict, Iterable, Optional # noqa: F401 32 33from sh import docker, git, nproc, uname # type: ignore 34 35try: 36 # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'. 37 from typing import TypedDict 38except Exception: 39 40 class TypedDict(dict): # type: ignore 41 # We need to do this to eat the 'total' argument. 42 def __init_subclass__(cls, **kwargs: Any) -> None: 43 super().__init_subclass__() 44 45 46# Declare some variables used in package definitions. 47prefix = "/usr/local" 48proc_count = nproc().strip() 49 50 51class PackageDef(TypedDict, total=False): 52 """Package Definition for packages dictionary.""" 53 54 # rev [optional]: Revision of package to use. 55 rev: str 56 # url [optional]: lambda function to create URL: (package, rev) -> url. 57 url: Callable[[str, str], str] 58 # depends [optional]: List of package dependencies. 59 depends: Iterable[str] 60 # build_type [required]: Build type used for package. 61 # Currently supported: autoconf, cmake, custom, make, meson 62 build_type: str 63 # build_steps [optional]: Steps to run for 'custom' build_type. 64 build_steps: Iterable[str] 65 # config_flags [optional]: List of options to pass configuration tool. 66 config_flags: Iterable[str] 67 # config_env [optional]: List of environment variables to set for config. 68 config_env: Iterable[str] 69 # custom_post_dl [optional]: List of steps to run after download, but 70 # before config / build / install. 71 custom_post_dl: Iterable[str] 72 # custom_post_install [optional]: List of steps to run after install. 73 custom_post_install: Iterable[str] 74 75 # __tag [private]: Generated Docker tag name for package stage. 76 __tag: str 77 # __package [private]: Package object associated with this package. 78 __package: Any # Type is Package, but not defined yet. 79 80 81# Packages to include in image. 82packages = { 83 "boost": PackageDef( 84 rev="1.84.0", 85 url=( 86 lambda pkg, rev: f"https://github.com/boostorg/{pkg}/releases/download/{pkg}-{rev}/{pkg}-{rev}.tar.gz" 87 ), 88 build_type="custom", 89 build_steps=[ 90 ( 91 "./bootstrap.sh" 92 f" --prefix={prefix} --with-libraries=context,coroutine,url" 93 ), 94 "./b2", 95 f"./b2 install --prefix={prefix} valgrind=on", 96 ], 97 ), 98 "USCiLab/cereal": PackageDef( 99 rev="v1.3.2", 100 build_type="custom", 101 build_steps=[f"cp -a include/cereal/ {prefix}/include/"], 102 ), 103 "danmar/cppcheck": PackageDef( 104 rev="2.12.1", 105 build_type="cmake", 106 ), 107 "CLIUtils/CLI11": PackageDef( 108 rev="v2.3.2", 109 build_type="cmake", 110 config_flags=[ 111 "-DBUILD_TESTING=OFF", 112 "-DCLI11_BUILD_DOCS=OFF", 113 "-DCLI11_BUILD_EXAMPLES=OFF", 114 ], 115 ), 116 "fmtlib/fmt": PackageDef( 117 rev="10.1.1", 118 build_type="cmake", 119 config_flags=[ 120 "-DFMT_DOC=OFF", 121 "-DFMT_TEST=OFF", 122 ], 123 ), 124 "Naios/function2": PackageDef( 125 rev="4.2.4", 126 build_type="custom", 127 build_steps=[ 128 f"mkdir {prefix}/include/function2", 129 f"cp include/function2/function2.hpp {prefix}/include/function2/", 130 ], 131 ), 132 "google/googletest": PackageDef( 133 rev="v1.14.0", 134 build_type="cmake", 135 config_env=["CXXFLAGS=-std=c++20"], 136 config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"], 137 ), 138 "nghttp2/nghttp2": PackageDef( 139 rev="v1.61.0", 140 build_type="cmake", 141 config_env=["CXXFLAGS=-std=c++20"], 142 config_flags=[ 143 "-DENABLE_LIB_ONLY=ON", 144 "-DENABLE_STATIC_LIB=ON", 145 ], 146 ), 147 "nlohmann/json": PackageDef( 148 rev="v3.11.2", 149 build_type="cmake", 150 config_flags=["-DJSON_BuildTests=OFF"], 151 custom_post_install=[ 152 ( 153 f"ln -s {prefix}/include/nlohmann/json.hpp" 154 f" {prefix}/include/json.hpp" 155 ), 156 ], 157 ), 158 "json-c/json-c": PackageDef( 159 rev="json-c-0.17-20230812", 160 build_type="cmake", 161 ), 162 "LibVNC/libvncserver": PackageDef( 163 rev="LibVNCServer-0.9.14", 164 build_type="cmake", 165 ), 166 "leethomason/tinyxml2": PackageDef( 167 rev="9.0.0", 168 build_type="cmake", 169 ), 170 "tristanpenman/valijson": PackageDef( 171 rev="v1.0.1", 172 build_type="cmake", 173 config_flags=[ 174 "-Dvalijson_BUILD_TESTS=0", 175 "-Dvalijson_INSTALL_HEADERS=1", 176 ], 177 ), 178 "open-power/pdbg": PackageDef(build_type="autoconf"), 179 "openbmc/gpioplus": PackageDef( 180 build_type="meson", 181 config_flags=[ 182 "-Dexamples=false", 183 "-Dtests=disabled", 184 ], 185 ), 186 "openbmc/phosphor-dbus-interfaces": PackageDef( 187 depends=["openbmc/sdbusplus"], 188 build_type="meson", 189 config_flags=["-Dgenerate_md=false"], 190 ), 191 "openbmc/phosphor-logging": PackageDef( 192 depends=[ 193 "USCiLab/cereal", 194 "openbmc/phosphor-dbus-interfaces", 195 "openbmc/sdbusplus", 196 "openbmc/sdeventplus", 197 ], 198 build_type="meson", 199 config_flags=[ 200 "-Dlibonly=true", 201 "-Dtests=disabled", 202 f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml", 203 ], 204 ), 205 "openbmc/phosphor-objmgr": PackageDef( 206 depends=[ 207 "CLIUtils/CLI11", 208 "boost", 209 "leethomason/tinyxml2", 210 "openbmc/phosphor-dbus-interfaces", 211 "openbmc/phosphor-logging", 212 "openbmc/sdbusplus", 213 ], 214 build_type="meson", 215 config_flags=[ 216 "-Dtests=disabled", 217 ], 218 ), 219 "openbmc/libpeci": PackageDef( 220 build_type="meson", 221 config_flags=[ 222 "-Draw-peci=disabled", 223 ], 224 ), 225 "openbmc/libpldm": PackageDef( 226 build_type="meson", 227 config_flags=[ 228 "-Dabi=deprecated,stable", 229 "-Doem-ibm=enabled", 230 "-Dtests=disabled", 231 ], 232 ), 233 "openbmc/sdbusplus": PackageDef( 234 build_type="meson", 235 custom_post_dl=[ 236 "cd tools", 237 f"./setup.py install --root=/ --prefix={prefix}", 238 "cd ..", 239 ], 240 config_flags=[ 241 "-Dexamples=disabled", 242 "-Dtests=disabled", 243 ], 244 ), 245 "openbmc/sdeventplus": PackageDef( 246 depends=[ 247 "openbmc/stdplus", 248 ], 249 build_type="meson", 250 config_flags=[ 251 "-Dexamples=false", 252 "-Dtests=disabled", 253 ], 254 ), 255 "openbmc/stdplus": PackageDef( 256 depends=[ 257 "fmtlib/fmt", 258 "google/googletest", 259 "Naios/function2", 260 ], 261 build_type="meson", 262 config_flags=[ 263 "-Dexamples=false", 264 "-Dtests=disabled", 265 "-Dgtest=enabled", 266 ], 267 ), 268} # type: Dict[str, PackageDef] 269 270# Define common flags used for builds 271configure_flags = " ".join( 272 [ 273 f"--prefix={prefix}", 274 ] 275) 276cmake_flags = " ".join( 277 [ 278 "-DBUILD_SHARED_LIBS=ON", 279 "-DCMAKE_BUILD_TYPE=RelWithDebInfo", 280 f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}", 281 "-GNinja", 282 "-DCMAKE_MAKE_PROGRAM=ninja", 283 ] 284) 285meson_flags = " ".join( 286 [ 287 "--wrap-mode=nodownload", 288 f"-Dprefix={prefix}", 289 ] 290) 291 292 293class Package(threading.Thread): 294 """Class used to build the Docker stages for each package. 295 296 Generally, this class should not be instantiated directly but through 297 Package.generate_all(). 298 """ 299 300 # Copy the packages dictionary. 301 packages = packages.copy() 302 303 # Lock used for thread-safety. 304 lock = threading.Lock() 305 306 def __init__(self, pkg: str): 307 """pkg - The name of this package (ex. foo/bar )""" 308 super(Package, self).__init__() 309 310 self.package = pkg 311 self.exception = None # type: Optional[Exception] 312 313 # Reference to this package's 314 self.pkg_def = Package.packages[pkg] 315 self.pkg_def["__package"] = self 316 317 def run(self) -> None: 318 """Thread 'run' function. Builds the Docker stage.""" 319 320 # In case this package has no rev, fetch it from Github. 321 self._update_rev() 322 323 # Find all the Package objects that this package depends on. 324 # This section is locked because we are looking into another 325 # package's PackageDef dict, which could be being modified. 326 Package.lock.acquire() 327 deps: Iterable[Package] = [ 328 Package.packages[deppkg]["__package"] 329 for deppkg in self.pkg_def.get("depends", []) 330 ] 331 Package.lock.release() 332 333 # Wait until all the depends finish building. We need them complete 334 # for the "COPY" commands. 335 for deppkg in deps: 336 deppkg.join() 337 338 # Generate this package's Dockerfile. 339 dockerfile = f""" 340FROM {docker_base_img_name} 341{self._df_copycmds()} 342{self._df_build()} 343""" 344 345 # Generate the resulting tag name and save it to the PackageDef. 346 # This section is locked because we are modifying the PackageDef, 347 # which can be accessed by other threads. 348 Package.lock.acquire() 349 tag = Docker.tagname(self._stagename(), dockerfile) 350 self.pkg_def["__tag"] = tag 351 Package.lock.release() 352 353 # Do the build / save any exceptions. 354 try: 355 Docker.build(self.package, tag, dockerfile) 356 except Exception as e: 357 self.exception = e 358 359 @classmethod 360 def generate_all(cls) -> None: 361 """Ensure a Docker stage is created for all defined packages. 362 363 These are done in parallel but with appropriate blocking per 364 package 'depends' specifications. 365 """ 366 367 # Create a Package for each defined package. 368 pkg_threads = [Package(p) for p in cls.packages.keys()] 369 370 # Start building them all. 371 # This section is locked because threads depend on each other, 372 # based on the packages, and they cannot 'join' on a thread 373 # which is not yet started. Adding a lock here allows all the 374 # threads to start before they 'join' their dependencies. 375 Package.lock.acquire() 376 for t in pkg_threads: 377 t.start() 378 Package.lock.release() 379 380 # Wait for completion. 381 for t in pkg_threads: 382 t.join() 383 # Check if the thread saved off its own exception. 384 if t.exception: 385 print(f"Package {t.package} failed!", file=sys.stderr) 386 raise t.exception 387 388 @staticmethod 389 def df_all_copycmds() -> str: 390 """Formulate the Dockerfile snippet necessary to copy all packages 391 into the final image. 392 """ 393 return Package.df_copycmds_set(Package.packages.keys()) 394 395 @classmethod 396 def depcache(cls) -> str: 397 """Create the contents of the '/tmp/depcache'. 398 This file is a comma-separated list of "<pkg>:<rev>". 399 """ 400 401 # This needs to be sorted for consistency. 402 depcache = "" 403 for pkg in sorted(cls.packages.keys()): 404 depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"]) 405 return depcache 406 407 def _update_rev(self) -> None: 408 """Look up the HEAD for missing a static rev.""" 409 410 if "rev" in self.pkg_def: 411 return 412 413 # Check if Jenkins/Gerrit gave us a revision and use it. 414 if gerrit_project == self.package and gerrit_rev: 415 print( 416 f"Found Gerrit revision for {self.package}: {gerrit_rev}", 417 file=sys.stderr, 418 ) 419 self.pkg_def["rev"] = gerrit_rev 420 return 421 422 # Ask Github for all the branches. 423 lookup = git( 424 "ls-remote", "--heads", f"https://github.com/{self.package}" 425 ) 426 427 # Find the branch matching {branch} (or fallback to master). 428 # This section is locked because we are modifying the PackageDef. 429 Package.lock.acquire() 430 for line in lookup.split("\n"): 431 if re.fullmatch(f".*{branch}$", line.strip()): 432 self.pkg_def["rev"] = line.split()[0] 433 break 434 elif ( 435 "refs/heads/master" in line or "refs/heads/main" in line 436 ) and "rev" not in self.pkg_def: 437 self.pkg_def["rev"] = line.split()[0] 438 Package.lock.release() 439 440 def _stagename(self) -> str: 441 """Create a name for the Docker stage associated with this pkg.""" 442 return self.package.replace("/", "-").lower() 443 444 def _url(self) -> str: 445 """Get the URL for this package.""" 446 rev = self.pkg_def["rev"] 447 448 # If the lambda exists, call it. 449 if "url" in self.pkg_def: 450 return self.pkg_def["url"](self.package, rev) 451 452 # Default to the github archive URL. 453 return f"https://github.com/{self.package}/archive/{rev}.tar.gz" 454 455 def _cmd_download(self) -> str: 456 """Formulate the command necessary to download and unpack to source.""" 457 458 url = self._url() 459 if ".tar." not in url: 460 raise NotImplementedError( 461 f"Unhandled download type for {self.package}: {url}" 462 ) 463 464 cmd = f"curl -L {url} | tar -x" 465 466 if url.endswith(".bz2"): 467 cmd += "j" 468 elif url.endswith(".gz"): 469 cmd += "z" 470 else: 471 raise NotImplementedError( 472 f"Unknown tar flags needed for {self.package}: {url}" 473 ) 474 475 return cmd 476 477 def _cmd_cd_srcdir(self) -> str: 478 """Formulate the command necessary to 'cd' into the source dir.""" 479 return f"cd {self.package.split('/')[-1]}*" 480 481 def _df_copycmds(self) -> str: 482 """Formulate the dockerfile snippet necessary to COPY all depends.""" 483 484 if "depends" not in self.pkg_def: 485 return "" 486 return Package.df_copycmds_set(self.pkg_def["depends"]) 487 488 @staticmethod 489 def df_copycmds_set(pkgs: Iterable[str]) -> str: 490 """Formulate the Dockerfile snippet necessary to COPY a set of 491 packages into a Docker stage. 492 """ 493 494 copy_cmds = "" 495 496 # Sort the packages for consistency. 497 for p in sorted(pkgs): 498 tag = Package.packages[p]["__tag"] 499 copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n" 500 # Workaround for upstream docker bug and multiple COPY cmds 501 # https://github.com/moby/moby/issues/37965 502 copy_cmds += "RUN true\n" 503 504 return copy_cmds 505 506 def _df_build(self) -> str: 507 """Formulate the Dockerfile snippet necessary to download, build, and 508 install a package into a Docker stage. 509 """ 510 511 # Download and extract source. 512 result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && " 513 514 # Handle 'custom_post_dl' commands. 515 custom_post_dl = self.pkg_def.get("custom_post_dl") 516 if custom_post_dl: 517 result += " && ".join(custom_post_dl) + " && " 518 519 # Build and install package based on 'build_type'. 520 build_type = self.pkg_def["build_type"] 521 if build_type == "autoconf": 522 result += self._cmd_build_autoconf() 523 elif build_type == "cmake": 524 result += self._cmd_build_cmake() 525 elif build_type == "custom": 526 result += self._cmd_build_custom() 527 elif build_type == "make": 528 result += self._cmd_build_make() 529 elif build_type == "meson": 530 result += self._cmd_build_meson() 531 else: 532 raise NotImplementedError( 533 f"Unhandled build type for {self.package}: {build_type}" 534 ) 535 536 # Handle 'custom_post_install' commands. 537 custom_post_install = self.pkg_def.get("custom_post_install") 538 if custom_post_install: 539 result += " && " + " && ".join(custom_post_install) 540 541 return result 542 543 def _cmd_build_autoconf(self) -> str: 544 options = " ".join(self.pkg_def.get("config_flags", [])) 545 env = " ".join(self.pkg_def.get("config_env", [])) 546 result = "./bootstrap.sh && " 547 result += f"{env} ./configure {configure_flags} {options} && " 548 result += f"make -j{proc_count} && make install" 549 return result 550 551 def _cmd_build_cmake(self) -> str: 552 options = " ".join(self.pkg_def.get("config_flags", [])) 553 env = " ".join(self.pkg_def.get("config_env", [])) 554 result = "mkdir builddir && cd builddir && " 555 result += f"{env} cmake {cmake_flags} {options} .. && " 556 result += "cmake --build . --target all && " 557 result += "cmake --build . --target install && " 558 result += "cd .." 559 return result 560 561 def _cmd_build_custom(self) -> str: 562 return " && ".join(self.pkg_def.get("build_steps", [])) 563 564 def _cmd_build_make(self) -> str: 565 return f"make -j{proc_count} && make install" 566 567 def _cmd_build_meson(self) -> str: 568 options = " ".join(self.pkg_def.get("config_flags", [])) 569 env = " ".join(self.pkg_def.get("config_env", [])) 570 result = f"{env} meson setup builddir {meson_flags} {options} && " 571 result += "ninja -C builddir && ninja -C builddir install" 572 return result 573 574 575class Docker: 576 """Class to assist with Docker interactions. All methods are static.""" 577 578 @staticmethod 579 def timestamp() -> str: 580 """Generate a timestamp for today using the ISO week.""" 581 today = date.today().isocalendar() 582 return f"{today[0]}-W{today[1]:02}" 583 584 @staticmethod 585 def tagname(pkgname: Optional[str], dockerfile: str) -> str: 586 """Generate a tag name for a package using a hash of the Dockerfile.""" 587 result = docker_image_name 588 if pkgname: 589 result += "-" + pkgname 590 591 result += ":" + Docker.timestamp() 592 result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16] 593 594 return result 595 596 @staticmethod 597 def build(pkg: str, tag: str, dockerfile: str) -> None: 598 """Build a docker image using the Dockerfile and tagging it with 'tag'.""" 599 600 # If we're not forcing builds, check if it already exists and skip. 601 if not force_build: 602 if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'): 603 print( 604 f"Image {tag} already exists. Skipping.", file=sys.stderr 605 ) 606 return 607 608 # Build it. 609 # Capture the output of the 'docker build' command and send it to 610 # stderr (prefixed with the package name). This allows us to see 611 # progress but not pollute stdout. Later on we output the final 612 # docker tag to stdout and we want to keep that pristine. 613 # 614 # Other unusual flags: 615 # --no-cache: Bypass the Docker cache if 'force_build'. 616 # --force-rm: Clean up Docker processes if they fail. 617 docker.build( 618 proxy_args, 619 "--network=host", 620 "--force-rm", 621 "--no-cache=true" if force_build else "--no-cache=false", 622 "-t", 623 tag, 624 "-", 625 _in=dockerfile, 626 _out=( 627 lambda line: print( 628 pkg + ":", line, end="", file=sys.stderr, flush=True 629 ) 630 ), 631 _err_to_out=True, 632 ) 633 634 635# Read a bunch of environment variables. 636docker_image_name = os.environ.get( 637 "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test" 638) 639force_build = os.environ.get("FORCE_DOCKER_BUILD") 640is_automated_ci_build = os.environ.get("BUILD_URL", False) 641distro = os.environ.get("DISTRO", "ubuntu:noble") 642branch = os.environ.get("BRANCH", "master") 643ubuntu_mirror = os.environ.get("UBUNTU_MIRROR") 644http_proxy = os.environ.get("http_proxy") 645 646gerrit_project = os.environ.get("GERRIT_PROJECT") 647gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION") 648 649# Ensure appropriate docker build output to see progress and identify 650# any issues 651os.environ["BUILDKIT_PROGRESS"] = "plain" 652 653# Set up some common variables. 654username = os.environ.get("USER", "root") 655homedir = os.environ.get("HOME", "/root") 656gid = os.getgid() 657uid = os.getuid() 658 659# Use well-known constants if user is root 660if username == "root": 661 homedir = "/root" 662 gid = 0 663 uid = 0 664 665# Determine the architecture for Docker. 666arch = uname("-m").strip() 667if arch == "ppc64le": 668 docker_base = "ppc64le/" 669elif arch == "x86_64": 670 docker_base = "" 671elif arch == "aarch64": 672 docker_base = "arm64v8/" 673else: 674 print( 675 f"Unsupported system architecture({arch}) found for docker image", 676 file=sys.stderr, 677 ) 678 sys.exit(1) 679 680# Special flags if setting up a deb mirror. 681mirror = "" 682if "ubuntu" in distro and ubuntu_mirror: 683 mirror = f""" 684RUN echo "deb {ubuntu_mirror} \ 685 $(. /etc/os-release && echo $VERSION_CODENAME) \ 686 main restricted universe multiverse" > /etc/apt/sources.list && \\ 687 echo "deb {ubuntu_mirror} \ 688 $(. /etc/os-release && echo $VERSION_CODENAME)-updates \ 689 main restricted universe multiverse" >> /etc/apt/sources.list && \\ 690 echo "deb {ubuntu_mirror} \ 691 $(. /etc/os-release && echo $VERSION_CODENAME)-security \ 692 main restricted universe multiverse" >> /etc/apt/sources.list && \\ 693 echo "deb {ubuntu_mirror} \ 694 $(. /etc/os-release && echo $VERSION_CODENAME)-proposed \ 695 main restricted universe multiverse" >> /etc/apt/sources.list && \\ 696 echo "deb {ubuntu_mirror} \ 697 $(. /etc/os-release && echo $VERSION_CODENAME)-backports \ 698 main restricted universe multiverse" >> /etc/apt/sources.list 699""" 700 701# Special flags for proxying. 702proxy_cmd = "" 703proxy_keyserver = "" 704proxy_args = [] 705if http_proxy: 706 proxy_cmd = f""" 707RUN echo "[http]" >> {homedir}/.gitconfig && \ 708 echo "proxy = {http_proxy}" >> {homedir}/.gitconfig 709""" 710 proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}" 711 712 proxy_args.extend( 713 [ 714 "--build-arg", 715 f"http_proxy={http_proxy}", 716 "--build-arg", 717 f"https_proxy={http_proxy}", 718 ] 719 ) 720 721# Create base Dockerfile. 722dockerfile_base = f""" 723FROM {docker_base}{distro} 724 725{mirror} 726 727ENV DEBIAN_FRONTEND noninteractive 728 729ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/" 730 731# Sometimes the ubuntu key expires and we need a way to force an execution 732# of the apt-get commands for the dbgsym-keyring. When this happens we see 733# an error like: "Release: The following signatures were invalid:" 734# Insert a bogus echo that we can change here when we get this error to force 735# the update. 736RUN echo "ubuntu keyserver rev as of 2021-04-21" 737 738# We need the keys to be imported for dbgsym repos 739# New releases have a package, older ones fall back to manual fetching 740# https://wiki.ubuntu.com/Debug%20Symbol%20Packages 741# Known issue with gpg to get keys via proxy - 742# https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1788190, hence using 743# curl to get keys. 744RUN apt-get update && apt-get dist-upgrade -yy && \ 745 ( apt-get install -yy gpgv ubuntu-dbgsym-keyring || \ 746 ( apt-get install -yy dirmngr curl && \ 747 curl -sSL \ 748 'https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622' \ 749 | apt-key add - )) 750 751# Parse the current repo list into a debug repo list 752RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' \ 753 /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list 754 755# Remove non-existent debug repos 756RUN sed -i '/-\\(backports\\|security\\) /d' /etc/apt/sources.list.d/debug.list 757 758RUN cat /etc/apt/sources.list.d/debug.list 759 760RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \ 761 abi-compliance-checker \ 762 abi-dumper \ 763 autoconf \ 764 autoconf-archive \ 765 bison \ 766 clang-17 \ 767 clang-format-17 \ 768 clang-tidy-17 \ 769 clang-tools-17 \ 770 cmake \ 771 curl \ 772 dbus \ 773 device-tree-compiler \ 774 flex \ 775 g++-13 \ 776 gcc-13 \ 777 git \ 778 iproute2 \ 779 iputils-ping \ 780 libaudit-dev \ 781 libc6-dbg \ 782 libc6-dev \ 783 libconfig++-dev \ 784 libcryptsetup-dev \ 785 libdbus-1-dev \ 786 libevdev-dev \ 787 libgpiod-dev \ 788 libi2c-dev \ 789 libjpeg-dev \ 790 libjson-perl \ 791 libldap2-dev \ 792 libmimetic-dev \ 793 libnl-3-dev \ 794 libnl-genl-3-dev \ 795 libpam0g-dev \ 796 libpciaccess-dev \ 797 libperlio-gzip-perl \ 798 libpng-dev \ 799 libprotobuf-dev \ 800 libsnmp-dev \ 801 libssl-dev \ 802 libsystemd-dev \ 803 libtool \ 804 liburing-dev \ 805 libxml2-utils \ 806 libxml-simple-perl \ 807 ninja-build \ 808 npm \ 809 pkg-config \ 810 protobuf-compiler \ 811 python3 \ 812 python3-dev\ 813 python3-git \ 814 python3-mako \ 815 python3-pip \ 816 python3-protobuf \ 817 python3-setuptools \ 818 python3-socks \ 819 python3-yaml \ 820 rsync \ 821 shellcheck \ 822 socat \ 823 sudo \ 824 systemd \ 825 valgrind \ 826 vim \ 827 wget \ 828 xxd 829 830RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 13 \ 831 --slave /usr/bin/g++ g++ /usr/bin/g++-13 \ 832 --slave /usr/bin/gcov gcov /usr/bin/gcov-13 \ 833 --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-13 \ 834 --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-13 835RUN update-alternatives --remove cpp /usr/bin/cpp && \ 836 update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-13 13 837 838RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-17 1000 \ 839 --slave /usr/bin/clang++ clang++ /usr/bin/clang++-17 \ 840 --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-17 \ 841 --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-17 \ 842 --slave /usr/bin/run-clang-tidy run-clang-tidy.py \ 843 /usr/bin/run-clang-tidy-17 \ 844 --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-17 845 846""" 847 848if is_automated_ci_build: 849 dockerfile_base += f""" 850# Run an arbitrary command to pollute the docker cache regularly force us 851# to re-run `apt-get update` daily. 852RUN echo {Docker.timestamp()} 853RUN apt-get update && apt-get dist-upgrade -yy 854 855""" 856 857dockerfile_base += """ 858RUN pip3 install --break-system-packages \ 859 beautysh \ 860 black \ 861 codespell \ 862 flake8 \ 863 gcovr \ 864 gitlint \ 865 inflection \ 866 isort \ 867 jsonschema \ 868 meson==1.3.0 \ 869 requests 870 871RUN npm install -g \ 872 eslint@v8.56.0 eslint-plugin-json@v3.1.0 \ 873 markdownlint-cli@latest \ 874 prettier@latest 875""" 876 877# Build the base and stage docker images. 878docker_base_img_name = Docker.tagname("base", dockerfile_base) 879Docker.build("base", docker_base_img_name, dockerfile_base) 880Package.generate_all() 881 882# Create the final Dockerfile. 883dockerfile = f""" 884# Build the final output image 885FROM {docker_base_img_name} 886{Package.df_all_copycmds()} 887 888# Some of our infrastructure still relies on the presence of this file 889# even though it is no longer needed to rebuild the docker environment 890# NOTE: The file is sorted to ensure the ordering is stable. 891RUN echo '{Package.depcache()}' > /tmp/depcache 892 893# Ensure the group, user, and home directory are created (or rename them if 894# they already exist). 895RUN if grep -q ":{gid}:" /etc/group ; then \ 896 groupmod -n {username} $(awk -F : '{{ if ($3 == {gid}) {{ print $1 }} }}' /etc/group) ; \ 897 else \ 898 groupadd -f -g {gid} {username} ; \ 899 fi 900RUN mkdir -p "{os.path.dirname(homedir)}" 901RUN if grep -q ":{uid}:" /etc/passwd ; then \ 902 usermod -l {username} -d {homedir} -m $(awk -F : '{{ if ($3 == {uid}) {{ print $1 }} }}' /etc/passwd) ; \ 903 else \ 904 useradd -d {homedir} -m -u {uid} -g {gid} {username} ; \ 905 fi 906RUN sed -i '1iDefaults umask=000' /etc/sudoers 907RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers 908 909# Ensure user has ability to write to /usr/local for different tool 910# and data installs 911RUN chown -R {username}:{username} /usr/local/share 912 913# Update library cache 914RUN ldconfig 915 916{proxy_cmd} 917 918RUN /bin/bash 919""" 920 921# Do the final docker build 922docker_final_img_name = Docker.tagname(None, dockerfile) 923Docker.build("final", docker_final_img_name, dockerfile) 924 925# Print the tag of the final image. 926print(docker_final_img_name) 927