1#!/usr/bin/env python3 2# 3# Build the required docker image to run package unit tests 4# 5# Script Variables: 6# DOCKER_IMG_NAME: <optional, the name of the docker image to generate> 7# default is openbmc/ubuntu-unit-test 8# DISTRO: <optional, the distro to build a docker image against> 9# FORCE_DOCKER_BUILD: <optional, a non-zero value with force all Docker 10# images to be rebuilt rather than reusing caches.> 11# BUILD_URL: <optional, used to detect running under CI context 12# (ex. Jenkins)> 13# BRANCH: <optional, branch to build from each of the openbmc/ 14# repositories> 15# default is master, which will be used if input branch not 16# provided or not found 17# UBUNTU_MIRROR: <optional, the URL of a mirror of Ubuntu to override the 18# default ones in /etc/apt/sources.list> 19# default is empty, and no mirror is used. 20# http_proxy The HTTP address of the proxy server to connect to. 21# Default: "", proxy is not setup if this is not set 22 23import os 24import sys 25import threading 26from datetime import date 27from hashlib import sha256 28from sh import docker, git, nproc, uname # type: ignore 29from typing import Any, Callable, Dict, Iterable, Optional 30 31try: 32 # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'. 33 from typing import TypedDict 34except: 35 36 class TypedDict(dict): # type: ignore 37 # We need to do this to eat the 'total' argument. 38 def __init_subclass__(cls, **kwargs): 39 super().__init_subclass__() 40 41 42# Declare some variables used in package definitions. 43prefix = "/usr/local" 44proc_count = nproc().strip() 45 46 47class PackageDef(TypedDict, total=False): 48 """ Package Definition for packages dictionary. """ 49 50 # rev [optional]: Revision of package to use. 51 rev: str 52 # url [optional]: lambda function to create URL: (package, rev) -> url. 53 url: Callable[[str, str], str] 54 # depends [optional]: List of package dependencies. 55 depends: Iterable[str] 56 # build_type [required]: Build type used for package. 57 # Currently supported: autoconf, cmake, custom, make, meson 58 build_type: str 59 # build_steps [optional]: Steps to run for 'custom' build_type. 60 build_steps: Iterable[str] 61 # config_flags [optional]: List of options to pass configuration tool. 62 config_flags: Iterable[str] 63 # config_env [optional]: List of environment variables to set for config. 64 config_env: Iterable[str] 65 # custom_post_dl [optional]: List of steps to run after download, but 66 # before config / build / install. 67 custom_post_dl: Iterable[str] 68 # custom_post_install [optional]: List of steps to run after install. 69 custom_post_install: Iterable[str] 70 71 # __tag [private]: Generated Docker tag name for package stage. 72 __tag: str 73 # __package [private]: Package object associated with this package. 74 __package: Any # Type is Package, but not defined yet. 75 76 77# Packages to include in image. 78packages = { 79 # Install OpenSSL 3.x. 80 # Generally we want to rely on the version of OpenSSL from the OS, but v3.x 81 # was a major API change. It is included in Yocto but not Ubuntu until 82 # 22.04. Install it manually so that our CI can target the OpenSSL 3.x 83 # APIs. 84 "openssl/openssl": PackageDef( 85 rev="openssl-3.0.1", 86 build_type="custom", 87 build_steps=[ 88 f"./Configure --prefix={prefix} --libdir=lib", 89 f"make -j{proc_count}", 90 f"make -j{proc_count} install" 91 ], 92 ), 93 "boost": PackageDef( 94 rev="1.78.0", 95 url=( 96 lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2" 97 ), 98 build_type="custom", 99 build_steps=[ 100 f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine", 101 "./b2", 102 f"./b2 install --prefix={prefix}", 103 ], 104 ), 105 "USCiLab/cereal": PackageDef( 106 rev="3e4d1b84cab4891368d2179a61a7ba06a5693e7f", 107 build_type="custom", 108 build_steps=[f"cp -a include/cereal/ {prefix}/include/"], 109 ), 110 "catchorg/Catch2": PackageDef( 111 rev="v2.13.6", 112 build_type="cmake", 113 config_flags=["-DBUILD_TESTING=OFF", "-DCATCH_INSTALL_DOCS=OFF"], 114 ), 115 "CLIUtils/CLI11": PackageDef( 116 rev="v1.9.1", 117 build_type="cmake", 118 config_flags=[ 119 "-DBUILD_TESTING=OFF", 120 "-DCLI11_BUILD_DOCS=OFF", 121 "-DCLI11_BUILD_EXAMPLES=OFF", 122 ], 123 ), 124 "fmtlib/fmt": PackageDef( 125 rev="8.1.1", 126 build_type="cmake", 127 config_flags=[ 128 "-DFMT_DOC=OFF", 129 "-DFMT_TEST=OFF", 130 ], 131 ), 132 "Naios/function2": PackageDef( 133 rev="4.1.0", 134 build_type="custom", 135 build_steps=[ 136 f"mkdir {prefix}/include/function2", 137 f"cp include/function2/function2.hpp {prefix}/include/function2/", 138 ], 139 ), 140 # Release 2021-06-12 141 "google/googletest": PackageDef( 142 rev="9e712372214d75bb30ec2847a44bf124d48096f3", 143 build_type="cmake", 144 config_env=["CXXFLAGS=-std=c++20"], 145 config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"], 146 ), 147 # Release 2020-08-06 148 "nlohmann/json": PackageDef( 149 rev="v3.10.4", 150 build_type="cmake", 151 config_flags=["-DJSON_BuildTests=OFF"], 152 custom_post_install=[ 153 f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp", 154 ], 155 ), 156 # Snapshot from 2019-05-24 157 "linux-test-project/lcov": PackageDef( 158 rev="v1.15", 159 build_type="make", 160 ), 161 # dev-5.8 2021-01-11 162 "openbmc/linux": PackageDef( 163 rev="3cc95ae40716e56f81b69615781f54c78079042d", 164 build_type="custom", 165 build_steps=[ 166 f"make -j{proc_count} defconfig", 167 f"make INSTALL_HDR_PATH={prefix} headers_install", 168 ], 169 ), 170 # Snapshot from 2020-06-13 171 "LibVNC/libvncserver": PackageDef( 172 rev="LibVNCServer-0.9.13", 173 build_type="cmake", 174 ), 175 # version from meta-openembedded/meta-oe/recipes-support/libtinyxml2/libtinyxml2_8.0.0.bb 176 "leethomason/tinyxml2": PackageDef( 177 rev="8.0.0", 178 build_type="cmake", 179 ), 180 # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb 181 "CPPAlliance/url": PackageDef( 182 rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab", 183 build_type="custom", 184 build_steps=[f"cp -a include/** {prefix}/include/"], 185 ), 186 # version from meta-openembedded/meta-oe/dynamic-layers/networking-layer/recipes-devools/valijson/valijson_0.6.bb 187 "tristanpenman/valijson": PackageDef( 188 rev="v0.6", 189 build_type="cmake", 190 config_flags=[ 191 "-Dvalijson_BUILD_TESTS=0", 192 "-Dvalijson_INSTALL_HEADERS=1", 193 ], 194 ), 195 # version from meta-openembedded/meta-oe/recipes-devtools/unifex/unifex_git.bb 196 "facebookexperimental/libunifex": PackageDef( 197 rev="9df21c58d34ce8a1cd3b15c3a7347495e29417a0", 198 build_type="cmake", 199 config_flags=[ 200 "-DBUILD_SHARED_LIBS=ON", 201 "-DBUILD_TESTING=OFF", 202 "-DCMAKE_CXX_STANDARD=20", 203 "-DUNIFEX_BUILD_EXAMPLES=OFF", 204 ], 205 ), 206 "open-power/pdbg": PackageDef(build_type="autoconf"), 207 "openbmc/gpioplus": PackageDef( 208 depends=["openbmc/stdplus"], 209 build_type="meson", 210 config_flags=[ 211 "-Dexamples=false", 212 "-Dtests=disabled", 213 ], 214 ), 215 "openbmc/phosphor-dbus-interfaces": PackageDef( 216 depends=["openbmc/sdbusplus"], 217 build_type="meson", 218 ), 219 "openbmc/phosphor-logging": PackageDef( 220 depends=[ 221 "USCiLab/cereal", 222 "openbmc/phosphor-dbus-interfaces", 223 "openbmc/sdbusplus", 224 "openbmc/sdeventplus", 225 ], 226 build_type="meson", 227 config_flags=[ 228 f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml", 229 ], 230 ), 231 "openbmc/phosphor-objmgr": PackageDef( 232 depends=[ 233 "boost", 234 "leethomason/tinyxml2", 235 "openbmc/phosphor-logging", 236 "openbmc/sdbusplus", 237 ], 238 build_type="meson", 239 config_flags=[ 240 "-Dtests=disabled", 241 ], 242 ), 243 "openbmc/pldm": PackageDef( 244 depends=[ 245 "CLIUtils/CLI11", 246 "boost", 247 "nlohmann/json", 248 "openbmc/phosphor-dbus-interfaces", 249 "openbmc/phosphor-logging", 250 "openbmc/sdbusplus", 251 "openbmc/sdeventplus", 252 ], 253 build_type="meson", 254 config_flags=[ 255 "-Dlibpldm-only=enabled", 256 "-Doem-ibm=enabled", 257 "-Dtests=disabled", 258 ], 259 ), 260 "openbmc/sdbusplus": PackageDef( 261 depends=[ 262 "facebookexperimental/libunifex", 263 ], 264 build_type="meson", 265 custom_post_dl=[ 266 "cd tools", 267 f"./setup.py install --root=/ --prefix={prefix}", 268 "cd ..", 269 ], 270 config_flags=[ 271 "-Dexamples=disabled", 272 "-Dtests=disabled", 273 ], 274 ), 275 "openbmc/sdeventplus": PackageDef( 276 depends=["Naios/function2", "openbmc/stdplus"], 277 build_type="meson", 278 config_flags=[ 279 "-Dexamples=false", 280 "-Dtests=disabled", 281 ], 282 ), 283 "openbmc/stdplus": PackageDef( 284 depends=["fmtlib/fmt"], 285 build_type="meson", 286 config_flags=[ 287 "-Dexamples=false", 288 "-Dtests=disabled", 289 ], 290 ), 291} # type: Dict[str, PackageDef] 292 293# Define common flags used for builds 294configure_flags = " ".join( 295 [ 296 f"--prefix={prefix}", 297 ] 298) 299cmake_flags = " ".join( 300 [ 301 "-DBUILD_SHARED_LIBS=ON", 302 "-DCMAKE_BUILD_TYPE=RelWithDebInfo", 303 f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}", 304 "-GNinja", 305 "-DCMAKE_MAKE_PROGRAM=ninja", 306 ] 307) 308meson_flags = " ".join( 309 [ 310 "--wrap-mode=nodownload", 311 f"-Dprefix={prefix}", 312 ] 313) 314 315 316class Package(threading.Thread): 317 """Class used to build the Docker stages for each package. 318 319 Generally, this class should not be instantiated directly but through 320 Package.generate_all(). 321 """ 322 323 # Copy the packages dictionary. 324 packages = packages.copy() 325 326 # Lock used for thread-safety. 327 lock = threading.Lock() 328 329 def __init__(self, pkg: str): 330 """ pkg - The name of this package (ex. foo/bar ) """ 331 super(Package, self).__init__() 332 333 self.package = pkg 334 self.exception = None # type: Optional[Exception] 335 336 # Reference to this package's 337 self.pkg_def = Package.packages[pkg] 338 self.pkg_def["__package"] = self 339 340 def run(self) -> None: 341 """ Thread 'run' function. Builds the Docker stage. """ 342 343 # In case this package has no rev, fetch it from Github. 344 self._update_rev() 345 346 # Find all the Package objects that this package depends on. 347 # This section is locked because we are looking into another 348 # package's PackageDef dict, which could be being modified. 349 Package.lock.acquire() 350 deps: Iterable[Package] = [ 351 Package.packages[deppkg]["__package"] 352 for deppkg in self.pkg_def.get("depends", []) 353 ] 354 Package.lock.release() 355 356 # Wait until all the depends finish building. We need them complete 357 # for the "COPY" commands. 358 for deppkg in deps: 359 deppkg.join() 360 361 # Generate this package's Dockerfile. 362 dockerfile = f""" 363FROM {docker_base_img_name} 364{self._df_copycmds()} 365{self._df_build()} 366""" 367 368 # Generate the resulting tag name and save it to the PackageDef. 369 # This section is locked because we are modifying the PackageDef, 370 # which can be accessed by other threads. 371 Package.lock.acquire() 372 tag = Docker.tagname(self._stagename(), dockerfile) 373 self.pkg_def["__tag"] = tag 374 Package.lock.release() 375 376 # Do the build / save any exceptions. 377 try: 378 Docker.build(self.package, tag, dockerfile) 379 except Exception as e: 380 self.exception = e 381 382 @classmethod 383 def generate_all(cls) -> None: 384 """Ensure a Docker stage is created for all defined packages. 385 386 These are done in parallel but with appropriate blocking per 387 package 'depends' specifications. 388 """ 389 390 # Create a Package for each defined package. 391 pkg_threads = [Package(p) for p in cls.packages.keys()] 392 393 # Start building them all. 394 # This section is locked because threads depend on each other, 395 # based on the packages, and they cannot 'join' on a thread 396 # which is not yet started. Adding a lock here allows all the 397 # threads to start before they 'join' their dependencies. 398 Package.lock.acquire() 399 for t in pkg_threads: 400 t.start() 401 Package.lock.release() 402 403 # Wait for completion. 404 for t in pkg_threads: 405 t.join() 406 # Check if the thread saved off its own exception. 407 if t.exception: 408 print(f"Package {t.package} failed!", file=sys.stderr) 409 raise t.exception 410 411 @staticmethod 412 def df_all_copycmds() -> str: 413 """Formulate the Dockerfile snippet necessary to copy all packages 414 into the final image. 415 """ 416 return Package.df_copycmds_set(Package.packages.keys()) 417 418 @classmethod 419 def depcache(cls) -> str: 420 """Create the contents of the '/tmp/depcache'. 421 This file is a comma-separated list of "<pkg>:<rev>". 422 """ 423 424 # This needs to be sorted for consistency. 425 depcache = "" 426 for pkg in sorted(cls.packages.keys()): 427 depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"]) 428 return depcache 429 430 def _update_rev(self) -> None: 431 """ Look up the HEAD for missing a static rev. """ 432 433 if "rev" in self.pkg_def: 434 return 435 436 # Check if Jenkins/Gerrit gave us a revision and use it. 437 if gerrit_project == self.package and gerrit_rev: 438 print( 439 f"Found Gerrit revision for {self.package}: {gerrit_rev}", 440 file=sys.stderr, 441 ) 442 self.pkg_def["rev"] = gerrit_rev 443 return 444 445 # Ask Github for all the branches. 446 lookup = git("ls-remote", "--heads", f"https://github.com/{self.package}") 447 448 # Find the branch matching {branch} (or fallback to master). 449 # This section is locked because we are modifying the PackageDef. 450 Package.lock.acquire() 451 for line in lookup.split("\n"): 452 if f"refs/heads/{branch}" in line: 453 self.pkg_def["rev"] = line.split()[0] 454 elif f"refs/heads/master" in line and "rev" not in self.pkg_def: 455 self.pkg_def["rev"] = line.split()[0] 456 Package.lock.release() 457 458 def _stagename(self) -> str: 459 """ Create a name for the Docker stage associated with this pkg. """ 460 return self.package.replace("/", "-").lower() 461 462 def _url(self) -> str: 463 """ Get the URL for this package. """ 464 rev = self.pkg_def["rev"] 465 466 # If the lambda exists, call it. 467 if "url" in self.pkg_def: 468 return self.pkg_def["url"](self.package, rev) 469 470 # Default to the github archive URL. 471 return f"https://github.com/{self.package}/archive/{rev}.tar.gz" 472 473 def _cmd_download(self) -> str: 474 """Formulate the command necessary to download and unpack to source.""" 475 476 url = self._url() 477 if ".tar." not in url: 478 raise NotImplementedError( 479 f"Unhandled download type for {self.package}: {url}" 480 ) 481 482 cmd = f"curl -L {url} | tar -x" 483 484 if url.endswith(".bz2"): 485 cmd += "j" 486 elif url.endswith(".gz"): 487 cmd += "z" 488 else: 489 raise NotImplementedError( 490 f"Unknown tar flags needed for {self.package}: {url}" 491 ) 492 493 return cmd 494 495 def _cmd_cd_srcdir(self) -> str: 496 """ Formulate the command necessary to 'cd' into the source dir. """ 497 return f"cd {self.package.split('/')[-1]}*" 498 499 def _df_copycmds(self) -> str: 500 """ Formulate the dockerfile snippet necessary to COPY all depends. """ 501 502 if "depends" not in self.pkg_def: 503 return "" 504 return Package.df_copycmds_set(self.pkg_def["depends"]) 505 506 @staticmethod 507 def df_copycmds_set(pkgs: Iterable[str]) -> str: 508 """Formulate the Dockerfile snippet necessary to COPY a set of 509 packages into a Docker stage. 510 """ 511 512 copy_cmds = "" 513 514 # Sort the packages for consistency. 515 for p in sorted(pkgs): 516 tag = Package.packages[p]["__tag"] 517 copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n" 518 # Workaround for upstream docker bug and multiple COPY cmds 519 # https://github.com/moby/moby/issues/37965 520 copy_cmds += "RUN true\n" 521 522 return copy_cmds 523 524 def _df_build(self) -> str: 525 """Formulate the Dockerfile snippet necessary to download, build, and 526 install a package into a Docker stage. 527 """ 528 529 # Download and extract source. 530 result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && " 531 532 # Handle 'custom_post_dl' commands. 533 custom_post_dl = self.pkg_def.get("custom_post_dl") 534 if custom_post_dl: 535 result += " && ".join(custom_post_dl) + " && " 536 537 # Build and install package based on 'build_type'. 538 build_type = self.pkg_def["build_type"] 539 if build_type == "autoconf": 540 result += self._cmd_build_autoconf() 541 elif build_type == "cmake": 542 result += self._cmd_build_cmake() 543 elif build_type == "custom": 544 result += self._cmd_build_custom() 545 elif build_type == "make": 546 result += self._cmd_build_make() 547 elif build_type == "meson": 548 result += self._cmd_build_meson() 549 else: 550 raise NotImplementedError( 551 f"Unhandled build type for {self.package}: {build_type}" 552 ) 553 554 # Handle 'custom_post_install' commands. 555 custom_post_install = self.pkg_def.get("custom_post_install") 556 if custom_post_install: 557 result += " && " + " && ".join(custom_post_install) 558 559 return result 560 561 def _cmd_build_autoconf(self) -> str: 562 options = " ".join(self.pkg_def.get("config_flags", [])) 563 env = " ".join(self.pkg_def.get("config_env", [])) 564 result = "./bootstrap.sh && " 565 result += f"{env} ./configure {configure_flags} {options} && " 566 result += f"make -j{proc_count} && make install" 567 return result 568 569 def _cmd_build_cmake(self) -> str: 570 options = " ".join(self.pkg_def.get("config_flags", [])) 571 env = " ".join(self.pkg_def.get("config_env", [])) 572 result = "mkdir builddir && cd builddir && " 573 result += f"{env} cmake {cmake_flags} {options} .. && " 574 result += "cmake --build . --target all && " 575 result += "cmake --build . --target install && " 576 result += "cd .." 577 return result 578 579 def _cmd_build_custom(self) -> str: 580 return " && ".join(self.pkg_def.get("build_steps", [])) 581 582 def _cmd_build_make(self) -> str: 583 return f"make -j{proc_count} && make install" 584 585 def _cmd_build_meson(self) -> str: 586 options = " ".join(self.pkg_def.get("config_flags", [])) 587 env = " ".join(self.pkg_def.get("config_env", [])) 588 result = f"{env} meson builddir {meson_flags} {options} && " 589 result += "ninja -C builddir && ninja -C builddir install" 590 return result 591 592 593class Docker: 594 """Class to assist with Docker interactions. All methods are static.""" 595 596 @staticmethod 597 def timestamp() -> str: 598 """ Generate a timestamp for today using the ISO week. """ 599 today = date.today().isocalendar() 600 return f"{today[0]}-W{today[1]:02}" 601 602 @staticmethod 603 def tagname(pkgname: str, dockerfile: str) -> str: 604 """ Generate a tag name for a package using a hash of the Dockerfile. """ 605 result = docker_image_name 606 if pkgname: 607 result += "-" + pkgname 608 609 result += ":" + Docker.timestamp() 610 result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16] 611 612 return result 613 614 @staticmethod 615 def build(pkg: str, tag: str, dockerfile: str) -> None: 616 """Build a docker image using the Dockerfile and tagging it with 'tag'.""" 617 618 # If we're not forcing builds, check if it already exists and skip. 619 if not force_build: 620 if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'): 621 print(f"Image {tag} already exists. Skipping.", file=sys.stderr) 622 return 623 624 # Build it. 625 # Capture the output of the 'docker build' command and send it to 626 # stderr (prefixed with the package name). This allows us to see 627 # progress but not polute stdout. Later on we output the final 628 # docker tag to stdout and we want to keep that pristine. 629 # 630 # Other unusual flags: 631 # --no-cache: Bypass the Docker cache if 'force_build'. 632 # --force-rm: Clean up Docker processes if they fail. 633 docker.build( 634 proxy_args, 635 "--network=host", 636 "--force-rm", 637 "--no-cache=true" if force_build else "--no-cache=false", 638 "-t", 639 tag, 640 "-", 641 _in=dockerfile, 642 _out=( 643 lambda line: print( 644 pkg + ":", line, end="", file=sys.stderr, flush=True 645 ) 646 ), 647 ) 648 649 650# Read a bunch of environment variables. 651docker_image_name = os.environ.get("DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test") 652force_build = os.environ.get("FORCE_DOCKER_BUILD") 653is_automated_ci_build = os.environ.get("BUILD_URL", False) 654distro = os.environ.get("DISTRO", "ubuntu:impish") 655branch = os.environ.get("BRANCH", "master") 656ubuntu_mirror = os.environ.get("UBUNTU_MIRROR") 657http_proxy = os.environ.get("http_proxy") 658 659gerrit_project = os.environ.get("GERRIT_PROJECT") 660gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION") 661 662# Set up some common variables. 663username = os.environ.get("USER", "root") 664homedir = os.environ.get("HOME", "/root") 665gid = os.getgid() 666uid = os.getuid() 667 668# Use well-known constants if user is root 669if username == "root": 670 homedir = "/root" 671 gid = 0 672 uid = 0 673 674# Determine the architecture for Docker. 675arch = uname("-m").strip() 676if arch == "ppc64le": 677 docker_base = "ppc64le/" 678elif arch == "x86_64": 679 docker_base = "" 680elif arch == "aarch64": 681 docker_base = "arm64v8/" 682else: 683 print( 684 f"Unsupported system architecture({arch}) found for docker image", 685 file=sys.stderr, 686 ) 687 sys.exit(1) 688 689# Special flags if setting up a deb mirror. 690mirror = "" 691if "ubuntu" in distro and ubuntu_mirror: 692 mirror = f""" 693RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\ 694 echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\ 695 echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\ 696 echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\ 697 echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list 698""" 699 700# Special flags for proxying. 701proxy_cmd = "" 702proxy_keyserver = "" 703proxy_args = [] 704if http_proxy: 705 proxy_cmd = f""" 706RUN echo "[http]" >> {homedir}/.gitconfig && \ 707 echo "proxy = {http_proxy}" >> {homedir}/.gitconfig 708""" 709 proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}" 710 711 proxy_args.extend( 712 [ 713 "--build-arg", 714 f"http_proxy={http_proxy}", 715 "--build-arg", 716 f"https_proxy={http_proxy}", 717 ] 718 ) 719 720# Create base Dockerfile. 721dockerfile_base = f""" 722FROM {docker_base}{distro} 723 724{mirror} 725 726ENV DEBIAN_FRONTEND noninteractive 727 728ENV PYTHONPATH "/usr/local/lib/python3.9/site-packages/" 729 730# Sometimes the ubuntu key expires and we need a way to force an execution 731# of the apt-get commands for the dbgsym-keyring. When this happens we see 732# an error like: "Release: The following signatures were invalid:" 733# Insert a bogus echo that we can change here when we get this error to force 734# the update. 735RUN echo "ubuntu keyserver rev as of 2021-04-21" 736 737# We need the keys to be imported for dbgsym repos 738# New releases have a package, older ones fall back to manual fetching 739# https://wiki.ubuntu.com/Debug%20Symbol%20Packages 740RUN apt-get update && apt-get dist-upgrade -yy && \ 741 ( apt-get install gpgv ubuntu-dbgsym-keyring || \ 742 ( apt-get install -yy dirmngr && \ 743 apt-key adv --keyserver keyserver.ubuntu.com \ 744 {proxy_keyserver} \ 745 --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) ) 746 747# Parse the current repo list into a debug repo list 748RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list 749 750# Remove non-existent debug repos 751RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list 752 753RUN cat /etc/apt/sources.list.d/debug.list 754 755RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \ 756 gcc-11 \ 757 g++-11 \ 758 libc6-dbg \ 759 libc6-dev \ 760 libtool \ 761 bison \ 762 libdbus-1-dev \ 763 flex \ 764 cmake \ 765 python3 \ 766 python3-dev\ 767 python3-yaml \ 768 python3-mako \ 769 python3-pip \ 770 python3-setuptools \ 771 python3-git \ 772 python3-socks \ 773 pkg-config \ 774 autoconf \ 775 autoconf-archive \ 776 libsystemd-dev \ 777 systemd \ 778 libssl-dev \ 779 libevdev-dev \ 780 libjpeg-dev \ 781 libpng-dev \ 782 ninja-build \ 783 sudo \ 784 curl \ 785 git \ 786 dbus \ 787 iputils-ping \ 788 clang-13 \ 789 clang-format-13 \ 790 clang-tidy-13 \ 791 clang-tools-13 \ 792 shellcheck \ 793 npm \ 794 iproute2 \ 795 libnl-3-dev \ 796 libnl-genl-3-dev \ 797 libconfig++-dev \ 798 libsnmp-dev \ 799 valgrind \ 800 valgrind-dbg \ 801 libpam0g-dev \ 802 xxd \ 803 libi2c-dev \ 804 wget \ 805 libldap2-dev \ 806 libprotobuf-dev \ 807 liburing-dev \ 808 liburing1-dbgsym \ 809 libperlio-gzip-perl \ 810 libjson-perl \ 811 protobuf-compiler \ 812 libgpiod-dev \ 813 device-tree-compiler \ 814 cppcheck \ 815 libpciaccess-dev \ 816 libmimetic-dev \ 817 libxml2-utils \ 818 libxml-simple-perl \ 819 rsync \ 820 libcryptsetup-dev 821 822# Apply autoconf-archive-v2022.02.11 file ax_cxx_compile_stdcxx for C++20. 823RUN curl "http://git.savannah.gnu.org/gitweb/?p=autoconf-archive.git;a=blob_plain;f=m4/ax_cxx_compile_stdcxx.m4;hb=3311b6bdeff883c6a13952594a9dcb60bce6ba80" \ 824 > /usr/share/aclocal/ax_cxx_compile_stdcxx.m4 825 826RUN npm install -g eslint@latest eslint-plugin-json@latest 827 828RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 11 \ 829 --slave /usr/bin/g++ g++ /usr/bin/g++-11 \ 830 --slave /usr/bin/gcov gcov /usr/bin/gcov-11 \ 831 --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-11 \ 832 --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-11 833 834RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-13 1000 \ 835 --slave /usr/bin/clang++ clang++ /usr/bin/clang++-13 \ 836 --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-13 \ 837 --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-13 \ 838 --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-13 \ 839 --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-13 840 841""" 842 843if is_automated_ci_build: 844 dockerfile_base += f""" 845# Run an arbitrary command to polute the docker cache regularly force us 846# to re-run `apt-get update` daily. 847RUN echo {Docker.timestamp()} 848RUN apt-get update && apt-get dist-upgrade -yy 849 850""" 851 852dockerfile_base += f""" 853RUN pip3 install inflection 854RUN pip3 install pycodestyle 855RUN pip3 install jsonschema 856RUN pip3 install meson==0.61.3 857RUN pip3 install protobuf 858RUN pip3 install codespell 859RUN pip3 install requests 860""" 861 862# Build the base and stage docker images. 863docker_base_img_name = Docker.tagname("base", dockerfile_base) 864Docker.build("base", docker_base_img_name, dockerfile_base) 865Package.generate_all() 866 867# Create the final Dockerfile. 868dockerfile = f""" 869# Build the final output image 870FROM {docker_base_img_name} 871{Package.df_all_copycmds()} 872 873# Some of our infrastructure still relies on the presence of this file 874# even though it is no longer needed to rebuild the docker environment 875# NOTE: The file is sorted to ensure the ordering is stable. 876RUN echo '{Package.depcache()}' > /tmp/depcache 877 878# Final configuration for the workspace 879RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username} 880RUN mkdir -p "{os.path.dirname(homedir)}" 881RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username} 882RUN sed -i '1iDefaults umask=000' /etc/sudoers 883RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers 884 885# Ensure user has ability to write to /usr/local for different tool 886# and data installs 887RUN chown -R {username}:{username} /usr/local/share 888 889{proxy_cmd} 890 891RUN /bin/bash 892""" 893 894# Do the final docker build 895docker_final_img_name = Docker.tagname(None, dockerfile) 896Docker.build("final", docker_final_img_name, dockerfile) 897 898# Print the tag of the final image. 899print(docker_final_img_name) 900