1#!/usr/bin/env python3 2# 3# Build the required docker image to run package unit tests 4# 5# Script Variables: 6# DOCKER_IMG_NAME: <optional, the name of the docker image to generate> 7# default is openbmc/ubuntu-unit-test 8# DISTRO: <optional, the distro to build a docker image against> 9# FORCE_DOCKER_BUILD: <optional, a non-zero value with force all Docker 10# images to be rebuilt rather than reusing caches.> 11# BUILD_URL: <optional, used to detect running under CI context 12# (ex. Jenkins)> 13# BRANCH: <optional, branch to build from each of the openbmc/ 14# repositories> 15# default is master, which will be used if input branch not 16# provided or not found 17# UBUNTU_MIRROR: <optional, the URL of a mirror of Ubuntu to override the 18# default ones in /etc/apt/sources.list> 19# default is empty, and no mirror is used. 20# http_proxy The HTTP address of the proxy server to connect to. 21# Default: "", proxy is not setup if this is not set 22 23import os 24import sys 25import threading 26from datetime import date 27from hashlib import sha256 28from sh import docker, git, nproc, uname # type: ignore 29from typing import Any, Callable, Dict, Iterable, Optional 30 31try: 32 # Python before 3.8 doesn't have TypedDict, so reroute to standard 'dict'. 33 from typing import TypedDict 34except: 35 36 class TypedDict(dict): # type: ignore 37 # We need to do this to eat the 'total' argument. 38 def __init_subclass__(cls, **kwargs): 39 super().__init_subclass__() 40 41 42# Declare some variables used in package definitions. 43prefix = "/usr/local" 44proc_count = nproc().strip() 45 46 47class PackageDef(TypedDict, total=False): 48 """Package Definition for packages dictionary.""" 49 50 # rev [optional]: Revision of package to use. 51 rev: str 52 # url [optional]: lambda function to create URL: (package, rev) -> url. 53 url: Callable[[str, str], str] 54 # depends [optional]: List of package dependencies. 55 depends: Iterable[str] 56 # build_type [required]: Build type used for package. 57 # Currently supported: autoconf, cmake, custom, make, meson 58 build_type: str 59 # build_steps [optional]: Steps to run for 'custom' build_type. 60 build_steps: Iterable[str] 61 # config_flags [optional]: List of options to pass configuration tool. 62 config_flags: Iterable[str] 63 # config_env [optional]: List of environment variables to set for config. 64 config_env: Iterable[str] 65 # custom_post_dl [optional]: List of steps to run after download, but 66 # before config / build / install. 67 custom_post_dl: Iterable[str] 68 # custom_post_install [optional]: List of steps to run after install. 69 custom_post_install: Iterable[str] 70 71 # __tag [private]: Generated Docker tag name for package stage. 72 __tag: str 73 # __package [private]: Package object associated with this package. 74 __package: Any # Type is Package, but not defined yet. 75 76 77# Packages to include in image. 78packages = { 79 "boost": PackageDef( 80 rev="1.80.0", 81 url=( 82 lambda pkg, rev: f"https://downloads.yoctoproject.org/mirror/sources/{pkg}_{rev.replace('.', '_')}.tar.bz2" 83 ), 84 build_type="custom", 85 build_steps=[ 86 f"./bootstrap.sh --prefix={prefix} --with-libraries=context,coroutine", 87 "./b2", 88 f"./b2 install --prefix={prefix}", 89 ], 90 ), 91 "USCiLab/cereal": PackageDef( 92 rev="v1.3.2", 93 build_type="custom", 94 build_steps=[f"cp -a include/cereal/ {prefix}/include/"], 95 ), 96 "danmar/cppcheck": PackageDef( 97 rev="2.9", 98 build_type="cmake", 99 ), 100 "CLIUtils/CLI11": PackageDef( 101 rev="v1.9.1", 102 build_type="cmake", 103 config_flags=[ 104 "-DBUILD_TESTING=OFF", 105 "-DCLI11_BUILD_DOCS=OFF", 106 "-DCLI11_BUILD_EXAMPLES=OFF", 107 ], 108 ), 109 "fmtlib/fmt": PackageDef( 110 rev="9.1.0", 111 build_type="cmake", 112 config_flags=[ 113 "-DFMT_DOC=OFF", 114 "-DFMT_TEST=OFF", 115 ], 116 ), 117 "Naios/function2": PackageDef( 118 rev="4.2.1", 119 build_type="custom", 120 build_steps=[ 121 f"mkdir {prefix}/include/function2", 122 f"cp include/function2/function2.hpp {prefix}/include/function2/", 123 ], 124 ), 125 # release-1.12.1 126 "google/googletest": PackageDef( 127 rev="58d77fa8070e8cec2dc1ed015d66b454c8d78850", 128 build_type="cmake", 129 config_env=["CXXFLAGS=-std=c++20"], 130 config_flags=["-DTHREADS_PREFER_PTHREAD_FLAG=ON"], 131 ), 132 "nlohmann/json": PackageDef( 133 rev="v3.11.2", 134 build_type="cmake", 135 config_flags=["-DJSON_BuildTests=OFF"], 136 custom_post_install=[ 137 f"ln -s {prefix}/include/nlohmann/json.hpp {prefix}/include/json.hpp", 138 ], 139 ), 140 # Snapshot from 2019-05-24 141 "linux-test-project/lcov": PackageDef( 142 rev="v1.15", 143 build_type="make", 144 ), 145 # dev-5.15 2022-09-27 146 "openbmc/linux": PackageDef( 147 rev="c9fb275212dac5b300311f6f6b1dcc5ed18a3493", 148 build_type="custom", 149 build_steps=[ 150 f"make -j{proc_count} defconfig", 151 f"make INSTALL_HDR_PATH={prefix} headers_install", 152 ], 153 ), 154 "LibVNC/libvncserver": PackageDef( 155 rev="LibVNCServer-0.9.13", 156 build_type="cmake", 157 ), 158 "leethomason/tinyxml2": PackageDef( 159 rev="9.0.0", 160 build_type="cmake", 161 ), 162 # version from /meta-openembedded/meta-oe/recipes-devtools/boost-url/boost-url_git.bb 163 "CPPAlliance/url": PackageDef( 164 rev="d740a92d38e3a8f4d5b2153f53b82f1c98e312ab", 165 build_type="custom", 166 build_steps=[f"cp -a include/** {prefix}/include/"], 167 ), 168 "tristanpenman/valijson": PackageDef( 169 rev="v0.7", 170 build_type="cmake", 171 config_flags=[ 172 "-Dvalijson_BUILD_TESTS=0", 173 "-Dvalijson_INSTALL_HEADERS=1", 174 ], 175 ), 176 "open-power/pdbg": PackageDef(build_type="autoconf"), 177 "openbmc/gpioplus": PackageDef( 178 depends=["openbmc/stdplus"], 179 build_type="meson", 180 config_flags=[ 181 "-Dexamples=false", 182 "-Dtests=disabled", 183 ], 184 ), 185 "openbmc/phosphor-dbus-interfaces": PackageDef( 186 depends=["openbmc/sdbusplus"], 187 build_type="meson", 188 config_flags=["-Dgenerate_md=false"], 189 ), 190 "openbmc/phosphor-logging": PackageDef( 191 depends=[ 192 "USCiLab/cereal", 193 "openbmc/phosphor-dbus-interfaces", 194 "openbmc/sdbusplus", 195 "openbmc/sdeventplus", 196 ], 197 build_type="meson", 198 config_flags=[ 199 "-Dlibonly=true", 200 "-Dtests=disabled", 201 f"-Dyamldir={prefix}/share/phosphor-dbus-yaml/yaml", 202 ], 203 ), 204 "openbmc/phosphor-objmgr": PackageDef( 205 depends=[ 206 "CLIUtils/CLI11", 207 "boost", 208 "leethomason/tinyxml2", 209 "openbmc/phosphor-dbus-interfaces", 210 "openbmc/phosphor-logging", 211 "openbmc/sdbusplus", 212 ], 213 build_type="meson", 214 config_flags=[ 215 "-Dtests=disabled", 216 ], 217 ), 218 "openbmc/libpldm": PackageDef( 219 build_type="meson", 220 config_flags=[ 221 "-Doem-ibm=enabled", 222 "-Dtests=disabled", 223 ], 224 ), 225 "openbmc/sdbusplus": PackageDef( 226 build_type="meson", 227 custom_post_dl=[ 228 "cd tools", 229 f"./setup.py install --root=/ --prefix={prefix}", 230 "cd ..", 231 ], 232 config_flags=[ 233 "-Dexamples=disabled", 234 "-Dtests=disabled", 235 ], 236 ), 237 "openbmc/sdeventplus": PackageDef( 238 depends=[ 239 "Naios/function2", 240 "openbmc/stdplus", 241 ], 242 build_type="meson", 243 config_flags=[ 244 "-Dexamples=false", 245 "-Dtests=disabled", 246 ], 247 ), 248 "openbmc/stdplus": PackageDef( 249 depends=[ 250 "fmtlib/fmt", 251 "google/googletest", 252 "Naios/function2", 253 ], 254 build_type="meson", 255 config_flags=[ 256 "-Dexamples=false", 257 "-Dtests=disabled", 258 "-Dgtest=enabled", 259 ], 260 ), 261} # type: Dict[str, PackageDef] 262 263# Define common flags used for builds 264configure_flags = " ".join( 265 [ 266 f"--prefix={prefix}", 267 ] 268) 269cmake_flags = " ".join( 270 [ 271 "-DBUILD_SHARED_LIBS=ON", 272 "-DCMAKE_BUILD_TYPE=RelWithDebInfo", 273 f"-DCMAKE_INSTALL_PREFIX:PATH={prefix}", 274 "-GNinja", 275 "-DCMAKE_MAKE_PROGRAM=ninja", 276 ] 277) 278meson_flags = " ".join( 279 [ 280 "--wrap-mode=nodownload", 281 f"-Dprefix={prefix}", 282 ] 283) 284 285 286class Package(threading.Thread): 287 """Class used to build the Docker stages for each package. 288 289 Generally, this class should not be instantiated directly but through 290 Package.generate_all(). 291 """ 292 293 # Copy the packages dictionary. 294 packages = packages.copy() 295 296 # Lock used for thread-safety. 297 lock = threading.Lock() 298 299 def __init__(self, pkg: str): 300 """pkg - The name of this package (ex. foo/bar )""" 301 super(Package, self).__init__() 302 303 self.package = pkg 304 self.exception = None # type: Optional[Exception] 305 306 # Reference to this package's 307 self.pkg_def = Package.packages[pkg] 308 self.pkg_def["__package"] = self 309 310 def run(self) -> None: 311 """Thread 'run' function. Builds the Docker stage.""" 312 313 # In case this package has no rev, fetch it from Github. 314 self._update_rev() 315 316 # Find all the Package objects that this package depends on. 317 # This section is locked because we are looking into another 318 # package's PackageDef dict, which could be being modified. 319 Package.lock.acquire() 320 deps: Iterable[Package] = [ 321 Package.packages[deppkg]["__package"] 322 for deppkg in self.pkg_def.get("depends", []) 323 ] 324 Package.lock.release() 325 326 # Wait until all the depends finish building. We need them complete 327 # for the "COPY" commands. 328 for deppkg in deps: 329 deppkg.join() 330 331 # Generate this package's Dockerfile. 332 dockerfile = f""" 333FROM {docker_base_img_name} 334{self._df_copycmds()} 335{self._df_build()} 336""" 337 338 # Generate the resulting tag name and save it to the PackageDef. 339 # This section is locked because we are modifying the PackageDef, 340 # which can be accessed by other threads. 341 Package.lock.acquire() 342 tag = Docker.tagname(self._stagename(), dockerfile) 343 self.pkg_def["__tag"] = tag 344 Package.lock.release() 345 346 # Do the build / save any exceptions. 347 try: 348 Docker.build(self.package, tag, dockerfile) 349 except Exception as e: 350 self.exception = e 351 352 @classmethod 353 def generate_all(cls) -> None: 354 """Ensure a Docker stage is created for all defined packages. 355 356 These are done in parallel but with appropriate blocking per 357 package 'depends' specifications. 358 """ 359 360 # Create a Package for each defined package. 361 pkg_threads = [Package(p) for p in cls.packages.keys()] 362 363 # Start building them all. 364 # This section is locked because threads depend on each other, 365 # based on the packages, and they cannot 'join' on a thread 366 # which is not yet started. Adding a lock here allows all the 367 # threads to start before they 'join' their dependencies. 368 Package.lock.acquire() 369 for t in pkg_threads: 370 t.start() 371 Package.lock.release() 372 373 # Wait for completion. 374 for t in pkg_threads: 375 t.join() 376 # Check if the thread saved off its own exception. 377 if t.exception: 378 print(f"Package {t.package} failed!", file=sys.stderr) 379 raise t.exception 380 381 @staticmethod 382 def df_all_copycmds() -> str: 383 """Formulate the Dockerfile snippet necessary to copy all packages 384 into the final image. 385 """ 386 return Package.df_copycmds_set(Package.packages.keys()) 387 388 @classmethod 389 def depcache(cls) -> str: 390 """Create the contents of the '/tmp/depcache'. 391 This file is a comma-separated list of "<pkg>:<rev>". 392 """ 393 394 # This needs to be sorted for consistency. 395 depcache = "" 396 for pkg in sorted(cls.packages.keys()): 397 depcache += "%s:%s," % (pkg, cls.packages[pkg]["rev"]) 398 return depcache 399 400 def _update_rev(self) -> None: 401 """Look up the HEAD for missing a static rev.""" 402 403 if "rev" in self.pkg_def: 404 return 405 406 # Check if Jenkins/Gerrit gave us a revision and use it. 407 if gerrit_project == self.package and gerrit_rev: 408 print( 409 f"Found Gerrit revision for {self.package}: {gerrit_rev}", 410 file=sys.stderr, 411 ) 412 self.pkg_def["rev"] = gerrit_rev 413 return 414 415 # Ask Github for all the branches. 416 lookup = git( 417 "ls-remote", "--heads", f"https://github.com/{self.package}" 418 ) 419 420 # Find the branch matching {branch} (or fallback to master). 421 # This section is locked because we are modifying the PackageDef. 422 Package.lock.acquire() 423 for line in lookup.split("\n"): 424 if f"refs/heads/{branch}" in line: 425 self.pkg_def["rev"] = line.split()[0] 426 elif ( 427 "refs/heads/master" in line or "refs/heads/main" in line 428 ) and "rev" not in self.pkg_def: 429 self.pkg_def["rev"] = line.split()[0] 430 Package.lock.release() 431 432 def _stagename(self) -> str: 433 """Create a name for the Docker stage associated with this pkg.""" 434 return self.package.replace("/", "-").lower() 435 436 def _url(self) -> str: 437 """Get the URL for this package.""" 438 rev = self.pkg_def["rev"] 439 440 # If the lambda exists, call it. 441 if "url" in self.pkg_def: 442 return self.pkg_def["url"](self.package, rev) 443 444 # Default to the github archive URL. 445 return f"https://github.com/{self.package}/archive/{rev}.tar.gz" 446 447 def _cmd_download(self) -> str: 448 """Formulate the command necessary to download and unpack to source.""" 449 450 url = self._url() 451 if ".tar." not in url: 452 raise NotImplementedError( 453 f"Unhandled download type for {self.package}: {url}" 454 ) 455 456 cmd = f"curl -L {url} | tar -x" 457 458 if url.endswith(".bz2"): 459 cmd += "j" 460 elif url.endswith(".gz"): 461 cmd += "z" 462 else: 463 raise NotImplementedError( 464 f"Unknown tar flags needed for {self.package}: {url}" 465 ) 466 467 return cmd 468 469 def _cmd_cd_srcdir(self) -> str: 470 """Formulate the command necessary to 'cd' into the source dir.""" 471 return f"cd {self.package.split('/')[-1]}*" 472 473 def _df_copycmds(self) -> str: 474 """Formulate the dockerfile snippet necessary to COPY all depends.""" 475 476 if "depends" not in self.pkg_def: 477 return "" 478 return Package.df_copycmds_set(self.pkg_def["depends"]) 479 480 @staticmethod 481 def df_copycmds_set(pkgs: Iterable[str]) -> str: 482 """Formulate the Dockerfile snippet necessary to COPY a set of 483 packages into a Docker stage. 484 """ 485 486 copy_cmds = "" 487 488 # Sort the packages for consistency. 489 for p in sorted(pkgs): 490 tag = Package.packages[p]["__tag"] 491 copy_cmds += f"COPY --from={tag} {prefix} {prefix}\n" 492 # Workaround for upstream docker bug and multiple COPY cmds 493 # https://github.com/moby/moby/issues/37965 494 copy_cmds += "RUN true\n" 495 496 return copy_cmds 497 498 def _df_build(self) -> str: 499 """Formulate the Dockerfile snippet necessary to download, build, and 500 install a package into a Docker stage. 501 """ 502 503 # Download and extract source. 504 result = f"RUN {self._cmd_download()} && {self._cmd_cd_srcdir()} && " 505 506 # Handle 'custom_post_dl' commands. 507 custom_post_dl = self.pkg_def.get("custom_post_dl") 508 if custom_post_dl: 509 result += " && ".join(custom_post_dl) + " && " 510 511 # Build and install package based on 'build_type'. 512 build_type = self.pkg_def["build_type"] 513 if build_type == "autoconf": 514 result += self._cmd_build_autoconf() 515 elif build_type == "cmake": 516 result += self._cmd_build_cmake() 517 elif build_type == "custom": 518 result += self._cmd_build_custom() 519 elif build_type == "make": 520 result += self._cmd_build_make() 521 elif build_type == "meson": 522 result += self._cmd_build_meson() 523 else: 524 raise NotImplementedError( 525 f"Unhandled build type for {self.package}: {build_type}" 526 ) 527 528 # Handle 'custom_post_install' commands. 529 custom_post_install = self.pkg_def.get("custom_post_install") 530 if custom_post_install: 531 result += " && " + " && ".join(custom_post_install) 532 533 return result 534 535 def _cmd_build_autoconf(self) -> str: 536 options = " ".join(self.pkg_def.get("config_flags", [])) 537 env = " ".join(self.pkg_def.get("config_env", [])) 538 result = "./bootstrap.sh && " 539 result += f"{env} ./configure {configure_flags} {options} && " 540 result += f"make -j{proc_count} && make install" 541 return result 542 543 def _cmd_build_cmake(self) -> str: 544 options = " ".join(self.pkg_def.get("config_flags", [])) 545 env = " ".join(self.pkg_def.get("config_env", [])) 546 result = "mkdir builddir && cd builddir && " 547 result += f"{env} cmake {cmake_flags} {options} .. && " 548 result += "cmake --build . --target all && " 549 result += "cmake --build . --target install && " 550 result += "cd .." 551 return result 552 553 def _cmd_build_custom(self) -> str: 554 return " && ".join(self.pkg_def.get("build_steps", [])) 555 556 def _cmd_build_make(self) -> str: 557 return f"make -j{proc_count} && make install" 558 559 def _cmd_build_meson(self) -> str: 560 options = " ".join(self.pkg_def.get("config_flags", [])) 561 env = " ".join(self.pkg_def.get("config_env", [])) 562 result = f"{env} meson builddir {meson_flags} {options} && " 563 result += "ninja -C builddir && ninja -C builddir install" 564 return result 565 566 567class Docker: 568 """Class to assist with Docker interactions. All methods are static.""" 569 570 @staticmethod 571 def timestamp() -> str: 572 """Generate a timestamp for today using the ISO week.""" 573 today = date.today().isocalendar() 574 return f"{today[0]}-W{today[1]:02}" 575 576 @staticmethod 577 def tagname(pkgname: str, dockerfile: str) -> str: 578 """Generate a tag name for a package using a hash of the Dockerfile.""" 579 result = docker_image_name 580 if pkgname: 581 result += "-" + pkgname 582 583 result += ":" + Docker.timestamp() 584 result += "-" + sha256(dockerfile.encode()).hexdigest()[0:16] 585 586 return result 587 588 @staticmethod 589 def build(pkg: str, tag: str, dockerfile: str) -> None: 590 """Build a docker image using the Dockerfile and tagging it with 'tag'.""" 591 592 # If we're not forcing builds, check if it already exists and skip. 593 if not force_build: 594 if docker.image.ls(tag, "--format", '"{{.Repository}}:{{.Tag}}"'): 595 print( 596 f"Image {tag} already exists. Skipping.", file=sys.stderr 597 ) 598 return 599 600 # Build it. 601 # Capture the output of the 'docker build' command and send it to 602 # stderr (prefixed with the package name). This allows us to see 603 # progress but not polute stdout. Later on we output the final 604 # docker tag to stdout and we want to keep that pristine. 605 # 606 # Other unusual flags: 607 # --no-cache: Bypass the Docker cache if 'force_build'. 608 # --force-rm: Clean up Docker processes if they fail. 609 docker.build( 610 proxy_args, 611 "--network=host", 612 "--force-rm", 613 "--no-cache=true" if force_build else "--no-cache=false", 614 "-t", 615 tag, 616 "-", 617 _in=dockerfile, 618 _out=( 619 lambda line: print( 620 pkg + ":", line, end="", file=sys.stderr, flush=True 621 ) 622 ), 623 ) 624 625 626# Read a bunch of environment variables. 627docker_image_name = os.environ.get( 628 "DOCKER_IMAGE_NAME", "openbmc/ubuntu-unit-test" 629) 630force_build = os.environ.get("FORCE_DOCKER_BUILD") 631is_automated_ci_build = os.environ.get("BUILD_URL", False) 632distro = os.environ.get("DISTRO", "ubuntu:kinetic") 633branch = os.environ.get("BRANCH", "master") 634ubuntu_mirror = os.environ.get("UBUNTU_MIRROR") 635http_proxy = os.environ.get("http_proxy") 636 637gerrit_project = os.environ.get("GERRIT_PROJECT") 638gerrit_rev = os.environ.get("GERRIT_PATCHSET_REVISION") 639 640# Set up some common variables. 641username = os.environ.get("USER", "root") 642homedir = os.environ.get("HOME", "/root") 643gid = os.getgid() 644uid = os.getuid() 645 646# Use well-known constants if user is root 647if username == "root": 648 homedir = "/root" 649 gid = 0 650 uid = 0 651 652# Determine the architecture for Docker. 653arch = uname("-m").strip() 654if arch == "ppc64le": 655 docker_base = "ppc64le/" 656elif arch == "x86_64": 657 docker_base = "" 658elif arch == "aarch64": 659 docker_base = "arm64v8/" 660else: 661 print( 662 f"Unsupported system architecture({arch}) found for docker image", 663 file=sys.stderr, 664 ) 665 sys.exit(1) 666 667# Special flags if setting up a deb mirror. 668mirror = "" 669if "ubuntu" in distro and ubuntu_mirror: 670 mirror = f""" 671RUN echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME) main restricted universe multiverse" > /etc/apt/sources.list && \\ 672 echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-updates main restricted universe multiverse" >> /etc/apt/sources.list && \\ 673 echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-security main restricted universe multiverse" >> /etc/apt/sources.list && \\ 674 echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-proposed main restricted universe multiverse" >> /etc/apt/sources.list && \\ 675 echo "deb {ubuntu_mirror} $(. /etc/os-release && echo $VERSION_CODENAME)-backports main restricted universe multiverse" >> /etc/apt/sources.list 676""" 677 678# Special flags for proxying. 679proxy_cmd = "" 680proxy_keyserver = "" 681proxy_args = [] 682if http_proxy: 683 proxy_cmd = f""" 684RUN echo "[http]" >> {homedir}/.gitconfig && \ 685 echo "proxy = {http_proxy}" >> {homedir}/.gitconfig 686""" 687 proxy_keyserver = f"--keyserver-options http-proxy={http_proxy}" 688 689 proxy_args.extend( 690 [ 691 "--build-arg", 692 f"http_proxy={http_proxy}", 693 "--build-arg", 694 f"https_proxy={http_proxy}", 695 ] 696 ) 697 698# Create base Dockerfile. 699dockerfile_base = f""" 700FROM {docker_base}{distro} 701 702{mirror} 703 704ENV DEBIAN_FRONTEND noninteractive 705 706ENV PYTHONPATH "/usr/local/lib/python3.10/site-packages/" 707 708# Sometimes the ubuntu key expires and we need a way to force an execution 709# of the apt-get commands for the dbgsym-keyring. When this happens we see 710# an error like: "Release: The following signatures were invalid:" 711# Insert a bogus echo that we can change here when we get this error to force 712# the update. 713RUN echo "ubuntu keyserver rev as of 2021-04-21" 714 715# We need the keys to be imported for dbgsym repos 716# New releases have a package, older ones fall back to manual fetching 717# https://wiki.ubuntu.com/Debug%20Symbol%20Packages 718RUN apt-get update && apt-get dist-upgrade -yy && \ 719 ( apt-get install gpgv ubuntu-dbgsym-keyring || \ 720 ( apt-get install -yy dirmngr && \ 721 apt-key adv --keyserver keyserver.ubuntu.com \ 722 {proxy_keyserver} \ 723 --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 ) ) 724 725# Parse the current repo list into a debug repo list 726RUN sed -n '/^deb /s,^deb [^ ]* ,deb http://ddebs.ubuntu.com ,p' /etc/apt/sources.list >/etc/apt/sources.list.d/debug.list 727 728# Remove non-existent debug repos 729RUN sed -i '/-\(backports\|security\) /d' /etc/apt/sources.list.d/debug.list 730 731RUN cat /etc/apt/sources.list.d/debug.list 732 733RUN apt-get update && apt-get dist-upgrade -yy && apt-get install -yy \ 734 gcc-12 \ 735 g++-12 \ 736 libc6-dbg \ 737 libc6-dev \ 738 libtool \ 739 bison \ 740 libdbus-1-dev \ 741 flex \ 742 cmake \ 743 python3 \ 744 python3-dev\ 745 python3-yaml \ 746 python3-mako \ 747 python3-pip \ 748 python3-setuptools \ 749 python3-git \ 750 python3-socks \ 751 pkg-config \ 752 autoconf \ 753 autoconf-archive \ 754 libsystemd-dev \ 755 systemd \ 756 libssl-dev \ 757 libevdev-dev \ 758 libjpeg-dev \ 759 libpng-dev \ 760 ninja-build \ 761 sudo \ 762 curl \ 763 git \ 764 dbus \ 765 iputils-ping \ 766 clang-15 \ 767 clang-format-15 \ 768 clang-tidy-15 \ 769 clang-tools-15 \ 770 shellcheck \ 771 npm \ 772 iproute2 \ 773 libnl-3-dev \ 774 libnl-genl-3-dev \ 775 libconfig++-dev \ 776 libsnmp-dev \ 777 valgrind \ 778 valgrind-dbg \ 779 libpam0g-dev \ 780 xxd \ 781 libi2c-dev \ 782 wget \ 783 libldap2-dev \ 784 libprotobuf-dev \ 785 liburing-dev \ 786 liburing2-dbgsym \ 787 libperlio-gzip-perl \ 788 libjson-perl \ 789 protobuf-compiler \ 790 libgpiod-dev \ 791 device-tree-compiler \ 792 libpciaccess-dev \ 793 libmimetic-dev \ 794 libxml2-utils \ 795 libxml-simple-perl \ 796 rsync \ 797 libcryptsetup-dev 798 799RUN npm install -g eslint@latest eslint-plugin-json@latest 800 801# Kinetic comes with GCC-12, so skip this. 802#RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 \ 803# --slave /usr/bin/g++ g++ /usr/bin/g++-12 \ 804# --slave /usr/bin/gcov gcov /usr/bin/gcov-12 \ 805# --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 \ 806# --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12 807#RUN update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-12 12 808 809RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 1000 \ 810 --slave /usr/bin/clang++ clang++ /usr/bin/clang++-15 \ 811 --slave /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 \ 812 --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-15 \ 813 --slave /usr/bin/run-clang-tidy run-clang-tidy.py /usr/bin/run-clang-tidy-15 \ 814 --slave /usr/bin/scan-build scan-build /usr/bin/scan-build-15 815 816""" 817 818if is_automated_ci_build: 819 dockerfile_base += f""" 820# Run an arbitrary command to polute the docker cache regularly force us 821# to re-run `apt-get update` daily. 822RUN echo {Docker.timestamp()} 823RUN apt-get update && apt-get dist-upgrade -yy 824 825""" 826 827dockerfile_base += f""" 828RUN pip3 install inflection 829RUN pip3 install pycodestyle 830RUN pip3 install jsonschema 831RUN pip3 install meson==0.63.0 832RUN pip3 install packaging 833RUN pip3 install protobuf 834RUN pip3 install codespell 835RUN pip3 install requests 836""" 837 838# Note, we use sha1s here because the newest gitlint release doesn't include 839# some features we need. Next time they release, we can rely on a direct 840# release tag 841dockerfile_base += f""" 842RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68\#subdirectory=gitlint-core 843RUN pip3 install git+https://github.com/jorisroovers/gitlint.git@8ede310d62d5794efa7518b235f899f8a8ad6a68 844""" 845 846# Build the base and stage docker images. 847docker_base_img_name = Docker.tagname("base", dockerfile_base) 848Docker.build("base", docker_base_img_name, dockerfile_base) 849Package.generate_all() 850 851# Create the final Dockerfile. 852dockerfile = f""" 853# Build the final output image 854FROM {docker_base_img_name} 855{Package.df_all_copycmds()} 856 857# Some of our infrastructure still relies on the presence of this file 858# even though it is no longer needed to rebuild the docker environment 859# NOTE: The file is sorted to ensure the ordering is stable. 860RUN echo '{Package.depcache()}' > /tmp/depcache 861 862# Final configuration for the workspace 863RUN grep -q {gid} /etc/group || groupadd -f -g {gid} {username} 864RUN mkdir -p "{os.path.dirname(homedir)}" 865RUN grep -q {uid} /etc/passwd || useradd -d {homedir} -m -u {uid} -g {gid} {username} 866RUN sed -i '1iDefaults umask=000' /etc/sudoers 867RUN echo "{username} ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers 868 869# Ensure user has ability to write to /usr/local for different tool 870# and data installs 871RUN chown -R {username}:{username} /usr/local/share 872 873{proxy_cmd} 874 875RUN /bin/bash 876""" 877 878# Do the final docker build 879docker_final_img_name = Docker.tagname(None, dockerfile) 880Docker.build("final", docker_final_img_name, dockerfile) 881 882# Print the tag of the final image. 883print(docker_final_img_name) 884