# Copyright (c) 2014 Google, Inc
#
# SPDX-License-Identifier: GPL-2.0+
#

import errno
import glob
import os
import shutil
import threading

import command
import gitutil

def Mkdir(dirname, parents = False):
    """Make a directory if it doesn't already exist.

    Args:
        dirname: Directory to create
        parents: True to also create any missing parent directories
    """
    try:
        if parents:
            os.makedirs(dirname)
        else:
            os.mkdir(dirname)
    except OSError as err:
        if err.errno == errno.EEXIST:
            pass
        else:
            raise

class BuilderJob:
    """Holds information about a job to be performed by a thread

    Members:
        board: Board object to build
        commits: List of commit options to build.
    """
    def __init__(self):
        self.board = None
        self.commits = []


class ResultThread(threading.Thread):
    """This thread processes results from builder threads.

    It simply passes the results on to the builder. There is only one
    result thread, and this helps to serialise the build output.
    """
    def __init__(self, builder):
        """Set up a new result thread

        Args:
            builder: Builder which will be sent each result
        """
        threading.Thread.__init__(self)
        self.builder = builder

    def run(self):
        """Called to start up the result thread.

        We collect the next result job and pass it on to the builder.
        """
        while True:
            result = self.builder.out_queue.get()
            self.builder.ProcessResult(result)
            self.builder.out_queue.task_done()


class BuilderThread(threading.Thread):
    """This thread builds U-Boot for a particular board.

    An input queue provides each new job. We run 'make' to build U-Boot
    and then pass the results on to the output queue.

    Members:
        builder: The builder which contains information we might need
        thread_num: Our thread number (0-n-1), used to decide on a
                temporary directory
    """
    def __init__(self, builder, thread_num):
        """Set up a new builder thread"""
        threading.Thread.__init__(self)
        self.builder = builder
        self.thread_num = thread_num

    def Make(self, commit, brd, stage, cwd, *args, **kwargs):
        """Run 'make' on a particular commit and board.

        The source code will already be checked out, so the 'commit'
        argument is only for information.

        Args:
            commit: Commit object that is being built
            brd: Board object that is being built
            stage: Stage of the build. Valid stages are:
                        mrproper - can be called to clean source
                        config - called to configure for a board
                        build - the main make invocation - it does the build
            cwd: Directory in which to run 'make'
            args: A list of arguments to pass to 'make'
            kwargs: Keyword arguments to pass to command.RunPipe()

        Returns:
            CommandResult object
        """
        return self.builder.do_make(commit, brd, stage, cwd, *args,
                **kwargs)
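
    # Note: the three stages listed above are driven from RunCommit() below.
    # Purely as an illustration (the exact options depend on the builder's
    # settings, GetMakeArguments() and the selected toolchain environment),
    # they correspond roughly to invocations like:
    #
    #   make O=build -s -j <num_jobs> mrproper
    #   make O=build -s -j <num_jobs> <board>_defconfig
    #   make O=build -s -j <num_jobs>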

    def RunCommit(self, commit_upto, brd, work_dir, do_config, force_build,
                  force_build_failures):
        """Build a particular commit.

        If the build is already done, and we are not forcing a build, we skip
        the build and just return the previously-saved results.

        Args:
            commit_upto: Commit number to build (0...n-1)
            brd: Board object to build
            work_dir: Directory to which the source will be checked out
            do_config: True to run a make <board>_defconfig on the source
            force_build: Force a build even if one was previously done
            force_build_failures: Force a build if the previous result showed
                failure

        Returns:
            tuple containing:
                - CommandResult object containing the results of the build
                - boolean indicating whether 'make config' is still needed
        """
        # Create a default result - it will be overwritten by the call to
        # self.Make() below, in the event that we do a build.
        result = command.CommandResult()
        result.return_code = 0
        if self.builder.in_tree:
            out_dir = work_dir
        else:
            out_dir = os.path.join(work_dir, 'build')

        # Check if the job was already completed last time
        done_file = self.builder.GetDoneFile(commit_upto, brd.target)
        result.already_done = os.path.exists(done_file)
        will_build = (force_build or force_build_failures or
                not result.already_done)
        if result.already_done:
            # Get the return code from that build and use it
            with open(done_file, 'r') as fd:
                result.return_code = int(fd.readline())
            if will_build:
                err_file = self.builder.GetErrFile(commit_upto, brd.target)
                if os.path.exists(err_file) and os.stat(err_file).st_size:
                    result.stderr = 'bad'
                elif not force_build:
                    # The build passed, so no need to build it again
                    will_build = False

        if will_build:
            # We are going to have to build it. First, get a toolchain
            if not self.toolchain:
                try:
                    self.toolchain = self.builder.toolchains.Select(brd.arch)
                except ValueError as err:
                    result.return_code = 10
                    result.stdout = ''
                    result.stderr = str(err)
                    # TODO(sjg@chromium.org): This gets swallowed, but needs
                    # to be reported.

            if self.toolchain:
                # Checkout the right commit
                if self.builder.commits:
                    commit = self.builder.commits[commit_upto]
                    if self.builder.checkout:
                        git_dir = os.path.join(work_dir, '.git')
                        gitutil.Checkout(commit.hash, git_dir, work_dir,
                                         force=True)
                else:
                    commit = 'current'

                # Set up the environment and command line
                env = self.toolchain.MakeEnvironment()
                Mkdir(out_dir)
                args = []
                cwd = work_dir
                src_dir = os.path.realpath(work_dir)
                if not self.builder.in_tree:
                    if commit_upto is None:
                        # In this case we are building in the original source
                        # directory (i.e. the current directory where buildman
                        # is invoked). The output directory is set to this
                        # thread's selected work directory.
                        #
                        # Symlinks can confuse U-Boot's Makefile since
                        # we may use '..' in our path, so remove them.
                        work_dir = os.path.realpath(work_dir)
                        args.append('O=%s/build' % work_dir)
                        cwd = None
                        src_dir = os.getcwd()
                    else:
                        args.append('O=build')
                args.append('-s')
                if self.builder.num_jobs is not None:
                    args.extend(['-j', str(self.builder.num_jobs)])
                config_args = ['%s_defconfig' % brd.target]
                config_out = ''
                args.extend(self.builder.toolchains.GetMakeArguments(brd))

                # If we need to reconfigure, do that now
                if do_config:
                    result = self.Make(commit, brd, 'mrproper', cwd,
                            'mrproper', *args, env=env)
                    result = self.Make(commit, brd, 'config', cwd,
                            *(args + config_args), env=env)
                    config_out = result.combined
                    do_config = False   # No need to configure next time
                if result.return_code == 0:
                    result = self.Make(commit, brd, 'build', cwd, *args,
                            env=env)
                    result.stderr = result.stderr.replace(src_dir + '/', '')
            else:
                result.return_code = 1
                result.stderr = 'No tool chain for %s\n' % brd.arch
            result.already_done = False

        result.toolchain = self.toolchain
        result.brd = brd
        result.commit_upto = commit_upto
        result.out_dir = out_dir
        return result, do_config

    def _WriteResult(self, result, keep_outputs):
        """Write a built result to the output directory.

        Args:
            result: CommandResult object containing result to write
            keep_outputs: True to store the output binaries, False
                to delete them
        """
        # Fatal error
        if result.return_code < 0:
            return

        # Aborted?
        if result.stderr and 'No child processes' in result.stderr:
            return

        if result.already_done:
            return

        # Write the output and stderr
        output_dir = self.builder._GetOutputDir(result.commit_upto)
        Mkdir(output_dir)
        build_dir = self.builder.GetBuildDir(result.commit_upto,
                                             result.brd.target)
        Mkdir(build_dir)

        outfile = os.path.join(build_dir, 'log')
        with open(outfile, 'w') as fd:
            if result.stdout:
                fd.write(result.stdout)

        errfile = self.builder.GetErrFile(result.commit_upto,
                                          result.brd.target)
        if result.stderr:
            with open(errfile, 'w') as fd:
                fd.write(result.stderr)
        elif os.path.exists(errfile):
            os.remove(errfile)

        if result.toolchain:
            # Write the build result and toolchain information.
            done_file = self.builder.GetDoneFile(result.commit_upto,
                                                 result.brd.target)
            with open(done_file, 'w') as fd:
                fd.write('%s' % result.return_code)
            with open(os.path.join(build_dir, 'toolchain'), 'w') as fd:
                print >>fd, 'gcc', result.toolchain.gcc
                print >>fd, 'path', result.toolchain.path
                print >>fd, 'cross', result.toolchain.cross
                print >>fd, 'arch', result.toolchain.arch

            # Write out the image and function size information and an objdump
            env = result.toolchain.MakeEnvironment()
            lines = []
            for fname in ['u-boot', 'spl/u-boot-spl']:
                cmd = ['%snm' % self.toolchain.cross, '--size-sort', fname]
                nm_result = command.RunPipe([cmd], capture=True,
                        capture_stderr=True, cwd=result.out_dir,
                        raise_on_error=False, env=env)
                if nm_result.stdout:
                    nm = self.builder.GetFuncSizesFile(result.commit_upto,
                                    result.brd.target, fname)
                    with open(nm, 'w') as fd:
                        print >>fd, nm_result.stdout,

                cmd = ['%sobjdump' % self.toolchain.cross, '-h', fname]
                dump_result = command.RunPipe([cmd], capture=True,
                        capture_stderr=True, cwd=result.out_dir,
                        raise_on_error=False, env=env)
                rodata_size = ''
                if dump_result.stdout:
                    objdump = self.builder.GetObjdumpFile(result.commit_upto,
                                    result.brd.target, fname)
                    with open(objdump, 'w') as fd:
                        print >>fd, dump_result.stdout,
                    for line in dump_result.stdout.splitlines():
                        fields = line.split()
                        if len(fields) > 5 and fields[1] == '.rodata':
                            rodata_size = fields[2]

                cmd = ['%ssize' % self.toolchain.cross, fname]
                size_result = command.RunPipe([cmd], capture=True,
                        capture_stderr=True, cwd=result.out_dir,
                        raise_on_error=False, env=env)
                if size_result.stdout:
                    lines.append(size_result.stdout.splitlines()[1] + ' ' +
                                 rodata_size)

            # Write out the image sizes file. This is similar to the output
            # of binutils' 'size' utility, but it omits the header line and
            # adds an additional hex value at the end of each line for the
            # rodata size
            if len(lines):
                sizes = self.builder.GetSizesFile(result.commit_upto,
                                                  result.brd.target)
                with open(sizes, 'w') as fd:
                    print >>fd, '\n'.join(lines)

        # Now write the actual build output
        if keep_outputs:
            patterns = ['u-boot', '*.bin', 'u-boot.dtb', '*.map', '*.img',
                        'include/autoconf.mk', 'spl/u-boot-spl',
                        'spl/u-boot-spl.bin']
            for pattern in patterns:
                file_list = glob.glob(os.path.join(result.out_dir, pattern))
                for fname in file_list:
                    shutil.copy(fname, build_dir)
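
    # For orientation, a single successful _WriteResult() call leaves roughly
    # the following files in the per-commit, per-board build directory (the
    # exact names and paths come from the builder's GetBuildDir()/Get*File()
    # helpers, so treat this list as illustrative rather than definitive):
    #
    #   log            - stdout of the build
    #   <err file>     - stderr of the build, if there was any
    #   <done file>    - the numeric return code of the build
    #   toolchain      - gcc, path, cross and arch of the toolchain used
    #   <nm/objdump/sizes files> - size information for u-boot and
    #                              spl/u-boot-spl
    #   copies of u-boot, *.bin, *.map etc., if keep_outputs was requested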

    def RunJob(self, job):
        """Run a single job

        A job consists of building a list of commits for a particular board.

        Args:
            job: Job to build
        """
        brd = job.board
        work_dir = self.builder.GetThreadDir(self.thread_num)
        self.toolchain = None
        if job.commits:
            # Run 'make board_defconfig' on the first commit
            do_config = True
            commit_upto = 0
            force_build = False
            for commit_upto in range(0, len(job.commits), job.step):
                result, request_config = self.RunCommit(commit_upto, brd,
                        work_dir, do_config,
                        force_build or self.builder.force_build,
                        self.builder.force_build_failures)
                failed = result.return_code or result.stderr
                did_config = do_config
                if failed and not do_config:
                    # If our incremental build failed, try building again
                    # with a reconfig.
                    if self.builder.force_config_on_failure:
                        result, request_config = self.RunCommit(commit_upto,
                                brd, work_dir, True, True, False)
                        did_config = True
                if not self.builder.force_reconfig:
                    do_config = request_config

                # If we built that commit, then config is done. But if we got
                # a warning, reconfig next time to force it to build the same
                # files that created warnings this time. Otherwise an
                # incremental build may not build the same file, and we will
                # think that the warning has gone away.
                # We could avoid this by using -Werror everywhere...
                # For errors, the problem doesn't happen, since presumably
                # the build stopped and didn't generate output, so will retry
                # that file next time. So we could detect warnings and deal
                # with them specially here. For now, we just reconfigure if
                # anything goes wrong.
                # Of course this is substantially slower if there are build
                # errors/warnings (e.g. 2-3x slower even if only 10% of builds
                # have problems).
                if (failed and not result.already_done and not did_config and
                        self.builder.force_config_on_failure):
                    # If this build failed, try the next one with a
                    # reconfigure.
                    # Sometimes if the board_config.h file changes it can mess
                    # with dependencies, and we get:
                    # make: *** No rule to make target `include/autoconf.mk',
                    # needed by `depend'.
                    do_config = True
                    force_build = True
                else:
                    force_build = False
                    if self.builder.force_config_on_failure:
                        if failed:
                            do_config = True
                result.commit_upto = commit_upto
                if result.return_code < 0:
                    raise ValueError('Interrupt')

                # We have the build results, so output the result
                self._WriteResult(result, job.keep_outputs)
                self.builder.out_queue.put(result)
        else:
            # Just build the currently checked-out source
            result, request_config = self.RunCommit(None, brd, work_dir, True,
                        True, self.builder.force_build_failures)
            result.commit_upto = 0
            self._WriteResult(result, job.keep_outputs)
            self.builder.out_queue.put(result)

    def run(self):
        """Our thread's run function

        This thread picks a job from the queue, runs it, and then goes to the
        next job.
        """
        alive = True
        while True:
            job = self.builder.queue.get()
            if self.builder.active and alive:
                self.RunJob(job)
            '''
            try:
                if self.builder.active and alive:
                    self.RunJob(job)
            except Exception as err:
                alive = False
                print err
            '''
            self.builder.queue.task_done()
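
# Illustrative sketch only: the Builder (in builder.py) owns the queues and
# attributes referenced above (queue, out_queue, active, force_build and so
# on), so the real wiring lives there. Judging purely from how the classes in
# this file are used, the flow looks roughly like this (num_threads, brd and
# commits are placeholders, not part of this module):
#
#   builder.queue = Queue.Queue()        # BuilderJob objects, one per board
#   builder.out_queue = Queue.Queue()    # CommandResult objects per build
#   for thread_num in range(num_threads):
#       BuilderThread(builder, thread_num).start()
#   ResultThread(builder).start()
#
#   job = BuilderJob()
#   job.board = brd                      # Board object to build
#   job.commits = commits                # list of commits to build
#   job.keep_outputs = False             # RunJob() also expects these two
#   job.step = 1                         #   attributes to be set by the caller
#   builder.queue.put(job)
#   builder.queue.join()                 # wait for all builds to complete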