#!/usr/bin/env python3
#
# A script to analyse failures in GitLab CI pipelines. It requires a
# personal access token from GitLab with the following scopes:
#   - api
#   - read_repository
#   - read_user
#
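# Example invocations (illustrative only: the script filename is a
# placeholder and the pipeline IDs are made up; the flags are the ones
# defined below):
#
#   GITLAB_TOKEN=<your-token> ./ci-failure-analysis.py --branch staging --count 5
#   ./ci-failure-analysis.py --token <your-token> --pipeline 123456 654321
#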
import argparse
import gitlab
import os

#
# Arguments
#
class NoneForEmptyStringAction(argparse.Action):
    """Store None when an empty string is passed, so '' disables a filter."""

    def __call__(self, parser, namespace, value, option_string=None):
        if value == '':
            setattr(namespace, self.dest, None)
        else:
            setattr(namespace, self.dest, value)


parser = argparse.ArgumentParser(description="Analyse failed GitLab CI runs.")

parser.add_argument("--gitlab",
                    default="https://gitlab.com",
                    help="GitLab instance URL (default: https://gitlab.com).")
parser.add_argument("--id", default=11167699,
                    type=int,
                    help="GitLab project id (default: 11167699 for qemu-project/qemu)")
parser.add_argument("--token",
                    default=os.getenv("GITLAB_TOKEN"),
                    help="Your personal access token with 'api' scope.")
parser.add_argument("--branch",
                    type=str,
                    default="staging",
                    action=NoneForEmptyStringAction,
                    help="The name of the branch (default: 'staging')")
parser.add_argument("--status",
                    type=str,
                    action=NoneForEmptyStringAction,
                    default="failed",
                    help="Filter by pipeline status (default: 'failed')")
parser.add_argument("--count", type=int,
                    default=3,
                    help="The number of matching pipelines to fetch (default: 3).")
parser.add_argument("--skip-jobs",
                    default=False,
                    action='store_true',
                    help="Skip dumping the job info")
parser.add_argument("--pipeline", type=int,
                    nargs="+",
                    default=None,
                    help="Explicit pipeline ID(s) to fetch.")


if __name__ == "__main__":
    args = parser.parse_args()

    gl = gitlab.Gitlab(url=args.gitlab, private_token=args.token)
    project = gl.projects.get(args.id)

    pipelines_to_process = []

    # Use explicit pipeline IDs if provided, otherwise fetch a list
    if args.pipeline:
        args.count = len(args.pipeline)
        for p_id in args.pipeline:
            pipelines_to_process.append(project.pipelines.get(p_id))
    else:
        # Use an iterator to fetch the pipelines
        pipe_iter = project.pipelines.list(iterator=True,
                                           status=args.status,
                                           ref=args.branch)
        # Take the first args.count matching pipelines
        pipelines_to_process = [next(pipe_iter) for _ in range(args.count)]

    # Check each pipeline
    for p in pipelines_to_process:
        jobs = p.jobs.list(get_all=True)
        failed_jobs = [j for j in jobs if j.status == "failed"]
        skipped_jobs = [j for j in jobs if j.status == "skipped"]
        manual_jobs = [j for j in jobs if j.status == "manual"]

        trs = p.test_report_summary.get()
        total = trs.total["count"]
        skipped = trs.total["skipped"]
        failed = trs.total["failed"]

        print(f"{p.status} pipeline {p.id}, total jobs {len(jobs)}, "
              f"skipped {len(skipped_jobs)}, "
              f"failed {len(failed_jobs)}, "
              f"{total} tests, "
              f"{skipped} skipped tests, "
              f"{failed} failed tests")

        if not args.skip_jobs:
            for j in failed_jobs:
                print(f"  Failed job {j.id}, {j.name}, {j.web_url}")

        # It seems we can only extract failing tests from the full
        # test report, maybe there is some way to filter it.

        if failed > 0:
            ftr = p.test_report.get()
            failed_suites = [s for s in ftr.test_suites if
                             s["failed_count"] > 0]
            for fs in failed_suites:
                name = fs["name"]
                tests = fs["test_cases"]
                failed_tests = [t for t in tests if t["status"] == "failed"]
                for t in failed_tests:
                    print(f"    Failed test {t['classname']}, {name}, {t['name']}")