#!/usr/bin/env python3

# This script generates the unit test coverage report for the openbmc project.
#
# Usage:
#   get_unit_test_report.py target_dir [url_file]
#
# Positional arguments:
#   target_dir  Target directory in pwd to place all cloned repos and logs.
#   url_file    Text file containing the URLs of repositories. Optional.
#               By using this argument, the user can get a report only for
#               the specific repositories given in the file.
#               Refer to ./scripts/repositories.txt
#
# Examples:
#   get_unit_test_report.py target_dir
#   get_unit_test_report.py target_dir repositories.txt
#
# Output format:
#
# ***********************************OUTPUT***********************************
# https://github.com/openbmc/phosphor-dbus-monitor.git               NO
# https://github.com/openbmc/phosphor-sel-logger.git;protocol=git    NO
# ***********************************OUTPUT***********************************
#
# Other outputs and errors are redirected to output.log and debug.log in
# target_dir.

import argparse
import logging
import os
import re
import shutil
import subprocess
import sys

import requests

# Repos not expected to contain unit tests. Will be moved to a file in future.
skip_list = ["openbmc-tools", "inarp", "openbmc", "openbmc.github.io",
             "phosphor-ecc", "phosphor-pcie-presence", "phosphor-u-boot-env-mgr",
             "rrd-ipmi-blob", "librrdplus", "openpower-inventory-upload",
             "openpower-logging", "openpower-power-control", "docs",
             "openbmc-test-automation", "openbmc-build-scripts", "skeleton",
             "linux",
             # Not active, expected to be archived soon.
             "ibm-pldm-oem"]


# Create parser.
text = '''%(prog)s target_dir [url_file]

Example usages:
get_unit_test_report.py target_dir
get_unit_test_report.py target_dir repositories.txt'''

parser = argparse.ArgumentParser(
    usage=text,
    description="Script generates the unit test coverage report")
parser.add_argument("target_dir", type=str,
                    help='''Name of a non-existing directory in pwd to store
                            all cloned repos, logs and UT reports''')
parser.add_argument("url_file", type=str, nargs='?',
                    help='''Text file containing URLs of repositories.
                            By using this argument, the user can get a report
                            only for the specific repositories given in the
                            file. Refer to ./scripts/repositories.txt''')
args = parser.parse_args()

input_urls = []
if args.url_file:
    try:
        # Get URLs from the file, dropping empty lines.
        with open(args.url_file) as reader:
            file_content = reader.read().splitlines()
            input_urls = list(filter(None, file_content))
        if not input_urls:
            print("Input file {} is empty. Quitting...".format(args.url_file))
            quit()
    except IOError as e:
        print("Issue in reading file '{}'. Reason: {}".format(args.url_file,
                                                              str(e)))
        quit()


# Create target working directory.
pwd = os.getcwd()
working_dir = os.path.join(pwd, args.target_dir)
try:
    os.mkdir(working_dir)
except OSError:
    answer = input("Target directory " + working_dir + " already exists. "
                   + "Do you want to delete it [Y/N]: ")
    if answer == "Y":
        try:
            shutil.rmtree(working_dir)
            os.mkdir(working_dir)
        except OSError as e:
            print(str(e))
            quit()
    else:
        print("Exiting....")
        quit()

# Create log directory.
log_dir = os.path.join(working_dir, "logs")
try:
    os.mkdir(log_dir)
except OSError as e:
    print("Unable to create log directory: " + log_dir)
    print(str(e))
    quit()


# Log files.
debug_file = os.path.join(log_dir, "debug.log")
output_file = os.path.join(log_dir, "output.log")
logging.basicConfig(format='%(levelname)s - %(message)s', level=logging.DEBUG,
                    filename=debug_file)
logger = logging.getLogger(__name__)

# Create handlers.
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler(output_file)
console_handler.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)

# Create a formatter and add it to the handlers.
log_format = logging.Formatter('%(message)s')
console_handler.setFormatter(log_format)
file_handler.setFormatter(log_format)

# Add handlers to the logger.
logger.addHandler(console_handler)
logger.addHandler(file_handler)


# Create report directory.
report_dir = os.path.join(working_dir, "reports")
try:
    os.mkdir(report_dir)
except OSError as e:
    logger.error("Unable to create report directory: " + report_dir)
    logger.error(str(e))
    quit()

# Clone openbmc-build-scripts.
try:
    output = subprocess.check_output(
        "git clone https://github.com/openbmc/openbmc-build-scripts.git",
        shell=True, cwd=working_dir, stderr=subprocess.STDOUT)
    logger.debug(output)
except subprocess.CalledProcessError as e:
    logger.error(e.output)
    logger.error(e.cmd)
    logger.error("Unable to clone openbmc-build-scripts")
    quit()

repo_data = []
if input_urls:
    # Fetch metadata only for the repositories listed in the input file.
    api_url = "https://api.github.com/repos/openbmc/"
    for url in input_urls:
        try:
            repo_name = url.strip().split('/')[-1].split(";")[0].split(".")[0]
        except IndexError as e:
            logger.error("ERROR: Unable to get repo name for url " + url)
            logger.error("Reason: " + str(e))
            continue

        try:
            resp = requests.get(api_url + repo_name)
            if resp.status_code != 200:
                logger.info(api_url + repo_name + " ==> " + resp.reason)
                continue
            repo_data.extend([resp.json()])
        except ValueError as e:
            logger.error("ERROR: Failed to get response for " + repo_name)
            logger.error(resp)
            continue

else:
    # Get the number of pages of the openbmc repository listing.
    resp = requests.head('https://api.github.com/users/openbmc/repos')
    if resp.status_code != 200:
        logger.error("Error! Unable to get repositories")
        logger.error(resp.status_code)
        logger.error(resp.reason)
        quit()
    num_of_pages = int(resp.links['last']['url'].split('page=')[-1])
    logger.debug("No. of pages: " + str(num_of_pages))

    # Fetch data from all pages.
    for page in range(1, num_of_pages + 1):
        resp = requests.get('https://api.github.com/users/openbmc/repos?page='
                            + str(page))
        data = resp.json()
        repo_data.extend(data)


# Get URLs and their archive status from the response.
url_info = {}
for repo in repo_data:
    try:
        url_info[repo["clone_url"]] = repo["archived"]
    except KeyError:
        logger.error("Failed to get archived status of {}".format(repo))
        url_info[repo["clone_url"]] = False
        continue
logger.debug(url_info)
repo_count = len(url_info)
logger.info("Number of repositories (including archived): " + str(repo_count))

# Clone each repository and run its unit tests.
coverage_report = []
counter = 0
tested_report_count = 0
coverage_count = 0
unit_test_count = 0
no_report_count = 0
error_count = 0
skip_count = 0
archive_count = 0
url_list = sorted(url_info)
for url in url_list:
    ut_status = "NO"
    skip = False
    if url_info[url]:
        ut_status = "ARCHIVED"
        skip = True
    else:
        try:
            # Eg: url = "https://github.com/openbmc/u-boot.git"
            #     sandbox_name = "u-boot"
            sandbox_name = url.strip().split('/')[-1].split(";")[0].split(".")[0]
        except IndexError as e:
            logger.error("ERROR: Unable to get sandbox name for url " + url)
            logger.error("Reason: " + str(e))
            continue

        if (sandbox_name in skip_list or
                re.match(r'meta-', sandbox_name)):
            logger.debug("SKIPPING: " + sandbox_name)
            skip = True
            ut_status = "SKIPPED"
        else:
            checkout_cmd = "rm -rf " + sandbox_name + ";git clone " + url
            try:
                subprocess.check_output(checkout_cmd, shell=True,
                                        cwd=working_dir,
                                        stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                logger.debug(e.output)
                logger.debug(e.cmd)
                logger.debug("Failed to clone " + sandbox_name)
                ut_status = "ERROR"
                skip = True

    if not skip:
        # Run the unit tests for this repo inside the docker container.
        docker_cmd = "WORKSPACE=$(pwd) UNIT_TEST_PKG=" + sandbox_name + " " + \
                     "./openbmc-build-scripts/run-unit-test-docker.sh"
        try:
            result = subprocess.check_output(docker_cmd, cwd=working_dir,
                                             shell=True,
                                             stderr=subprocess.STDOUT)
            logger.debug(result)
            logger.debug("UT BUILD COMPLETED FOR: " + sandbox_name)
        except subprocess.CalledProcessError as e:
            logger.debug(e.output)
            logger.debug(e.cmd)
            logger.debug("UT BUILD EXITED FOR: " + sandbox_name)
            ut_status = "ERROR"

        folder_name = os.path.join(working_dir, sandbox_name)
        repo_report_dir = os.path.join(report_dir, sandbox_name)

        # Look for generated coverage and test reports in the cloned repo.
        report_names = ("coveragereport", "test-suite.log", "LastTest.log")
        find_cmd = "".join("find " + folder_name + " -name " + report + ";"
                           for report in report_names)
        try:
            result = subprocess.check_output(find_cmd, shell=True)
            result = result.decode("utf-8")
        except subprocess.CalledProcessError as e:
            logger.debug(e.output)
            logger.debug(e.cmd)
            logger.debug("CMD TO FIND REPORT FAILED FOR: " + sandbox_name)
            ut_status = "ERROR"

        if ut_status != "ERROR":
            if result:
                if "coveragereport" in result:
                    ut_status = "YES, COVERAGE"
                    coverage_count += 1
                elif "test-suite.log" in result:
                    ut_status = "YES, UNIT TEST"
                    unit_test_count += 1
                elif "LastTest.log" in result:
                    file_names = result.splitlines()
                    for file in file_names:
                        pattern_count_cmd = "sed -n '/Start testing/,/End testing/p;' " + \
                                            file + "|wc -l"
                        try:
                            num_of_lines = subprocess.check_output(
                                pattern_count_cmd, shell=True)
                        except subprocess.CalledProcessError as e:
                            logger.debug(e.output)
                            logger.debug(e.cmd)
                            logger.debug("CONTENT CHECK FAILED FOR: " +
                                         sandbox_name)
                            ut_status = "ERROR"
                            continue

                        if int(num_of_lines.strip()) > 5:
                            ut_status = "YES, UNIT TEST"
                            unit_test_count += 1
                            # Count the repository only once.
                            break

        if "YES" in ut_status:
            tested_report_count += 1
            # Copy the found report files into the report directory,
            # preserving their paths relative to the working directory.
            result = result.splitlines()
            for file_path in result:
                destination = os.path.dirname(
                    os.path.join(report_dir,
                                 os.path.relpath(file_path, working_dir)))
                copy_cmd = "mkdir -p " + destination + ";cp -rf " + \
                           file_path.strip() + " " + destination
                try:
                    subprocess.check_output(copy_cmd, shell=True)
                except subprocess.CalledProcessError as e:
                    logger.debug(e.output)
                    logger.debug(e.cmd)
                    logger.info("FAILED TO COPY REPORTS FOR: " + sandbox_name)

    if ut_status == "ERROR":
        error_count += 1
    elif ut_status == "NO":
        no_report_count += 1
    elif ut_status == "SKIPPED":
        skip_count += 1
    elif ut_status == "ARCHIVED":
        archive_count += 1

    coverage_report.append("{:<65}{:<10}".format(url.strip(), ut_status))
    counter += 1
    logger.info(str(counter) + " in " + str(repo_count) + " completed")

logger.info("*" * 30 + "UNIT TEST COVERAGE REPORT" + "*" * 30)
for res in coverage_report:
    logger.info(res)
logger.info("*" * 30 + "UNIT TEST COVERAGE REPORT" + "*" * 30)

logger.info("REPORTS: " + report_dir)
logger.info("LOGS: " + log_dir)
logger.info("*" * 85)
logger.info("SUMMARY: ")
logger.info("TOTAL REPOSITORIES  : " + str(repo_count))
logger.info("TESTED REPOSITORIES : " + str(tested_report_count))
logger.info("ERROR               : " + str(error_count))
logger.info("COVERAGE REPORT     : " + str(coverage_count))
logger.info("UNIT TEST REPORT    : " + str(unit_test_count))
logger.info("NO REPORT           : " + str(no_report_count))
logger.info("SKIPPED             : " + str(skip_count))
logger.info("ARCHIVED            : " + str(archive_count))
logger.info("*" * 85)