# test case management tool - manual execution from testopia test cases
#
# Copyright (c) 2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import argparse
import json
import os
import sys
import datetime
import re
from oeqa.core.runner import OETestResultJSONHelper


def load_json_file(file):
    """Parse *file* as JSON and return the decoded object."""
    with open(file, "r") as f:
        return json.load(f)


class ManualTestRunner(object):
    """Interactively walk a tester through the manual test cases found in a
    Testopia-exported JSON file, collecting a PASSED/FAILED/BLOCKED/SKIPPED
    verdict per test case, and return everything needed to write a results
    JSON file via OETestResultJSONHelper.
    """

    def _get_testcases(self, file):
        # Load the test case definitions. The test module name is the first
        # dotted component of the first case's alias (all cases in one file
        # are assumed to share that prefix).
        self.jdata = load_json_file(file)
        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]

    def _get_input(self, config):
        """Prompt for the value of configuration key *config* until the
        user enters an acceptable value, then return it.

        Accepted: lowercase alphanumerics, hyphen and dot — plus the
        literal string "None", since the calling prompt explicitly tells
        the user to enter "None" when a key is not applicable (the
        lowercase-only pattern used to reject it).
        """
        while True:
            output = input('{} = '.format(config))
            if output == 'None' or re.match('^[a-z0-9-.]+$', output):
                break
            print('Only lowercase alphanumeric, hyphen and dot are allowed. Please try again')
        return output

    def _create_config(self):
        """Build self.configuration: layer list, start time, test type and
        module, plus any extra keys required by resultutils.store_map that
        the user is prompted for interactively.
        """
        # Imported lazily: these pull in bitbake/oeqa machinery that is only
        # available (and only needed) when actually running this command.
        from oeqa.utils.metadata import get_layers
        from oeqa.utils.commands import get_bb_var
        from resulttool.resultutils import store_map

        layers = get_layers(get_bb_var('BBLAYERS'))
        self.configuration = {}
        self.configuration['LAYERS'] = layers
        current_datetime = datetime.datetime.now()
        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
        self.configuration['STARTTIME'] = self.starttime
        self.configuration['TEST_TYPE'] = 'manual'
        self.configuration['TEST_MODULE'] = self.test_module

        # Ask the user for every store_map key we have not filled in above;
        # sorted so the prompts appear in a stable order.
        extra_config = set(store_map['manual']) - set(self.configuration)
        for config in sorted(extra_config):
            print('---------------------------------------------')
            print('This is configuration #%s. Please provide configuration value(use "None" if not applicable).' % config)
            print('---------------------------------------------')
            value_conf = self._get_input('Configuration Value')
            print('---------------------------------------------\n')
            self.configuration[config] = value_conf

    def _create_result_id(self):
        # Unique-enough identifier for this run: module name + start time.
        self.result_id = 'manual_%s_%s' % (self.test_module, self.starttime)

    def _execute_test_steps(self, test):
        """Show every step of *test* to the tester, then prompt for a
        single overall verdict and return {alias: {'status': ..., ['log': ...]}}.

        A FAILED verdict additionally prompts for a free-form log/error
        description which is stored under 'log'.
        """
        test_result = {}
        print('------------------------------------------------------------------------')
        print('Executing test case: %s' % test['test']['@alias'])
        print('------------------------------------------------------------------------')
        print('You have total %s test steps to be executed.' % len(test['test']['execution']))
        print('------------------------------------------------------------------------\n')
        # Step keys are strings; sort numerically so "10" comes after "9".
        for step, _ in sorted(test['test']['execution'].items(), key=lambda x: int(x[0])):
            print('Step %s: %s' % (step, test['test']['execution'][step]['action']))
            expected_output = test['test']['execution'][step]['expected_results']
            if expected_output:
                print('Expected output: %s' % expected_output)
        result_types = {'p': 'PASSED',
                        'f': 'FAILED',
                        'b': 'BLOCKED',
                        's': 'SKIPPED'}
        while True:
            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
            if done in result_types:
                # Direct lookup instead of scanning the dict for a match.
                res = result_types[done]
                if res == 'FAILED':
                    log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
                    test_result.update({test['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
                else:
                    test_result.update({test['test']['@alias']: {'status': '%s' % res}})
                break
            print('Invalid input!')
        return test_result

    def _create_write_dir(self):
        # Results are written under the build directory; BUILDDIR is set by
        # oe-init-build-env, so a KeyError here means we are not in a build
        # environment.
        basepath = os.environ['BUILDDIR']
        self.write_dir = basepath + '/tmp/log/manual/'

    def run_test(self, file):
        """Run every test case in JSON *file* interactively.

        Returns (configuration dict, result id, write directory,
        {alias: result} dict) — the arguments needed by
        OETestResultJSONHelper.dump_testresult_file().
        """
        self._get_testcases(file)
        self._create_config()
        self._create_result_id()
        self._create_write_dir()
        test_results = {}
        print('\nTotal number of test cases in this test suite: %s\n' % len(self.jdata))
        for t in self.jdata:
            test_result = self._execute_test_steps(t)
            test_results.update(test_result)
        return self.configuration, self.result_id, self.write_dir, test_results


def manualexecution(args, logger):
    """Entry point for the 'manualexecution' subcommand: run the manual
    test session and dump the collected results to a JSON results file."""
    testrunner = ManualTestRunner()
    get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file)
    resultjsonhelper = OETestResultJSONHelper()
    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id, get_test_results)
    return 0


def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('manualexecution', help='helper script for results populating during manual test execution.',
                                         description='helper script for results populating during manual test execution. You can find manual test case JSON file in meta/lib/oeqa/manual/',
                                         group='manualexecution')
    parser_build.set_defaults(func=manualexecution)
    parser_build.add_argument('file', help='specify path to manual test case JSON file.Note: Please use \"\" to encapsulate the file path.')