/openbmc/openbmc/poky/scripts/lib/resulttool/
resultutils.py:
    59: # Load the json file and append the results data into the provided results dict
    61: def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
    80: raise ValueError("Test results data without configuration or result section?")
    91: if testpath not in results:
    92: results[testpath] = {}
    93: results[testpath][res] = data[res]
    96: # Walk a directory and find/load results data
    100: results = {}
    102: append_resultsdata(results, source, configmap, configvars)
    103: return results
    [all …]
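The excerpt shows the accumulate-into-dict pattern resulttool uses for JSON result files. Below is a minimal, self-contained sketch of that pattern; the file layout and the keyfields parameter are assumptions based on the excerpt, not the actual resultutils implementation.

    import json

    def append_results(results, filename, keyfields=("TESTSERIES", "MACHINE")):
        """Fold one JSON results file into the shared results dict."""
        with open(filename) as f:
            data = json.load(f)
        for res_id, res in data.items():
            # Every entry must carry both sections, as the excerpt enforces.
            if "configuration" not in res or "result" not in res:
                raise ValueError("Test results data without configuration or result section?")
            # Derive a storage path from selected configuration variables (assumed keys).
            testpath = "/".join(str(res["configuration"].get(k, "unknown")) for k in keyfields)
            results.setdefault(testpath, {})[res_id] = res
        return results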
store.py:
    1: # resulttool - store test results
    29: results = {}
    32: resultutils.append_resultsdata(results, args.source, configvars=configvars)
    38: resultutils.append_resultsdata(results, f, configvars=configvars)
    46: if not results and not args.all:
    48: logger.info("No results found to store")
    50: logger.error("No results found to store")
    54: for suite in results:
    55: for result in results[suite]:
    56: config = results[suite][result]['configuration']['LAYERS']['meta']
    [all …]
merge.py:
    20: results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, confi…
    21: resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, config…
    22: resultutils.save_resultsdata(results, args.target_results)
    24: results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, confi…
    26: resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, co…
    27: resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(ar…
    29: logger.info('Merged results to %s' % os.path.dirname(args.target_results))
    36: description='merge the results from multiple files/directories/URLs into the target file or direct…
    40: help='the results file/directory/URL to import')
    44: help='do not add testseries configuration to results')
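The merge flow visible here is load target, append base, save back. A hedged sketch of that sequence, assuming resultutils from poky's scripts/lib/resulttool is importable and using the call signatures shown in the excerpt:

    from resulttool import resultutils

    def merge_into_target(base_results, target_results):
        # Load what the target already holds, fold the base data in, write back.
        results = resultutils.load_resultsdata(target_results,
                                               configmap=resultutils.store_map)
        resultutils.append_resultsdata(results, base_results,
                                       configmap=resultutils.store_map)
        resultutils.save_resultsdata(results, target_results)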
regression.py:
    89: def all_tests_have_at_least_one_matching_tag(results, tag_list):
    90: …, tag_list) or test_name.startswith("ptestresult") for (test_name, test_result) in results.items())
    92: def any_test_have_any_matching_tag(results, tag_list):
    93: return any(test_has_at_least_one_matching_tag(test, tag_list) for test in results.values())
    101: def guess_oeselftest_metadata(results):
    103: …lftest test result is lacking OESELFTEST_METADATA, we can try to guess it based on results content.
    104: …Check results for specific values (absence/presence of oetags, number and name of executed tests..…
    111: if len(results) == 1 and "buildoptions.SourceMirroring.test_yocto_source_mirror" in results:
    113: elif all(result.startswith("reproducible") for result in results):
    115: elif all_tests_have_at_least_one_matching_tag(results, ["machine"]):
    [all …]
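These helpers reduce to any/all scans over a results dict. A minimal sketch of the tag-matching trio; test_has_at_least_one_matching_tag's body and the "oetags" field layout are assumptions inferred from the excerpt:

    def test_has_at_least_one_matching_tag(test_result, tag_list):
        # Assumed layout: each result may carry a list of OETAG-style tags.
        tags = test_result.get("oetags", [])
        return any(tag in tags for tag in tag_list)

    def all_tests_have_at_least_one_matching_tag(results, tag_list):
        # ptest results are exempt, as on line 90 of the excerpt.
        return all(test_has_at_least_one_matching_tag(result, tag_list)
                   or name.startswith("ptestresult")
                   for name, result in results.items())

    def any_test_have_any_matching_tag(results, tag_list):
        return any(test_has_at_least_one_matching_tag(test, tag_list)
                   for test in results.values())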
junit.py:
    1: # resulttool - report test results in JUnit XML format
    22: results = tests[next(reversed(tests))].get("result", {})
    24: for result_id, result in results.items():
    41: testsuite_node.set("tests", "%s" % len(results))
    46: for result_id, result in results.items():
    71: …='generate unit test report in JUnit XML format based on the latest test results in the testresult…
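Turning a results dict into JUnit XML is a straightforward ElementTree walk. A sketch under the assumption that each result carries a "status" field; the status-to-element mapping below is illustrative, not resulttool's exact logic:

    import xml.etree.ElementTree as ET

    def to_junit(results):
        testsuites = ET.Element("testsuites")
        suite = ET.SubElement(testsuites, "testsuite")
        suite.set("tests", "%s" % len(results))
        for result_id, result in results.items():
            case = ET.SubElement(suite, "testcase", name=result_id)
            status = result.get("status", "UNKNOWN")
            if status == "FAILED":
                ET.SubElement(case, "failure").text = result.get("log", "")
            elif status == "SKIPPED":
                ET.SubElement(case, "skipped")
        return ET.tostring(testsuites, encoding="unicode")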
/openbmc/openbmc/poky/bitbake/lib/toaster/toastergui/
typeaheads.py:
    38: results = []
    57: results.append(needed_fields)
    59: return results
    77: results = []
    89: results.append(needed_fields)
    90: return results
    107: results = []
    118: results.append(needed_fields)
    120: return results
    137: results = []
    [all …]
/openbmc/qemu/scripts/simplebench/
results_to_text.py:
    48: def results_dimension(results):
    50: for case in results['cases']:
    51: for env in results['envs']:
    52: res = results['tab'][case['id']][env['id']]
    63: def results_to_text(results):
    65: n_columns = len(results['envs'])
    67: dim = results_dimension(results)
    74: tab.append([''] + [c['id'] for c in results['envs']])
    76: for case in results['cases']:
    78: case_results = results['tab'][case['id']]
    [all …]
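The excerpt implies a results structure of 'cases' × 'envs' with a 'tab' lookup keyed by their ids. A small sketch of that structure and a stand-in renderer; the example data and tab-separated output are illustrative, not simplebench's actual format:

    def results_to_rows(results):
        # Header row: one column per benchmark environment.
        rows = [[''] + [env['id'] for env in results['envs']]]
        for case in results['cases']:
            case_results = results['tab'][case['id']]
            rows.append([case['id']] +
                        [str(case_results[env['id']]) for env in results['envs']])
        return rows

    example = {
        'envs':  [{'id': 'qemu-v1'}, {'id': 'qemu-v2'}],
        'cases': [{'id': 'write-qcow2'}],
        'tab':   {'write-qcow2': {'qemu-v1': 1.23, 'qemu-v2': 1.19}},
    }
    for row in results_to_rows(example):
        print('\t'.join(row))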
/openbmc/openbmc/poky/meta/lib/oeqa/utils/
logparser.py:
    14: self.results = {}
    47: if current_section['name'] not in self.results:
    48: self.results[current_section['name']] = {}
    81: self.results[current_section['name']][result.group(1).strip()] = t
    90: return self.results, self.sections
    92: …# Log the results as files. The file name is the section name and the contents are the tests in th…
    97: for section in self.results:
    104: for test_name in sorted(self.results[section]):
    105: status = self.results[section][test_name]
    114: results = {}
    [all …]
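The parser builds a two-level mapping, results[section][test_name] = status. A self-contained sketch of that shape; the BEGIN/PASS/FAIL/SKIP line grammar below is an assumed example, not the exact ptest-runner format:

    import re

    RESULT_RE = re.compile(r"^(PASS|FAIL|SKIP): (.+)$")

    def parse_log(lines):
        results, section = {}, "default"
        for line in lines:
            if line.startswith("BEGIN: "):
                # A new section: hits until the next BEGIN belong to it.
                section = line[len("BEGIN: "):].strip()
                results.setdefault(section, {})
                continue
            m = RESULT_RE.match(line)
            if m:
                results.setdefault(section, {})[m.group(2).strip()] = m.group(1)
        return results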
decorators.py:
    43: logfile = os.path.join(os.getcwd(),'results-'+caller+'.'+timestamp+'.log')
    44: linkfile = os.path.join(os.getcwd(),'results-'+caller+'.log')
    74: logging.addLevelName(custom_log_level, 'RESULTS')
    76: def results(self, message, *args, **kws):
    79: logging.Logger.results = results
    95: local_log.results("Testcase "+str(test_case)+": ERROR")
    96: local_log.results("Testcase "+str(test_case)+":\n"+msg)
    100: local_log.results("Testcase "+str(test_case)+": FAILED")
    101: local_log.results("Testcase "+str(test_case)+":\n"+msg)
    105: local_log.results("Testcase "+str(test_case)+": SKIPPED")
    [all …]
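Lines 74-79 register a custom 'RESULTS' log level and graft a results() method onto Logger. The standard-library pattern, runnable on its own; the level number 25 (between INFO and WARNING) is an assumption, since the excerpt does not show custom_log_level's value:

    import logging

    RESULTS_LEVEL = 25  # assumed value; any unused number between existing levels works
    logging.addLevelName(RESULTS_LEVEL, 'RESULTS')

    def results(self, message, *args, **kws):
        if self.isEnabledFor(RESULTS_LEVEL):
            self._log(RESULTS_LEVEL, message, args, **kws)

    logging.Logger.results = results

    logging.basicConfig(level=RESULTS_LEVEL)
    log = logging.getLogger("testcase")
    log.results("Testcase 42: PASSED")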
/openbmc/openbmc/meta-openembedded/meta-oe/recipes-benchmark/lmbench/lmbench/
lmbench_result_html_report.patch:
    21: results/Makefile | 1 +
    25: diff --git a/results/Makefile b/results/Makefile
    27: --- a/results/Makefile
    28: +++ b/results/Makefile
    43: <title>LMBENCH System Results</title>
    44: <h1>LMBENCH System Results</h1>
    45: -<h2><a href=summary>Summary of results</a></h2>
    46: +<h2><a href=summary.out>Summary of results</a></h2>
    58: #s/.lmbench1.? results for //;
    68: <img align=middle src=\"../gifs/graph.gif\">System results table of contents</a>
update-results-script.patch:
    15: scripts/results | 8 +++-----
    140: diff --git a/scripts/results b/scripts/results
    142: --- a/scripts/results
    143: +++ b/scripts/results
    144: @@ -8,11 +8,11 @@ RESULTS=results/$OS
    145: BASE=../$RESULTS/`uname -n`
    156: if [ ! -d ../$RESULTS ]
    157: then mkdir -p ../$RESULTS
    159: RESULTS=$BASE.$EXT
    166: -lmbench $CONFIG 2>../${RESULTS}
    [all …]
/openbmc/openbmc/meta-openembedded/meta-networking/recipes-daemons/ippool/ippool/
runtest.sh:
    3: TCLSH="tclsh all.tcl -preservecore 3 -verbose bps -tmpdir ./results -outfile test-ippool.result"
    6: if [ -d ./results ]; then rm -fr ./results; fi
    7: mkdir ./results
    16: (failed=`grep FAILED results/*.result | wc -l`; \
    18: passed=`grep PASSED results/*.result | wc -l`; \
/openbmc/openbmc/meta-arm/meta-arm-bsp/dynamic-layers/meta-arm-systemready/recipes-test/arm-systemready-acs/files/fvp-base/
0001-check-sr-results-Change-the-expected-SR-result-confi.patch:
    4: Subject: [PATCH] [PATCH] check-sr-results: Change the expected SR result
    7: Update the check-sr-results.yaml and format-sr-results.yaml files for the
    12: Changes to check-sr-results.yaml:
    56: Changes to format-sr-results.yaml:
    58: format-sr-results.py to error).
    63: check-sr-results.yaml | 34 ++++++++++++++--------------------
    64: format-sr-results.yaml | 15 ---------------
    67: diff --git a/check-sr-results.yaml b/check-sr-results.yaml
    69: --- a/check-sr-results.yaml
    70: +++ b/check-sr-results.yaml
    [all …]
/openbmc/openbmc/meta-openembedded/meta-oe/licenses/
GPL-2.0-with-lmbench-restriction:
    8: 1. You may not distribute results in any public forum, in any publication,
    11: 2. You may not distribute the results for a fee of any kind. This includes
    26: b) Multiple times in the past people have wanted to report partial results.
    30: results, but did not want to report large process context switches
    32: c) We insist that if you formally report LMbench results, you have to
    33: report all of them and make the raw results file easily available.
    36: on a web site, etc., but does not mean the exchange of results
    40: is little to be gained and a lot to be lost if we allowed the results
    86: modifications; Swap results with other developers; use the
    88: in when you go to *publish* the results. If you sped up the
    [all …]
/openbmc/openbmc/poky/meta/lib/oeqa/core/tests/
test_data.py:
    25: results = tc.runTests()
    26: self.assertFalse(results.wasSuccessful())
    27: for test, data in results.errors:
    39: results = tc.runTests()
    40: self.assertFalse(results.wasSuccessful())
    41: for test, data in results.failures:
/openbmc/openbmc/meta-arm/meta-arm-systemready/recipes-test/arm-systemready-acs/arm-systemready-scripts/
0001-check-sr-results-Return-non-zero-exit-code-on-failur.patch:
    4: Subject: [PATCH] check-sr-results: Return non-zero exit code on failure
    11: check-sr-results.py | 6 ++++++
    14: diff --git a/check-sr-results.py b/check-sr-results.py
    16: --- a/check-sr-results.py
    17: +++ b/check-sr-results.py
0002-check-sr-results-Device-tree-improvements.patch:
    4: Subject: [PATCH] check-sr-results: Device tree improvements
    6: Make check-sr-results.py accept 'extra_compat' configuration for
    15: check-sr-results.py | 12 ++++++++++--
    19: diff --git a/check-sr-results.py b/check-sr-results.py
    21: --- a/check-sr-results.py
    22: +++ b/check-sr-results.py
/openbmc/openbmc/poky/meta/lib/oeqa/runtime/cases/
ptest.py:
    75: # Parse and save results
    77: results, sections = parser.parse(ptest_runner_log)
    88: for section in results:
    89: for test in results[section]:
    90: result = results[section][test]
    93: if not results[section]:
    102: for section in results:
    103: …tcases = [ "_".join(test.translate(trans).split()) for test in results[section] if results[section…
    116: … failmsg = failmsg + "\nptests which had no test results:\n%s" % pprint.pformat(zerolength)
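Line 103 uses a compact idiom to sanitise test names: drop awkward characters with str.translate, then collapse whitespace runs into underscores. A sketch of the idiom; the set of deleted characters is an assumption, since the excerpt does not show how trans is built:

    # Delete bracket/punctuation characters (assumed set), then normalise spaces.
    trans = str.maketrans("", "", "()[]<>,=")

    def sanitise(test_name):
        return "_".join(test_name.translate(trans).split())

    print(sanitise("glib-2.0: test (subcase 3)"))  # glib-2.0:_test_subcase_3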
ltp.py:
    38: cls.tc.target.run("mkdir -p /opt/ltp/results")
    69: self.target.deleteFiles("/opt/ltp/results/", ltp_group)
    71: …cmd = '/opt/ltp/runltp -f %s -q -r /opt/ltp -l /opt/ltp/results/%s -I 1 -d /opt/ltp' % (ltp_group,…
    89: # Copy the machine-readable test results locally so we can parse it
    91: remote_src = "/opt/ltp/results/%s" % ltp_group
    98: results, sections = parser.parse(dst)
    104: for test in results:
    105: result = results[test]
/openbmc/openbmc/meta-arm/ci/
patchreview:
    75: results = {}
    80: results[patch] = result
    115: return results
    118: def analyse(results, want_blame=False, verbose=True):
    121: verbose: display per-file results instead of just summary
    130: for patch in sorted(results):
    131: r = results[patch]
    230: def histogram(results):
    234: counts = recipes.countby(lambda r: r.upstream_status, results.values())
    235: bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
    [all …]
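The histogram step groups patches by upstream status with toolz and scales each count into a '#' bar. A runnable sketch assuming the toolz library is installed; the Result record and status values are invented for illustration:

    import math
    from collections import namedtuple
    from toolz import dicttoolz, recipes

    Result = namedtuple("Result", ["upstream_status"])
    results = {"a.patch": Result("Pending"),
               "b.patch": Result("Backport"),
               "c.patch": Result("Pending")}

    # Count patches per status, then render each count as a percentage bar.
    counts = recipes.countby(lambda r: r.upstream_status, results.values())
    bars = dicttoolz.valmap(
        lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
    for status, bar in bars.items():
        print("%-10s %s" % (status, bar))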
/openbmc/openbmc/poky/meta/lib/oeqa/sdk/
testmetaidesupport.py:
    40: results = tc.runTests()
    41: if results:
    42: results.logSummary(pn)
    44: if (not results) or (not results.wasSuccessful()):
/openbmc/openbmc/poky/scripts/contrib/
patchreview.py:
    66: results = {}
    71: results[patch] = result
    106: return results
    109: def analyse(results, want_blame=False, verbose=True):
    112: verbose: display per-file results instead of just summary
    127: for patch in sorted(results):
    128: r = results[patch]
    202: def histogram(results):
    206: counts = recipes.countby(lambda r: r.upstream_status, results.values())
    207: bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
    [all …]
/openbmc/phosphor-pid-control/
util.cpp:
    in spliceInputs():
        114: std::vector<conf::SensorInput> results;
        122: results.emplace_back(newInput);
        125: size_t resultSize = results.size();
        137: results[index].convertMarginZero = inputTempToMargin[index];
        138: results[index].convertTempToMargin = true;
        150: for (auto& result : results)
        158: return results;
    in splitNames():
        164: std::vector<std::string> results;
        166: results.reserve(sensorInputs.size());
        169: results.emplace_back(sensorInput.name);
    [all …]
/openbmc/openbmc/poky/scripts/
resulttool:
    3: # test results tool - tool for manipulating OEQA test result json files
    4: # (merge results, summarise results, regression analysis, generate manual test results file)
    9: # To store test results from oeqa automated tests, execute the below
    12: # To merge test results, execute the below
/openbmc/phosphor-ipmi-blobs/test/
manager_unittest.cpp:
    in TEST():
        136: std::vector<std::string> results;
        137: results.push_back(mgr.getBlobId(0));
        138: results.push_back(mgr.getBlobId(1));
        139: EXPECT_EQ(2, results.size());
        140: EXPECT_TRUE(std::find(results.begin(), results.end(), "asdf") !=
        141:     results.end());
        142: EXPECT_TRUE(std::find(results.begin(), results.end(), "ghjk") !=
        143:     results.end());