/openbmc/openbmc/poky/scripts/lib/resulttool/
resultutils.py:
    59: # Load the json file and append the results data into the provided results dict
    61: def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
    80: raise ValueError("Test results data without configuration or result section?")
    91: if testpath not in results:
    92: results[testpath] = {}
    93: results[testpath][res] = data[res]
    96: # Walk a directory and find/load results data
   100: results = {}
   102: append_resultsdata(results, source, configmap, configvars)
   103: return results
   [all …]

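Only fragments of append_resultsdata survive in the hit list above, but the shape it builds is visible in lines 91-93: a dict keyed by test path, each entry holding 'configuration' and 'result' sections. Below is a minimal sketch of that merge, assuming plain JSON files; the key derivation is invented here (the real code presumably derives it via the configmap argument), and the "TESTSERIES" key is an assumption.

import json

def append_resultsdata(results, path):
    # Sketch: merge one JSON results file into the shared 'results' dict.
    with open(path) as f:
        data = json.load(f)
    for res, entry in data.items():
        # Every result set must carry both sections, per the ValueError above.
        if "configuration" not in entry or "result" not in entry:
            raise ValueError("Test results data without configuration or result section?")
        testpath = entry["configuration"].get("TESTSERIES", "unknown")  # assumed key
        if testpath not in results:
            results[testpath] = {}
        results[testpath][res] = entry
    return results

results = {}
# append_resultsdata(results, "testresults.json")  # given such a file on disk
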
store.py:
     1: # resulttool - store test results
    29: results = {}
    32: resultutils.append_resultsdata(results, args.source, configvars=configvars)
    38: resultutils.append_resultsdata(results, f, configvars=configvars)
    46: if not results and not args.all:
    48: logger.info("No results found to store")
    50: logger.error("No results found to store")
    54: for suite in results:
    55: for result in results[suite]:
    56: config = results[suite][result]['configuration']['LAYERS']['meta']
   [all …]

merge.py:
    20: …results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, confi…
    21: …resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, config…
    22: resultutils.save_resultsdata(results, args.target_results)
    24: …results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, confi…
    26: …resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, co…
    27: …resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(ar…
    29: logger.info('Merged results to %s' % os.path.dirname(args.target_results))
    36: …description='merge the results from multiple files/directories/URLs into the target file or direct…
    40: help='the results file/directory/URL to import')
    44: help='do not add testseries configuration to results')

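Stripped of the resulttool plumbing, the flow in lines 20-22 is: load the target, append the base into it, save the target back. A toy file-based equivalent follows, assuming JSON on disk and simple last-writer-wins merging; the real append_resultsdata merges per test series rather than via dict.update.

import json

def load_resultsdata(path):
    with open(path) as f:
        return json.load(f)

def save_resultsdata(results, path):
    with open(path, "w") as f:
        json.dump(results, f, indent=2)

# results = load_resultsdata("target.json")
# results.update(load_resultsdata("base.json"))   # append base into target
# save_resultsdata(results, "target.json")
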
regression.py:
    89: def all_tests_have_at_least_one_matching_tag(results, tag_list):
    90: …, tag_list) or test_name.startswith("ptestresult") for (test_name, test_result) in results.items())
    92: def any_test_have_any_matching_tag(results, tag_list):
    93: return any(test_has_at_least_one_matching_tag(test, tag_list) for test in results.values())
   101: def guess_oeselftest_metadata(results):
   103: …lftest test result is lacking OESELFTEST_METADATA, we can try to guess it based on results content.
   104: …Check results for specific values (absence/presence of oetags, number and name of executed tests..…
   111: if len(results) == 1 and "buildoptions.SourceMirroring.test_yocto_source_mirror" in results:
   113: elif all(result.startswith("reproducible") for result in results):
   115: elif all_tests_have_at_least_one_matching_tag(results, ["machine"]):
   [all …]

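The truncated lines 90 and 93 are just generator expressions over the results dict. Reassembled as a self-contained sketch (the 'oetags' field on each test result is an assumption, as is the exact body of the per-test helper):

def test_has_at_least_one_matching_tag(test_result, tag_list):
    # 'oetags' is assumed to be a list of tags attached to each test result.
    return bool(set(test_result.get("oetags", [])) & set(tag_list))

def all_tests_have_at_least_one_matching_tag(results, tag_list):
    return all(test_has_at_least_one_matching_tag(test_result, tag_list)
               or test_name.startswith("ptestresult")
               for test_name, test_result in results.items())

def any_test_have_any_matching_tag(results, tag_list):
    return any(test_has_at_least_one_matching_tag(test, tag_list)
               for test in results.values())
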
/openbmc/openbmc/poky/bitbake/lib/toaster/toastergui/
typeaheads.py:
    38: results = []
    57: results.append(needed_fields)
    59: return results
    77: results = []
    89: results.append(needed_fields)
    90: return results
   107: results = []
   118: results.append(needed_fields)
   120: return results
   137: results = []
   [all …]

/openbmc/qemu/scripts/simplebench/
results_to_text.py:
    48: def results_dimension(results):
    50: for case in results['cases']:
    51: for env in results['envs']:
    52: res = results['tab'][case['id']][env['id']]
    63: def results_to_text(results):
    65: n_columns = len(results['envs'])
    67: dim = results_dimension(results)
    74: tab.append([''] + [c['id'] for c in results['envs']])
    76: for case in results['cases']:
    78: case_results = results['tab'][case['id']]
   [all …]

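The code indexes results['tab'][case['id']][env['id']] (line 52), which pins down the layout: a case list, an env list, and a two-level table keyed by their ids. A minimal sketch of that layout with a plain-text rendering; the cell values and ids here are invented, and the real function formats cells rather than passing strings through.

results = {
    'cases': [{'id': 'write'}, {'id': 'read'}],
    'envs':  [{'id': 'v1'}, {'id': 'v2'}],
    'tab': {
        'write': {'v1': '12.3s', 'v2': '11.9s'},
        'read':  {'v1': '3.4s',  'v2': '3.1s'},
    },
}

def results_to_text(results):
    # Header row: one column per environment, as in line 74.
    rows = [[''] + [env['id'] for env in results['envs']]]
    for case in results['cases']:
        case_results = results['tab'][case['id']]
        rows.append([case['id']] + [case_results[env['id']]
                                    for env in results['envs']])
    return '\n'.join('\t'.join(row) for row in rows)

print(results_to_text(results))
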
/openbmc/openbmc/poky/meta/lib/oeqa/utils/
logparser.py:
    14: self.results = {}
    47: if current_section['name'] not in self.results:
    48: self.results[current_section['name']] = {}
    81: self.results[current_section['name']][result.group(1).strip()] = t
    90: return self.results, self.sections
    92: …# Log the results as files. The file name is the section name and the contents are the tests in th…
    97: for section in self.results:
   104: for test_name in sorted(self.results[section]):
   105: status = self.results[section][test_name]
   114: results = {}
   [all …]

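The parser accumulates results[section][test_name] = status (lines 48, 81, 105). A compressed sketch of that shape, assuming a "BEGIN: <section>" marker and a "STATUS: name" result line; the actual regexes and section markers in logparser.py differ.

import re

result_re = re.compile(r'^(PASS|FAIL|SKIP): (.+)$')

def parse(log_lines):
    results = {}
    current = 'default'
    for line in log_lines:
        if line.startswith('BEGIN: '):
            # New section: everything after the marker names it.
            current = line[len('BEGIN: '):].strip()
            results.setdefault(current, {})
            continue
        m = result_re.match(line)
        if m:
            results.setdefault(current, {})[m.group(2).strip()] = m.group(1)
    return results

log = ["BEGIN: ptest-example", "PASS: test_one", "FAIL: test_two"]
print(parse(log))  # {'ptest-example': {'test_one': 'PASS', 'test_two': 'FAIL'}}
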
decorators.py:
    43: logfile = os.path.join(os.getcwd(),'results-'+caller+'.'+timestamp+'.log')
    44: linkfile = os.path.join(os.getcwd(),'results-'+caller+'.log')
    74: logging.addLevelName(custom_log_level, 'RESULTS')
    76: def results(self, message, *args, **kws):
    79: logging.Logger.results = results
    95: local_log.results("Testcase "+str(test_case)+": ERROR")
    96: local_log.results("Testcase "+str(test_case)+":\n"+msg)
   100: local_log.results("Testcase "+str(test_case)+": FAILED")
   101: local_log.results("Testcase "+str(test_case)+":\n"+msg)
   105: local_log.results("Testcase "+str(test_case)+": SKIPPED")
   [all …]

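Lines 74-79 register a custom RESULTS logging level and graft a results() method onto Logger. That pattern in isolation, using the stdlib logging API; the numeric level and logger name below are arbitrary choices for the sketch, not the values decorators.py uses.

import logging

custom_log_level = logging.INFO + 5          # assumed: between INFO and WARNING
logging.addLevelName(custom_log_level, 'RESULTS')

def results(self, message, *args, **kws):
    # Standard recipe for a custom level: gate on isEnabledFor, then _log.
    if self.isEnabledFor(custom_log_level):
        self._log(custom_log_level, message, args, **kws)

logging.Logger.results = results

logging.basicConfig(level=logging.INFO)
local_log = logging.getLogger('oeqa')
local_log.results("Testcase 123: PASSED")    # emitted at the RESULTS level
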
/openbmc/linux/drivers/gpu/drm/ci/
lava-submit.sh:
    15: rm -rf results
    16: mkdir -p results/job-rootfs-overlay/
    18: cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
    19: cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
    20: cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
    21: cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
    26: artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
    29: tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
    32: touch results/lava.log
    33: tail -f results/lava.log &
   [all …]

igt_runner.sh:
    56: --output /results \
    68: --results /results/failures.csv \
    69: --output /results/junit.xml \
    71: …PACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
    73: # Store the results also in the simpler format used by the runner in ChromeOS CI
    74: #sed -r 's/(dmesg-warn|pass)/success/g' /results/results.txt > /results/results_simple.txt

/openbmc/linux/drivers/net/ethernet/intel/fm10k/
fm10k_tlv.c:
   461: * @results: Pointer array to store pointers to attributes
   465: * up into an array of pointers stored in results. The function will
   471: static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
   479: if (!attr || !results)
   482: /* initialize results to NULL */
   484: results[i] = NULL;
   495: results[0] = attr;
   515: results[attr_id] = attr;
   548: u32 *results[FM10K_TLV_RESULTS_MAX];
   572: /* parse the attributes into the results list */
   [all …]

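fm10k_tlv_attr_parse walks a buffer of attributes and files a pointer to each into results[attr_id] (line 515), so later handlers can look up attributes by id in O(1). The same dispatch-by-id idea in a short Python sketch; the framing below (2-byte little-endian id, 2-byte length) is invented for illustration and is not fm10k's wire format.

import struct

RESULTS_MAX = 16

def tlv_parse(buf):
    # Walk TLV records and index each value by its attribute id,
    # mirroring results[attr_id] = attr in the C code.
    results = [None] * RESULTS_MAX
    off = 0
    while off + 4 <= len(buf):
        attr_id, length = struct.unpack_from('<HH', buf, off)
        off += 4
        if attr_id >= RESULTS_MAX or off + length > len(buf):
            raise ValueError('malformed TLV')
        results[attr_id] = buf[off:off + length]
        off += length
    return results

msg = struct.pack('<HH4s', 2, 4, b'\x01\x02\x03\x04')
print(tlv_parse(msg)[2])  # b'\x01\x02\x03\x04'
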
/openbmc/openbmc/meta-openembedded/meta-oe/recipes-benchmark/lmbench/lmbench/
lmbench_result_html_report.patch:
    21: results/Makefile | 1 +
    25: diff --git a/results/Makefile b/results/Makefile
    27: --- a/results/Makefile
    28: +++ b/results/Makefile
    43: <title>LMBENCH System Results</title>
    44: <h1>LMBENCH System Results</h1>
    45: -<h2><a href=summary>Summary of results</a></h2>
    46: +<h2><a href=summary.out>Summary of results</a></h2>
    58: #s/.lmbench1.? results for //;
    68: <img align=middle src=\"../gifs/graph.gif\">System results table of contents</a>
   [all …]

update-results-script.patch:
    15: scripts/results | 8 +++-----
   140: diff --git a/scripts/results b/scripts/results
   142: --- a/scripts/results
   143: +++ b/scripts/results
   144: @@ -8,11 +8,11 @@ RESULTS=results/$OS
   145: BASE=../$RESULTS/`uname -n`
   156: if [ ! -d ../$RESULTS ]
   157: then mkdir -p ../$RESULTS
   159: RESULTS=$BASE.$EXT
   166: -lmbench $CONFIG 2>../${RESULTS}
   [all …]

/openbmc/openbmc/meta-arm/meta-arm-bsp/dynamic-layers/meta-arm-systemready/recipes-test/arm-systemready-acs/files/fvp-base/
0001-check-sr-results-Change-the-expected-SR-result-confi.patch:
     4: Subject: [PATCH] [PATCH] check-sr-results: Change the expected SR result
     7: Update the check-sr-results.yaml and format-sr-results.yaml files for the
    12: Changes to check-sr-results.yaml:
    56: Changes to format-sr-results.yaml:
    58: format-sr-results.py to error).
    63: check-sr-results.yaml | 34 ++++++++++++++--------------------
    64: format-sr-results.yaml | 15 ---------------
    67: diff --git a/check-sr-results.yaml b/check-sr-results.yaml
    69: --- a/check-sr-results.yaml
    70: +++ b/check-sr-results.yaml
   [all …]

/openbmc/linux/drivers/net/ethernet/qlogic/qed/
qed_dbg_hsi.h:
   886: * for idle check results.
   902: * qed_dbg_idle_chk_dump: Performs idle check and writes the results
   924: * for mcp trace results.
   942: * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results
   968: * for grc trace fifo results.
   983: * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into
  1006: * for the IGU fifo results.
  1022: * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into
  1045: * buffer size for protection override window results.
  1062: * entries and writes the results into the specified buffer.
   [all …]

/openbmc/linux/tools/memory-model/scripts/
README:
    11: the results against the expected results recorded in the
    18: documenting expected results, comparing the actual results to
    35: expected results. This takes optional parseargs.sh arguments,
    49: of processes given a specified timeout, recording the results
    63: and record the results in .litmus.out files.
    82: # Populate expected results without that change, and
    93: # Compares results to those produced by initlitmushist.sh,
    97: # Checks results against Result tags, runs in minutes:

/openbmc/openbmc/meta-openembedded/meta-oe/licenses/
GPL-2.0-with-lmbench-restriction:
     8: 1. You may not distribute results in any public forum, in any publication,
    11: 2. You may not distribute the results for a fee of any kind. This includes
    26: b) Multiple times in the past people have wanted to report partial results.
    30: results, but did not want to report large process context switches
    32: c) We insist that if you formally report LMbench results, you have to
    33: report all of them and make the raw results file easily available.
    36: on a web site, etc., but does not mean the exchange of results
    40: is little to be gained and a lot to be lost if we allowed the results
    86: modifications; Swap results with other developers; use the
    88: in when you go to *publish* the results. If you sped up the
   [all …]

/openbmc/linux/lib/
interval_tree_test.c:
    33: unsigned long results = 0;
    37: results++;
    38: return results;
    65: unsigned long results;
   106: results = 0;
   112: results += search(&root, start, last);
   119: results = div_u64(results, search_loops);
   120: printk(" -> %llu cycles (%lu results)\n",
   121: (unsigned long long)time, results);

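The benchmark accumulates hit counts across search_loops iterations and divides at the end with div_u64 (line 119) to report the average. The same measurement pattern in Python; the interval stabbing below is a linear stand-in for the kernel's rbtree walk, and the numbers are arbitrary.

import time

def search(intervals, start, last):
    # Count intervals overlapping [start, last].
    return sum(1 for (lo, hi) in intervals if lo <= last and start <= hi)

intervals = [(0, 5), (3, 9), (10, 12)]
search_loops = 1000

t0 = time.perf_counter_ns()
results = 0
for _ in range(search_loops):
    results += search(intervals, 4, 10)
elapsed = time.perf_counter_ns() - t0

results //= search_loops   # average hits per query, as div_u64 does
print(f" -> {elapsed} ns ({results} results)")
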
/openbmc/openbmc/meta-arm/ci/
patchreview:
    75: results = {}
    80: results[patch] = result
   115: return results
   118: def analyse(results, want_blame=False, verbose=True):
   121: verbose: display per-file results instead of just summary
   130: for patch in sorted(results):
   131: r = results[patch]
   230: def histogram(results):
   234: counts = recipes.countby(lambda r: r.upstream_status, results.values())
   235: bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
   [all …]

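histogram() buckets patches by upstream_status and scales each bucket to a bar of '#' characters out of 100 (lines 234-235). The same computation without the toolz dependency, using collections.Counter; the status strings here are illustrative stand-ins for the per-patch results.

import math
from collections import Counter

statuses = ['Pending', 'Backport', 'Pending', 'Inappropriate', 'Pending']

counts = Counter(statuses)
total = len(statuses)
bars = {status: '#' * int(math.ceil(count / total * 100))
        for status, count in counts.items()}
for status, bar in sorted(bars.items()):
    print(f'{status:15} {counts[status]:3} {bar}')
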
/openbmc/openbmc/meta-openembedded/meta-networking/recipes-daemons/ippool/ippool/
runtest.sh:
     3: TCLSH="tclsh all.tcl -preservecore 3 -verbose bps -tmpdir ./results -outfile test-ippool.result"
     6: if [ -d ./results ]; then rm -fr ./results; fi
     7: mkdir ./results
    16: (failed=`grep FAILED results/*.result | wc -l`; \
    18: passed=`grep PASSED results/*.result | wc -l`; \

/openbmc/openbmc/poky/scripts/contrib/
patchreview.py:
    66: results = {}
    71: results[patch] = result
   106: return results
   109: def analyse(results, want_blame=False, verbose=True):
   112: verbose: display per-file results instead of just summary
   127: for patch in sorted(results):
   128: r = results[patch]
   202: def histogram(results):
   206: counts = recipes.countby(lambda r: r.upstream_status, results.values())
   207: bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
   [all …]

/openbmc/openbmc/poky/meta/lib/oeqa/runtime/cases/
ptest.py:
    75: # Parse and save results
    77: results, sections = parser.parse(ptest_runner_log)
    88: for section in results:
    89: for test in results[section]:
    90: result = results[section][test]
    93: if not results[section]:
   102: for section in results:
   103: …tcases = [ "_".join(test.translate(trans).split()) for test in results[section] if results[section…
   116: … failmsg = failmsg + "\nptests which had no test results:\n%s" % pprint.pformat(zerolength)

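Line 103 normalizes raw ptest names into testcase ids by translating awkward characters and joining the remaining words with underscores. The translation table 'trans' is not visible in the snippet, so the one below is an assumption chosen to make the pattern concrete.

# Assumed table: map characters awkward in test ids to spaces.
trans = str.maketrans({c: ' ' for c in './()[]'})

def to_testcase_name(test):
    # Mirror of line 103: translate, split on whitespace, rejoin with '_'.
    return "_".join(test.translate(trans).split())

print(to_testcase_name("glibc: test-fenv (subset)"))  # glibc:_test-fenv_subset
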
/openbmc/openbmc/poky/meta/lib/oeqa/core/tests/
test_data.py:
    25: results = tc.runTests()
    26: self.assertFalse(results.wasSuccessful())
    27: for test, data in results.errors:
    39: results = tc.runTests()
    40: self.assertFalse(results.wasSuccessful())
    41: for test, data in results.failures:

/openbmc/openbmc/meta-arm/meta-arm-systemready/recipes-test/arm-systemready-acs/arm-systemready-scripts/
0001-check-sr-results-Return-non-zero-exit-code-on-failur.patch:
     4: Subject: [PATCH] check-sr-results: Return non-zero exit code on failure
    11: check-sr-results.py | 6 ++++++
    14: diff --git a/check-sr-results.py b/check-sr-results.py
    16: --- a/check-sr-results.py
    17: +++ b/check-sr-results.py

/openbmc/linux/scripts/
generate_initcall_order.pl:
    19: ## results from child processes
    20: my $results = {}; # object index -> [ { level, secname }, ... ]
    37: ## writes results to the parent process
    57: ## reads a result line from a child process and adds it to the $results array
    83: if (!exists($results->{$index})) {
    84: $results->{$index} = [];
    87: push (@{$results->{$index}}, {
    96: ## writes results back to the parent process
   153: ## waits for any child process to complete, reads the results, and adds them to
   154: ## the $results array for later processing
   [all …]

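The Perl builds $results as a hash of arrays keyed by object index, with the explicit exists/initialize/push dance of lines 83-87. In Python, collections.defaultdict collapses those three steps into one; a sketch of the same accumulator (the sample level and section names are illustrative, not taken from a real build):

from collections import defaultdict

# object index -> [ { 'level': ..., 'secname': ... }, ... ]
results = defaultdict(list)

def add_result(index, level, secname):
    # No exists()/initialize step needed: a missing key starts as [].
    results[index].append({'level': level, 'secname': secname})

add_result(3, '4', '.initcall4.init')
add_result(3, '6', '.initcall6.init')
print(results[3])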