xref: /openbmc/qemu/scripts/mtest2make.py (revision 9e6190ae)
#! /usr/bin/env python3

# Create Makefile targets to run tests, from Meson's test introspection data.
#
# Author: Paolo Bonzini <pbonzini@redhat.com>
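#
# Illustrative invocation (not taken from this file; the exact introspection
# flags and output file used by the build system may differ):
#
#   meson introspect --targets --tests --benchmarks | ./scripts/mtest2make.py > Makefile.mtest
#
# The script expects a single JSON object on stdin with 'targets', 'tests'
# and 'benchmarks' keys, and writes a Makefile fragment to stdout.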

from collections import defaultdict
import itertools
import json
import os
import shlex
import sys

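# A Suite accumulates the ninja targets its tests depend on and the "speeds"
# at which it is run.  For an illustrative suite named 'block' that also has a
# 'block-slow' variant, speeds would be ['quick', 'slow'] and names('block')
# would yield ['block', 'block-slow'].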
class Suite(object):
    def __init__(self):
        self.deps = set()
        self.speeds = ['quick']

    def names(self, base):
        return [base if speed == 'quick' else f'{base}-{speed}' for speed in self.speeds]


print('''
SPEED = quick

.speed.quick = $(foreach s,$(sort $(filter-out %-slow %-thorough, $1)), --suite $s)
.speed.slow = $(foreach s,$(sort $(filter-out %-thorough, $1)), --suite $s)
.speed.thorough = $(foreach s,$(sort $1), --suite $s)

TIMEOUT_MULTIPLIER = 1
.mtestargs = --no-rebuild -t $(TIMEOUT_MULTIPLIER)
ifneq ($(SPEED), quick)
.mtestargs += --setup $(SPEED)
endif
.mtestargs += $(subst -j,--num-processes , $(filter-out -j, $(lastword -j1 $(filter -j%, $(MAKEFLAGS)))))

.check.mtestargs = $(MTESTARGS) $(.mtestargs) $(if $(V),--verbose,--print-errorlogs)
.bench.mtestargs = $(MTESTARGS) $(.mtestargs) --benchmark --verbose''')
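# Note on the fragment above: $(call .speed.$(SPEED), <suite names>) turns the
# accumulated suite list into "meson test --suite" arguments.  For example
# (illustrative names), $(call .speed.quick, block block-slow) expands to
# "--suite block", while $(call .speed.slow, block block-slow) expands to
# "--suite block --suite block-slow".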

introspect = json.load(sys.stdin)

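# Fold one introspected test into the per-suite bookkeeping.  A suite name of
# the form "PROJECT:SUITE" is reduced to "SUITE"; a "-slow" or "-thorough"
# suffix selects the speed, so (illustratively) a test in "qemu:block-slow"
# lands in suite 'block' with the 'slow' speed enabled.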
def process_tests(test, targets, suites):
    executable = test['cmd'][0]
    try:
        executable = os.path.relpath(executable)
    except ValueError:
        # relpath() can fail (e.g. paths on different drives on Windows);
        # keep the absolute path in that case.
        pass

    deps = (targets.get(x, []) for x in test['depends'])
    deps = itertools.chain.from_iterable(deps)
    deps = list(deps)

    test_suites = test['suite'] or ['default']
    for s in test_suites:
        # The suite name in the introspection info is "PROJECT" or "PROJECT:SUITE"
        if ':' in s:
            s = s.split(':')[1]
            if s == 'slow' or s == 'thorough':
                continue
        if s.endswith('-slow'):
            s = s[:-5]
            suites[s].speeds.append('slow')
        if s.endswith('-thorough'):
            s = s[:-9]
            suites[s].speeds.append('thorough')
        suites[s].deps.update(deps)

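# Emit the Makefile boilerplate shared by all suites with the given prefix
# ('check' or 'bench'): the aggregate targets (e.g. "check",
# "check-report.junit.xml"), the rule that invokes "meson test", and a rule
# that runs a suite and collects its JUnit XML report.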
def emit_prolog(suites, prefix):
    all_targets = ' '.join((f'{prefix}-{k}' for k in suites.keys()))
    all_xml = ' '.join((f'{prefix}-report-{k}.junit.xml' for k in suites.keys()))
    print()
    print(f'all-{prefix}-targets = {all_targets}')
    print(f'all-{prefix}-xml = {all_xml}')
    print(f'.PHONY: {prefix} do-meson-{prefix} {prefix}-report.junit.xml $(all-{prefix}-targets) $(all-{prefix}-xml)')
    print(f'ifeq ($(filter {prefix}, $(MAKECMDGOALS)),)')
    print(f'.{prefix}.mtestargs += $(call .speed.$(SPEED), $(.{prefix}.mtest-suites))')
    print(f'endif')
    print(f'{prefix}-build: run-ninja')
    print(f'{prefix} $(all-{prefix}-targets): do-meson-{prefix}')
    print(f'do-meson-{prefix}: run-ninja; $(if $(MAKE.n),,+)$(MESON) test $(.{prefix}.mtestargs)')
    print(f'{prefix}-report.junit.xml $(all-{prefix}-xml): {prefix}-report%.junit.xml: run-ninja')
    print(f'\t$(MAKE) {prefix}$* MTESTARGS="$(MTESTARGS) --logbase {prefix}-report$*" && ln -f meson-logs/$@ .')

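# For one suite, record which ninja targets must be built before its tests can
# run, and attach that list to every Make goal that may run the suite.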
def emit_suite_deps(name, suite, prefix):
    deps = ' '.join(suite.deps)
    targets = [f'{prefix}-{name}', f'{prefix}-report-{name}.junit.xml', f'{prefix}', f'{prefix}-report.junit.xml',
               f'{prefix}-build']
    print()
    print(f'.{prefix}-{name}.deps = {deps}')
    for t in targets:
        print(f'.ninja-goals.{t} += $(.{prefix}-{name}.deps)')

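# Emit the per-suite stanza: its dependencies (above) plus the conditional
# that adds the suite (and its -slow/-thorough variants) to the list of
# suites passed to "meson test" when a matching goal was requested.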
def emit_suite(name, suite, prefix):
    emit_suite_deps(name, suite, prefix)
    targets = f'{prefix}-{name} {prefix}-report-{name}.junit.xml {prefix} {prefix}-report.junit.xml'
    print(f'ifneq ($(filter {targets}, $(MAKECMDGOALS)),)')
    print(f'.{prefix}.mtest-suites += ' + ' '.join(suite.names(name)))
    print(f'endif')

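# Map each build-target id from the introspection data to the list of files it
# produces, relative to the current (build) directory; these become the
# per-suite ninja goals.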
targets = {t['id']: [os.path.relpath(f) for f in t['filename']]
           for t in introspect['targets']}

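# Generate the "check" targets from the introspected tests, then the "bench"
# targets from the benchmarks, using the same machinery for both.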
testsuites = defaultdict(Suite)
for test in introspect['tests']:
    process_tests(test, targets, testsuites)
emit_prolog(testsuites, 'check')
for name, suite in testsuites.items():
    emit_suite(name, suite, 'check')

benchsuites = defaultdict(Suite)
for test in introspect['benchmarks']:
    process_tests(test, targets, benchsuites)
emit_prolog(benchsuites, 'bench')
for name, suite in benchsuites.items():
    emit_suite(name, suite, 'bench')