# Functional test that boots known good tuxboot images the same way
# that tuxrun (www.tuxrun.org) does. This tool is used by things like
# the LKFT project to run regression tests on kernels.
#
# Copyright (c) 2023 Linaro Ltd.
#
# Author:
#  Alex Bennée <alex.bennee@linaro.org>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import time

from qemu_test import QemuSystemTest
from qemu_test import exec_command, exec_command_and_wait_for_pattern
from qemu_test import wait_for_console_pattern
from qemu_test import has_cmd, run_cmd, get_qemu_img

class TuxRunBaselineTest(QemuSystemTest):

    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0'
    # Tests are ~10-40s, allow for --debug/--enable-gcov overhead
    timeout = 100

    def get_tag(self, tagname, default=None):
        """
        Get the metadata tag or return the default.
        """
        utag = self._get_unique_tag_val(tagname)
        print(f"{tagname}/{default} -> {utag}")
        if utag:
            return utag

        return default

    def setUp(self):
        super().setUp()

        # We need zstd for all the tuxrun tests
        # See https://github.com/avocado-framework/avocado/issues/5609
        (has_zstd, msg) = has_cmd('zstd')
        if has_zstd is False:
            self.skipTest(msg)
        self.zstd = 'zstd'

        # Pre-init TuxRun specific settings: Most machines work with
        # reasonable defaults but we sometimes need to tweak the
        # config. To avoid open coding everything we store all these
        # details in the metadata for each test.
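        # (For example, Arm boards with a PL011 UART typically override
        # self.console to "ttyAMA0" before calling common_tuxrun().)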

        # The tuxboot tag matches the root directory
        self.tuxboot = self.arch

        # Most Linux systems use ttyS0 for their serial port
        self.console = "ttyS0"

        # Does the machine shut down QEMU nicely on "halt"?
        self.wait_for_shutdown = True

        self.root = "vda"

        # Occasionally we need extra devices to hook things up
        self.extradev = None

        self.qemu_img = get_qemu_img(self)

    def wait_for_console_pattern(self, success_message, vm=None):
        wait_for_console_pattern(self, success_message,
                                 failure_message='Kernel panic - not syncing',
                                 vm=vm)

    def fetch_tuxrun_assets(self, kernel_asset, rootfs_asset, dtb_asset=None):
        """
        Fetch the TuxBoot assets.
        """
        kernel_image = kernel_asset.fetch()
        disk_image_zst = rootfs_asset.fetch()

        run_cmd([self.zstd, "-f", "-d", disk_image_zst,
                 "-o", self.workdir + "/rootfs.ext4"])
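        # (Roughly equivalent to running
        #  "zstd -f -d <rootfs>.zst -o <workdir>/rootfs.ext4" by hand.)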

        dtb = dtb_asset.fetch() if dtb_asset is not None else None

        return (kernel_image, self.workdir + "/rootfs.ext4", dtb)

    def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0):
        """
        Set up the run and add the common parameters to the system.
        """
        self.vm.set_console(console_index=console_index)

        # all block devices are raw ext4 images
        blockdev = "driver=raw,file.driver=file," \
            + f"file.filename={disk},node-name=hd0"

        kcmd_line = self.KERNEL_COMMON_COMMAND_LINE
        kcmd_line += f" root=/dev/{self.root}"
        kcmd_line += f" console={self.console}"
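        # e.g. "printk.time=0 root=/dev/vda console=ttyS0" with the
        # defaults established in setUp()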

        self.vm.add_args('-kernel', kernel,
                         '-append', kcmd_line,
                         '-blockdev', blockdev)

        # Sometimes we need extra devices attached
        if self.extradev:
            self.vm.add_args('-device', self.extradev)

        self.vm.add_args('-device',
                         f"{drive},drive=hd0")
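        # This wires the chosen block front end (virtio-blk-device by
        # default) to the hd0 blockdev node declared above, which the
        # guest then sees as its root disk (/dev/vda by default).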

        # Some machines need an explicit DTB
        if dtb:
            self.vm.add_args('-dtb', dtb)

    def run_tuxtest_tests(self, haltmsg):
        """
        Wait for the system to boot and reach the login prompt, run a
        few commands on the console, then trigger a shutdown and wait
        for QEMU to exit cleanly.
        """
        self.wait_for_console_pattern("Welcome to TuxTest")
        time.sleep(0.2)
        exec_command(self, 'root')
        time.sleep(0.2)
        exec_command(self, 'cat /proc/interrupts')
        time.sleep(0.1)
        exec_command(self, 'cat /proc/self/maps')
        time.sleep(0.1)
        exec_command(self, 'uname -a')
        time.sleep(0.1)
        exec_command_and_wait_for_pattern(self, 'halt', haltmsg)

        # Wait for VM to shut down gracefully if it can
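        # (vm.wait() lets the guest-initiated poweroff bring QEMU down
        # on its own, while vm.shutdown() has the test harness
        # terminate the VM for machines that don't exit on "halt".)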
        if self.wait_for_shutdown:
            self.vm.wait()
        else:
            self.vm.shutdown()

    def common_tuxrun(self,
                      kernel_asset,
                      rootfs_asset,
                      dtb_asset=None,
                      drive="virtio-blk-device",
                      haltmsg="reboot: System halted",
                      console_index=0):
        """
        Common path for LKFT tests. Unless we need to do something
        special with the command line we can process most things using
        the tag metadata.
        """
        (kernel, disk, dtb) = self.fetch_tuxrun_assets(kernel_asset, rootfs_asset,
                                                       dtb_asset)

        self.prepare_run(kernel, disk, drive, dtb, console_index)
        self.vm.launch()
        self.run_tuxtest_tests(haltmsg)
        os.remove(disk)
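# An illustrative sketch of a concrete test (hypothetical asset URLs and
# hashes, plus an assumed x86_64 setup): subclass TuxRunBaselineTest,
# declare the assets and hand them to common_tuxrun() along with any
# machine-specific overrides.
#
#     from qemu_test import Asset
#
#     class TuxRunX86Test(TuxRunBaselineTest):
#         ASSET_KERNEL = Asset('https://example.com/bzImage', '<sha256>')
#         ASSET_ROOTFS = Asset('https://example.com/rootfs.ext4.zst', '<sha256>')
#
#         def test_x86_64(self):
#             self.set_machine('q35')
#             self.common_tuxrun(kernel_asset=self.ASSET_KERNEL,
#                                rootfs_asset=self.ASSET_ROOTFS,
#                                drive="virtio-blk-pci")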