# Functional test that boots known good tuxboot images the same way
# that tuxrun (www.tuxrun.org) does. This tool is used by things like
# the LKFT project to run regression tests on kernels.
#
# Copyright (c) 2023 Linaro Ltd.
#
# Author:
#  Alex Bennée <alex.bennee@linaro.org>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import stat
import time

from qemu_test import QemuSystemTest
from qemu_test import exec_command, exec_command_and_wait_for_pattern
from qemu_test import wait_for_console_pattern
from qemu_test import has_cmd, run_cmd, get_qemu_img

class TuxRunBaselineTest(QemuSystemTest):
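    """
    Base class for tests that boot known good tuxboot images the way
    tuxrun does. Per-architecture subclasses supply the kernel and
    rootfs assets and call common_tuxrun() (see the sketch at the end
    of this file).
    """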

    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0'
    # Tests are ~10-40s, allow for --debug/--enable-gcov overhead
    timeout = 100

    def get_tag(self, tagname, default=None):
        """
        Get the metadata tag or return the default.
        """
        utag = self._get_unique_tag_val(tagname)
        print(f"{tagname}/{default} -> {utag}")
        if utag:
            return utag

        return default

    def setUp(self):
        super().setUp()

        # We need zstd for all the tuxrun tests
        # See https://github.com/avocado-framework/avocado/issues/5609
        (has_zstd, msg) = has_cmd('zstd')
        if not has_zstd:
            self.skipTest(msg)
        self.zstd = 'zstd'

        # Pre-init TuxRun specific settings: most machines work with
        # reasonable defaults but we sometimes need to tweak the
        # config. To avoid open-coding everything we store these
        # details in the metadata for each test.

        # The tuxboot tag matches the root directory
        self.tuxboot = self.arch

        # Most Linux guests use ttyS0 for their serial port
        self.console = "ttyS0"

        # Does the machine shut down QEMU cleanly on "halt"?
        self.wait_for_shutdown = True

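        # Block device the kernel's root= parameter should point at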
        self.root = "vda"

        # Occasionally we need extra devices to hook things up
        self.extradev = None

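        # Path to the qemu-img tool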
        self.qemu_img = get_qemu_img(self)

    def wait_for_console_pattern(self, success_message, vm=None):
        """
        Wait for success_message on the console, failing if the
        kernel panics first.
        """
        wait_for_console_pattern(self, success_message,
                                 failure_message='Kernel panic - not syncing',
                                 vm=vm)

    def fetch_tuxrun_assets(self, kernel_asset, rootfs_asset, dtb_asset=None):
        """
        Fetch the TuxBoot assets and decompress the rootfs.
        """
        kernel_image = kernel_asset.fetch()
        disk_image_zst = rootfs_asset.fetch()

        disk_image = os.path.join(self.workdir, "rootfs.ext4")

        run_cmd([self.zstd, "-f", "-d", disk_image_zst,
                 "-o", disk_image])
        # zstd copies the source archive's permissions to the output
        # file, so we must make it writable for QEMU
        os.chmod(disk_image, stat.S_IRUSR | stat.S_IWUSR)

        dtb = dtb_asset.fetch() if dtb_asset is not None else None

        return (kernel_image, disk_image, dtb)

    def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0):
        """
        Set up the run and add the common parameters to the system.
        """
        self.vm.set_console(console_index=console_index)

        # All block devices are raw ext4 images
        blockdev = "driver=raw,file.driver=file," \
            + f"file.filename={disk},node-name=hd0"

        kcmd_line = self.KERNEL_COMMON_COMMAND_LINE
        kcmd_line += f" root=/dev/{self.root}"
        kcmd_line += f" console={self.console}"

        self.vm.add_args('-kernel', kernel,
                         '-append', kcmd_line,
                         '-blockdev', blockdev)

        # Sometimes we need extra devices attached
        if self.extradev:
            self.vm.add_args('-device', self.extradev)

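        # Attach the disk front-end to the hd0 node defined above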
        self.vm.add_args('-device',
                         f"{drive},drive=hd0")

        # Some machines need an explicit DTB
        if dtb:
            self.vm.add_args('-dtb', dtb)

    def run_tuxtest_tests(self, haltmsg):
        """
        Wait for the system to boot up, wait for the login prompt and
        then run a few things on the console. Trigger a shutdown and
        wait for QEMU to exit cleanly.
        """
        self.wait_for_console_pattern("Welcome to TuxTest")
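        # Brief pauses give the console time to settle between commands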
        time.sleep(0.2)
        exec_command(self, 'root')
        time.sleep(0.2)
        exec_command(self, 'cat /proc/interrupts')
        time.sleep(0.1)
        exec_command(self, 'cat /proc/self/maps')
        time.sleep(0.1)
        exec_command(self, 'uname -a')
        time.sleep(0.1)
        exec_command_and_wait_for_pattern(self, 'halt', haltmsg)

        # Wait for VM to shut down gracefully if it can
        if self.wait_for_shutdown:
            self.vm.wait()
        else:
            self.vm.shutdown()

    def common_tuxrun(self,
                      kernel_asset,
                      rootfs_asset,
                      dtb_asset=None,
                      drive="virtio-blk-device",
                      haltmsg="reboot: System halted",
                      console_index=0):
        """
        Common path for LKFT tests. Unless we need to do something
        special with the command line we can process most things using
        the tag metadata.
        """
        (kernel, disk, dtb) = self.fetch_tuxrun_assets(kernel_asset,
                                                       rootfs_asset,
                                                       dtb_asset)

        self.prepare_run(kernel, disk, drive, dtb, console_index)
        self.vm.launch()
        self.run_tuxtest_tests(haltmsg)
        os.remove(disk)
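
# A minimal usage sketch, kept as a comment rather than live code: a
# per-architecture test subclasses TuxRunBaselineTest, declares its
# assets and hands them to common_tuxrun(). The URLs and hashes are
# placeholders, and Asset/set_machine() are assumed to come from the
# qemu_test framework.
#
#   from qemu_test import Asset
#
#   class TuxRunExampleTest(TuxRunBaselineTest):
#
#       ASSET_KERNEL = Asset('https://example.com/Image', '<sha256>')
#       ASSET_ROOTFS = Asset('https://example.com/rootfs.ext4.zst',
#                            '<sha256>')
#
#       def test_boot(self):
#           self.set_machine('virt')
#           self.common_tuxrun(kernel_asset=self.ASSET_KERNEL,
#                              rootfs_asset=self.ASSET_ROOTFS)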