# SPDX-License-Identifier: GPL-2.0
#
# Parses KTAP test results from a kernel dmesg log and incrementally prints
# results in a reader-friendly format. Stores and returns test results in a
# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
# Author: Rae Moar <rmoar@google.com>

from __future__ import annotations
from dataclasses import dataclass
import re
import textwrap

from enum import Enum, auto
from typing import Iterable, Iterator, List, Optional, Tuple

from kunit_printer import stdout

class Test:
	"""
	A class to represent a test parsed from KTAP results. All KTAP
	results within a test log are stored in a main Test object as
	subtests.

	Attributes:
	status : TestStatus - status of the test
	name : str - name of the test
	expected_count : int - expected number of subtests (0 if single
		test case and None if unknown expected number of subtests)
	subtests : List[Test] - list of subtests
	log : List[str] - log of KTAP lines that correspond to the test
	counts : TestCounts - counts of the test statuses and errors of
		subtests or of the test itself if the test is a single
		test case.
	"""
	def __init__(self) -> None:
		"""Creates Test object with default attributes."""
		self.status = TestStatus.TEST_CRASHED
		self.name = ''
		self.expected_count = 0  # type: Optional[int]
		self.subtests = []  # type: List[Test]
		self.log = []  # type: List[str]
		self.counts = TestCounts()

	def __str__(self) -> str:
		"""Returns string representation of a Test class object."""
		return (f'Test({self.status}, {self.name}, {self.expected_count}, '
			f'{self.subtests}, {self.log}, {self.counts})')

	def __repr__(self) -> str:
		"""Returns string representation of a Test class object."""
		return str(self)

	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')

	def ok_status(self) -> bool:
		"""Returns true if the status was ok, i.e. passed or skipped."""
		return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)

class TestStatus(Enum):
	"""An enumeration class to represent the status of a test."""
	SUCCESS = auto()
	FAILURE = auto()
	SKIPPED = auto()
	TEST_CRASHED = auto()
	NO_TESTS = auto()
	FAILURE_TO_PARSE_TESTS = auto()

@dataclass
class TestCounts:
	"""
	Tracks the counts of statuses of all test cases and any errors within
	a Test.
	"""
	passed: int = 0
	failed: int = 0
	crashed: int = 0
	skipped: int = 0
	errors: int = 0

	def __str__(self) -> str:
		"""Returns the string representation of a TestCounts object."""
		statuses = [('passed', self.passed), ('failed', self.failed),
			('crashed', self.crashed), ('skipped', self.skipped),
			('errors', self.errors)]
		return f'Ran {self.total()} tests: ' + \
			', '.join(f'{s}: {n}' for s, n in statuses if n > 0)

	def total(self) -> int:
		"""Returns the total number of test cases within a test
		object, where a test case is a test with no subtests.
		"""
		return (self.passed + self.failed + self.crashed +
			self.skipped)

	def add_subtest_counts(self, counts: TestCounts) -> None:
		"""
		Adds the counts of another TestCounts object to the current
		TestCounts object. Used to add the counts of a subtest to the
		parent test.

		Parameters:
		counts - a different TestCounts object whose counts
			will be added to the counts of the TestCounts object
		"""
		self.passed += counts.passed
		self.failed += counts.failed
		self.crashed += counts.crashed
		self.skipped += counts.skipped
		self.errors += counts.errors

	def get_status(self) -> TestStatus:
		"""Returns the aggregated status of a Test using test
		counts.
		"""
		if self.total() == 0:
			return TestStatus.NO_TESTS
		if self.crashed:
			# Crashes should take priority.
			return TestStatus.TEST_CRASHED
		if self.failed:
			return TestStatus.FAILURE
		if self.passed:
			# No failures or crashes, looks good!
			return TestStatus.SUCCESS
		# We have only skipped tests.
		return TestStatus.SKIPPED

	def add_status(self, status: TestStatus) -> None:
		"""Increments the count for `status`."""
		if status == TestStatus.SUCCESS:
			self.passed += 1
		elif status == TestStatus.FAILURE:
			self.failed += 1
		elif status == TestStatus.SKIPPED:
			self.skipped += 1
		elif status != TestStatus.NO_TESTS:
			self.crashed += 1

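# Example of status aggregation (a minimal sketch; the values are
# illustrative): a single crash takes priority over any number of passes.
#
#   counts = TestCounts(passed=10, crashed=1)
#   counts.get_status()  # TestStatus.TEST_CRASHED
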
class LineStream:
	"""
	A class to represent the lines of kernel output.
	Provides a lazy peek()/pop() interface over an iterator of
	(line#, text).
	"""
	_lines: Iterator[Tuple[int, str]]
	_next: Tuple[int, str]
	_need_next: bool
	_done: bool

	def __init__(self, lines: Iterator[Tuple[int, str]]):
		"""Creates a new LineStream that wraps the given iterator."""
		self._lines = lines
		self._done = False
		self._need_next = True
		self._next = (0, '')

	def _get_next(self) -> None:
		"""Advances the LineStream to the next line, if necessary."""
		if not self._need_next:
			return
		try:
			self._next = next(self._lines)
		except StopIteration:
			self._done = True
		finally:
			self._need_next = False

	def peek(self) -> str:
		"""Returns the current line, without advancing the LineStream.
		"""
		self._get_next()
		return self._next[1]

	def pop(self) -> str:
		"""Returns the current line and advances the LineStream to
		the next line.
		"""
		s = self.peek()
		if self._done:
			raise ValueError(f'LineStream: going past EOF, last line was {s}')
		self._need_next = True
		return s

	def __bool__(self) -> bool:
		"""Returns True if stream has more lines."""
		self._get_next()
		return not self._done

	# Only used by kunit_tool_test.py.
	def __iter__(self) -> Iterator[str]:
		"""Yields the remaining lines, emptying the LineStream."""
		while bool(self):
			yield self.pop()

	def line_number(self) -> int:
		"""Returns the line number of the current line."""
		self._get_next()
		return self._next[0]

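# Example usage (a minimal sketch; the two-line log stands in for real
# kernel output):
#
#   stream = LineStream(enumerate(['KTAP version 1', '1..1'], start=1))
#   stream.peek()         # 'KTAP version 1' (does not advance)
#   stream.pop()          # 'KTAP version 1' (advances)
#   stream.line_number()  # 2
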
# Parsing helper methods:

KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
KTAP_END = re.compile(r'\s*(List of all partitions:|'
	'Kernel panic - not syncing: VFS:|reboot: System halted)')

def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
	"""Extracts KTAP lines from the kernel output."""
	def isolate_ktap_output(kernel_output: Iterable[str]) \
			-> Iterator[Tuple[int, str]]:
		line_num = 0
		started = False
		for line in kernel_output:
			line_num += 1
			line = line.rstrip()  # remove trailing \n
			if not started and KTAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(
					line.split('KTAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif not started and TAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(line.split('TAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif started and KTAP_END.search(line):
				# stop extracting KTAP lines
				break
			elif started:
				# remove the prefix, if any.
				line = line[prefix_len:]
				yield line_num, line
	return LineStream(lines=isolate_ktap_output(kernel_output))

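# For example (a sketch; the bracketed timestamps are illustrative dmesg
# prefixes, which are stripped from every extracted KTAP line):
#
#   stream = extract_tap_lines([
#       '[    0.510000] non-KTAP noise',
#       '[    0.520000] KTAP version 1',
#       '[    0.520000] 1..1',
#   ])
#   list(stream)  # ['KTAP version 1', '1..1']
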
KTAP_VERSIONS = [1]
TAP_VERSIONS = [13, 14]

def check_version(version_num: int, accepted_versions: List[int],
			version_type: str, test: Test) -> None:
	"""
	Adds error to test object if version number is too high or too
	low.

	Parameters:
	version_num - the version number from the parsed KTAP or TAP
		header line
	accepted_versions - list of accepted KTAP or TAP versions
	version_type - 'KTAP' or 'TAP' depending on the type of
		version line.
	test - Test object for current test being parsed
	"""
	if version_num < min(accepted_versions):
		test.add_error(f'{version_type} version lower than expected!')
	elif version_num > max(accepted_versions):
		test.add_error(f'{version_type} version higher than expected!')

def parse_ktap_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses KTAP/TAP header line and checks version number.
	Returns False if it fails to parse the KTAP/TAP header line.

	Accepted formats:
	- 'KTAP version [version number]'
	- 'TAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed KTAP/TAP header line
	"""
	ktap_match = KTAP_START.match(lines.peek())
	tap_match = TAP_START.match(lines.peek())
	if ktap_match:
		version_num = int(ktap_match.group(1))
		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
	elif tap_match:
		version_num = int(tap_match.group(1))
		check_version(version_num, TAP_VERSIONS, 'TAP', test)
	else:
		return False
	lines.pop()
	return True

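# For example (a sketch; the LineStream is built from an in-memory log):
#
#   test = Test()
#   lines = LineStream(enumerate(['KTAP version 1', '1..2'], start=1))
#   parse_ktap_header(lines, test)  # True; the version line is consumed
#   lines.peek()                    # '1..2'
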
TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$')

def parse_test_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses test header and stores test name in test object.
	Returns False if it fails to parse the test header line.

	Accepted format:
	- '# Subtest: [test name]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test header line
	"""
	match = TEST_HEADER.match(lines.peek())
	if not match:
		return False
	test.name = match.group(1)
	lines.pop()
	return True

TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')

def parse_test_plan(lines: LineStream, test: Test) -> bool:
	"""
	Parses the test plan line and stores the expected number of subtests
	in the test object. Returns False and sets expected_count to None if
	there is no valid test plan.

	Accepted format:
	- '1..[number of subtests]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test plan line
	"""
	match = TEST_PLAN.match(lines.peek())
	if not match:
		test.expected_count = None
		return False
	expected_count = int(match.group(1))
	test.expected_count = expected_count
	lines.pop()
	return True

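# For example (a sketch):
#
#   test = Test()
#   lines = LineStream(enumerate(['1..4'], start=1))
#   parse_test_plan(lines, test)  # True
#   test.expected_count           # 4
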
TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')

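# Lines these patterns match, for example:
#
#   'ok 1 - test_case'                  (TEST_RESULT)
#   'not ok 2 test_case'                (TEST_RESULT)
#   'ok 3 - test_case # SKIP reason'    (TEST_RESULT and TEST_RESULT_SKIP)
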
def peek_test_name_match(lines: LineStream, test: Test) -> bool:
	"""
	Matches current line with the format of a test result line and checks
	if the name matches the name of the current test.
	Returns False if it fails to match the format or name.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if the line matches a test result line and its name matches
		the expected test name
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		return False
	name = match.group(4)
	return name == test.name

def parse_test_result(lines: LineStream, test: Test,
			expected_num: int) -> bool:
	"""
	Parses test result line and stores the status and name in the test
	object. Reports an error if the test number does not match expected
	test number.
	Returns False if it fails to parse the test result line.

	Note that the SKIP directive is the only directive that causes a
	change in status.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed
	expected_num - expected test number for current test

	Return:
	True if successfully parsed a test result line.
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	skip_match = TEST_RESULT_SKIP.match(line)

	# Check if line matches test result line format
	if not match:
		return False
	lines.pop()

	# Set name of test object
	if skip_match:
		test.name = skip_match.group(4)
	else:
		test.name = match.group(4)

	# Check test num
	num = int(match.group(2))
	if num != expected_num:
		test.add_error(f'Expected test number {expected_num} but found {num}')

	# Set status of test object
	status = match.group(1)
	if skip_match:
		test.status = TestStatus.SKIPPED
	elif status == 'ok':
		test.status = TestStatus.SUCCESS
	else:
		test.status = TestStatus.FAILURE
	return True

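# For example (a sketch; the SKIP directive overrides the ok/not ok status):
#
#   test = Test()
#   lines = LineStream(enumerate(['ok 1 - example # SKIP unsupported'],
#                                start=1))
#   parse_test_result(lines, test, expected_num=1)  # True
#   test.status  # TestStatus.SKIPPED
#   test.name    # 'example'
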
def parse_diagnostic(lines: LineStream) -> List[str]:
	"""
	Parses lines that do not match the format of a test result line or
	test header line and returns them in a list.

	Line formats that are not parsed:
	- '# Subtest: [test name]'
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'
	- 'KTAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse

	Return:
	Log of diagnostic lines
	"""
	log = []  # type: List[str]
	non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START]
	while lines and not any(regex.match(lines.peek())
			for regex in non_diagnostic_lines):
		log.append(lines.pop())
	return log


# Printing helper methods:

DIVIDER = '=' * 60

def format_test_divider(message: str, len_message: int) -> str:
	"""
	Returns string with message centered in fixed width divider.

	Example:
	'===================== message example ====================='

	Parameters:
	message - message to be centered in divider line
	len_message - length of the message to be printed such that
		any characters of the color codes are not counted

	Return:
	String containing message centered in fixed width divider
	"""
	default_count = 3  # default number of '=' characters on each side
	len_1 = default_count
	len_2 = default_count
	difference = len(DIVIDER) - len_message - 2  # 2 spaces added
	if difference > 0:
		# calculate number of '=' characters for each side of the divider
		len_1 = int(difference / 2)
		len_2 = difference - len_1
	return ('=' * len_1) + f' {message} ' + ('=' * len_2)

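# For example, with the 60-character DIVIDER and message 'example'
# (len_message 7): difference = 60 - 7 - 2 = 51, giving len_1 = 25 and
# len_2 = 26, i.e. 25 '='s, ' example ', then 26 '='s.
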
def print_test_header(test: Test) -> None:
	"""
	Prints test header with test name and optionally the expected number
	of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	if message != "":
		# Add a leading space before the subtest counts only if a test name
		# is provided using a "# Subtest" header line.
		message += " "
	if test.expected_count:
		if test.expected_count == 1:
			message += '(1 subtest)'
		else:
			message += f'({test.expected_count} subtests)'
	stdout.print_with_timestamp(format_test_divider(message, len(message)))

def print_log(log: Iterable[str]) -> None:
	"""Prints all strings in saved log for test in yellow."""
	formatted = textwrap.dedent('\n'.join(log))
	for line in formatted.splitlines():
		stdout.print_with_timestamp(stdout.yellow(line))

def format_test_result(test: Test) -> str:
	"""
	Returns string with formatted test result with colored status and test
	name.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed

	Return:
	String containing formatted test result
	"""
	if test.status == TestStatus.SUCCESS:
		return stdout.green('[PASSED] ') + test.name
	if test.status == TestStatus.SKIPPED:
		return stdout.yellow('[SKIPPED] ') + test.name
	if test.status == TestStatus.NO_TESTS:
		return stdout.yellow('[NO TESTS RUN] ') + test.name
	if test.status == TestStatus.TEST_CRASHED:
		print_log(test.log)
		return stdout.red('[CRASHED] ') + test.name
	print_log(test.log)
	return stdout.red('[FAILED] ') + test.name

def print_test_result(test: Test) -> None:
	"""
	Prints result line with status of test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	stdout.print_with_timestamp(format_test_result(test))

def print_test_footer(test: Test) -> None:
	"""
	Prints test footer with status of test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = format_test_result(test)
	stdout.print_with_timestamp(format_test_divider(message,
		len(message) - stdout.color_len()))

def _summarize_failed_tests(test: Test) -> str:
	"""Tries to summarize all the failing subtests in `test`."""

	def failed_names(test: Test, parent_name: str) -> List[str]:
		# Note: we use 'main' internally for the top-level test.
		if not parent_name or parent_name == 'main':
			full_name = test.name
		else:
			full_name = parent_name + '.' + test.name

		if not test.subtests:  # this is a leaf node
			return [full_name]

		# If all the children failed, just say this subtest failed.
		# Don't summarize it down to "the top-level test failed", though.
		failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
		if parent_name and len(failed_subtests) == len(test.subtests):
			return [full_name]

		all_failures = []  # type: List[str]
		for t in failed_subtests:
			all_failures.extend(failed_names(t, full_name))
		return all_failures

	failures = failed_names(test, '')
	# If there are too many failures, printing them out will just be noisy.
	if len(failures) > 10:  # this is an arbitrary limit
		return ''

	return 'Failures: ' + ', '.join(failures)

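# For example (a sketch; in practice these Test objects are built by
# parse_test):
#
#   suite = Test(); suite.name = 'suite'; suite.status = TestStatus.FAILURE
#   case = Test(); case.name = 'case_1'; case.status = TestStatus.FAILURE
#   suite.subtests = [case]
#   _summarize_failed_tests(suite)  # 'Failures: suite.case_1'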

def print_summary_line(test: Test) -> None:
	"""
	Prints summary line of test object. Color of line is dependent on
	status of test. Color is green if test passes, yellow if test is
	skipped, and red if the test fails or crashes. Summary line contains
	counts of the statuses of the test's subtests, or of the test itself
	if it has no subtests.

	Example:
	"Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
	Errors: 0"

	Parameters:
	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = stdout.green
	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
		color = stdout.yellow
	else:
		color = stdout.red
	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))

	# Summarize failures that might have gone off-screen since we had a lot
	# of tests (arbitrarily defined as >=100 for now).
	if test.ok_status() or test.counts.total() < 100:
		return
	summarized = _summarize_failed_tests(test)
	if not summarized:
		return
	stdout.print_with_timestamp(color(summarized))

# Other methods:

def bubble_up_test_results(test: Test) -> None:
	"""
	If the test has subtests, add the test counts of the subtests to the
	test and, if any of the subtests crashed, set the test status to
	crashed. Otherwise, if the test has no subtests, add the status of
	the test to the test counts.

	Parameters:
	test - Test object for current test being parsed
	"""
	subtests = test.subtests
	counts = test.counts
	status = test.status
	for t in subtests:
		counts.add_subtest_counts(t.counts)
	if counts.total() == 0:
		counts.add_status(status)
	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
		test.status = TestStatus.TEST_CRASHED

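# For example (a sketch; parse_test fills in these fields during parsing):
#
#   parent = Test()
#   child = Test(); child.status = TestStatus.SUCCESS
#   child.counts.add_status(child.status)
#   parent.subtests = [child]
#   bubble_up_test_results(parent)
#   parent.counts.passed  # 1
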
def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test:
	"""
	Finds next test to parse in LineStream, creates new Test object,
	parses any subtests of the test, populates Test object with all
	information (status, name) about the test and the Test objects for
	any subtests, and then returns the Test object.

	Accepted test formats:

	- Main KTAP/TAP header

	Example:

	KTAP version 1
	1..4
	[subtests]

	- Subtest header (must include either the KTAP version line or
	  "# Subtest" header line)

	Example (preferred format with both KTAP version line and
	"# Subtest" line):

	KTAP version 1
	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	Example (only "# Subtest" line):

	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	Example (only KTAP version line, compliant with KTAP v1 spec):

	KTAP version 1
	1..3
	[subtests]
	ok 1 name

	- Test result line

	Example:

	ok 1 - test

	Parameters:
	lines - LineStream of KTAP output to parse
	expected_num - expected test number for test to be parsed
	log - list of strings containing any preceding diagnostic lines
		corresponding to the current test
	is_subtest - boolean indicating whether test is a subtest

	Return:
	Test object populated with characteristics and any subtests
	"""
	test = Test()
	test.log.extend(log)
	if not is_subtest:
		# If parsing the main/top-level test, parse KTAP version line and
		# test plan
		test.name = "main"
		ktap_line = parse_ktap_header(lines, test)
		parse_test_plan(lines, test)
		parent_test = True
	else:
		# If not the main test, attempt to parse a test header containing
		# the KTAP version line and/or subtest header line
		ktap_line = parse_ktap_header(lines, test)
		subtest_line = parse_test_header(lines, test)
		parent_test = (ktap_line or subtest_line)
		if parent_test:
			# If KTAP version line and/or subtest header is found, attempt
			# to parse test plan and print test header
			parse_test_plan(lines, test)
			print_test_header(test)
	expected_count = test.expected_count
	subtests = []
	test_num = 1
	while parent_test and (expected_count is None or test_num <= expected_count):
		# Loop to parse any subtests.
		# Break after parsing the expected number of tests or, if
		# the expected number of tests is unknown, break when a test
		# result line with a name matching the subtest header is
		# found or there are no more lines in the stream.
		sub_log = parse_diagnostic(lines)
		sub_test = Test()
		if not lines or (peek_test_name_match(lines, test) and
				is_subtest):
			if expected_count and test_num <= expected_count:
				# If parser reaches end of test before
				# parsing expected number of subtests, print
				# crashed subtest and record error
				test.add_error('missing expected subtest!')
				sub_test.log.extend(sub_log)
				test.counts.add_status(
					TestStatus.TEST_CRASHED)
				print_test_result(sub_test)
			else:
				test.log.extend(sub_log)
				break
		else:
			sub_test = parse_test(lines, test_num, sub_log, True)
		subtests.append(sub_test)
		test_num += 1
	test.subtests = subtests
	if is_subtest:
		# If not main test, look for test result line
		test.log.extend(parse_diagnostic(lines))
		if test.name != "" and not peek_test_name_match(lines, test):
			test.add_error('missing subtest result line!')
		else:
			parse_test_result(lines, test, expected_num)

	# Check for there being no subtests within parent test
	if parent_test and len(subtests) == 0:
		# Don't override a bad status if this test had one reported.
		# Assumption: no subtests means CRASHED is from Test.__init__()
		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
			test.status = TestStatus.NO_TESTS
			test.add_error('0 tests run!')

	# Add statuses to TestCounts attribute in Test object
	bubble_up_test_results(test)
	if parent_test and is_subtest:
		# If test has subtests and is not the main test object, print
		# footer.
		print_test_footer(test)
	elif is_subtest:
		print_test_result(test)
	return test

def parse_run_tests(kernel_output: Iterable[str]) -> Test:
	"""
	Using kernel output, extract KTAP lines, parse the lines for test
	results and print condensed test results and summary line.

	Parameters:
	kernel_output - Iterable object containing lines of kernel output

	Return:
	Test - the main test object with all subtests.
	"""
	stdout.print_with_timestamp(DIVIDER)
	lines = extract_tap_lines(kernel_output)
	test = Test()
	if not lines:
		test.name = '<missing>'
		test.add_error('Could not find any KTAP output. Did any KUnit tests run?')
		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		test = parse_test(lines, 0, [], False)
		if test.status != TestStatus.NO_TESTS:
			test.status = test.counts.get_status()
	stdout.print_with_timestamp(DIVIDER)
	print_summary_line(test)
	return test

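# Example usage (a minimal sketch; in kunit_tool the kernel output comes
# from a freshly booted kernel rather than a file, and 'test.log' is a
# hypothetical path):
#
#   with open('test.log') as f:
#       result = parse_run_tests(f)
#   print(result.status, result.counts)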