/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KCOV_H
#define _LINUX_KCOV_H

#include <linux/sched.h>
#include <uapi/linux/kcov.h>

struct task_struct;

#ifdef CONFIG_KCOV

enum kcov_mode {
	/* Coverage collection is not enabled yet. */
	KCOV_MODE_DISABLED = 0,
	/* KCOV was initialized, but tracing mode hasn't been chosen yet. */
	KCOV_MODE_INIT = 1,
	/*
	 * Tracing coverage collection mode.
	 * Covered PCs are collected in a per-task buffer.
	 */
	KCOV_MODE_TRACE_PC = 2,
	/* Collecting comparison operands mode. */
	KCOV_MODE_TRACE_CMP = 3,
};
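
/*
 * Illustrative sketch (not part of this header): these modes correspond to
 * the debugfs ioctl interface documented in Documentation/dev-tools/kcov.rst.
 * A userspace user typically does roughly the following, where COVER_SIZE is
 * a caller-chosen buffer size in words, not a kernel macro:
 *
 *	fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *
 * KCOV_INIT_TRACE leaves the task in KCOV_MODE_INIT; KCOV_ENABLE with
 * KCOV_TRACE_PC or KCOV_TRACE_CMP switches it to the corresponding tracing
 * mode above.
 */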

/*
 * Flag set in task->kcov_mode while the task is being context-switched, so
 * that coverage callbacks hit on the switch path itself are ignored.
 */
#define KCOV_IN_CTXSW	(1 << 30)

/* Per-task setup/teardown hooks for the fork and exit paths. */
void kcov_task_init(struct task_struct *t);
void kcov_task_exit(struct task_struct *t);

/*
 * Called by the scheduler around a context switch to suppress coverage
 * collection while switching (see KCOV_IN_CTXSW).
 */
#define kcov_prepare_switch(t)			\
do {						\
	(t)->kcov_mode |= KCOV_IN_CTXSW;	\
} while (0)

#define kcov_finish_switch(t)			\
do {						\
	(t)->kcov_mode &= ~KCOV_IN_CTXSW;	\
} while (0)

/* See Documentation/dev-tools/kcov.rst for usage details. */
void kcov_remote_start(u64 handle);
void kcov_remote_stop(void);
u64 kcov_common_handle(void);
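
/*
 * Illustrative sketch, assuming a kernel thread that processes work on
 * behalf of a userspace task which enabled remote coverage for "handle"
 * (the names here are hypothetical):
 *
 *	kcov_remote_start(handle);
 *	... code whose coverage is attributed to the remote handle ...
 *	kcov_remote_stop();
 *
 * kcov_common_handle() returns the common handle of the current task (0 if
 * none), which callers can save and later pass to kcov_remote_start_common()
 * from a different context.
 */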

static inline void kcov_remote_start_common(u64 id)
{
	kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, id));
}

static inline void kcov_remote_start_usb(u64 id)
{
	kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
}

/*
 * The softirq flavor of the kcov_remote_*() functions is a temporary
 * workaround for kcov's lack of support for nested remote coverage sections
 * in task context. Adding support for nested sections is tracked in:
 * https://bugzilla.kernel.org/show_bug.cgi?id=210337
 */

static inline void kcov_remote_start_usb_softirq(u64 id)
{
	if (in_serving_softirq())
		kcov_remote_start_usb(id);
}

static inline void kcov_remote_stop_softirq(void)
{
	if (in_serving_softirq())
		kcov_remote_stop();
}
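
/*
 * Illustrative sketch (hypothetical URB completion path; "busnum" stands in
 * for the USB bus number used as the remote coverage id):
 *
 *	kcov_remote_start_usb_softirq(busnum);
 *	urb->complete(urb);
 *	kcov_remote_stop_softirq();
 *
 * The in_serving_softirq() checks above make these calls no-ops when the
 * same path is reached from task context, where a remote section may already
 * be active (nesting is not supported, see the comment above).
 */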

/*
 * 64-bit operand type for the comparison hooks below: 'unsigned long' on
 * 64-bit and 'unsigned long long' on 32-bit architectures, matching the
 * 64-bit argument type used by the compiler instrumentation.
 */
#ifdef CONFIG_64BIT
typedef unsigned long kcov_u64;
#else
typedef unsigned long long kcov_u64;
#endif

/*
 * These hooks are called by code the compiler inserts when coverage
 * instrumentation is enabled; they are defined in kernel/kcov.c.
 */
void __sanitizer_cov_trace_pc(void);
void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
void __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2);
void __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2);
void __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2);
void __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2);
void __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2);
void __sanitizer_cov_trace_switch(kcov_u64 val, void *cases);
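
/*
 * Rough illustration (not literal compiler output): with comparison tracing
 * enabled, the instrumentation turns something like
 *
 *	if (n == 4)
 *		foo();
 *
 * into approximately
 *
 *	__sanitizer_cov_trace_const_cmp4(4, n);
 *	if (n == 4) {
 *		__sanitizer_cov_trace_pc();
 *		foo();
 *	}
 *
 * with __sanitizer_cov_trace_pc() likewise inserted in the other basic
 * blocks when PC tracing is enabled.
 */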

#else

static inline void kcov_task_init(struct task_struct *t) {}
static inline void kcov_task_exit(struct task_struct *t) {}
static inline void kcov_prepare_switch(struct task_struct *t) {}
static inline void kcov_finish_switch(struct task_struct *t) {}
static inline void kcov_remote_start(u64 handle) {}
static inline void kcov_remote_stop(void) {}
static inline u64 kcov_common_handle(void)
{
	return 0;
}
static inline void kcov_remote_start_common(u64 id) {}
static inline void kcov_remote_start_usb(u64 id) {}
static inline void kcov_remote_start_usb_softirq(u64 id) {}
static inline void kcov_remote_stop_softirq(void) {}

#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */