/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */

#ifndef _ASM_RISCV_FTRACE_H
#define _ASM_RISCV_FTRACE_H

/*
 * The function graph frame pointer test is not possible unless
 * CONFIG_FRAME_POINTER is enabled.  See arch/riscv/kernel/mcount.S for
 * details.
 */
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
#define HAVE_FUNCTION_GRAPH_FP_TEST
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

/*
 * Clang prior to version 13 emitted "mcount" instead of "_mcount":
 * https://reviews.llvm.org/D98881
 */
#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
#define MCOUNT_NAME _mcount
#else
#define MCOUNT_NAME mcount
#endif

#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__

extern void *return_address(unsigned int level);

#define ftrace_return_address(n) return_address(n)

void MCOUNT_NAME(void);
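/*
 * ftrace_call_adjust() performs no adjustment on RISC-V: the recorded
 * call-site address is used as-is for patching.
 */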
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}

/*
 * Do the same as x86/arm64 and ignore compat syscalls.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have the __riscv_ prefix, we must
	 * skip it.  However, as described above, compat syscalls are
	 * ignored, so we don't care about the __riscv_compat_ prefix here.
	 */
	return !strcmp(sym + 8, name);
}
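/*
 * For example (illustrative only): for a syscall entry symbol such as
 * "__riscv_sys_openat", sym + 8 skips the 8-character "__riscv_" prefix
 * and compares "sys_openat" against the expected name.
 */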

struct dyn_arch_ftrace {
};
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * A general call in RISC-V is a pair of instructions:
 * 1) auipc: add the upper 20 bits of the pc-relative offset to pc and
 *           place the result in the link register (ra or t0)
 * 2) jalr: add the remaining, sign-extended lower 12 bits, jump to the
 *          resulting address, and write the return address (address of
 *          the jalr + 4) into the link register
 *
 *<ftrace enable>:
 * 0: auipc  t0/ra, 0x?
 * 4: jalr   t0/ra, ?(t0/ra)
 *
 *<ftrace disable>:
 * 0: nop
 * 4: nop
 *
 * Dynamic ftrace generates probes at call sites, so both the auipc and
 * the jalr must be patched at the same time.
 */

#define MCOUNT_ADDR		((unsigned long)MCOUNT_NAME)
#define JALR_SIGN_MASK		(0x00000800)
#define JALR_OFFSET_MASK	(0x00000fff)
#define AUIPC_OFFSET_MASK	(0xfffff000)
#define AUIPC_PAD		(0x00001000)
#define JALR_SHIFT		20
#define JALR_RA			(0x000080e7)
#define AUIPC_RA		(0x00000097)
#define JALR_T0			(0x000282e7)
#define AUIPC_T0		(0x00000297)
#define NOP4			(0x00000013)

#define to_jalr_t0(offset)						\
	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)

#define to_auipc_t0(offset)						\
	((offset & JALR_SIGN_MASK) ?					\
	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) :	\
	((offset & AUIPC_OFFSET_MASK) | AUIPC_T0))
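/*
 * Worked example (illustrative): for offset = 0x1804, the low 12 bits are
 * 0x804, whose sign bit (JALR_SIGN_MASK) is set, so the jalr immediate
 * sign-extends to -0x7fc.  to_auipc_t0() therefore adds AUIPC_PAD, making
 * the auipc contribution 0x2000, and the pair still reaches
 * 0x2000 - 0x7fc = 0x1804.
 */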

#define make_call_t0(caller, callee, call)				\
do {									\
	unsigned int offset =						\
		(unsigned long) callee - (unsigned long) caller;	\
	call[0] = to_auipc_t0(offset);					\
	call[1] = to_jalr_t0(offset);					\
} while (0)
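/*
 * Usage sketch (illustrative only, not part of this header): dynamic
 * ftrace code such as arch/riscv/kernel/ftrace.c is expected to build and
 * patch the two-instruction sequence roughly like this, assuming a text
 * patching helper along the lines of patch_text_nosync():
 *
 *	unsigned int call[2];
 *
 *	make_call_t0(rec->ip, &ftrace_caller, call);
 *	patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE);
 */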

#define to_jalr_ra(offset)						\
	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA)

#define to_auipc_ra(offset)						\
	((offset & JALR_SIGN_MASK) ?					\
	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) :	\
	((offset & AUIPC_OFFSET_MASK) | AUIPC_RA))

#define make_call_ra(caller, callee, call)				\
do {									\
	unsigned int offset =						\
		(unsigned long) callee - (unsigned long) caller;	\
	call[0] = to_auipc_ra(offset);					\
	call[1] = to_jalr_ra(offset);					\
} while (0)
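/*
 * The _ra and _t0 helpers emit the same auipc/jalr pair and differ only
 * in which register (ra or t0) the detour uses as its link register; the
 * opcode constants JALR_RA/AUIPC_RA vs. JALR_T0/AUIPC_T0 encode that
 * register choice.
 */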

/*
 * The auipc+jalr pair is treated as the basic *mcount unit*, so the
 * patchable instruction size here is 8 bytes.
 */
#define MCOUNT_INSN_SIZE 8

#ifndef __ASSEMBLY__
struct dyn_ftrace;
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
#endif

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifndef __ASSEMBLY__
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
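/*
 * Snapshot of the registers saved by the function graph return
 * trampoline; the field order is expected to match the save area laid
 * out by return_to_handler in the RISC-V mcount code.
 */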
struct fgraph_ret_regs {
	unsigned long a1;
	unsigned long a0;
	unsigned long s0;
	unsigned long ra;
};

static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->a0;
}

static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->s0;
}
#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
#endif

#endif /* _ASM_RISCV_FTRACE_H */