/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/ftrace.h
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
#ifndef __ASM_FTRACE_H
#define __ASM_FTRACE_H

#include <asm/insn.h>

#define HAVE_FUNCTION_GRAPH_FP_TEST

/*
 * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
 * "return address pointer" which can be used to uniquely identify a return
 * address which has been overwritten.
 *
 * On arm64 we use the address of the caller's frame record, which remains the
 * same for the lifetime of the instrumented function, unlike the return
 * address in the LR.
 */
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR		((unsigned long)_mcount)
#endif

/* The BL at the callsite's adjusted rec->ip */
#define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE

#define FTRACE_PLT_IDX		0
#define NR_FTRACE_PLTS		1

/*
 * Currently, gcc tends to save the link register after the local variables
 * on the stack. This causes the max stack tracer to report the function
 * frame sizes for the wrong functions. By defining
 * ARCH_FTRACE_SHIFT_STACK_TRACER, it will tell the stack tracer to expect
 * to find the return address on the stack after the local variables have
 * been set up.
 *
 * Note, this may change in the future, and we will need to deal with that
 * if it were to happen.
 */
#define ARCH_FTRACE_SHIFT_STACK_TRACER 1

#ifndef __ASSEMBLY__
#include <linux/compat.h>

extern void _mcount(unsigned long);
extern void *return_address(unsigned int);

struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};

extern unsigned long ftrace_graph_call;

extern void return_to_handler(void);

unsigned long ftrace_call_adjust(unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct dyn_ftrace;
struct ftrace_ops;

#define arch_ftrace_get_regs(regs) NULL

/*
 * Note: sizeof(struct ftrace_regs) must be a multiple of 16 to ensure correct
 * stack alignment
 */
struct ftrace_regs {
	/* x0 - x8 */
	unsigned long regs[9];

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	unsigned long direct_tramp;
#else
	unsigned long __unused;
#endif

	unsigned long fp;
	unsigned long lr;

	unsigned long sp;
	unsigned long pc;
};

static __always_inline unsigned long
ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
{
	return fregs->pc;
}

static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
				    unsigned long pc)
{
	fregs->pc = pc;
}

static __always_inline unsigned long
ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
{
	return fregs->sp;
}

static __always_inline unsigned long
ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n)
{
	if (n < 8)
		return fregs->regs[n];
	return 0;
}

static __always_inline unsigned long
ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
{
	return fregs->regs[0];
}

static __always_inline void
ftrace_regs_set_return_value(struct ftrace_regs *fregs,
			     unsigned long ret)
{
	fregs->regs[0] = ret;
}

static __always_inline void
ftrace_override_function_with_return(struct ftrace_regs *fregs)
{
	fregs->pc = fregs->lr;
}

int ftrace_regs_query_register_offset(const char *name);

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr)
{
	/*
	 * The ftrace trampoline will return to this address instead of the
	 * instrumented function.
	 */
	fregs->direct_tramp = addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#endif

#define ftrace_return_address(n) return_address(n)

/*
 * Because AArch32 mode does not share the same syscall table with AArch64,
 * tracing compat syscalls may result in reporting bogus syscalls or even
 * hang-up, so just do not trace them.
 * See kernel/trace/trace_syscalls.c
 *
 * x86 code says:
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have __arm64_ prefix, we must skip it.
	 * However, as we described above, we decided to ignore compat
	 * syscalls, so we don't care about __arm64_compat_ prefix here.
	 */
	return !strcmp(sym + 8, name);
}
#endif /* ifndef __ASSEMBLY__ */

#ifndef __ASSEMBLY__
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
struct fgraph_ret_regs {
	/* x0 - x7 */
	unsigned long regs[8];

	unsigned long fp;
	unsigned long __unused;
};

static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->regs[0];
}

static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->fp;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
#endif

#endif /* __ASM_FTRACE_H */
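/*
 * Illustrative sketch (compiled out, not part of this header): one way an
 * ftrace_ops callback could consume the ftrace_regs accessors defined above
 * when CONFIG_DYNAMIC_FTRACE_WITH_ARGS is enabled. The function and ops names
 * below are hypothetical examples; recursion protection and filter setup
 * (e.g. ftrace_set_filter_ip()) are omitted for brevity.
 */
#if 0
#include <linux/ftrace.h>
#include <linux/printk.h>

static void example_trace_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* First integer/pointer argument of the instrumented function (x0) */
	unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);
	/* Entry address of the instrumented function */
	unsigned long pc = ftrace_regs_get_instruction_pointer(fregs);

	pr_debug("traced %pS: arg0=%lx\n", (void *)pc, arg0);
}

static struct ftrace_ops example_ops = {
	.func = example_trace_func,
};

/*
 * A module would typically attach this from its init path with
 * register_ftrace_function(&example_ops) after restricting it to the
 * functions of interest.
 */
#endif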