/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/ftrace.h
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
#ifndef __ASM_FTRACE_H
#define __ASM_FTRACE_H

#include <asm/insn.h>

#define HAVE_FUNCTION_GRAPH_FP_TEST

/*
 * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
 * "return address pointer" which can be used to uniquely identify a return
 * address which has been overwritten.
 *
 * On arm64 we use the address of the caller's frame record, which remains the
 * same for the lifetime of the instrumented function, unlike the return
 * address in the LR.
 */
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR		((unsigned long)_mcount)
#endif

/* The BL at the callsite's adjusted rec->ip */
#define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE

#define FTRACE_PLT_IDX		0
#define NR_FTRACE_PLTS		1

/*
 * Currently, gcc tends to save the link register after the local variables
 * on the stack. This causes the max stack tracer to report the function
 * frame sizes for the wrong functions. Defining
 * ARCH_FTRACE_SHIFT_STACK_TRACER tells the stack tracer to expect to find
 * the return address on the stack after the local variables have been set
 * up.
 *
 * Note, this may change in the future, and we will need to deal with it
 * if it does.
 */
#define ARCH_FTRACE_SHIFT_STACK_TRACER 1

#ifndef __ASSEMBLY__
#include <linux/compat.h>

extern void _mcount(unsigned long);
extern void *return_address(unsigned int);

struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};

extern unsigned long ftrace_graph_call;

extern void return_to_handler(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * Adjust addr to point at the BL in the callsite.
	 * See ftrace_init_nop() for the callsite sequence.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return addr + AARCH64_INSN_SIZE;
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}
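
/*
 * Illustrative sketch of the callsite layout behind ftrace_call_adjust()
 * above (cf. ftrace_init_nop() in arch/arm64/kernel/ftrace.c): with
 * CONFIG_DYNAMIC_FTRACE_WITH_ARGS the compiler emits two NOPs before the
 * function prologue (-fpatchable-function-entry=2), patched as follows:
 *
 *	| Compiled | Disabled   | Enabled          |
 *	+----------+------------+------------------+
 *	| NOP      | MOV X9, LR | MOV X9, LR       |
 *	| NOP      | NOP        | BL ftrace_caller |
 *
 * The patchable-entry record points at the first instruction, so adding
 * AARCH64_INSN_SIZE makes rec->ip land on the BL slot.
 */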

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct dyn_ftrace;
struct ftrace_ops;

#define arch_ftrace_get_regs(regs) NULL

struct ftrace_regs {
	/* x0 - x8 */
	unsigned long regs[9];
	unsigned long __unused;

	unsigned long fp;
	unsigned long lr;

	unsigned long sp;
	unsigned long pc;
};

static __always_inline unsigned long
ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
{
	return fregs->pc;
}

static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
				    unsigned long pc)
{
	fregs->pc = pc;
}

static __always_inline unsigned long
ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
{
	return fregs->sp;
}

static __always_inline unsigned long
ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n)
{
	if (n < 8)
		return fregs->regs[n];
	return 0;
}

static __always_inline unsigned long
ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
{
	return fregs->regs[0];
}

static __always_inline void
ftrace_regs_set_return_value(struct ftrace_regs *fregs,
			     unsigned long ret)
{
	fregs->regs[0] = ret;
}

static __always_inline void
ftrace_override_function_with_return(struct ftrace_regs *fregs)
{
	fregs->pc = fregs->lr;
}

int ftrace_regs_query_register_offset(const char *name);

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

#define ftrace_return_address(n) return_address(n)

/*
 * Because AArch32 mode does not share the same syscall table with AArch64,
 * tracing compat syscalls may result in reporting bogus syscalls or even
 * a hang, so just do not trace them.
 * See kernel/trace/trace_syscalls.c
 *
 * x86 code says:
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have the __arm64_ prefix (8 chars),
	 * we must skip it. As noted above, compat syscalls are not traced,
	 * so the __arm64_compat_ prefix needs no handling here.
	 */
	return !strcmp(sym + 8, name);
}
#endif /* ifndef __ASSEMBLY__ */

#endif /* __ASM_FTRACE_H */
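
/*
 * Illustrative sketch, not part of the upstream header: one way a kernel
 * module might consume the ftrace_regs accessors above via an ftrace_ops
 * callback, assuming CONFIG_DYNAMIC_FTRACE_WITH_ARGS. sample_callback,
 * sample_ops and the traced symbol are hypothetical names; the block is
 * guarded with "#if 0" so the header itself is unaffected.
 */
#if 0
#include <linux/ftrace.h>
#include <linux/module.h>

static void notrace sample_callback(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *op,
				    struct ftrace_regs *fregs)
{
	/* ip is the adjusted rec->ip, i.e. the address of the BL. */
	pr_debug("%ps called from %ps, x0=0x%lx\n", (void *)ip,
		 (void *)parent_ip, ftrace_regs_get_argument(fregs, 0));
}

static struct ftrace_ops sample_ops = {
	.func = sample_callback,
};

static int __init sample_init(void)
{
	static unsigned char sample_sym[] = "do_sys_open";
	int ret;

	/* Trace a single function by name, then enable the callback. */
	ret = ftrace_set_filter(&sample_ops, sample_sym,
				sizeof(sample_sym) - 1, 0);
	if (ret)
		return ret;

	return register_ftrace_ops(&sample_ops);
}
module_init(sample_init);
MODULE_LICENSE("GPL");
#endif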