/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/ftrace.h
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
#ifndef __ASM_FTRACE_H
#define __ASM_FTRACE_H

#include <asm/insn.h>

#define HAVE_FUNCTION_GRAPH_FP_TEST

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR		((unsigned long)_mcount)
#endif

/* The BL at the callsite's adjusted rec->ip */
#define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE

#define FTRACE_PLT_IDX		0
#define FTRACE_REGS_PLT_IDX	1
#define NR_FTRACE_PLTS		2

/*
 * Currently, gcc tends to save the link register after the local variables
 * on the stack. This causes the max stack tracer to report the function
 * frame sizes for the wrong functions. By defining
 * ARCH_FTRACE_SHIFT_STACK_TRACER, it will tell the stack tracer to expect
 * to find the return address on the stack after the local variables have
 * been set up.
 *
 * Note, this may change in the future, and we will need to deal with that
 * if it were to happen.
 */
#define ARCH_FTRACE_SHIFT_STACK_TRACER 1

#ifndef __ASSEMBLY__
#include <linux/compat.h>

extern void _mcount(unsigned long);
extern void *return_address(unsigned int);

struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};

extern unsigned long ftrace_graph_call;

extern void return_to_handler(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * Adjust addr to point at the BL in the callsite.
	 * See ftrace_init_nop() for the callsite sequence.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return addr + AARCH64_INSN_SIZE;
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
struct dyn_ftrace;
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
#endif

#define ftrace_return_address(n) return_address(n)

/*
 * Because AArch32 mode does not share the same syscall table with AArch64,
 * tracing compat syscalls may result in reporting bogus syscalls or even
 * hang-up, so just do not trace them.
 * See kernel/trace/trace_syscalls.c
 *
 * x86 code says:
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have __arm64_ prefix, we must skip it.
	 * However, as we described above, we decided to ignore compat
	 * syscalls, so we don't care about __arm64_compat_ prefix here.
	 */
	return !strcmp(sym + 8, name);
}
#endif /* ifndef __ASSEMBLY__ */

#endif /* __ASM_FTRACE_H */
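
/*
 * Illustrative sketch, not part of the header above: a minimal userspace
 * demonstration of what ftrace_call_adjust() computes when
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS is enabled. With
 * -fpatchable-function-entry=2, each traced function begins with two NOPs;
 * ftrace_init_nop() rewrites the first to save the link register, and the
 * second is the slot that is later patched to a BL, so the recorded address
 * is moved forward by one instruction to become rec->ip. The patch_site
 * value below is hypothetical, chosen only for the printout.
 */
#include <stdio.h>

#define AARCH64_INSN_SIZE 4	/* every AArch64 instruction is 4 bytes */

/* Mirrors the CONFIG_DYNAMIC_FTRACE_WITH_REGS branch of ftrace_call_adjust(). */
static unsigned long adjust_to_bl_slot(unsigned long addr)
{
	return addr + AARCH64_INSN_SIZE;
}

int main(void)
{
	unsigned long patch_site = 0xffff800010083000UL;	/* hypothetical */

	printf("first NOP at 0x%lx, BL slot (rec->ip) at 0x%lx\n",
	       patch_site, adjust_to_bl_slot(patch_site));
	return 0;
}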