// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is checked against
 * 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
#endif
	return NULL;
}

/*
 * Turn on the call to ftrace_caller() in instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		struct module *mod;
		struct plt_entry *plt;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		plt = get_ftrace_plt(mod, addr);
		if (!plt) {
			pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
			return -EINVAL;
		}

		addr = (unsigned long)plt;
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif

/*
 * Turn off the call to ftrace_caller() in instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		u32 replaced;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace return address (*parent) on
 * the call stack to return_to_handler.
 *
 * Note that @frame_pointer is used only for sanity check later.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */