/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, a replaced instruction is checked against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * Turn on the call to ftrace_caller() in instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		struct plt_entry trampoline;
		struct module *mod;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		/*
		 * There is only one ftrace trampoline per module. For now,
		 * this is not a problem since on arm64, all dynamic ftrace
		 * invocations are routed via ftrace_caller(). This will need
		 * to be revisited if support for multiple ftrace entry points
		 * is added in the future, but for now, the pr_err() below
		 * deals with a theoretical issue only.
		 */
		trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
				       &trampoline)) {
			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
					       &(struct plt_entry){})) {
				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
				return -EINVAL;
			}

			/* point the trampoline to our ftrace entry point */
			module_disable_ro(mod);
			*mod->arch.ftrace_trampoline = trampoline;
			module_enable_ro(mod, true);

			/* update trampoline before patching in the branch */
			smp_wmb();
		}
		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * Turn off the call to ftrace_caller() in instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		u32 replaced;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace return address (*parent) on
 * the call stack to return_to_handler.
 *
 * Note that @frame_pointer is used only for sanity check later.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */