/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is checked against
 * 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace the tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * Turn on the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

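	/*
	 * A 'bl' instruction encodes a signed 26-bit word offset, so a direct
	 * call can only reach targets within roughly +/-128 MB of the call
	 * site; hence the SZ_128M window checked below. Targets further away
	 * (e.g. a module loaded far from the core kernel image) must be
	 * reached indirectly.
	 */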
	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		unsigned long *trampoline;
		struct module *mod;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		/*
		 * There is only one ftrace trampoline per module. For now,
		 * this is not a problem since on arm64, all dynamic ftrace
		 * invocations are routed via ftrace_caller(). This will need
		 * to be revisited if support for multiple ftrace entry points
		 * is added in the future, but for now, the pr_err() below
		 * deals with a theoretical issue only.
		 */
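		/*
		 * Assumed trampoline layout (see the module PLT/ftrace
		 * trampoline code for the authoritative definition):
		 *
		 *   trampoline[0]: literal holding the branch target
		 *   trampoline[1]: code that loads the literal above and
		 *                  branches to it
		 *
		 * This is why the destination address is written into
		 * trampoline[0] below, while the patched 'bl' is pointed at
		 * &trampoline[1].
		 */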
		trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
		if (trampoline[0] != addr) {
			if (trampoline[0] != 0) {
				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
				return -EINVAL;
			}

			/* point the trampoline to our ftrace entry point */
			module_disable_ro(mod);
			trampoline[0] = addr;
			module_enable_ro(mod, true);

			/* update trampoline before patching in the branch */
			smp_wmb();
		}
		addr = (unsigned long)&trampoline[1];
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * Turn off the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		u32 replaced;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent) on
 * the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for a sanity check later.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	struct ftrace_graph_ent trace;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		return;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, NULL);
	if (err == -EBUSY)
		return;
	else
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */