/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

/*
 * Check if the address is in kernel space.
 *
 * A clone of core_kernel_text() from kernel/extable.c, but without the
 * init_kernel_text() check, since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* opcode|addr : 31..26|25..0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
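
/*
 * Worked example (added for illustration; the address below is made up, not
 * taken from a real kernel): a jal can only reach targets within the current
 * 256MB segment, which is why JUMP_RANGE_MASK keeps only the low 28 bits of
 * the destination. Assuming ftrace_caller were located at 0x80123400:
 *
 *	INSN_JAL(0x80123400)
 *		= 0x0c000000 | ((0x80123400 >> 2) & 0x03ffffff)
 *		= 0x0c048d00
 *
 * and the CPU rebuilds the target as
 * (PC & 0xf0000000) | (0x0048d00 << 2) = 0x80123400.
 */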

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	/* Flush both patched instructions, starting at the original ip */
	ip -= 4;
	flush_icache_range(ip, ip + 8);

	return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *				    1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *				    1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
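
/*
 * Worked example (added for illustration, not part of the original source):
 * INSN_B_1F expands to 0x1000000N with N = MCOUNT_OFFSET_INSNS. 0x10000005
 * encodes "beq $zero, $zero, +5", i.e. an unconditional branch; the branch
 * target is the address of the delay slot plus (offset << 2), so with
 * offset 5 execution resumes at label 1:, six instructions past the patched
 * lui. The addiu left in the delay slot still executes, but it only
 * clobbers v1.
 */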

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP		(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP		(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff		/* stack offset range: 0 ~ PT_SIZE */

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Start scanning one instruction before the mcount call site: for a
	 * module the site begins with "lui v1, hi_16bit_of_mcount"
	 * (self_ra - 24), for the kernel with "move at, ra" (self_ra - 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Scan backwards through the text until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where ra
		 * is stored, then this is a leaf function and it does not
		 * store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */
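
/*
 * Worked example (added for illustration; the prologue instruction below is
 * hypothetical): if the scan above reads "sw ra, 28(sp)", which encodes as
 * 0xafbf001c, then (code & S_R_SP) == S_R_SP and (code & S_RA_SP) == S_RA_SP,
 * so the loop stops and sp is computed as fp + (0xafbf001c & OFFSET_MASK)
 * = fp + 0x1c.
 */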

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address at which the return address
	 * of the caller of _mcount is saved.
	 *
	 * With gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly. A non-leaf function saves the return address
	 * in its own stack space, which we cannot hijack directly; we need
	 * to find the real stack address, and ftrace_get_parent_ra_addr()
	 * does that for us.
	 *
	 * With gcc >= 4.5 and the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is passed
	 * to us in $12, while a leaf function puts a zero into $12; this is
	 * handled in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */
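
/*
 * Note added for illustration (not part of the original file): the ABI base
 * numbers used above come from asm/unistd.h; o32 system calls start at
 * __NR_O32_Linux (4000), n64 at __NR_64_Linux (5000) and n32 at
 * __NR_N32_Linux (6000), so each table is indexed by subtracting its base.
 */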