/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks goes to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

/*
 * Distance (in instructions) between a module's mcount call site and the
 * "1:" label branched to when tracing is off; see the "calling site of
 * mcount" diagram further down.  With -mmcount-ra-address on 32-bit there
 * is one extra "move $12, ra_address" instruction, hence 5 instead of 4.
 */
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
/*
 * j/jal take a 26-bit word index, so they can only reach targets inside
 * the current 256MB-aligned region; mask targets down accordingly.
 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
/* Encode "jal addr": low 26 bits of the word-aligned target address */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

/*
 * Pre-encoded instruction words, filled in once at boot by
 * ftrace_dyn_arch_init_insns() and then patched into call sites.
 * insn_la_mcount[] holds the two-instruction "la v1, _mcount"
 * (lui/addiu) sequence used for module call sites.
 */
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

/*
 * Use uasm to encode the instructions that ftrace_make_call() and the
 * graph tracer will later patch into place.  Run once at boot from
 * ftrace_dyn_arch_init().
 */
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;		/* register number of $v1 */
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instruction */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

/*
 * Replace the single instruction at @ip with @new_code, then flush the
 * icache so the new instruction is visible to the CPU.
 *
 * Returns 0 on success or -EFAULT if the text store faulted.
 */
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;
	mm_segment_t old_fs;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

#ifndef CONFIG_64BIT
/*
 * Replace two consecutive instructions: @new_code1 at @ip and @new_code2
 * in the delay slot at @ip + 4.  32-bit call sites are two instructions
 * wide (branch + stack-adjust delay slot, see the diagram below), so
 * disabling tracing must patch both words.
 *
 * Returns 0 on success or -EFAULT if either store faulted.
 */
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

/*
 * As ftrace_modify_code_2(), but write the delay-slot word at @ip + 4
 * BEFORE the word at @ip.  NOTE(review): presumably ordered this way so
 * that, while enabling tracing, a CPU can never fetch the new branch
 * paired with the stale delay-slot instruction — confirm against the
 * commit that introduced the "2r" variant.
 */
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount	--> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *					    1: offset = 5 instructions
 * 2.2 For the Other situations
 *
 * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount	--> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *					    1: offset = 4 instructions
 */

/* "b 1f": branch forward over the MCOUNT_OFFSET_INSNS-wide call site */
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

/*
 * Turn the mcount call site at @rec->ip back into a no-op.  Kernel text
 * sites become nop; module sites (long call) become "b 1f" so the whole
 * la/jalr sequence is skipped.  @mod and @addr are unused here.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call, otherwise, long call is
	 * needed.
	 */
	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

/*
 * Re-enable tracing at @rec->ip: kernel text gets a direct
 * "jal ftrace_caller+8", module text gets the pre-encoded
 * "la v1, _mcount" (lui/addiu) long-call sequence.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/* delay slot: keep the nop for kernel text, else the addiu half */
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

/* Point the ftrace_call site inside ftrace_caller at the new handler. */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

/* Patch the graph-call site to jump into ftrace_graph_caller. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

/* Patch the graph-call site back to a nop. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

/*
 * Scan backwards from the mcount call site for the prologue instruction
 * "s{d,w} ra, offset(sp)" to locate where the traced function saved its
 * return address on the stack.
 *
 * Returns the stack address holding @old_parent_ra; @parent_ra_addr
 * unchanged for a leaf function (ra never saved); or 0 on any fault or
 * if the saved value does not match @old_parent_ra.
 */
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For module, move the ip from the return address after the
	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
	 * kernel, move after the instruction "move ra, at"(offset is 16)
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/*
	 * search the text until finding the non-store instruction or "s{d,w}
	 * ra, offset(sp)" instruction
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit the non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the next instruction (scanning toward lower ip) */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	/* low 16 bits of the store encode the sp-relative save offset */
	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address saved the return address of
	 * the caller of _mcount.
	 *
	 * if the gcc < 4.5, a leaf function does not save the return address
	 * in the stack address, so, we "emulate" one in _mcount's stack space,
	 * and hijack it directly, but for a non-leaf function, it save the
	 * return address to the its own stack space, we can not hijack it
	 * directly, but need to find the real stack address,
	 * ftrace_get_parent_addr() does it!
	 *
	 * if gcc>= 4.5, with the new -mmcount-ra-address option, for a
	 * non-leaf function, the location of the return address will be saved
	 * to $12 for us, and for a leaf function, only put a zero into $12. we
	 * do it in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If fails when getting the stack address of the non-leaf function's
	 * ra, stop function graph tracer and return
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
				     NULL) == -EBUSY) {
		/* return stack full: undo the hijack */
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
/* Map a syscall number to its handler's address (O32 ABI only). */
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

/*
 * Map a syscall number to its handler's address, checking the N32, N64
 * and O32 number ranges in turn; unknown numbers map to sys_ni_syscall.
 */
unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif	/* CONFIG_FTRACE_SYSCALLS */