// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>

/*
 * Table of in-kernel ftrace trampolines: one in regular kernel text and one
 * in init text (see ftrace_dyn_arch_init() / ftrace_free_init_tramp()).
 */
#define NUM_FTRACE_TRAMPS	2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

/*
 * Encode a direct branch ('b', or 'bl' when @link is set) from @ip to @addr.
 * Callers are expected to have checked reachability already; an out-of-range
 * target only triggers a WARN here, it is not otherwise handled.
 */
static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
{
	ppc_inst_t op;

	WARN_ON(!is_offset_in_branch_range(addr - ip));
	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);

	return op;
}

/*
 * Safely read one instruction from kernel text at @ip into @op.
 * Returns 0 on success, -EFAULT (with an error message) on failure.
 */
static inline int ftrace_read_inst(unsigned long ip, ppc_inst_t *op)
{
	if (copy_inst_from_kernel_nofault(op, (void *)ip)) {
		pr_err("0x%lx: fetching instruction failed\n", ip);
		return -EFAULT;
	}

	return 0;
}

/*
 * Verify that the instruction at @ip is exactly @inst.
 * Returns 0 if it matches, -EFAULT if the read fails, -EINVAL on mismatch.
 */
static inline int ftrace_validate_inst(unsigned long ip, ppc_inst_t inst)
{
	ppc_inst_t op;
	int ret;

	ret = ftrace_read_inst(ip, &op);
	if (!ret && !ppc_inst_equal(op, inst)) {
		pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
		       ip, ppc_inst_as_ulong(inst), ppc_inst_as_ulong(op));
		ret = -EINVAL;
	}

	return ret;
}

/*
 * Patch @new at @ip, but only after confirming the current instruction
 * there is @old — guards against patching an unexpected location.
 */
static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	int ret = ftrace_validate_inst(ip, old);

	if (!ret)
		ret = patch_instruction((u32 *)ip, new);

	return ret;
}

/*
 * True if @op is a 'bl' instruction: mask off the LI displacement field
 * and compare the remaining opcode bits against 'bl 0'.
 */
static int is_bl_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}

/*
 * Find a kernel ftrace trampoline reachable by a direct branch from @ip.
 * Returns the trampoline address, or 0 if none is in range.
 */
static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			continue;
		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
			return ftrace_tramps[i];

	return 0;
}

/*
 * Compute the 'bl' instruction to be patched at rec->ip so that it reaches
 * @addr: directly when in branch range, otherwise via the module's stub
 * (plain or regs-saving variant, chosen by @addr) or via an in-kernel
 * ftrace trampoline. Returns 0 and fills @call_inst, or a -ve errno.
 */
static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
{
	unsigned long ip = rec->ip;
	unsigned long stub;

	if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		stub = addr;
#ifdef CONFIG_MODULES
	} else if (rec->arch.mod) {
		/* Module code would be going to one of the module stubs */
		stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp :
							       rec->arch.mod->arch.tramp_regs);
#endif
	} else if (core_kernel_text(ip)) {
		/* We would be branching to one of our ftrace stubs */
		stub = find_ftrace_tramp(ip);
		if (!stub) {
			pr_err("0x%lx: No ftrace stubs reachable\n", ip);
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	*call_inst = ftrace_create_branch_inst(ip, stub, 1);
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	/* This should never be called since we override ftrace_replace_code() */
	WARN_ON(1);
	return -EINVAL;
}
#endif

/*
 * Enable tracing for a single module record: replace the nop at rec->ip
 * with a 'bl' to @addr (core kernel records are handled in bulk by
 * ftrace_replace_code() instead).
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t old, new;
	int ret;

	/* This can only ever be called during module load */
	if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(rec->ip)))
		return -EINVAL;

	/* The site must currently hold the nop installed by ftrace_init_nop() */
	old = ppc_inst(PPC_RAW_NOP());
	ret = ftrace_get_call_inst(rec, addr, &new);
	if (ret)
		return ret;

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	/*
	 * This should never be called since we override ftrace_replace_code(),
	 * as well as ftrace_init_nop()
	 */
	WARN_ON(1);
	return -EINVAL;
}

/*
 * Batch-update all ftrace call sites for @enable. For each record, work out
 * whether to install a call, restore a nop, or redirect an existing call,
 * then patch with old/new verification. On the first failure, report via
 * ftrace_bug() and stop.
 */
void ftrace_replace_code(int enable)
{
	ppc_inst_t old, new, call_inst, new_call_inst;
	ppc_inst_t nop_inst = ppc_inst(PPC_RAW_NOP());
	unsigned long ip, new_addr, addr;
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret = 0, update;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		ip = rec->ip;

		/* Skip records that are disabled and not currently patched in */
		if (rec->flags & FTRACE_FL_DISABLED && !(rec->flags & FTRACE_FL_ENABLED))
			continue;

		addr = ftrace_get_addr_curr(rec);
		new_addr = ftrace_get_addr_new(rec);
		update = ftrace_update_record(rec, enable);

		switch (update) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;
		case FTRACE_UPDATE_MODIFY_CALL:
			/* Redirect an existing call to a different handler */
			ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
			ret |= ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = new_call_inst;
			break;
		case FTRACE_UPDATE_MAKE_NOP:
			/* Disable: replace the current call with a nop */
			ret = ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = nop_inst;
			break;
		case FTRACE_UPDATE_MAKE_CALL:
			/* Enable: replace the nop with a call */
			ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
			old = nop_inst;
			new = call_inst;
			break;
		}

		if (!ret)
			ret = ftrace_modify_code(ip, old, new);
		if (ret)
			goto out;
	}

out:
	if (ret)
		ftrace_bug(ret, rec);
	return;
}

/*
 * One-time initialization of an ftrace call site: validate the expected
 * compiler-generated instruction sequence around rec->ip for the configured
 * ABI, record the owning module (if any), and nop-out the mcount call
 * (or, with CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY, patch in the
 * 'mflr r0' that the later call sequence relies on).
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long addr, ip = rec->ip;
	ppc_inst_t old, new;
	int ret = 0;

	/* Verify instructions surrounding the ftrace location */
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* Expect nops */
		ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
		if (!ret)
			ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP()));
	} else if (IS_ENABLED(CONFIG_PPC32)) {
		/* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
		ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
		if (!ret)
			ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)));
	} else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
		ret = ftrace_read_inst(ip - 4, &old);
		if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) {
			/* 'mflr r0' wasn't right before the bl: accept the 3-insn form */
			ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
			ret |= ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)));
		}
	} else {
		return -EINVAL;
	}

	if (ret)
		return ret;

	if (!core_kernel_text(ip)) {
		if (!mod) {
			pr_err("0x%lx: No module provided for non-kernel address\n", ip);
			return -EFAULT;
		}
		/* Remember the owning module for later stub selection */
		rec->arch.mod = mod;
	}

	/* Nop-out the ftrace location */
	new = ppc_inst(PPC_RAW_NOP());
	addr = MCOUNT_ADDR;
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* we instead patch-in the 'mflr r0' */
		old = ppc_inst(PPC_RAW_NOP());
		new = ppc_inst(PPC_RAW_MFLR(_R0));
		ret = ftrace_modify_code(ip - 4, old, new);
	} else if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		old = ftrace_create_branch_inst(ip, addr, 1);
		ret = ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip) || (IS_ENABLED(CONFIG_MODULES) && mod)) {
		/*
		 * We would be branching to a linker-generated stub, or to the module _mcount
		 * stub. Let's just confirm we have a 'bl' here.
		 */
		ret = ftrace_read_inst(ip, &old);
		if (ret)
			return ret;
		if (!is_bl_op(old)) {
			pr_err("0x%lx: expected (bl) != found (%08lx)\n", ip, ppc_inst_as_ulong(old));
			return -EINVAL;
		}
		ret = patch_instruction((u32 *)ip, new);
	} else {
		return -EINVAL;
	}

	return ret;
}

/*
 * Point the patched ftrace_call (and, with DYNAMIC_FTRACE_WITH_REGS,
 * ftrace_regs_call) sites at @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/*
 * Remove the init-text trampoline from the table once init memory is
 * about to be freed, so nothing branches into freed text.
 */
void ftrace_free_init_tramp(void)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
			ftrace_tramps[i] = 0;
			return;
		}
}

/* Record @tramp in the first free table slot; a full table is silently ignored. */
static void __init add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return;
		}
}

/*
 * Populate the two reserved trampoline slots (kernel text and init text)
 * with a stub that loads the address of the common ftrace handler into
 * r12 and branches to it via ctr. The stub template varies by ABI:
 * pcrel 'pla', TOC-relative addis/addi on PPC64, or lis/addi on PPC32.
 * The address immediates are fixed up into the template before copying.
 */
int __init ftrace_dyn_arch_init(void)
{
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	unsigned long addr = FTRACE_REGS_ADDR;
	long reladdr;
	int i;
	u32 stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
		/* pla r12,addr */
		PPC_PREFIX_MLS | __PPC_PRFX_R(1),
		PPC_INST_PADDI | ___PPC_RT(_R12),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#elif defined(CONFIG_PPC64)
		PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#else
		PPC_RAW_LIS(_R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#endif
	};

	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		for (i = 0; i < 2; i++) {
			reladdr = addr - (unsigned long)tramp[i];

			/* paddi's 34-bit signed immediate limits the reach to +/-8G */
			if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
				pr_err("Address of %ps out of range of pcrel address.\n",
				       (void *)addr);
				return -1;
			}

			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= IMM_H18(reladdr);
			tramp[i][1] |= IMM_L(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		reladdr = addr - kernel_toc_addr();

		/* addis/addi can only form a 32-bit signed offset from the TOC */
		if (reladdr >= (long)SZ_2G || reladdr < -(long long)SZ_2G) {
			pr_err("Address of %ps out of range of kernel_toc.\n",
			       (void *)addr);
			return -1;
		}

		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][1] |= PPC_HA(reladdr);
			tramp[i][2] |= PPC_LO(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else {
		/* PPC32: load the absolute address directly */
		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= PPC_HA(addr);
			tramp[i][1] |= PPC_LO(addr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	}

	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Entry hook for the function graph tracer: hijack the return address by
 * rewriting the saved LR in @fregs to return_to_handler, unless the graph
 * tracer is dead/paused or we are recursing.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long sp = fregs->regs.gpr[1];
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		goto out;

	/* On success, divert the function's return through return_to_handler */
	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
		parent_ip = ppc_function_entry(return_to_handler);

	ftrace_test_recursion_unlock(bit);
out:
	fregs->regs.link = parent_ip;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */