xref: /openbmc/linux/arch/powerpc/kernel/trace/ftrace.c (revision ae3a2a21)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for replacing ftrace calls with jumps.
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6  *
7  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
8  *
9  * Added function graph tracer code, taken from x86 that was written
10  * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11  *
12  */
13 
14 #define pr_fmt(fmt) "ftrace-powerpc: " fmt
15 
16 #include <linux/spinlock.h>
17 #include <linux/hardirq.h>
18 #include <linux/uaccess.h>
19 #include <linux/module.h>
20 #include <linux/ftrace.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/list.h>
24 
25 #include <asm/cacheflush.h>
26 #include <asm/code-patching.h>
27 #include <asm/ftrace.h>
28 #include <asm/syscall.h>
29 #include <asm/inst.h>
30 
31 
32 #ifdef CONFIG_DYNAMIC_FTRACE
33 
34 /*
35  * We generally only have a single long_branch tramp and at most 2 or 3 plt
36  * tramps generated. But, we don't use the plt tramps currently. We also allot
37  * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
38  * tramps in total. Set aside 8 just to be sure.
39  */
40 #define	NUM_FTRACE_TRAMPS	8
41 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
42 
43 static ppc_inst_t
44 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
45 {
46 	ppc_inst_t op;
47 
48 	addr = ppc_function_entry((void *)addr);
49 
50 	/* if (link) set op to 'bl' else 'b' */
51 	create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);
52 
53 	return op;
54 }
55 
56 static int
57 ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
58 {
59 	ppc_inst_t replaced;
60 
61 	/*
62 	 * Note:
63 	 * We are paranoid about modifying text, as if a bug was to happen, it
64 	 * could cause us to read or write to someplace that could cause harm.
65 	 * Carefully read and modify the code with probe_kernel_*(), and make
66 	 * sure what we read is what we expected it to be before modifying it.
67 	 */
68 
69 	/* read the text we want to modify */
70 	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
71 		return -EFAULT;
72 
73 	/* Make sure it is what we expect it to be */
74 	if (!ppc_inst_equal(replaced, old)) {
75 		pr_err("%p: replaced (%s) != old (%s)",
76 		(void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
77 		return -EINVAL;
78 	}
79 
80 	/* replace the text with the new text */
81 	if (patch_instruction((u32 *)ip, new))
82 		return -EPERM;
83 
84 	return 0;
85 }
86 
87 /*
88  * Helper functions that are the same for both PPC64 and PPC32.
89  */
90 static int test_24bit_addr(unsigned long ip, unsigned long addr)
91 {
92 	ppc_inst_t op;
93 	addr = ppc_function_entry((void *)addr);
94 
95 	/* use the create_branch to verify that this offset can be branched */
96 	return create_branch(&op, (u32 *)ip, addr, 0) == 0;
97 }
98 
99 static int is_bl_op(ppc_inst_t op)
100 {
101 	return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
102 }
103 
104 static int is_b_op(ppc_inst_t op)
105 {
106 	return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
107 }
108 
109 static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
110 {
111 	int offset;
112 
113 	offset = (ppc_inst_val(op) & 0x03fffffc);
114 	/* make it signed */
115 	if (offset & 0x02000000)
116 		offset |= 0xfe000000;
117 
118 	return ip + (long)offset;
119 }
120 
121 #ifdef CONFIG_MODULES
122 #ifdef CONFIG_PPC64
/*
 * Turn a module's ftrace call site back into a no-op (PPC64 variant).
 *
 * The site is expected to hold a 'bl' to a module trampoline; we verify
 * the trampoline really resolves to @addr before patching. With
 * -mprofile-kernel the bl becomes a plain nop; otherwise it becomes
 * 'b +8' to skip the TOC-restore load (see comment below).
 *
 * Returns 0 on success, or -EFAULT/-EINVAL/-EPERM on failure.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Resolve the module trampoline to its final destination */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CONFIG_MPROFILE_KERNEL
	/* When using -mkernel_profile there is no load to jump over */
	pop = ppc_inst(PPC_RAW_NOP());

	/* Sanity-check the instruction preceding the bl */
	if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
	    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
		pr_err("Unexpected instruction %s around bl _mcount\n",
		       ppc_inst_as_str(op));
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */

	pop = ppc_inst(PPC_INST_BRANCH | 8);	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (!ppc_inst_equal(op,  ppc_inst(PPC_INST_LD_TOC))) {
		pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
		return -EINVAL;
	}
#endif /* CONFIG_MPROFILE_KERNEL */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
217 
218 #else /* !PPC64 */
/*
 * Turn a module's ftrace call site back into a no-op (PPC32 variant).
 *
 * Verifies the site still holds a 'bl' to a module trampoline targeting
 * @addr, then patches a nop over it. No TOC handling is needed on 32-bit.
 *
 * Returns 0 on success, or -EFAULT/-EINVAL/-EPERM on failure.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long tramp, ptr;

	/* read the current instruction at the call site */
	if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/* Find where the trampoline jumps to */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	if (ptr != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = ppc_inst(PPC_RAW_NOP());

	if (patch_instruction((u32 *)ip, op))
		return -EPERM;

	return 0;
}
258 #endif /* PPC64 */
259 #endif /* CONFIG_MODULES */
260 
261 static unsigned long find_ftrace_tramp(unsigned long ip)
262 {
263 	int i;
264 	ppc_inst_t instr;
265 
266 	/*
267 	 * We have the compiler generated long_branch tramps at the end
268 	 * and we prefer those
269 	 */
270 	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
271 		if (!ftrace_tramps[i])
272 			continue;
273 		else if (create_branch(&instr, (void *)ip,
274 				       ftrace_tramps[i], 0) == 0)
275 			return ftrace_tramps[i];
276 
277 	return 0;
278 }
279 
280 static int add_ftrace_tramp(unsigned long tramp)
281 {
282 	int i;
283 
284 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
285 		if (!ftrace_tramps[i]) {
286 			ftrace_tramps[i] = tramp;
287 			return 0;
288 		}
289 
290 	return -1;
291 }
292 
293 /*
294  * If this is a compiler generated long_branch trampoline (essentially, a
295  * trampoline that has a branch to _mcount()), we re-write the branch to
296  * instead go to ftrace_[regs_]caller() and note down the location of this
297  * trampoline.
298  */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;
	/* plt tramps we have rejected before; remembered to fail fast */
	static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			break;
		else if (ftrace_tramps[i] == tramp)
			return 0;

	/* Is this a known plt tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_plt_tramps[i])
			break;
		else if (ftrace_plt_tramps[i] == tramp)
			return -1;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	/* only _mcount trampolines may be re-targeted */
	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	ptr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	/* remember the tramp so future call sites can reuse it */
	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}
358 
/*
 * Nop out an ftrace call site located in core kernel text.
 *
 * The site's trampoline is converted (or verified) as a long-branch tramp
 * to ftrace_[regs_]caller first, so the site can later be re-enabled even
 * though we are about to nop it.
 *
 * Returns 0 on success, or -EFAULT/-EINVAL/-EPERM on failure.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
					(void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
397 
/*
 * ftrace arch hook: disable tracing of @rec by replacing its call to
 * @addr with a nop. Dispatches to the in-range, core-kernel, or module
 * variants depending on where the call site lives.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_nop_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
445 
446 #ifdef CONFIG_MODULES
447 #ifdef CONFIG_PPC64
448 /*
449  * Examine the existing instructions for __ftrace_make_call.
450  * They should effectively be a NOP, and follow formal constraints,
451  * depending on the ABI. Return false if they don't.
452  */
453 #ifndef CONFIG_MPROFILE_KERNEL
454 static int
455 expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
456 {
457 	/*
458 	 * We expect to see:
459 	 *
460 	 * b +8
461 	 * ld r2,XX(r1)
462 	 *
463 	 * The load offset is different depending on the ABI. For simplicity
464 	 * just mask it out when doing the compare.
465 	 */
466 	if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
467 	    (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
468 		return 0;
469 	return 1;
470 }
471 #else
472 static int
473 expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
474 {
475 	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
476 	if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
477 		return 0;
478 	return 1;
479 }
480 #endif
481 
/*
 * Enable tracing at a module call site (PPC64 variant): verify the site
 * currently holds the expected "nop" sequence for this ABI, pick the
 * module's ftrace trampoline (regs variant when FTRACE_FL_REGS is set),
 * confirm it resolves to @addr, and patch in a 'bl' to it.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op[2];
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(op, ip))
		return -EFAULT;

	if (copy_inst_from_kernel_nofault(op + 1, ip + 4))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %s %s\n",
		ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
541 
542 #else  /* !CONFIG_PPC64: */
/*
 * Enable tracing at a module call site (PPC32 variant): verify the site
 * currently holds a nop, then patch in a 'bl' to the module's ftrace
 * trampoline (regs variant when FTRACE_FL_REGS is set).
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int err;
	ppc_inst_t op;
	u32 *ip = (u32 *)rec->ip;
	struct module *mod = rec->arch.mod;
	unsigned long tramp;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, ip))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (!ppc_inst_equal(op,  ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;
	/* create the branch to the trampoline */
	err = create_branch(&op, ip, tramp, BRANCH_SET_LINK);
	if (err) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction(ip, op))
		return -EPERM;

	return 0;
}
592 #endif /* CONFIG_PPC64 */
593 #endif /* CONFIG_MODULES */
594 
/*
 * Enable tracing at a core-kernel call site: only branches to
 * ftrace_caller (or ftrace_regs_caller) are accepted, the site must
 * currently be a nop, and a reachable recorded trampoline must exist.
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry) {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);
		if (ptr != entry) {
#endif
			pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
			return -EINVAL;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		}
#endif
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* find a recorded trampoline within 24-bit branch range of the site */
	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}
641 
/*
 * ftrace arch hook: enable tracing of @rec by replacing the nop at its
 * call site with a call to @addr. Dispatches to the in-range,
 * core-kernel, or module variants depending on where the site lives.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_call_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
677 
678 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
679 #ifdef CONFIG_MODULES
/*
 * Redirect an already-enabled module call site from @old_addr to @addr.
 *
 * Verifies the site's current 'bl' (directly, or via its trampoline)
 * really targets @old_addr, then patches a branch to @addr — directly
 * when in range, otherwise through the module's [regs] trampoline.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	/* out of range: go through the module trampoline instead */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
766 #endif
767 
/*
 * ftrace arch hook: retarget an enabled call site from @old_addr to
 * @addr (used when switching between ftrace_caller variants).
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
807 #endif
808 
/*
 * ftrace arch hook: point the ftrace_call (and, with REGS support, the
 * ftrace_regs_call) patch site at the new tracer callback @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}
#endif

	return ret;
}
831 
/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine(). Text patching on powerpc is done with
 * patch_instruction(), which is safe against concurrent execution here.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
840 
841 #ifdef CONFIG_PPC64
842 #define PACATOC offsetof(struct paca_struct, kernel_toc)
843 
844 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
845 
/*
 * Populate the two built-in trampolines (after .text and .init.text)
 * with a stub that loads the address of ftrace_[regs_]caller relative to
 * the kernel TOC into r12 and branches there via CTR, then record them
 * in the trampoline table. Returns -1 if the target is out of TOC range.
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		0xe98d0000 | PACATOC,	/* ld      r12,PACATOC(r13)	*/
		0x3d8c0000,		/* addis   r12,r12,<high>	*/
		0x398c0000,		/* addi    r12,r12,<low>	*/
		0x7d8903a6,		/* mtctr   r12			*/
		0x4e800420,		/* bctr				*/
	};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	long reladdr = addr - kernel_toc_addr();

	/* the addis/addi pair can only span +/- 2GB from the TOC */
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		/* fill in the high/low halves of the TOC-relative offset */
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
879 #else
/* No trampoline setup is needed on 32-bit. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
884 #endif
885 #endif /* CONFIG_DYNAMIC_FTRACE */
886 
887 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
888 
889 extern void ftrace_graph_call(void);
890 extern void ftrace_graph_stub(void);
891 
892 static int ftrace_modify_ftrace_graph_caller(bool enable)
893 {
894 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
895 	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
896 	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
897 	ppc_inst_t old, new;
898 
899 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
900 		return 0;
901 
902 	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
903 	new = ftrace_call_replace(ip, enable ? addr : stub, 0);
904 
905 	return ftrace_modify_code(ip, old, new);
906 }
907 
/* Enable the function-graph caller hook. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}
912 
/* Disable the function-graph caller hook. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}
917 
918 /*
919  * Hook the return address and push it in the stack of return addrs
920  * in current thread info. Return the address we want to divert to.
921  */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	/* bail if graph tracing has been torn down */
	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* guard against recursing into ourselves from the tracer */
	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* on success, divert the return path to return_to_handler */
	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}
947 
948 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * DYNAMIC_FTRACE_WITH_ARGS entry: hook the saved link register so the
 * traced function returns through return_to_handler.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* gpr[1] is the traced function's stack pointer */
	fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
954 #else
/* Assembly-called wrapper around __prepare_ftrace_return(). */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp);
}
960 #endif
961 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
962 
963 #ifdef PPC64_ELF_ABI_v1
/*
 * ELFv1 text symbols carry a leading dot. Skip it when matching against
 * a dot-less pattern so user-supplied filter names still match; keep the
 * string untouched when the pattern itself starts with '.'.
 */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	int skip_dot = (str[0] == '.' && search[0] != '.');

	return skip_dot ? str + 1 : str;
}
971 #endif /* PPC64_ELF_ABI_v1 */
972